
Re: [PATCH 3/9] xen/ppc: Implement atomic.h


  • To: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Mon, 7 Aug 2023 18:13:31 +0200
  • Cc: Timothy Pearson <tpearson@xxxxxxxxxxxxxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
  • Delivery-date: Mon, 07 Aug 2023 16:13:45 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 03.08.2023 01:02, Shawn Anastasio wrote:
> Implement atomic.h for PPC, based off of the original Xen 3.2
> implementation.

Since that likely originally came from Linux, did you cross-check that
Linux hasn't gained any bug fixes in the meantime?

Other than this, just a couple of nits; I'm not really qualified to
review the inline assembly here in particular, I'm afraid.

> --- /dev/null
> +++ b/xen/arch/ppc/include/asm/atomic.h
> @@ -0,0 +1,387 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/*
> + * PowerPC64 atomic operations
> + *
> + * Copyright (C) 2001 Paul Mackerras <paulus@xxxxxxxxxx>, IBM
> + * Copyright (C) 2001 Anton Blanchard <anton@xxxxxxxxxx>, IBM
> + * Copyright Raptor Engineering LLC
> + *
> + * This program is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU General Public License
> + * as published by the Free Software Foundation; either version
> + * 2 of the License, or (at your option) any later version.
> + */
> +
> +#ifndef _ASM_PPC64_ATOMIC_H_
> +#define _ASM_PPC64_ATOMIC_H_

To fit the name, no "64" please.
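
I.e. presumably something like

    #ifndef _ASM_PPC_ATOMIC_H_
    #define _ASM_PPC_ATOMIC_H_

(the exact spelling is up to you, as long as the "64" is gone).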

> +#include <xen/atomic.h>
> +
> +#include <asm/memory.h>
> +#include <asm/system.h>
> +
> +static inline int atomic_read(const atomic_t *v)
> +{
> +    return *(volatile int *)&v->counter;
> +}
> +
> +static inline int _atomic_read(atomic_t v)
> +{
> +    return v.counter;
> +}
> +
> +static inline void atomic_set(atomic_t *v, int i)
> +{
> +    v->counter = i;
> +}
> +
> +static inline void _atomic_set(atomic_t *v, int i)
> +{
> +    v->counter = i;
> +}
> +
> +void __bad_atomic_read(const volatile void *p, void *res);
> +void __bad_atomic_size(void);
> +
> +#define build_atomic_read(name, insn, type)                                  \
> +    static inline type name(const volatile type *addr)                       \
> +    {                                                                         \
> +        type ret;                                                             \
> +        asm volatile ( insn "%U1%X1 %0,%1" : "=r"(ret) : "m<>"(*addr) );      \

As I think I had mentioned before, asm() constraints want a blank between
the closing quote and the open paren, i.e. like this:

        asm volatile ( insn "%U1%X1 %0,%1" : "=r" (ret) : "m<>" (*addr) );

> +#define read_atomic(p)                                                       \
> +    ({                                                                        \
> +        union {                                                               \
> +            typeof(*(p)) val;                                                 \
> +            char c[0];                                                        \
> +        } x_;                                                                 \
> +        read_atomic_size(p, x_.c, sizeof(*(p)));                              \
> +        x_.val;                                                               \
> +    })
> +
> +#define write_atomic(p, x)                                                   \
> +    do                                                                        \
> +    {                                                                         \
> +        typeof(*(p)) x_ = (x);                                                \
> +        write_atomic_size(p, &x_, sizeof(*(p)));                              \
> +    } while ( 0 )

Up to here you use underscore-suffixed locals, but then ...

> +#define add_sized(p, x)                                                      \
> +    ({                                                                        \
> +        typeof(*(p)) __x = (x);                                               \

... you have even two prefixing underscores here.
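
I.e. presumably, to stay consistent with the style further up,

        typeof(*(p)) x_ = (x);

with the uses in the switch() below adjusted accordingly.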

> +        switch ( sizeof(*(p)) )                                              \
> +        {                                                                     \
> +        case 1:                                                               \
> +            add_u8_sized((uint8_t *) (p), __x);                               \
> +            break;                                                            \
> +        case 2:                                                               \
> +            add_u16_sized((uint16_t *) (p), __x);                             \
> +            break;                                                            \
> +        case 4:                                                               \
> +            add_u32_sized((uint32_t *) (p), __x);                             \
> +            break;                                                            \
> +        default:                                                              \
> +            __bad_atomic_size();                                              \
> +            break;                                                            \
> +        }                                                                     \
> +    })
> +
> +static inline void atomic_add(int a, atomic_t *v)
> +{
> +    int t;
> +
> +    asm volatile ( "1: lwarx %0,0,%3\n"
> +                   "add %0,%2,%0\n"
> +                   "stwcx. %0,0,%3\n"
> +                   "bne- 1b"
> +                   : "=&r"(t), "=m"(v->counter)
> +                   : "r"(a), "r"(&v->counter), "m"(v->counter) : "cc" );

"+m" and then drop the last input?

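I.e. presumably (untested)

    asm volatile ( "1: lwarx %0,0,%3\n"
                   "add %0,%2,%0\n"
                   "stwcx. %0,0,%3\n"
                   "bne- 1b"
                   : "=&r" (t), "+m" (v->counter)
                   : "r" (a), "r" (&v->counter) : "cc" );

The operand numbers in the template can then stay as they are, since the
dropped input was never referenced there.
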
> +static inline int atomic_dec_if_positive(atomic_t *v)
> +{
> +    int t;
> +
> +    asm volatile(PPC_ATOMIC_ENTRY_BARRIER
> +                 "1:    lwarx    %0,0,%1        # atomic_dec_if_positive\n"
> +                 "addic.    %0,%0,-1\n"
> +                 "blt-    2f\n"
> +                 "stwcx.    %0,0,%1\n"
> +                 "bne-    1b\n"
> +                 PPC_ATOMIC_EXIT_BARRIER
> +                 "2:" : "=&r"(t)
> +                 : "r"(&v->counter) : "cc", "memory");

Missing blanks near the parentheses. Would also be nice if the padding
blanks actually vertically aligned the operands.
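
E.g. (untested, merely to illustrate the formatting)

    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
                   "1: lwarx  %0,0,%1    # atomic_dec_if_positive\n"
                   "   addic. %0,%0,-1\n"
                   "   blt-   2f\n"
                   "   stwcx. %0,0,%1\n"
                   "   bne-   1b\n"
                   PPC_ATOMIC_EXIT_BARRIER
                   "2:"
                   : "=&r" (t)
                   : "r" (&v->counter)
                   : "cc", "memory" );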

> +    return t;
> +}
> +
> +static inline atomic_t atomic_compareandswap(atomic_t old, atomic_t new,
> +                                             atomic_t *v)
> +{
> +    atomic_t rc;
> +    rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
> +    return rc;
> +}
> +
> +#define arch_cmpxchg(ptr, o, n)                                              \
> +    ({                                                                        \
> +        __typeof__(*(ptr)) _o_ = (o);                                         \
> +        __typeof__(*(ptr)) _n_ = (n);                                         \

Problematic leading underscores again.
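
I.e. presumably

        __typeof__(*(ptr)) o_ = (o);
        __typeof__(*(ptr)) n_ = (n);

or any other naming without the leading underscores.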

> +        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long) _o_,            \
> +                                       (unsigned long) _n_, sizeof(*(ptr)));  \
> +    })
> +
> +static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
> +{
> +    return arch_cmpxchg(&v->counter, old, new);
> +}
> +
> +#define ATOMIC_OP(op, insn, suffix, sign) \
> +    static inline void atomic_##op(int a, atomic_t *v)                        \
> +    {                                                                          \
> +        int t;                                                                 \
> +        asm volatile ( "1: lwarx %0,0,%3\n"                                    \
> +                       insn "%I2" suffix " %0,%0,%2\n"                         \
> +                       "stwcx. %0,0,%3 \n"                                     \
> +                       "bne- 1b\n"                                             \
> +                       : "=&r"(t), "+m"(v->counter)                            \
> +                       : "r" #sign(a), "r"(&v->counter)                        \
> +                       : "cc" );                                               \
> +    }
> +
> +ATOMIC_OP(and, "and", ".", K)
> +
> +static inline int atomic_sub_and_test(int i, atomic_t *v)
> +{
> +    return atomic_sub_return(i, v) == 0;
> +}
> +
> +static inline int atomic_inc_and_test(atomic_t *v)
> +{
> +    return atomic_add_return(1, v) == 0;
> +}
> +
> +static inline int atomic_dec_and_test(atomic_t *v)
> +{
> +    return atomic_sub_return(1, v) == 0;
> +}
> +
> +static inline int atomic_add_negative(int i, atomic_t *v)
> +{
> +    return atomic_add_return(i, v) < 0;
> +}
> +
> +static inline int __atomic_add_unless(atomic_t *v, int a, int u)
> +{
> +     int c, old;
> +
> +     c = atomic_read(v);
> +     while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)

Btw, no real need to parenthesize v in cases like this one. Otoh a needs
parenthesizing.

Jan



 

