[SPARC64]: Avoid membar instructions in delay slots.
In particular, avoid membar instructions in the delay slot of a jmpl instruction.

UltraSPARC-I, II, IIi, and IIe have a bug, documented in the UltraSPARC-IIi User's Manual, Appendix K, Erratum 51.

The long and short of it is that if the IMU unit misses on a branch or jmpl, and there is a store buffer synchronizing membar in the delay slot, the chip can stop fetching instructions. If interrupts are enabled or some other trap is enabled, the chip will unwedge itself, but performance will suffer.

We already had a workaround for this bug in a few spots, but it's better to have the entire tree sanitized for this rule.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 020f46a39e
commit b445e26cbf

15 changed files with 172 additions and 111 deletions
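The rewrite applied in every hunk below is mechanical: hoist the membar above the conditional branch and fill the delay slot with a plain nop. A minimal sketch of the before and after shapes, written as a hypothetical byte spinlock (example_lock and tmp are illustrative names, not a kernel interface; needs a sparc64 gcc target):

/*
 * Sketch only: a simplified spin-on-ldstub acquire, mirroring the
 * pattern this patch fixes throughout the tree.
 */
static inline void example_lock(unsigned char *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
	/*
	 * Buggy shape (Erratum 51): the store-buffer-synchronizing
	 * membar sits in the branch's delay slot:
	 *
	 *	1: ldstub  [%1], %0
	 *	   brnz,pn %0, 1b
	 *	    membar #StoreLoad | #StoreStore	! delay slot
	 *
	 * Fixed shape: hoist the membar above the branch and put a
	 * harmless nop in the delay slot instead:
	 */
"1:	ldstub		[%1], %0\n"	/* set lock byte, fetch old value */
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 1b\n"	/* old value nonzero: held, retry */
"	 nop\n"				/* delay slot kept membar-free */
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}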
@@ -55,8 +55,9 @@ static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	"add	%%g1, %1, %%g7\n\t"
 	"cas	[%2], %%g1, %%g7\n\t"
 	"cmp	%%g1, %%g7\n\t"
+	"membar	#StoreLoad | #StoreStore\n\t"
 	"bne,pn	%%icc, 1b\n\t"
-	" membar	#StoreLoad | #StoreStore\n\t"
+	" nop\n\t"
 	"mov	%%g7, %0\n\t"
 	: "=&r" (tmp)
 	: "0" (tmp), "r" (sem)
@@ -52,12 +52,14 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 
 	__asm__ __volatile__(
 "1:	ldstub		[%1], %0\n"
+"	membar		#StoreLoad | #StoreStore\n"
 "	brnz,pn		%0, 2f\n"
-"	 membar		#StoreLoad | #StoreStore\n"
+"	 nop\n"
 "	.subsection	2\n"
 "2:	ldub		[%1], %0\n"
+"	membar		#LoadLoad\n"
 "	brnz,pt		%0, 2b\n"
-"	 membar		#LoadLoad\n"
+"	 nop\n"
 "	ba,a,pt		%%xcc, 1b\n"
 "	.previous"
 	: "=&r" (tmp)
@@ -95,16 +97,18 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
 
 	__asm__ __volatile__(
 "1:	ldstub		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 membar		#StoreLoad | #StoreStore\n"
+"	membar		#StoreLoad | #StoreStore\n"
+"	brnz,pn		%0, 2f\n"
+"	 nop\n"
 "	.subsection	2\n"
 "2:	rdpr		%%pil, %1\n"
 "	wrpr		%3, %%pil\n"
 "3:	ldub		[%2], %0\n"
-"	brnz,pt		%0, 3b\n"
-"	 membar		#LoadLoad\n"
+"	membar		#LoadLoad\n"
+"	brnz,pt		%0, 3b\n"
+"	 nop\n"
 "	ba,pt		%%xcc, 1b\n"
 "	 wrpr		%1, %%pil\n"
 "	.previous"
 	: "=&r" (tmp1), "=&r" (tmp2)
 	: "r"(lock), "r"(flags)
@@ -162,12 +166,14 @@ static void inline __read_lock(rwlock_t *lock)
 "4:	add		%0, 1, %1\n"
 "	cas		[%2], %0, %1\n"
 "	cmp		%0, %1\n"
+"	membar		#StoreLoad | #StoreStore\n"
 "	bne,pn		%%icc, 1b\n"
-"	 membar		#StoreLoad | #StoreStore\n"
+"	 nop\n"
 "	.subsection	2\n"
 "2:	ldsw		[%2], %0\n"
+"	membar		#LoadLoad\n"
 "	brlz,pt		%0, 2b\n"
-"	 membar		#LoadLoad\n"
+"	 nop\n"
 "	ba,a,pt		%%xcc, 4b\n"
 "	.previous"
 	: "=&r" (tmp1), "=&r" (tmp2)
@@ -204,12 +210,14 @@ static void inline __write_lock(rwlock_t *lock)
 "4:	or		%0, %3, %1\n"
 "	cas		[%2], %0, %1\n"
 "	cmp		%0, %1\n"
+"	membar		#StoreLoad | #StoreStore\n"
 "	bne,pn		%%icc, 1b\n"
-"	 membar		#StoreLoad | #StoreStore\n"
+"	 nop\n"
 "	.subsection	2\n"
 "2:	lduw		[%2], %0\n"
+"	membar		#LoadLoad\n"
 "	brnz,pt		%0, 2b\n"
-"	 membar		#LoadLoad\n"
+"	 nop\n"
 "	ba,a,pt		%%xcc, 4b\n"
 "	.previous"
 	: "=&r" (tmp1), "=&r" (tmp2)
@@ -240,8 +248,9 @@ static int inline __write_trylock(rwlock_t *lock)
 "	or		%0, %4, %1\n"
 "	cas		[%3], %0, %1\n"
 "	cmp		%0, %1\n"
+"	membar		#StoreLoad | #StoreStore\n"
 "	bne,pn		%%icc, 1b\n"
-"	 membar		#StoreLoad | #StoreStore\n"
+"	 nop\n"
 "	mov		1, %2\n"
 "2:"
 	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
@@ -111,7 +111,6 @@ static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long
 			     "membar	#Sync"
 			     : /* No outputs */
 			     : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
-	__asm__ __volatile__ ("membar #Sync" : : : "memory");
 }
 
 /* The instruction cache lines are flushed with this, but note that
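This last hunk is the one straight deletion rather than a reordering: the removed statement issued a second membar #Sync immediately after an asm block whose final instruction is already membar #Sync, so it was redundant.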