/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/io.h>

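/*
 * Number of lock acquisition attempts between yields of the cpu,
 * tunable via the "spin_retry=" kernel parameter handled below.
 */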
int spin_retry = 1000;

/*
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

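/*
 * Diagnose 0x44 asks the hypervisor to give up the remaining time
 * slice, so that another (virtual) cpu can run.
 */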
static inline void _raw_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

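/*
 * Diagnose 0x9c is a directed yield: hand the time slice to the
 * physical cpu that maps to the given logical cpu. Fall back to a
 * plain yield if diagnose 0x9c is not available.
 */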
static inline void _raw_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (__cpu_logical_map[cpu]));
	else
		_raw_yield();
}

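/*
 * Slow path of arch_spin_lock. The lock word holds the bitwise
 * complement of the owning cpu number, so that cpu 0 is distinct
 * from the unlocked value 0. After spin_retry failed attempts the
 * time slice is donated to the current owner.
 */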
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();

	while (1) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;
			if (owner != 0)
				_raw_yield_cpu(~owner);
			count = spin_retry;
		}
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

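/*
 * Interrupt-saving variant of arch_spin_lock_wait. Entered with
 * interrupts disabled and the pre-disable state in flags; spins with
 * interrupts re-enabled and disables them again before each
 * compare-and-swap attempt.
 */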
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;
			if (owner != 0)
				_raw_yield_cpu(~owner);
			count = spin_retry;
		}
		if (arch_spin_is_locked(lp))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

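/*
 * Slow path of arch_spin_trylock: retry the compare-and-swap up to
 * spin_retry times without yielding the cpu.
 */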
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

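/*
 * Donate the time slice to the cpu that holds the lock, if there is
 * one.
 */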
void arch_spin_relax(arch_spinlock_t *lock)
{
	unsigned int cpu = lock->owner_cpu;
	if (cpu != 0)
		_raw_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_spin_relax);

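/*
 * rwlock slow paths. Bit 31 of the lock word is the write bit, the
 * lower 31 bits count the readers: a reader increments the count, a
 * writer atomically replaces 0 with 0x80000000.
 */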
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

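/*
 * Interrupt-saving variant of _raw_read_lock_wait: spins with
 * interrupts enabled and disables them before the compare-and-swap.
 */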
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

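/*
 * Slow path of read_trylock: retry at most spin_retry times.
 */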
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

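/*
 * Wait until the lock is free of readers and writers, then grab it
 * with a single compare-and-swap of 0 -> 0x80000000.
 */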
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

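/*
 * Interrupt-saving variant of _raw_write_lock_wait.
 */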
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

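/*
 * Slow path of write_trylock: retry at most spin_retry times.
 */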
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);