83 |
ret; }) |
ret; }) |
84 |
#endif |
#endif |
85 |
|
|
86 |
/*
 * Compatibility shims for building against older kernels.  Every
 * definition below is guarded (by kernel version or by #ifndef) so that
 * kernels which already provide the symbol keep their native version.
 *
 * NOTE(review): reconstructed from a corrupted (interleaved/duplicated)
 * copy of this section; the macro bodies below match the fragments that
 * were visible — confirm against the original header in version control.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
/*
 * Pre-2.6 kernels lack the dependency-ordering barrier; a full read
 * barrier is a strictly stronger (if slower) substitute.
 */
#define smp_read_barrier_depends smp_rmb

#ifndef list_for_each
/* Iterate over a list; pos is the current &struct list_head. */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)
#endif

#ifndef list_for_each_entry
/* Iterate over a list of containing structures of type typeof(*pos). */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))
#endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */

#ifndef ACCESS_ONCE
/* Force exactly one access to x: the volatile cast stops the compiler
 * from caching, refetching, or tearing the load/store. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#endif

#ifndef rcu_dereference
/*
 * Fetch an RCU-protected pointer exactly once and order all later
 * dereferences after the load (matters only on Alpha; see the kernel's
 * RCU documentation).  The deliberately ugly local name avoids clashing
 * with identifiers in the caller's scope.
 */
#define rcu_dereference(p) ({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); /* see RCU */ \
				(_________p1); \
				})
#endif

#ifndef rcu_assign_pointer
/*
 * Publish v into the RCU-protected pointer p.  The write barrier makes
 * the initialization of *v visible to readers before the pointer itself;
 * it is skipped only when v is a compile-time NULL (nothing to order).
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); /* see RCU */ \
		(p) = (v); \
	})
#endif

#ifndef list_for_each_rcu
/* RCU-safe list walk: every ->next load goes through rcu_dereference. */
#define list_for_each_rcu(pos, head) \
	for (pos = rcu_dereference((head)->next); \
		prefetch(pos->next), pos != (head); \
		pos = rcu_dereference(pos->next))
#endif

#ifndef list_for_each_entry_rcu
/* RCU-safe walk over containing structures; must run inside an RCU
 * read-side critical section. */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), \
		member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry(rcu_dereference(pos->member.next), \
			typeof(*pos), member))
#endif
100 |
|
|
101 |
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) |
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) |
102 |
#define s_fs_info u.generic_sbp |
#define s_fs_info u.generic_sbp |