/*
 * security/ccsecurity/compat.h
 *
 * Copyright (C) 2005-2010  NTT DATA CORPORATION
 *
 * Version: 1.8.0-pre   2010/09/01
 *
 * This file is applicable to both 2.4.30 and 2.6.11 and later.
 * See README.ccs for ChangeLog.
 *
 */

/* Boolean constants for kernels whose headers do not provide them. */
#define false 0
#define true 1

/*
 * "__user" is a sparse-only address-space annotation; make it a no-op
 * when the kernel headers do not define it.
 */
#ifndef __user
#define __user
#endif

/*
 * Credential accessors.  Before the credentials rework (circa 2.6.29)
 * the UID/GID values live directly in "struct task_struct"; newer
 * kernels already provide these helpers, so define them only when the
 * headers have not.
 */
#ifndef current_uid
#define current_uid() (current->uid)
#endif
#ifndef current_gid
#define current_gid() (current->gid)
#endif
#ifndef current_euid
#define current_euid() (current->euid)
#endif
#ifndef current_egid
#define current_egid() (current->egid)
#endif
#ifndef current_suid
#define current_suid() (current->suid)
#endif
#ifndef current_sgid
#define current_sgid() (current->sgid)
#endif
#ifndef current_fsuid
#define current_fsuid() (current->fsuid)
#endif
#ifndef current_fsgid
#define current_fsgid() (current->fsgid)
#endif

/*
 * DEFINE_SPINLOCK() is missing from older kernels; fall back to the
 * historical SPIN_LOCK_UNLOCKED static initializer.
 */
#ifndef DEFINE_SPINLOCK
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
/* <linux/types.h> gained "bool" in 2.6.19; map it to the C99 _Bool. */
#define bool _Bool
#endif

/*
 * KERN_CONT (printk continuation-line level) is absent from older
 * kernels; an empty string literal is a safe substitute because it
 * concatenates with the adjacent format string.
 */
#ifndef KERN_CONT
#define KERN_CONT ""
#endif

/*
 * Mutexes were introduced in 2.6.16.  On older kernels, emulate the
 * mutex API with semaphores used as binary semaphores (the historical
 * init_MUTEX()/down()/up() idiom).
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
#define mutex semaphore
#define mutex_init(mutex) init_MUTEX(mutex)
#define mutex_unlock(mutex) up(mutex)
#define mutex_lock(mutex) down(mutex)
#define mutex_lock_interruptible(mutex) down_interruptible(mutex)
/* down_trylock() returns non-zero on contention, hence the inversion. */
#define mutex_trylock(mutex) (!down_trylock(mutex))
#define DEFINE_MUTEX(mutexname) DECLARE_MUTEX(mutexname)
#endif

/*
 * container_of(): map a pointer to a member back to a pointer to the
 * enclosing structure, for kernels whose headers lack it.
 */
#ifndef container_of
#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) *__mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })
#endif

/*
 * kzalloc() appeared in 2.6.14: kmalloc() followed by zeroing.
 * NULL is propagated unchanged on allocation failure.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
#define kzalloc(size, flags) ({				\
	void *ret = kmalloc((size), (flags));		\
	if (ret)					\
		memset(ret, 0, (size));			\
	ret; })
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
/*
 * 2.4 kernels have no smp_read_barrier_depends(); a full read barrier
 * is a conservative (strictly stronger) substitute.
 */
#define smp_read_barrier_depends smp_rmb
#endif

/*
 * ACCESS_ONCE(): force exactly one load/store of x by accessing it
 * through a volatile-qualified lvalue, for kernels that lack it.
 */
#ifndef ACCESS_ONCE
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#endif

/*
 * rcu_dereference() fallback: fetch the pointer once, then issue a
 * data-dependency read barrier so dependent loads are ordered on
 * architectures (e.g. Alpha) that need it.
 */
#ifndef rcu_dereference
#define rcu_dereference(p) ({					\
	typeof(p) _________p1 = ACCESS_ONCE(p);			\
	smp_read_barrier_depends(); /* see RCU */		\
	(_________p1);						\
})
#endif

/*
 * rcu_assign_pointer() fallback: publish "v" into "p" with a write
 * barrier first, so readers that see the new pointer also see its
 * initialized contents.  The barrier is skipped only for a constant
 * NULL assignment, where nothing needs to be ordered.
 */
#ifndef rcu_assign_pointer
#define rcu_assign_pointer(p, v)			\
({							\
	if (!__builtin_constant_p(v) ||			\
	    ((v) != NULL))				\
		smp_wmb(); /* see RCU */		\
	(p) = (v);					\
})
#endif

/*
 * RCU-protected list iteration fallback for kernels whose headers do
 * not provide it.
 */
#ifndef list_for_each_entry_rcu
#define list_for_each_entry_rcu(pos, head, member)		     \
	for (pos = list_entry(rcu_dereference((head)->next),	     \
			      typeof(*pos), member);		     \
	     prefetch(pos->member.next), &pos->member != (head);     \
	     pos = list_entry(rcu_dereference(pos->member.next),     \
			      typeof(*pos), member))
#endif

/*
 * From 2.6.34 on, srcu_dereference() requires the srcu_struct that the
 * reader section was entered under; ccsecurity uses "ccs_ss" (declared
 * elsewhere in this module), so rebuild the iterator around it.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
#undef list_for_each_entry_rcu
#define list_for_each_entry_rcu(pos, head, member)		       \
	for (pos = list_entry(srcu_dereference((head)->next, &ccs_ss), \
			      typeof(*pos), member);		       \
	     prefetch(pos->member.next), &pos->member != (head);       \
	     pos = list_entry(srcu_dereference(pos->member.next,       \
					       &ccs_ss),	       \
			      typeof(*pos), member))
#endif

/*
 * RCU-aware list primitives are absent from 2.4 kernels; provide the
 * minimal subset this module needs.  Insertion publishes the new node
 * with rcu_assign_pointer() so concurrent readers never observe a
 * half-linked entry.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
static inline void __list_add_rcu(struct list_head *new,
				  struct list_head *prev,
				  struct list_head *next)
{
	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(prev->next, new);
	next->prev = new;
}

static inline void list_add_tail_rcu(struct list_head *new,
				     struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

#ifndef LIST_POISON2
#define LIST_POISON2 ((void *) 0x00200200)
#endif

/*
 * Unlink without poisoning ->next: readers traversing concurrently must
 * still be able to follow the removed entry's forward pointer.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}
#endif

/*
 * Kernels before 2.4.30 lack ssleep(); emulate it with an
 * uninterruptible schedule_timeout().  The argument is parenthesized so
 * expressions like ssleep(a + b) scale correctly, and the body uses
 * do { } while (0) so the macro is safe inside an unbraced if/else.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 30)
#undef ssleep
#define ssleep(secs) do {				\
	set_current_state(TASK_UNINTERRUPTIBLE);	\
	schedule_timeout((HZ * (secs)) + 1);		\
} while (0)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
/*
 * 2.4 keeps the filesystem-private pointer of "struct super_block"
 * inside a union member instead of a dedicated s_fs_info field.
 */
#define s_fs_info u.generic_sbp
#endif

/*
 * list_for_each_entry_safe() fallback: iterate a list while allowing
 * removal of the current entry ("n" caches the next entry ahead of
 * time).
 */
#ifndef list_for_each_entry_safe
#define list_for_each_entry_safe(pos, n, head, member)		     \
	for (pos = list_entry((head)->next, typeof(*pos), member),   \
	     n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);				     \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
/* 2.4 field names on "struct sock" lack the sk_ prefix. */
#define sk_family family
#define sk_protocol protocol
#define sk_type type
/*
 * SOCKET_I(): 2.4 stores the socket inside the inode union; return NULL
 * when the inode is not a socket inode (i_sock unset).
 */
static inline struct socket *SOCKET_I(struct inode *inode)
{
	return inode->i_sock ? &inode->u.socket_i : NULL;
}
#endif

/*
 * HIPQUAD() was removed from the kernel in 2.6.30; reintroduce it for
 * printing a host-byte-order IPv4 address as four dotted-quad printf
 * arguments ("%u.%u.%u.%u").
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
#if defined(__LITTLE_ENDIAN)
#define HIPQUAD(addr)				\
	((unsigned char *)&addr)[3],		\
	((unsigned char *)&addr)[2],		\
	((unsigned char *)&addr)[1],		\
	((unsigned char *)&addr)[0]
#elif defined(__BIG_ENDIAN)
/* Big-endian host order already matches network order. */
#define HIPQUAD NIPQUAD
#else
#error "Please fix asm/byteorder.h"
#endif /* __LITTLE_ENDIAN */
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/*
 * "struct path" (a vfsmount/dentry pair) does not exist before 2.6.20;
 * define the same layout so the rest of this module can use one type.
 */
struct path {
	struct vfsmount *mnt;
	struct dentry *dentry;
};
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)

#include <linux/mount.h>

/*
 * path_get()/path_put() were added in 2.6.25.  Each takes or drops one
 * reference on both the dentry and the vfsmount of the path.
 */
static inline void path_get(struct path *path)
{
	dget(path->dentry);
	mntget(path->mnt);
}

static inline void path_put(struct path *path)
{
	dput(path->dentry);
	mntput(path->mnt);
}

#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)

#include <linux/fs_struct.h>

/*
 * get_fs_root() was introduced after 2.6.35.  Snapshot the task's root
 * path under fs->lock and take a reference so the caller may keep using
 * it after the lock is dropped.  Before 2.6.25 "struct fs_struct"
 * stores the root as separate dentry/vfsmount fields rather than a
 * "struct path".
 */
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
	read_lock(&fs->lock);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	*root = fs->root;
	path_get(root);
#else
	root->dentry = dget(fs->root);
	root->mnt = mntget(fs->rootmnt);
#endif
	read_unlock(&fs->lock);
}

#endif