/*
 * security/ccsecurity/compat.h
 *
 * Copyright (C) 2005-2010  NTT DATA CORPORATION
 *
 * Version: 1.8.0-pre   2010/08/01
 *
 * This file is applicable to both 2.4.30 and 2.6.11 and later.
 * See README.ccs for ChangeLog.
#define current_fsgid() (current->fsgid)
#endif

/* Stub out WARN_ON() on kernels that do not provide it. */
#ifndef WARN_ON
#define WARN_ON(x) do { } while (0)
#endif

/* DEFINE_SPINLOCK() is missing on older kernels; emulate it. */
#ifndef DEFINE_SPINLOCK
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#endif

/* KERN_CONT is unavailable here; make it an empty printk prefix.
 * NOTE(review): the #if guards matched by the first and last #endif
 * above/below are in lines elided from this chunk. */
#define KERN_CONT ""
#endif

/* To support PID namespace. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
#define find_task_by_pid find_task_by_vpid
#endif
/* Map the mutex API onto semaphores for kernels before 2.6.16. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
#define mutex semaphore
#define mutex_init(mutex) init_MUTEX(mutex)
#define mutex_lock(mutex) down(mutex)
#define mutex_unlock(mutex) up(mutex)
#define mutex_lock_interruptible(mutex) down_interruptible(mutex)
#define mutex_trylock(mutex) (!down_trylock(mutex))
#define DEFINE_MUTEX(mutexname) DECLARE_MUTEX(mutexname)
#endif
#endif

/* kzalloc() was not available before 2.6.14; emulate with kmalloc()+memset(). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
#define kzalloc(size, flags) ({					\
			void *ret = kmalloc((size), (flags));	\
			if (ret)				\
				memset(ret, 0, (size));		\
			ret; })
#endif
#endif

/* Fallback rcu_dereference() for kernels that lack the RCU accessors. */
#ifndef rcu_dereference
#define rcu_dereference(p) ({					\
			typeof(p) _________p1 = ACCESS_ONCE(p);	\
			smp_read_barrier_depends(); /* see RCU */ \
			(_________p1);				\
		})
#endif

/* Fallback rcu_assign_pointer() with the publish barrier. */
#ifndef rcu_assign_pointer
#define rcu_assign_pointer(p, v)			\
	({						\
		if (!__builtin_constant_p(v) ||		\
		    ((v) != NULL))			\
			smp_wmb(); /* see RCU */	\
		(p) = (v);				\
	})
#endif

/* Fallback RCU list iterators for kernels that lack them. */
#ifndef list_for_each_rcu
#define list_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference((head)->next);		\
	     prefetch(pos->next), pos != (head);		\
	     pos = rcu_dereference(pos->next))
#endif

#ifndef list_for_each_entry_rcu
#define list_for_each_entry_rcu(pos, head, member)			\
	for (pos = list_entry(rcu_dereference((head)->next),		\
			      typeof(*pos), member);			\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(rcu_dereference(pos->member.next),	\
			      typeof(*pos), member))
#endif

/* 2.6.34+ requires the srcu_struct argument for SRCU-protected lists;
 * redefine the iterator to pass this module's ccs_ss. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
#undef list_for_each_entry_rcu
#define list_for_each_entry_rcu(pos, head, member)			   \
	for (pos = list_entry(srcu_dereference((head)->next, &ccs_ss),	   \
			      typeof(*pos), member);			   \
	     prefetch(pos->member.next), &pos->member != (head);	   \
	     pos = list_entry(srcu_dereference(pos->member.next, &ccs_ss), \
			      typeof(*pos), member))
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
/* NOTE(review): original lines 127-157 are elided from this chunk;
 * the #endif matching the #if above is among them. */

/* ssleep() emulation for kernels before 2.4.30. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 30)
#undef ssleep
#define ssleep(secs) {						\
		set_current_state(TASK_UNINTERRUPTIBLE);	\
		schedule_timeout((HZ * secs) + 1);		\
	}
#endif

/* 2.4 kernels keep per-sb private data in u.generic_sbp. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define s_fs_info u.generic_sbp
#else
#include <linux/audit.h>
#ifdef AUDIT_APPARMOR_AUDIT
/* AppArmor patch adds "struct vfsmount" to VFS helper functions. */
#define HAVE_VFSMOUNT_IN_VFS_HELPER
#endif
#endif

/* RHEL 5 and Asianux 3 kernels removed i_blksize from struct inode. */
#if defined(RHEL_MAJOR) && RHEL_MAJOR == 5
#define HAVE_NO_I_BLKSIZE_IN_INODE
#elif defined(AX_MAJOR) && AX_MAJOR == 3
#define HAVE_NO_I_BLKSIZE_IN_INODE
#endif
170 |
|
|
171 |
#ifndef list_for_each_entry_safe |
#ifndef list_for_each_entry_safe |
180 |
#define sk_family family |
#define sk_family family |
181 |
#define sk_protocol protocol |
#define sk_protocol protocol |
182 |
#define sk_type type |
#define sk_type type |
|
#define sk_receive_queue receive_queue |
|
183 |
static inline struct socket *SOCKET_I(struct inode *inode) |
static inline struct socket *SOCKET_I(struct inode *inode) |
184 |
{ |
{ |
185 |
return inode->i_sock ? &inode->u.socket_i : NULL; |
return inode->i_sock ? &inode->u.socket_i : NULL; |

/* HIPQUAD() is not provided by 2.6.30+ kernels; define it here for
 * printing a host-order IPv4 address as dotted quad. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
#if defined(__LITTLE_ENDIAN)
#define HIPQUAD(addr)			\
	((unsigned char *)&addr)[3],	\
	((unsigned char *)&addr)[2],	\
	((unsigned char *)&addr)[1],	\
	((unsigned char *)&addr)[0]
#elif defined(__BIG_ENDIAN)
#define HIPQUAD NIPQUAD
#else
/* NOTE(review): the body of this #else branch (original line 199,
 * presumably an #error) is elided from this chunk. */
#endif /* __LITTLE_ENDIAN */
#endif

/* Kernels before 2.6.20 have no "struct path"; provide the pair of
 * pointers that later kernels bundle together. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
struct path {
	struct vfsmount *mnt;
	struct dentry *dentry;
};
#endif

/* path_get()/path_put() are missing before 2.6.25; emulate them with
 * the individual dentry/vfsmount reference helpers. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)

#include <linux/mount.h>

/* Take a reference on both the dentry and the vfsmount of @path. */
static inline void path_get(struct path *path)
{
	dget(path->dentry);
	mntget(path->mnt);
}

/* Drop the references taken by path_get(). */
static inline void path_put(struct path *path)
{
	dput(path->dentry);
	mntput(path->mnt);
}

#endif