3 |
* |
* |
4 |
* Copyright (C) 2005-2009 NTT DATA CORPORATION |
* Copyright (C) 2005-2009 NTT DATA CORPORATION |
5 |
* |
* |
6 |
* Version: 1.7.0-pre 2009/08/24 |
* Version: 1.7.0 2009/09/06 |
7 |
* |
* |
8 |
* This file is applicable to both 2.4.30 and 2.6.11 and later. |
* This file is applicable to both 2.4.30 and 2.6.11 and later. |
9 |
* See README.ccs for ChangeLog. |
* See README.ccs for ChangeLog. |
653 |
domain = ccs_find_domain(ee->tmp); |
domain = ccs_find_domain(ee->tmp); |
654 |
if (domain) |
if (domain) |
655 |
goto done; |
goto done; |
656 |
if (r->mode == CCS_MAC_MODE_ENFORCING) { |
if (r->mode == CCS_CONFIG_ENFORCING) { |
657 |
int error = ccs_supervisor(r, "# wants to create domain\n" |
int error = ccs_supervisor(r, "# wants to create domain\n" |
658 |
"%s\n", ee->tmp); |
"%s\n", ee->tmp); |
659 |
if (error == 1) |
if (error == 1) |
668 |
if (!domain) { |
if (!domain) { |
669 |
printk(KERN_WARNING "ERROR: Domain '%s' not defined.\n", |
printk(KERN_WARNING "ERROR: Domain '%s' not defined.\n", |
670 |
ee->tmp); |
ee->tmp); |
671 |
if (r->mode == CCS_MAC_MODE_ENFORCING) |
if (r->mode == CCS_CONFIG_ENFORCING) |
672 |
retval = -EPERM; |
retval = -EPERM; |
673 |
else { |
else { |
674 |
retval = 0; |
retval = 0; |
863 |
} |
} |
864 |
|
|
865 |
static LIST_HEAD(ccs_execve_list); |
static LIST_HEAD(ccs_execve_list); |
866 |
static DEFINE_SPINLOCK(ccs_execve_list_lock); |
DEFINE_SPINLOCK(ccs_execve_list_lock); |
867 |
|
unsigned int ccs_in_execve_counter; |
868 |
|
|
869 |
/** |
/** |
870 |
* ccs_allocate_execve_entry - Allocate memory for execve(). |
* ccs_allocate_execve_entry - Allocate memory for execve(). |
871 |
* |
* |
872 |
* Returns pointer to "struct ccs_execve_entry" on success, NULL otherwise. |
* Returns pointer to "struct ccs_execve_entry" on success, NULL otherwise. |
873 |
*/ |
*/ |
874 |
static struct ccs_execve_entry *ccs_allocate_execve_entry(void) |
static inline struct ccs_execve_entry *ccs_allocate_execve_entry(void) |
875 |
{ |
{ |
876 |
struct ccs_execve_entry *ee = kzalloc(sizeof(*ee), GFP_KERNEL); |
struct ccs_execve_entry *ee = kzalloc(sizeof(*ee), GFP_KERNEL); |
877 |
if (!ee) |
if (!ee) |
884 |
ee->reader_idx = ccs_read_lock(); |
ee->reader_idx = ccs_read_lock(); |
885 |
/* ee->dump->data is allocated by ccs_dump_page(). */ |
/* ee->dump->data is allocated by ccs_dump_page(). */ |
886 |
ee->task = current; |
ee->task = current; |
887 |
|
ee->previous_domain = ee->task->ccs_domain_info; |
888 |
spin_lock(&ccs_execve_list_lock); |
spin_lock(&ccs_execve_list_lock); |
889 |
|
ccs_in_execve_counter++; |
890 |
list_add(&ee->list, &ccs_execve_list); |
list_add(&ee->list, &ccs_execve_list); |
891 |
spin_unlock(&ccs_execve_list_lock); |
spin_unlock(&ccs_execve_list_lock); |
892 |
return ee; |
return ee; |
893 |
} |
} |
894 |
|
|
895 |
/** |
/** |
|
* ccs_find_execve_entry - Find ccs_execve_entry of current process. |
|
|
* |
|
|
* Returns pointer to "struct ccs_execve_entry" on success, NULL otherwise. |
|
|
*/ |
|
|
static struct ccs_execve_entry *ccs_find_execve_entry(void) |
|
|
{ |
|
|
struct task_struct *task = current; |
|
|
struct ccs_execve_entry *ee = NULL; |
|
|
struct ccs_execve_entry *p; |
|
|
spin_lock(&ccs_execve_list_lock); |
|
|
list_for_each_entry(p, &ccs_execve_list, list) { |
|
|
if (p->task != task) |
|
|
continue; |
|
|
ee = p; |
|
|
break; |
|
|
} |
|
|
spin_unlock(&ccs_execve_list_lock); |
|
|
return ee; |
|
|
} |
|
|
|
|
|
/** |
|
896 |
* ccs_free_execve_entry - Free memory for execve(). |
* ccs_free_execve_entry - Free memory for execve(). |
897 |
* |
* |
898 |
* @ee: Pointer to "struct ccs_execve_entry". |
* @ee: Pointer to "struct ccs_execve_entry". |
899 |
*/ |
*/ |
900 |
static void ccs_free_execve_entry(struct ccs_execve_entry *ee) |
static inline void ccs_free_execve_entry(struct ccs_execve_entry *ee) |
901 |
{ |
{ |
902 |
if (!ee) |
if (!ee) |
903 |
return; |
return; |
904 |
spin_lock(&ccs_execve_list_lock); |
spin_lock(&ccs_execve_list_lock); |
905 |
list_del(&ee->list); |
list_del(&ee->list); |
906 |
|
ccs_in_execve_counter--; |
907 |
spin_unlock(&ccs_execve_list_lock); |
spin_unlock(&ccs_execve_list_lock); |
908 |
kfree(ee->handler_path); |
kfree(ee->handler_path); |
909 |
kfree(ee->tmp); |
kfree(ee->tmp); |
1056 |
int len = ee->handler->total_len + 1; |
int len = ee->handler->total_len + 1; |
1057 |
char *cp = kmalloc(len, GFP_KERNEL); |
char *cp = kmalloc(len, GFP_KERNEL); |
1058 |
if (!cp) { |
if (!cp) { |
1059 |
retval = ENOMEM; |
retval = -ENOMEM; |
1060 |
goto out; |
goto out; |
1061 |
} |
} |
1062 |
ee->handler_path = cp; |
ee->handler_path = cp; |
1165 |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
1166 |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
1167 |
return false; |
return false; |
1168 |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR == 3 && defined(CONFIG_MMU) |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR >= 3 && defined(CONFIG_MMU) |
1169 |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
1170 |
return false; |
return false; |
1171 |
#else |
#else |
1186 |
/* Same with put_arg_page(page) in fs/exec.c */ |
/* Same with put_arg_page(page) in fs/exec.c */ |
1187 |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
1188 |
put_page(page); |
put_page(page); |
1189 |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR == 3 && defined(CONFIG_MMU) |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR >= 3 && defined(CONFIG_MMU) |
1190 |
put_page(page); |
put_page(page); |
1191 |
#endif |
#endif |
1192 |
return true; |
return true; |
1193 |
} |
} |
1194 |
|
|
1195 |
/** |
/** |
|
* ccs_fetch_next_domain - Fetch next_domain from the list. |
|
|
* |
|
|
* Returns pointer to "struct ccs_domain_info" which will be used if execve() |
|
|
* succeeds. This function does not return NULL. |
|
|
*/ |
|
|
struct ccs_domain_info *ccs_fetch_next_domain(void) |
|
|
{ |
|
|
struct ccs_execve_entry *ee = ccs_find_execve_entry(); |
|
|
struct ccs_domain_info *next_domain = NULL; |
|
|
if (ee) |
|
|
next_domain = ee->r.domain; |
|
|
if (!next_domain) |
|
|
next_domain = ccs_current_domain(); |
|
|
return next_domain; |
|
|
} |
|
|
|
|
|
/** |
|
1196 |
* ccs_start_execve - Prepare for execve() operation. |
* ccs_start_execve - Prepare for execve() operation. |
1197 |
* |
* |
1198 |
* @bprm: Pointer to "struct linux_binprm". |
* @bprm: Pointer to "struct linux_binprm". |
1233 |
ok: |
ok: |
1234 |
if (retval < 0) |
if (retval < 0) |
1235 |
goto out; |
goto out; |
1236 |
ee->r.mode = ccs_get_mode(ee->r.profile, CCS_MAC_ENVIRON); |
/* |
1237 |
|
* Proceed to the next domain in order to allow reaching via PID. |
1238 |
|
* It will be reverted if execve() failed. Reverting is not good. |
1239 |
|
* But it is better than being unable to reach via PID in interactive |
1240 |
|
* enforcing mode. |
1241 |
|
*/ |
1242 |
|
task->ccs_domain_info = ee->r.domain; |
1243 |
|
ee->r.mode = ccs_get_mode(ee->r.domain->profile, CCS_MAC_ENVIRON); |
1244 |
retval = ccs_environ(ee); |
retval = ccs_environ(ee); |
1245 |
if (retval < 0) |
if (retval < 0) |
1246 |
goto out; |
goto out; |
1262 |
void ccs_finish_execve(int retval) |
void ccs_finish_execve(int retval) |
1263 |
{ |
{ |
1264 |
struct task_struct *task = current; |
struct task_struct *task = current; |
1265 |
struct ccs_execve_entry *ee = ccs_find_execve_entry(); |
struct ccs_execve_entry *ee = NULL; |
1266 |
|
struct ccs_execve_entry *p; |
1267 |
task->ccs_flags &= ~CCS_CHECK_READ_FOR_OPEN_EXEC; |
task->ccs_flags &= ~CCS_CHECK_READ_FOR_OPEN_EXEC; |
1268 |
|
spin_lock(&ccs_execve_list_lock); |
1269 |
|
list_for_each_entry(p, &ccs_execve_list, list) { |
1270 |
|
if (p->task != task) |
1271 |
|
continue; |
1272 |
|
ee = p; |
1273 |
|
break; |
1274 |
|
} |
1275 |
|
spin_unlock(&ccs_execve_list_lock); |
1276 |
if (!ee) |
if (!ee) |
1277 |
return; |
return; |
1278 |
if (retval < 0) |
if (retval < 0) { |
1279 |
|
task->ccs_domain_info = ee->previous_domain; |
1280 |
goto out; |
goto out; |
1281 |
/* Proceed to next domain if execution succeeded. */
} |
|
task->ccs_domain_info = ee->r.domain; |
|
1282 |
/* Mark the current process as execute handler. */ |
/* Mark the current process as execute handler. */ |
1283 |
if (ee->handler) |
if (ee->handler) |
1284 |
task->ccs_flags |= CCS_TASK_IS_EXECUTE_HANDLER; |
task->ccs_flags |= CCS_TASK_IS_EXECUTE_HANDLER; |