3 |
* |
* |
4 |
* Copyright (C) 2005-2009 NTT DATA CORPORATION |
* Copyright (C) 2005-2009 NTT DATA CORPORATION |
5 |
* |
* |
6 |
* Version: 1.7.0-pre 2009/08/24 |
* Version: 1.7.0 2009/09/06 |
7 |
* |
* |
8 |
* This file is applicable to both 2.4.30 and 2.6.11 and later. |
* This file is applicable to both 2.4.30 and 2.6.11 and later. |
9 |
* See README.ccs for ChangeLog. |
* See README.ccs for ChangeLog. |
22 |
#endif |
#endif |
23 |
#include "internal.h" |
#include "internal.h" |
24 |
|
|
|
/* For compatibility with older kernels. */ |
|
|
#ifndef for_each_process |
|
|
#define for_each_process for_each_task |
|
|
#endif |
|
|
|
|
25 |
/* Variable definitions. */
/* Variable definitions. */
26 |
|
|
27 |
/* The initial domain. */ |
/* The initial domain. */ |
648 |
domain = ccs_find_domain(ee->tmp); |
domain = ccs_find_domain(ee->tmp); |
649 |
if (domain) |
if (domain) |
650 |
goto done; |
goto done; |
651 |
if (r->mode == CCS_MAC_MODE_ENFORCING) { |
if (r->mode == CCS_CONFIG_ENFORCING) { |
652 |
int error = ccs_supervisor(r, "# wants to create domain\n" |
int error = ccs_supervisor(r, "# wants to create domain\n" |
653 |
"%s\n", ee->tmp); |
"%s\n", ee->tmp); |
654 |
if (error == 1) |
if (error == 1) |
663 |
if (!domain) { |
if (!domain) { |
664 |
printk(KERN_WARNING "ERROR: Domain '%s' not defined.\n", |
printk(KERN_WARNING "ERROR: Domain '%s' not defined.\n", |
665 |
ee->tmp); |
ee->tmp); |
666 |
if (r->mode == CCS_MAC_MODE_ENFORCING) |
if (r->mode == CCS_CONFIG_ENFORCING) |
667 |
retval = -EPERM; |
retval = -EPERM; |
668 |
else { |
else { |
669 |
retval = 0; |
retval = 0; |
857 |
return depth; |
return depth; |
858 |
} |
} |
859 |
|
|
|
/* List of "struct ccs_execve_entry" for execve() requests in progress. */
static LIST_HEAD(ccs_execve_list);
/* Lock protecting ccs_execve_list. */
static DEFINE_SPINLOCK(ccs_execve_list_lock);
|
|
|
|
|
/**
 * ccs_allocate_execve_entry - Allocate memory for execve().
 *
 * Returns pointer to "struct ccs_execve_entry" on success, NULL otherwise.
 *
 * The returned entry is linked into ccs_execve_list keyed by the current
 * task so that ccs_find_execve_entry() can locate it later; it is unlinked
 * and released by ccs_free_execve_entry().
 */
static struct ccs_execve_entry *ccs_allocate_execve_entry(void)
{
	struct ccs_execve_entry *ee = kzalloc(sizeof(*ee), GFP_KERNEL);
	if (!ee)
		return NULL;
	/* Scratch buffer used while building pathnames/domainnames. */
	ee->tmp = kzalloc(CCS_EXEC_TMPSIZE, GFP_KERNEL);
	if (!ee->tmp) {
		kfree(ee);
		return NULL;
	}
	/* Read lock is held until ccs_free_execve_entry() releases it. */
	ee->reader_idx = ccs_read_lock();
	/* ee->dump->data is allocated by ccs_dump_page(). */
	ee->task = current;
	spin_lock(&ccs_execve_list_lock);
	list_add(&ee->list, &ccs_execve_list);
	spin_unlock(&ccs_execve_list_lock);
	return ee;
}
|
|
|
|
|
/**
 * ccs_find_execve_entry - Find ccs_execve_entry of current process.
 *
 * Returns pointer to "struct ccs_execve_entry" on success, NULL otherwise.
 *
 * Scans ccs_execve_list under ccs_execve_list_lock for the entry whose
 * ->task field matches the current task.
 */
static struct ccs_execve_entry *ccs_find_execve_entry(void)
{
	struct task_struct *task = current;
	struct ccs_execve_entry *ee = NULL;
	struct ccs_execve_entry *p;
	spin_lock(&ccs_execve_list_lock);
	list_for_each_entry(p, &ccs_execve_list, list) {
		if (p->task != task)
			continue;
		/* Found the entry registered by ccs_allocate_execve_entry(). */
		ee = p;
		break;
	}
	spin_unlock(&ccs_execve_list_lock);
	return ee;
}
|
|
|
|
|
/**
 * ccs_free_execve_entry - Free memory for execve().
 *
 * @ee: Pointer to "struct ccs_execve_entry". NULL is a no-op.
 *
 * Unlinks @ee from ccs_execve_list, frees the buffers owned by it
 * (handler_path, tmp, dump.data), drops the read lock taken by
 * ccs_allocate_execve_entry(), then frees @ee itself.
 */
static void ccs_free_execve_entry(struct ccs_execve_entry *ee)
{
	if (!ee)
		return;
	/* Unlink first so no other path can find a half-freed entry. */
	spin_lock(&ccs_execve_list_lock);
	list_del(&ee->list);
	spin_unlock(&ccs_execve_list_lock);
	kfree(ee->handler_path);
	kfree(ee->tmp);
	kfree(ee->dump.data);
	/* Release the lock acquired in ccs_allocate_execve_entry(). */
	ccs_read_unlock(ee->reader_idx);
	kfree(ee);
}
|
|
|
|
860 |
/** |
/** |
861 |
* ccs_try_alt_exec - Try to start execute handler. |
* ccs_try_alt_exec - Try to start execute handler. |
862 |
* |
* |
910 |
struct task_struct *task = current; |
struct task_struct *task = current; |
911 |
|
|
912 |
/* Close the requested program's dentry. */ |
/* Close the requested program's dentry. */ |
913 |
|
ee->obj.path1.dentry = NULL; |
914 |
|
ee->obj.path1.mnt = NULL; |
915 |
|
ee->obj.validate_done = false; |
916 |
allow_write_access(bprm->file); |
allow_write_access(bprm->file); |
917 |
fput(bprm->file); |
fput(bprm->file); |
918 |
bprm->file = NULL; |
bprm->file = NULL; |
1004 |
int len = ee->handler->total_len + 1; |
int len = ee->handler->total_len + 1; |
1005 |
char *cp = kmalloc(len, GFP_KERNEL); |
char *cp = kmalloc(len, GFP_KERNEL); |
1006 |
if (!cp) { |
if (!cp) { |
1007 |
retval = ENOMEM; |
retval = -ENOMEM; |
1008 |
goto out; |
goto out; |
1009 |
} |
} |
1010 |
ee->handler_path = cp; |
ee->handler_path = cp; |
1039 |
retval = PTR_ERR(filp); |
retval = PTR_ERR(filp); |
1040 |
goto out; |
goto out; |
1041 |
} |
} |
1042 |
|
ee->obj.path1.dentry = filp->f_dentry; |
1043 |
|
ee->obj.path1.mnt = filp->f_vfsmnt; |
1044 |
bprm->file = filp; |
bprm->file = filp; |
1045 |
bprm->filename = ee->handler_path; |
bprm->filename = ee->handler_path; |
1046 |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0) |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0) |
1105 |
struct ccs_page_dump *dump) |
struct ccs_page_dump *dump) |
1106 |
{ |
{ |
1107 |
struct page *page; |
struct page *page; |
1108 |
/* dump->data is released by ccs_free_execve_entry(). */ |
/* dump->data is released by ccs_finish_execve(). */ |
1109 |
if (!dump->data) { |
if (!dump->data) { |
1110 |
dump->data = kzalloc(PAGE_SIZE, GFP_KERNEL); |
dump->data = kzalloc(PAGE_SIZE, GFP_KERNEL); |
1111 |
if (!dump->data) |
if (!dump->data) |
1115 |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
1116 |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
1117 |
return false; |
return false; |
1118 |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR == 3 && defined(CONFIG_MMU) |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR >= 3 && defined(CONFIG_MMU) |
1119 |
|
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
1120 |
|
return false; |
1121 |
|
#elif defined(AX_MAJOR) && AX_MAJOR == 3 && defined(AX_MINOR) && AX_MINOR >= 2 && defined(CONFIG_MMU) |
1122 |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) |
1123 |
return false; |
return false; |
1124 |
#else |
#else |
1139 |
/* Same with put_arg_page(page) in fs/exec.c */ |
/* Same with put_arg_page(page) in fs/exec.c */ |
1140 |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && defined(CONFIG_MMU) |
1141 |
put_page(page); |
put_page(page); |
1142 |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR == 3 && defined(CONFIG_MMU) |
#elif defined(RHEL_MAJOR) && RHEL_MAJOR == 5 && defined(RHEL_MINOR) && RHEL_MINOR >= 3 && defined(CONFIG_MMU) |
1143 |
|
put_page(page); |
1144 |
|
#elif defined(AX_MAJOR) && AX_MAJOR == 3 && defined(AX_MINOR) && AX_MINOR >= 2 && defined(CONFIG_MMU) |
1145 |
put_page(page); |
put_page(page); |
1146 |
#endif |
#endif |
1147 |
return true; |
return true; |
1148 |
} |
} |
1149 |
|
|
1150 |
/** |
/** |
|
* ccs_fetch_next_domain - Fetch next_domain from the list. |
|
|
* |
|
|
* Returns pointer to "struct ccs_domain_info" which will be used if execve() |
|
|
* succeeds. This function does not return NULL. |
|
|
*/ |
|
|
struct ccs_domain_info *ccs_fetch_next_domain(void) |
|
|
{ |
|
|
struct ccs_execve_entry *ee = ccs_find_execve_entry(); |
|
|
struct ccs_domain_info *next_domain = NULL; |
|
|
if (ee) |
|
|
next_domain = ee->r.domain; |
|
|
if (!next_domain) |
|
|
next_domain = ccs_current_domain(); |
|
|
return next_domain; |
|
|
} |
|
|
|
|
|
/** |
|
1151 |
* ccs_start_execve - Prepare for execve() operation. |
* ccs_start_execve - Prepare for execve() operation. |
1152 |
* |
* |
1153 |
* @bprm: Pointer to "struct linux_binprm". |
* @bprm: Pointer to "struct linux_binprm". |
1154 |
|
* @eep: Pointer to "struct ccs_execve_entry *". |
1155 |
* |
* |
1156 |
* Returns 0 on success, negative value otherwise. |
* Returns 0 on success, negative value otherwise. |
1157 |
*/ |
*/ |
1158 |
int ccs_start_execve(struct linux_binprm *bprm) |
int ccs_start_execve(struct linux_binprm *bprm, struct ccs_execve_entry **eep) |
1159 |
{ |
{ |
1160 |
int retval; |
int retval; |
1161 |
struct task_struct *task = current; |
struct task_struct *task = current; |
1162 |
struct ccs_execve_entry *ee = ccs_allocate_execve_entry(); |
struct ccs_execve_entry *ee; |
1163 |
|
*eep = NULL; |
1164 |
if (!ccs_policy_loaded) |
if (!ccs_policy_loaded) |
1165 |
ccs_load_policy(bprm->filename); |
ccs_load_policy(bprm->filename); |
1166 |
|
ee = kzalloc(sizeof(*ee), GFP_KERNEL); |
1167 |
if (!ee) |
if (!ee) |
1168 |
return -ENOMEM; |
return -ENOMEM; |
1169 |
|
ee->tmp = kzalloc(CCS_EXEC_TMPSIZE, GFP_KERNEL); |
1170 |
|
if (!ee->tmp) { |
1171 |
|
kfree(ee); |
1172 |
|
return -ENOMEM; |
1173 |
|
} |
1174 |
|
ee->reader_idx = ccs_read_lock(); |
1175 |
|
/* ee->dump->data is allocated by ccs_dump_page(). */ |
1176 |
|
ee->previous_domain = task->ccs_domain_info; |
1177 |
|
/* Clear manager flag. */ |
1178 |
|
task->ccs_flags &= ~CCS_TASK_IS_POLICY_MANAGER; |
1179 |
|
/* Tell GC that I started execve(). */ |
1180 |
|
task->ccs_flags |= CCS_TASK_IS_IN_EXECVE; |
1181 |
|
/* |
1182 |
|
* Make task->ccs_flags visible to GC before changing |
1183 |
|
* task->ccs_domain_info . |
1184 |
|
*/ |
1185 |
|
smp_mb(); |
1186 |
|
*eep = ee; |
1187 |
ccs_init_request_info(&ee->r, NULL, CCS_MAC_FILE_EXECUTE); |
ccs_init_request_info(&ee->r, NULL, CCS_MAC_FILE_EXECUTE); |
1188 |
ee->r.ee = ee; |
ee->r.ee = ee; |
1189 |
ee->bprm = bprm; |
ee->bprm = bprm; |
1190 |
ee->r.obj = &ee->obj; |
ee->r.obj = &ee->obj; |
1191 |
ee->obj.path1.dentry = bprm->file->f_dentry; |
ee->obj.path1.dentry = bprm->file->f_dentry; |
1192 |
ee->obj.path1.mnt = bprm->file->f_vfsmnt; |
ee->obj.path1.mnt = bprm->file->f_vfsmnt; |
|
/* Clear manager flag. */ |
|
|
task->ccs_flags &= ~CCS_TASK_IS_POLICY_MANAGER; |
|
1193 |
if (ccs_find_execute_handler(ee, CCS_TYPE_EXECUTE_HANDLER)) { |
if (ccs_find_execute_handler(ee, CCS_TYPE_EXECUTE_HANDLER)) { |
1194 |
retval = ccs_try_alt_exec(ee); |
retval = ccs_try_alt_exec(ee); |
1195 |
if (!retval) |
if (!retval) |
1207 |
ok: |
ok: |
1208 |
if (retval < 0) |
if (retval < 0) |
1209 |
goto out; |
goto out; |
1210 |
ee->r.mode = ccs_get_mode(ee->r.profile, CCS_MAC_ENVIRON); |
/* |
1211 |
|
* Proceed to the next domain in order to allow reaching via PID. |
1212 |
|
* It will be reverted if execve() failed. Reverting is not good. |
1213 |
|
* But it is better than being unable to reach via PID in interactive |
1214 |
|
* enforcing mode. |
1215 |
|
*/ |
1216 |
|
task->ccs_domain_info = ee->r.domain; |
1217 |
|
ee->r.mode = ccs_get_mode(ee->r.domain->profile, CCS_MAC_ENVIRON); |
1218 |
retval = ccs_environ(ee); |
retval = ccs_environ(ee); |
1219 |
if (retval < 0) |
if (retval < 0) |
1220 |
goto out; |
goto out; |
|
task->ccs_flags |= CCS_CHECK_READ_FOR_OPEN_EXEC; |
|
1221 |
retval = 0; |
retval = 0; |
1222 |
out: |
out: |
|
if (retval) |
|
|
ccs_finish_execve(retval); |
|
1223 |
return retval; |
return retval; |
1224 |
} |
} |
1225 |
|
|
1227 |
* ccs_finish_execve - Clean up execve() operation. |
* ccs_finish_execve - Clean up execve() operation. |
1228 |
* |
* |
1229 |
* @retval: Return code of an execve() operation. |
* @retval: Return code of an execve() operation. |
1230 |
|
* @ee: Pointer to "struct ccs_execve_entry". |
1231 |
* |
* |
1232 |
* Caller holds ccs_read_lock(). |
* Caller holds ccs_read_lock(). |
1233 |
*/ |
*/ |
1234 |
void ccs_finish_execve(int retval) |
void ccs_finish_execve(int retval, struct ccs_execve_entry *ee) |
1235 |
{ |
{ |
1236 |
struct task_struct *task = current; |
struct task_struct *task = current; |
|
struct ccs_execve_entry *ee = ccs_find_execve_entry(); |
|
|
task->ccs_flags &= ~CCS_CHECK_READ_FOR_OPEN_EXEC; |
|
1237 |
if (!ee) |
if (!ee) |
1238 |
return; |
return; |
1239 |
if (retval < 0) |
if (retval < 0) { |
1240 |
goto out; |
task->ccs_domain_info = ee->previous_domain; |
1241 |
/* Proceed to next domain if execution succeeded. */
/* |
1242 |
task->ccs_domain_info = ee->r.domain; |
* Make task->ccs_domain_info visible to GC before changing |
1243 |
/* Mark the current process as execute handler. */ |
* task->ccs_flags . |
1244 |
if (ee->handler) |
*/ |
1245 |
task->ccs_flags |= CCS_TASK_IS_EXECUTE_HANDLER; |
smp_mb(); |
1246 |
/* Mark the current process as normal process. */ |
} else { |
1247 |
else |
/* Mark the current process as execute handler. */ |
1248 |
task->ccs_flags &= ~CCS_TASK_IS_EXECUTE_HANDLER; |
if (ee->handler) |
1249 |
out: |
task->ccs_flags |= CCS_TASK_IS_EXECUTE_HANDLER; |
1250 |
ccs_free_execve_entry(ee); |
/* Mark the current process as normal process. */ |
1251 |
|
else |
1252 |
|
task->ccs_flags &= ~CCS_TASK_IS_EXECUTE_HANDLER; |
1253 |
|
} |
1254 |
|
/* Tell GC that I finished execve(). */ |
1255 |
|
task->ccs_flags &= ~CCS_TASK_IS_IN_EXECVE; |
1256 |
|
ccs_read_unlock(ee->reader_idx); |
1257 |
|
kfree(ee->handler_path); |
1258 |
|
kfree(ee->tmp); |
1259 |
|
kfree(ee->dump.data); |
1260 |
|
kfree(ee); |
1261 |
} |
} |