[TOMOYO #4 03/13] Memory and pathname management functions.

From: Tetsuo Handa
Date: Thu Oct 11 2007 - 09:27:59 EST


Basic functions to get canonicalized absolute pathnames
for TOMOYO Linux. Even if the requested pathname is symlink()ed
or chroot()ed, TOMOYO Linux uses the original (canonicalized) pathname.
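
For illustration only (not part of the patch), the helpers added here are
meant to be used roughly as follows. tmy_demo() is a hypothetical caller,
the prototypes come from realpath.h in this series, and error handling is
kept minimal:

  /*
   * Illustrative sketch: tmy_realpath() returns a tmy_alloc()ed buffer
   * that must be released with tmy_free(), while tmy_save_name() returns
   * a shared "struct path_info" token that must never be modified or
   * kfree()d.
   */
  static void tmy_demo(const char *pathname)
  {
          const struct path_info *saved;
          char *resolved = tmy_realpath(pathname);

          if (!resolved)
                  return;
          printk(KERN_DEBUG "canonical path: %s\n", resolved);
          saved = tmy_save_name(resolved); /* shared copy, never freed */
          if (saved)
                  printk(KERN_DEBUG "saved token: %s\n", saved->name);
          tmy_free(resolved);
  }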

Signed-off-by: Kentaro Takeda <takedakn@xxxxxxxxxxxxx>
Signed-off-by: Tetsuo Handa <penguin-kernel@xxxxxxxxxxxxxxxxxxx>
---
security/tomoyo/realpath.c | 713 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 713 insertions(+)

--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/security/tomoyo/realpath.c 2007-10-11 15:53:12.000000000 +0900
@@ -0,0 +1,713 @@
+/*
+ * security/tomoyo/realpath.c
+ *
+ * Get the canonicalized absolute pathnames.
+ * The basis for TOMOYO Linux.
+ */
+
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/utime.h>
+#include <linux/file.h>
+#include <linux/smp_lock.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/atomic.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#include "realpath.h"
+#include "tomoyo.h"
+
+/***** realpath handler *****/
+
+static int tmy_print_ascii(const char *sp, const char *cp,
+ int *buflen0, char **end0)
+{
+ int buflen = *buflen0;
+ char *end = *end0;
+
+ while (sp <= cp) {
+ unsigned char c;
+
+ c = *(unsigned char *) cp;
+ if (c == '\\') {
+ buflen -= 2;
+ if (buflen < 0)
+ goto out;
+ *--end = '\\';
+ *--end = '\\';
+ } else if (c > ' ' && c < 127) {
+ if (--buflen < 0)
+ goto out;
+ *--end = (char) c;
+ } else {
+ buflen -= 4;
+ if (buflen < 0)
+ goto out;
+ *--end = (c & 7) + '0';
+ *--end = ((c >> 3) & 7) + '0';
+ *--end = (c >> 6) + '0';
+ *--end = '\\';
+ }
+ cp--;
+ }
+
+ *buflen0 = buflen;
+ *end0 = end;
+
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * tmy_get_absolute_path - return the realpath of a dentry.
+ * @dentry: pointer to "struct dentry".
+ * @vfsmnt: pointer to "struct vfsmount" to which the @dentry belongs.
+ * @buffer: buffer to save the result.
+ * @buflen: size of @buffer.
+ *
+ * Returns zero on success.
+ * Returns -ENOMEM on failure.
+ *
+ * Caller holds the dcache_lock.
+ * Based on __d_path() in fs/dcache.c
+ *
+ * Unlike d_path(), this function traverses up to the root directory of
+ * the process's namespace.
+ *
+ * If @dentry is a directory, a trailing '/' is appended.
+ * Characters outside the range ' ' < c < 127 are converted to \ooo style
+ * octal strings, and the character \ is converted to \\.
+ */
+static int tmy_get_absolute_path(struct dentry *dentry,
+ struct vfsmount *vfsmnt,
+ char *buffer,
+ int buflen)
+{
+ char *start = buffer;
+ char *end = buffer + buflen;
+ u8 is_dir = (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode));
+ const char *sp;
+ const char *cp;
+
+ if (buflen < 256)
+ goto out;
+
+ *--end = '\0';
+ buflen--;
+
+ while (1) {
+ struct dentry *parent;
+
+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+ /* Global root? */
+ spin_lock(&vfsmount_lock);
+ if (vfsmnt->mnt_parent == vfsmnt) {
+ spin_unlock(&vfsmount_lock);
+ break;
+ }
+ dentry = vfsmnt->mnt_mountpoint;
+ vfsmnt = vfsmnt->mnt_parent;
+ spin_unlock(&vfsmount_lock);
+ continue;
+ }
+
+ if (is_dir) {
+ is_dir = 0;
+ *--end = '/';
+ buflen--;
+ }
+
+ parent = dentry->d_parent;
+ sp = dentry->d_name.name;
+ cp = sp + dentry->d_name.len - 1;
+
+ /* Exception: Use /proc/self/ rather than */
+ /* /proc/\$/ for the current process. */
+ if (IS_ROOT(parent) &&
+ *sp > '0' && *sp <= '9' && parent->d_sb &&
+ parent->d_sb->s_magic == PROC_SUPER_MAGIC) {
+
+ char *ep;
+ const pid_t pid = (pid_t) simple_strtoul(sp, &ep, 10);
+
+ if (!*ep && pid == current->tgid) {
+ sp = "self";
+ cp = sp + 3;
+ }
+
+ }
+
+ if (tmy_print_ascii(sp, cp, &buflen, &end))
+ goto out;
+
+ if (--buflen < 0)
+ goto out;
+ *--end = '/';
+
+ dentry = parent;
+ }
+ if (*end == '/') {
+ buflen++;
+ end++;
+ }
+
+ sp = dentry->d_name.name;
+ cp = sp + dentry->d_name.len - 1;
+
+ if (tmy_print_ascii(sp, cp, &buflen, &end))
+ goto out;
+
+ /* Move the pathname to the top of the buffer. */
+ memmove(start, end, strlen(end) + 1);
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * tmy_realpath_dentry2 - return the realpath of a dentry.
+ * @dentry: pointer to "struct dentry".
+ * @mnt: pointer to "struct vfsmount" to which the @dentry belongs.
+ * @newname: buffer to save the result.
+ * @newname_len: size of @newname.
+ *
+ * Returns zero on success.
+ * Returns nonzero on failure.
+ */
+int tmy_realpath_dentry2(struct dentry *dentry,
+ struct vfsmount *mnt,
+ char *newname,
+ int newname_len)
+{
+ int error;
+ struct dentry *d_dentry;
+ struct vfsmount *d_mnt;
+
+ if (!dentry || !mnt || !newname || newname_len <= 0)
+ return -EINVAL;
+
+ d_dentry = dget(dentry);
+ d_mnt = mntget(mnt);
+
+ /***** CRITICAL SECTION START *****/
+ spin_lock(&dcache_lock);
+ error = tmy_get_absolute_path(d_dentry, d_mnt, newname, newname_len);
+ spin_unlock(&dcache_lock);
+ /***** CRITICAL SECTION END *****/
+
+ dput(d_dentry);
+ mntput(d_mnt);
+ return error;
+}
+
+/**
+ * tmy_realpath_dentry - return the realpath of a dentry.
+ * @dentry: pointer to "struct dentry".
+ * @mnt: pointer to "struct vfsmount" to which the @dentry belongs.
+ *
+ * Returns realpath(3) of the @dentry on success.
+ * Returns NULL on failure.
+ *
+ * This function uses tmy_alloc(), so the caller must call tmy_free()
+ * on the result if this function didn't return NULL.
+ */
+char *tmy_realpath_dentry(struct dentry *dentry, struct vfsmount *mnt)
+{
+ char *buf = tmy_alloc(TMY_MAX_PATHNAME_LEN);
+
+ if (buf &&
+ tmy_realpath_dentry2(dentry, mnt, buf,
+ TMY_MAX_PATHNAME_LEN - 1) == 0)
+ return buf;
+
+ tmy_free(buf);
+ return NULL;
+}
+
+/**
+ * tmy_realpath - return the realpath of a pathname.
+ * @pathname: pathname to report.
+ *
+ * Returns realpath(3) of the @pathname on success.
+ * Returns NULL on failure.
+ *
+ * This function uses tmy_alloc(), so the caller must call tmy_free()
+ * on the result if this function didn't return NULL.
+ */
+char *tmy_realpath(const char *pathname)
+{
+ struct nameidata nd;
+
+ if (pathname && path_lookup(pathname, LOOKUP_FOLLOW, &nd) == 0) {
+ char *buf = tmy_realpath_dentry(nd.dentry, nd.mnt);
+
+ path_release(&nd);
+ return buf;
+ }
+
+ return NULL;
+}
+
+/**
+ * tmy_realpath_nofollow - return the realpath of a pathname.
+ * @pathname: pathname to report.
+ *
+ * Returns realpath(3) of the @pathname on success.
+ * Returns NULL on failure.
+ *
+ * Unlike tmy_realpath(), this function doesn't follow @pathname if it is
+ * a symbolic link.
+ *
+ * This function uses tmy_alloc(), so the caller must call tmy_free()
+ * on the result if this function didn't return NULL.
+ */
+char *tmy_realpath_nofollow(const char *pathname)
+{
+ struct nameidata nd;
+
+ if (pathname && path_lookup(pathname, 0, &nd) == 0) {
+ char *buf = tmy_realpath_dentry(nd.dentry, nd.mnt);
+
+ path_release(&nd);
+ return buf;
+ }
+
+ return NULL;
+}
+
+/* tmy_get_absolute_path() for "struct ctl_table". */
+static int tmy_sysctl_path(struct ctl_table *table, char *buffer, int buflen)
+{
+ char *end = buffer + buflen;
+
+ if (buflen < 256)
+ goto out;
+
+ *--end = '\0';
+ buflen--;
+
+ buflen -= 9; /* for "/proc/sys" prefix */
+
+ while (table) {
+ char buf[32];
+ const char *sp = table->procname;
+ const char *cp;
+
+ if (!sp) {
+ memset(buf, 0, sizeof(buf));
+ snprintf(buf, sizeof(buf) - 1, "=%d=", table->ctl_name);
+ sp = buf;
+ }
+ cp = strchr(sp, '\0') - 1;
+
+ if (tmy_print_ascii(sp, cp, &buflen, &end))
+ goto out;
+
+ if (--buflen < 0)
+ goto out;
+
+ *--end = '/';
+ table = table->parent;
+ }
+
+ /* Move the pathname to the top of the buffer. */
+ memmove(buffer, "/proc/sys", 9);
+ memmove(buffer + 9, end, strlen(end) + 1);
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * sysctlpath_from_table - return the realpath of a ctl_table.
+ * @table: pointer to "struct ctl_table".
+ *
+ * Returns realpath(3) of the @table on success.
+ * Returns NULL on failure.
+ *
+ * This function uses tmy_alloc(), so the caller must call tmy_free()
+ * on the result if this function didn't return NULL.
+ */
+char *sysctlpath_from_table(struct ctl_table *table)
+{
+ char *buf = tmy_alloc(TMY_MAX_PATHNAME_LEN);
+
+ if (buf && tmy_sysctl_path(table, buf, TMY_MAX_PATHNAME_LEN - 1) == 0)
+ return buf;
+
+ tmy_free(buf);
+ return NULL;
+}
+
+/***** Private memory allocator. *****/
+
+/*
+ * Round up an integer so that the returned pointers are appropriately aligned.
+ * FIXME: Are there additional alignment requirements for assigning values
+ * atomically?
+ */
+static inline unsigned int tmy_roundup(const unsigned int size)
+{
+ if (sizeof(void *) >= sizeof(long))
+ return ((size + sizeof(void *) - 1) / sizeof(void *))
+ * sizeof(void *);
+
+ return ((size + sizeof(long) - 1) / sizeof(long)) * sizeof(long);
+}
+
+static unsigned int allocated_memory_for_elements;
+
+/**
+ * tmy_get_memory_used_for_elements - get memory usage for private data.
+ *
+ * Returns size of memory allocated for keeping individual ACL elements.
+ */
+unsigned int tmy_get_memory_used_for_elements(void)
+{
+ return allocated_memory_for_elements;
+}
+
+/**
+ * tmy_alloc_element - allocate memory for structures.
+ * @size: requested size in bytes.
+ *
+ * Returns '\0'-initialized memory region on success.
+ * Returns NULL on failure.
+ *
+ * This function allocates memory for keeping ACL entries.
+ * The memory is carved from larger chunks, so NEVER try to kfree() the returned pointer.
+ */
+void *tmy_alloc_element(const unsigned int size)
+{
+ static DEFINE_MUTEX(lock);
+ static char *buf;
+ static unsigned int buf_used_len = PAGE_SIZE;
+ char *ptr = NULL;
+ const unsigned int word_aligned_size = tmy_roundup(size);
+
+ if (word_aligned_size > PAGE_SIZE)
+ return NULL;
+
+ mutex_lock(&lock);
+
+ if (buf_used_len + word_aligned_size > PAGE_SIZE) {
+ ptr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (!ptr) {
+ printk(KERN_INFO "ERROR: "
+ "Out of memory for tmy_alloc_element().\n");
+ if (!sbin_init_started)
+ panic("MAC Initialization failed.\n");
+ } else {
+ buf = ptr;
+ allocated_memory_for_elements += PAGE_SIZE;
+ buf_used_len = word_aligned_size;
+ ptr = buf;
+ }
+
+ } else if (word_aligned_size) {
+ unsigned int i;
+
+ ptr = buf + buf_used_len;
+ buf_used_len += word_aligned_size;
+
+ for (i = 0; i < word_aligned_size; i++) {
+ if (ptr[i]) { /* This must not happen! */
+ printk(KERN_ERR
+ "WARNING: Reserved memory was tainted! "
+ "The system might go wrong.\n");
+ ptr[i] = '\0';
+ }
+ }
+
+ }
+
+ mutex_unlock(&lock);
+ return ptr;
+}
+
+/***** Shared memory allocator. *****/
+
+static unsigned int allocated_memory_for_savename;
+
+/**
+ * tmy_get_memory_used_for_save_name - get memory usage for shared data.
+ *
+ * Returns size of memory allocated for keeping string tokens.
+ */
+unsigned int tmy_get_memory_used_for_save_name(void)
+{
+ return allocated_memory_for_savename;
+}
+
+#define MAX_HASH 256
+
+/* List of tokens. */
+struct name_entry {
+ struct list_head list;
+ struct path_info entry;
+};
+
+/* List of free memory. */
+struct free_memory_block_list {
+ struct list_head list;
+ char *ptr; /* Pointer to a free area. */
+ int len; /* Length of the area. */
+};
+
+/**
+ * tmy_save_name - keep the given token in memory.
+ * @name: the string token to remember.
+ *
+ * Returns pointer to memory region on success.
+ * Returns NULL on failure.
+ *
+ * This function allocates memory for keeping any string data.
+ * The memory is shared, so NEVER try to modify or kfree() the returned name.
+ */
+const struct path_info *tmy_save_name(const char *name)
+{
+ static LIST_HEAD(fmb_list);
+ struct free_memory_block_list *fmb;
+ static struct name_entry name_list[MAX_HASH]; /* The list of names. */
+ struct name_entry *ptr;
+ static DEFINE_MUTEX(mutex);
+ unsigned int hash;
+ int len;
+ static int first_call = 1;
+ u8 found = 0;
+
+ if (!name)
+ return NULL;
+
+ len = strlen(name) + 1;
+ if (len > TMY_MAX_PATHNAME_LEN) {
+ printk(KERN_INFO "ERROR: Name too long for tmy_save_name().\n");
+ return NULL;
+ }
+
+ hash = full_name_hash((const unsigned char *) name, len - 1);
+
+ mutex_lock(&mutex);
+
+ if (first_call) {
+ int i;
+
+ first_call = 0;
+ memset(&name_list, 0, sizeof(name_list));
+
+ for (i = 0; i < MAX_HASH; i++) {
+ INIT_LIST_HEAD(&name_list[i].list);
+ name_list[i].entry.name = "/";
+ tmy_fill_path_info(&name_list[i].entry);
+ }
+
+ if (TMY_MAX_PATHNAME_LEN > PAGE_SIZE)
+ panic("Bad size.");
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptr, &name_list[hash % MAX_HASH].list, list) {
+ if (hash != ptr->entry.hash ||
+ strcmp(name, ptr->entry.name))
+ continue;
+ found = 1;
+ break;
+ }
+ rcu_read_unlock();
+ if (found)
+ goto out;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(fmb, &fmb_list, list) {
+ if (len <= fmb->len) {
+ found = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!found) {
+ char *cp;
+ cp = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cp)
+ goto out2;
+
+ fmb = tmy_alloc_element(sizeof(*fmb));
+ if (!fmb) {
+ kfree(cp);
+out2:
+ printk(KERN_INFO
+ "ERROR: Out of memory for tmy_save_name().\n");
+ if (!sbin_init_started)
+ panic("MAC Initialization failed.\n");
+ goto out; /* ptr == NULL */
+ }
+ allocated_memory_for_savename += PAGE_SIZE;
+ fmb->ptr = cp;
+ fmb->len = PAGE_SIZE;
+ mb(); /* Make sure the entry is fully initialized before publishing. */
+ /* RCU: Protected by mutex. */
+ list_add_tail_rcu(&fmb->list, &fmb_list);
+ }
+
+ ptr = tmy_alloc_element(sizeof(*ptr));
+ if (!ptr)
+ goto out;
+
+ ptr->entry.name = fmb->ptr;
+ memmove(fmb->ptr, name, len);
+ tmy_fill_path_info(&ptr->entry);
+ fmb->ptr += len;
+ fmb->len -= len;
+ mb(); /* Make sure the entry is fully initialized before publishing. */
+ /* RCU: Protected by mutex. */
+ list_add_tail_rcu(&ptr->list, &name_list[hash % MAX_HASH].list);
+ if (fmb->len == 0)
+ list_del_rcu(&fmb->list);
+out:
+ mutex_unlock(&mutex);
+ return ptr ? &ptr->entry : NULL;
+}
+
+/***** Dynamic memory allocator. *****/
+
+/* List of temporary memory blocks. */
+struct cache_entry {
+ struct rcu_head rcu;
+ struct list_head list;
+ void *ptr;
+ int size;
+};
+
+static DEFINE_SPINLOCK(cache_list_lock);
+static struct kmem_cache *tmy_cachep;
+
+/**
+ * tmy_realpath_init - initialize memory allocator.
+ */
+static int __init tmy_realpath_init(void)
+{
+ tmy_cachep = kmem_cache_create("tomoyo_cache",
+ sizeof(struct cache_entry),
+ 0, SLAB_PANIC, NULL);
+ return 0;
+}
+postcore_initcall(tmy_realpath_init);
+
+
+static LIST_HEAD(cache_list);
+static unsigned int dynamic_memory_size;
+
+/**
+ * tmy_get_memory_used_for_dynamic - get memory usage for temporary allocations.
+ *
+ * Returns the size of memory allocated for temporary use.
+ */
+unsigned int tmy_get_memory_used_for_dynamic(void)
+{
+ return dynamic_memory_size;
+}
+
+/**
+ * tmy_alloc - allocate memory for temporary use.
+ * @size: requested size in bytes.
+ *
+ * Returns '\0'-initialized memory region on success.
+ * Returns NULL on failure.
+ *
+ * This function allocates memory for temporary use.
+ * The caller has to call tmy_free() on the returned pointer
+ * when the memory is no longer needed.
+ */
+void *tmy_alloc(const size_t size)
+{
+ void *ret = kzalloc(size, GFP_KERNEL);
+ struct cache_entry *new_entry;
+
+ if (!ret) {
+ printk(KERN_INFO "ERROR: "
+ "kmalloc(): Out of memory for tmy_alloc().\n");
+ return ret;
+ }
+
+ new_entry = kmem_cache_alloc(tmy_cachep, GFP_KERNEL);
+
+ if (!new_entry) {
+ printk(KERN_INFO "ERROR: "
+ "kmem_cache_alloc(): Out of memory for tmy_alloc().\n");
+ kfree(ret);
+ ret = NULL;
+ } else {
+
+ /* INIT_RCU_HEAD(&new_entry->rcu); */
+ INIT_LIST_HEAD(&new_entry->list);
+ new_entry->ptr = ret;
+ new_entry->size = ksize(ret);
+
+ /***** CRITICAL SECTION START *****/
+ spin_lock(&cache_list_lock);
+ list_add_rcu(&new_entry->list, &cache_list);
+ dynamic_memory_size += new_entry->size;
+ spin_unlock(&cache_list_lock);
+ /***** CRITICAL SECTION END *****/
+ }
+
+ return ret;
+}
+
+/* callback function for call_rcu(). */
+static void tmy_free_cachep(struct rcu_head *head)
+{
+ struct cache_entry *entry;
+ entry = container_of(head, struct cache_entry, rcu);
+ kmem_cache_free(tmy_cachep, entry);
+}
+
+/**
+ * tmy_free - free memory allocated by tmy_alloc().
+ * @p: pointer to memory block allocated by tmy_alloc().
+ *
+ * If the caller calls this function multiple times for the same pointer
+ * (i.e. a double-free bug) or calls it with an invalid pointer, a warning
+ * message will appear. If the caller forgets to call this function,
+ * tmy_get_memory_used_for_dynamic() will indicate a memory leak.
+ */
+void tmy_free(const void *p)
+{
+ struct list_head *pos;
+ struct list_head *tmp;
+ struct cache_entry *entry = NULL;
+ u8 found = 0;
+ if (!p)
+ return;
+
+ rcu_read_lock();
+ list_for_each_safe_rcu(pos, tmp, &cache_list) {
+ entry = list_entry(pos, struct cache_entry, list);
+ if (entry->ptr != p)
+ continue;
+ /***** CRITICAL SECTION START *****/
+ spin_lock(&cache_list_lock);
+ if (entry->ptr) {
+ list_del_rcu(&entry->list);
+ dynamic_memory_size -= entry->size;
+ found = 1;
+ }
+ entry->ptr = NULL;
+ spin_unlock(&cache_list_lock);
+ /***** CRITICAL SECTION END *****/
+ break;
+ }
+ rcu_read_unlock();
+ if (found) {
+ call_rcu(&entry->rcu, tmy_free_cachep);
+ kfree(p);
+ } else
+ printk(KERN_INFO "BUG: tmy_free() with invalid pointer.\n");
+}
