/**
 * rofi
 *
 * MIT/X11 License
 * Modified 2016 Qball Cow <qball@gmpclient.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <config.h>
#include <stdio.h>
#include "widgets/widget.h"
#include "widgets/widget-internal.h"
#include "widgets/box.h"

#define LOG_DOMAIN    "Widgets.Box"

struct _box
{
    // Embedded base widget; kept as the first member.
    widget  widget;
    // Layout direction (horizontal or vertical).
    boxType type;
    // Combined size of the fixed (non-expanding) children, including padding.
    int     max_size;
    // Padding between elements
    int     padding;

    // Child widgets, in packing order.
    GList   *children;
};

static void box_update ( widget *wid  );

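// Calculate the vertical layout of the children of the box.
// Fixed-size children keep their own height; the height left over in the box
// is divided over the expanding children. Children marked 'end' are packed
// from the bottom up, the others from the top down, separated by padding.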
static void vert_calculate_size ( box *b )
{
    int expanding_widgets = 0;
    int active_widgets    = 0;
    b->max_size = 0;
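    // First pass: count the enabled and expanding children and sum up the
    // height taken by the fixed-size (non-expanding) children.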
    for ( GList *iter = g_list_first ( b->children ); iter != NULL; iter = g_list_next ( iter ) ) {
        widget * child = (widget *) iter->data;
        if ( !child->enabled ) {
            continue;
        }
        active_widgets++;
        if ( child->expand == TRUE ) {
            expanding_widgets++;
            continue;
        }
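        // Size used by fixed height widgets.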
        b->max_size += child->h;
    }
    b->max_size += MAX ( 0, ( ( active_widgets - 1 ) * b->padding ) );
    if ( b->max_size > b->widget.h ) {
        g_log ( LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "Widgets to large (height) for box: %d %d", b->max_size, b->widget.h );
        return;
    }
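    // Second pass: position the children, handing the remaining space out
    // over the expanding children.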
    if ( active_widgets > 0 ) {
        int    bottom = b->widget.h;
        int    top    = 0;
        double rem    = b->widget.h - b->max_size;
        int    index  = 0;
        for ( GList *iter = g_list_first ( b->children ); iter != NULL; iter = g_list_next ( iter ) ) {
            widget * child = (widget *) iter->data;
            if ( child->enabled == FALSE ) {
                continue;
            }
            if ( child->expand == TRUE ) {
                // Re-calculate every iteration to avoid rounding issues leaving a pixel unused.
                int expanding_widgets_size = ( rem ) / ( expanding_widgets - index );
                if ( child->end ) {
                    bottom -= expanding_widgets_size;
                    widget_move ( child, child->x, bottom );
                    widget_resize ( child, b->widget.w, expanding_widgets_size );
                    bottom -= b->padding;
                }
                else {
                    widget_move ( child, child->x, top );
                    top += expanding_widgets_size;
                    widget_resize ( child, b->widget.w, expanding_widgets_size );
                    top += b->padding;
                }
                rem -= expanding_widgets_size;
                index++;
            }
            else if ( child->end ) {
                bottom -= widget_get_height (  child );
                widget_move ( child, child->x, bottom );
                widget_resize ( child, b->widget.w, child->h );
                bottom -= b->padding;
            }
            else {
                widget_move ( child, child->x, top );
                top += widget_get_height (  child );
                widget_resize ( child, b->widget.w, child->h );
                top += b->padding;
            }
        }
    }
}
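
// Calculate the horizontal layout of the children of the box.
// Same scheme as the vertical case: fixed-size children keep their width,
// the remaining space is divided over the expanding children, and children
// marked 'end' are packed from the right.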
static void hori_calculate_size ( box *b )
{
    int expanding_widgets = 0;
    int active_widgets    = 0;
    b->max_size = 0;
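    // First pass: count the enabled and expanding children and sum up the
    // width taken by the fixed-size (non-expanding) children.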
    for ( GList *iter = g_list_first ( b->children ); iter != NULL; iter = g_list_next ( iter ) ) {
        widget * child = (widget *) iter->data;
        if ( !child->enabled ) {
            continue;
        }
        active_widgets++;
        if ( child->expand == TRUE ) {
            expanding_widgets++;
            continue;
        }
        // Size used by fixed width widgets.
        b->max_size += child->w;
    }
    b->max_size += MAX ( 0, ( ( active_widgets - 1 ) * b->padding ) );
    if ( b->max_size > b->widget.w ) {
        g_log ( LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "Widgets to large (width) for box: %d %d", b->max_size, b->widget.w );
        return;
    }
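    // Second pass: position the children, handing the remaining space out
    // over the expanding children.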
    if ( active_widgets > 0 ) {
        int    right = b->widget.w;
        int    left  = 0;
        double rem   = b->widget.w - b->max_size;
        int    index = 0;
        for ( GList *iter = g_list_first ( b->children ); iter != NULL; iter = g_list_next ( iter ) ) {
            widget * child = (widget *) iter->data;
            if ( child->enabled == FALSE  ) {
                continue;
            }
            if ( child->expand == TRUE ) {
                // Re-calculate every iteration to avoid rounding issues leaving a pixel unused.
                int expanding_widgets_size = ( rem ) / ( expanding_widgets - index );
                if ( child->end ) {
                    right -= expanding_widgets_size;
                    widget_move ( child, right, child->y );
                    widget_resize ( child, expanding_widgets_size, b->widget.h );
                    right -= b->padding;
                }
                else {
                    widget_move ( child, left, child->y );
                    left += expanding_widgets_size;
                    widget_resize ( child, expanding_widgets_size, b->widget.h );
                    left += b->padding;
                }
                rem -= expanding_widgets_size;
                index++;
            }
            else if ( child->end ) {
                right -= widget_get_width (  child );