path: root/fs/stack.c
blob: 5b5388250e29351a66b60cd7658832586eb80771 (plain)
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/fs_stack.h>

/* does _NOT_ require i_mutex to be held.
 *
 * This function cannot be inlined since i_size_{read,write} are rather
 * heavy-weight on 32-bit systems
 */
void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
{
	loff_t i_size;
	blkcnt_t i_blocks;

	/*
	 * i_size_read() includes its own seqlocking and protection from
	 * preemption (see include/linux/fs.h): we need nothing extra for
	 * that here, and prefer to avoid nesting locks rather than attempt
	 * to keep i_size and i_blocks in sync together.
	 */
	i_size = i_size_read(src);

	/*
	 * But if CONFIG_LBDAF (on 32-bit), we ought to make an effort to
	 * keep the two halves of i_blocks in sync despite SMP or PREEMPT -
	 * though stat's generic_fillattr() doesn't bother, and we won't be
	 * applying quotas (where i_blocks does become important) at the
	 * upper level.
	 *
	 * We don't actually know what locking is used at the lower level;
	 * but if it's a filesystem that supports quotas, it will be using
	 * i_lock as in inode_add_bytes().
	 */
	if (sizeof(i_blocks) > sizeof(long))
		spin_lock(&src->i_lock);
	i_blocks = src->i_blocks;
	if (sizeof(i_blocks) > sizeof(long))
		spin_unlock(&src->i_lock);

	/*
	 * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
	 * fsstack_copy_inode_size() to hold some lock around
	 * i_size_write(), otherwise i_size_read() may spin forever (see
	 * include/linux/fs.h).  We don't necessarily hold i_mutex when this
	 * is called, so take i_lock for that case.
	 *
	 * And if CONFIG_LBDAF (on 32-bit), continue our effort to keep the
	 * two halves of i_blocks in sync despite SMP or PREEMPT: use i_lock
	 * for that case too, and do both at once by combining the tests.
	 *
	 * There is none of this locking overhead in the 64-bit case.
	 */
	if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))
		spin_lock(&dst->i_lock);
	i_size_write(dst, i_size);
	dst->i_blocks = i_blocks;
	if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))
		spin_unlock(&dst->i_lock);
}
EXPORT_SYMBOL_GPL(fsstack_copy_inode_size);
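
/*
 * Example (not part of the original fs/stack.c): a minimal sketch of how a
 * stacking-filesystem caller might use fsstack_copy_inode_size() after an
 * operation on the lower inode has changed its size.  The "upper"/"lower"
 * naming is hypothetical; real callers include eCryptfs-style stacked
 * filesystems.
 */
static inline void example_update_upper_size(struct inode *upper,
					     struct inode *lower)
{
	/*
	 * Propagates i_size and i_blocks from lower to upper; the helper
	 * handles the 32-bit locking concerns documented above, so the
	 * caller needs no extra locks here.
	 */
	fsstack_copy_inode_size(upper, lower);
}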

/* copy all attributes */
void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)
{
	dest->i_mode = src->i_mode;
	dest->i_uid = src->i_uid;
	dest->i_gid = src->i_gid;
	dest->i_rdev = src->i_rdev;
	dest->i_atime = src->i_atime;
	dest->i_mtime = src->i_mtime;
	dest->i_ctime = src->i_ctime;
	dest->i_blkbits = src->i_blkbits;
	dest->i_flags = src->i_flags;
	set_nlink(dest, src->i_nlink);
}
EXPORT_SYMBOL_GPL(fsstack_copy_attr_all);
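
/*
 * Example (not part of the original fs/stack.c): a hypothetical helper
 * showing how the two exported functions are typically used together by a
 * stacking filesystem when refreshing an upper inode from its lower
 * counterpart.
 */
static inline void example_refresh_upper_inode(struct inode *upper,
					       struct inode *lower)
{
	/* Copy mode, ownership, timestamps, flags and link count ... */
	fsstack_copy_attr_all(upper, lower);
	/* ... then i_size and i_blocks, which need the extra care above. */
	fsstack_copy_inode_size(upper, lower);
}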