path: root/fs/gfs2/inode.h
blob: 2d43f69610a07c082fbfd0809675b51e4d15b81f
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INODE_DOT_H__
#define __INODE_DOT_H__

#include "util.h"

/*
 * A "stuffed" inode keeps its data inside the on-disk dinode block
 * itself; i_height is zero when no metadata tree has been built.
 */
static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
{
	return !ip->i_height;
}
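
/*
 * Illustrative note (not from the original header): a small file whose
 * data fits in the dinode block stays stuffed (i_height == 0); once it
 * outgrows that block the data is moved into separate blocks and a
 * metadata tree of height >= 1 is built, after which gfs2_is_stuffed()
 * returns false.
 */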

/* True if the inode's file data (not just its metadata) is journaled. */
static inline int gfs2_is_jdata(const struct gfs2_inode *ip)
{
	return ip->i_di.di_flags & GFS2_DIF_JDATA;
}

/*
 * The "data=" mount argument selects how ordinary (non-jdata) file data
 * is handled: writeback or ordered.  An inode's GFS2_DIF_JDATA flag
 * overrides the mount-wide setting, so both helpers below return false
 * for jdata inodes.
 */
static inline int gfs2_is_writeback(const struct gfs2_inode *ip)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	return (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK) && !gfs2_is_jdata(ip);
}

static inline int gfs2_is_ordered(const struct gfs2_inode *ip)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	return (sdp->sd_args.ar_data == GFS2_DATA_ORDERED) && !gfs2_is_jdata(ip);
}
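
/*
 * Example (assumes the documented mount defaults, not stated in this
 * header): GFS2 mounts with data=ordered unless told otherwise, so for
 * a non-jdata inode gfs2_is_ordered() is normally true, and
 * gfs2_is_writeback() becomes true only after mounting with
 * -o data=writeback.
 */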

static inline int gfs2_is_dir(const struct gfs2_inode *ip)
{
	return S_ISDIR(ip->i_inode.i_mode);
}

/*
 * inode->i_blocks counts GFS2_BASIC_BLOCK-sized (512-byte) sectors, as
 * the VFS expects, while GFS2 itself works in filesystem blocks of
 * sb_bsize bytes.  These helpers convert between the two units.
 */
static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks)
{
	inode->i_blocks = blocks <<
		(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
}

static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
{
	return inode->i_blocks >>
		(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
}

/* Adjust i_blocks by "change" filesystem blocks; change may be negative,
   and the assert catches an underflow of the sector count. */
static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
{
	gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks > -change));
	change *= (GFS2_SB(inode)->sd_sb.sb_bsize / GFS2_BASIC_BLOCK);
	inode->i_blocks += change;
}
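
/*
 * Worked example (illustrative, assuming 4096-byte filesystem blocks):
 * sb_bsize_shift == 12 and GFS2_BASIC_BLOCK_SHIFT == 9, so one fs block
 * spans 1 << (12 - 9) = 8 sectors.  gfs2_set_inode_blocks(inode, 10)
 * stores i_blocks = 10 << 3 = 80, gfs2_get_inode_blocks() maps 80 back
 * to 10, and gfs2_add_inode_blocks(inode, -2) subtracts 2 * 8 = 16.
 */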

/* Compare an in-core inode against an on-disk inode number pair. */
static inline int gfs2_check_inum(const struct gfs2_inode *ip, u64 no_addr,
				  u64 no_formal_ino)
{
	return ip->i_no_addr == no_addr && ip->i_no_formal_ino == no_formal_ino;
}

/*
 * Write an inode's number pair into a directory entry in on-disk
 * (big-endian) byte order.
 */
static inline void gfs2_inum_out(const struct gfs2_inode *ip,
				 struct gfs2_dirent *dent)
{
	dent->de_inum.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr);
}
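
/*
 * Endianness note (illustrative): on a little-endian host cpu_to_be64()
 * byte-swaps its argument, so a no_addr of 0x12345 is written to the
 * dirent as 00 00 00 00 00 01 23 45; on a big-endian host the call is a
 * no-op.  Either way the on-disk directory entry is identical.
 */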

void gfs2_set_iop(struct inode *inode);
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
				u64 no_addr, u64 no_formal_ino,
				int skip_freeing);
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);

int gfs2_inode_refresh(struct gfs2_inode *ip);

int gfs2_dinode_dealloc(struct gfs2_inode *inode);
int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root);
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev);
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip);
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   const struct gfs2_inode *ip);
int gfs2_permission(struct inode *inode, int mask);
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
void gfs2_dinode_print(const struct gfs2_inode *ip);

#endif /* __INODE_DOT_H__ */
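
/*
 * Usage sketch (an illustration, not code from this tree; error paths
 * are simplified).  Resolving a name through gfs2_lookupi() might look
 * like:
 *
 *	struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0);
 *
 *	if (inode && !IS_ERR(inode)) {
 *		... use inode ...
 *		iput(inode);
 *	}
 *
 * NULL means the name was not found; an ERR_PTR() value reports an
 * error, so callers check both cases.
 */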