1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* drmem.h: Power specific logical memory block representation
*
* Copyright 2017 IBM Corporation
*/
#ifndef _ASM_POWERPC_LMB_H
#define _ASM_POWERPC_LMB_H
#include <linux/sched.h>
/*
 * One logical memory block (LMB) parsed from the ibm,dynamic-memory
 * device tree data.  flags carries the DRCONF_MEM_* bits plus the
 * software-only DRMEM_LMB_RESERVED marker (see helpers below).
 */
struct drmem_lmb {
u64 base_addr; /* starting physical address of this LMB */
u32 drc_index; /* presumably the DRC index used for DLPAR ops — confirm with callers */
u32 aa_index; /* associativity index; 0xffffffff marks it invalid (see invalidate_lmb_associativity_index) */
u32 flags; /* DRCONF_MEM_* bits and DRMEM_LMB_RESERVED */
#ifdef CONFIG_MEMORY_HOTPLUG
int nid; /* cached NUMA node id; -1 after lmb_clear_nid() */
#endif
};
/* Container for the full in-memory LMB table built from the device tree. */
struct drmem_lmb_info {
struct drmem_lmb *lmbs; /* array of n_lmbs entries */
int n_lmbs; /* number of entries in lmbs[] */
u32 lmb_size; /* uniform size of every LMB (units per device tree — presumably bytes; confirm) */
};
/* Single global LMB table; defined in the drmem implementation file. */
extern struct drmem_lmb_info *drmem_info;
static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
const struct drmem_lmb *start)
{
/*
* DLPAR code paths can take several milliseconds per element
* when interacting with firmware. Ensure that we don't
* unfairly monopolize the CPU.
*/
if (((++lmb - start) % 16) == 0)
cond_resched();
return lmb;
}
/*
 * Iterate @lmb over the half-open range [@start, @end), yielding the
 * CPU periodically via drmem_lmb_next().
 *
 * Fix: parenthesize every use of the macro parameters in the expansion
 * (the original left the assignment target and both drmem_lmb_next()
 * arguments bare, which mis-expands for non-trivial arguments).
 */
#define for_each_drmem_lmb_in_range(lmb, start, end) \
for ((lmb) = (start); (lmb) < (end); (lmb) = drmem_lmb_next((lmb), (start)))
/* Iterate over every LMB in the global drmem_info table. */
#define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \
&drmem_info->lmbs[0], \
&drmem_info->lmbs[drmem_info->n_lmbs])
/*
 * The of_drconf_cell_v1 struct defines the layout of the LMB data
 * specified in the ibm,dynamic-memory device tree property.
 * The property itself is a 32-bit value specifying the number of
 * LMBs followed by an array of of_drconf_cell_v1 entries, one
 * per LMB.
 */
struct of_drconf_cell_v1 {
__be64 base_addr; /* big-endian, straight from the device tree */
__be32 drc_index;
__be32 reserved; /* unused padding in the property layout */
__be32 aa_index;
__be32 flags; /* DRCONF_MEM_* bits */
};
/*
 * Version 2 of the ibm,dynamic-memory property is defined as a
 * 32-bit value specifying the number of LMB sets followed by an
 * array of of_drconf_cell_v2 entries, one per LMB set.
 */
struct of_drconf_cell_v2 {
u32 seq_lmbs; /* presumably the count of consecutive LMBs in this set — confirm against parser */
u64 base_addr;
u32 drc_index;
u32 aa_index;
u32 flags;
} __packed; /* packed: must match the on-disk/firmware property layout exactly */
/* LMB flag bits as carried in the device tree property (PAPR-defined). */
#define DRCONF_MEM_ASSIGNED 0x00000008
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
#define DRCONF_MEM_HOTREMOVABLE 0x00000100
static inline u32 drmem_lmb_size(void)
{
return drmem_info->lmb_size;
}
/*
 * Software-only flag bit (in drmem_lmb::flags) used to mark an LMB as
 * reserved; it is never written back to the device tree property.
 */
#define DRMEM_LMB_RESERVED	0x80000000

/* Set the reservation marker on @lmb. */
static inline void drmem_mark_lmb_reserved(struct drmem_lmb *lmb)
{
	lmb->flags |= DRMEM_LMB_RESERVED;
}

/* Clear the reservation marker on @lmb. */
static inline void drmem_remove_lmb_reservation(struct drmem_lmb *lmb)
{
	lmb->flags &= ~DRMEM_LMB_RESERVED;
}

/* Return true when @lmb carries the reservation marker. */
static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
{
	return (lmb->flags & DRMEM_LMB_RESERVED) != 0;
}
/* NOTE(review): per the name, the highest address covered by the LMBs — confirm in implementation. */
u64 drmem_lmb_memory_max(void);
/*
 * Walk the LMBs described under @dn, invoking @func on each with a
 * cursor into the property data and the caller's @data.  Return-value
 * semantics (early stop on non-zero?) are not visible here — verify
 * against the implementation before relying on them.
 */
int walk_drmem_lmbs(struct device_node *dn, void *data,
int (*func)(struct drmem_lmb *, const __be32 **, void *));
/* Presumably rewrites the device tree LMB property from in-memory state — confirm. */
int drmem_update_dt(void);
#ifdef CONFIG_PPC_PSERIES
/* Early-boot variant: takes a flattened-device-tree node offset instead of a device_node. */
int __init
walk_drmem_lmbs_early(unsigned long node, void *data,
int (*func)(struct drmem_lmb *, const __be32 **, void *));
#endif
/* Mark @lmb's associativity index invalid by setting the all-ones sentinel. */
static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
{
lmb->aa_index = 0xffffffff;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/* Derive and cache the NUMA node id for @lmb from its base address. */
static inline void lmb_set_nid(struct drmem_lmb *lmb)
{
lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
}
/* Reset the cached node id to -1 (no node assigned). */
static inline void lmb_clear_nid(struct drmem_lmb *lmb)
{
lmb->nid = -1;
}
#else
/* Without CONFIG_MEMORY_HOTPLUG the nid field does not exist; these are no-ops. */
static inline void lmb_set_nid(struct drmem_lmb *lmb)
{
}
static inline void lmb_clear_nid(struct drmem_lmb *lmb)
{
}
#endif
#endif /* _ASM_POWERPC_LMB_H */
|