1 /*
2 * Copyright (c) 2013-2023 Ali Mashtizadeh
3 * All rights reserved.
4 */
5
6 #include <stdbool.h>
7 #include <stdint.h>
8 #include <stdarg.h>
9 #include <string.h>
10
11 #include <sys/cdefs.h>
12 #include <sys/kassert.h>
13 #include <sys/kdebug.h>
14 #include <sys/kmem.h>
15 #include <sys/queue.h>
16 #include <sys/spinlock.h>
17
18 // PGSIZE
19 #include <machine/amd64.h>
20 #include <machine/pmap.h>
21
22 /* 'FREEPAGE' */
23 #define FREEPAGE_MAGIC_FREE 0x4652454550414745ULL
24 /* 'ALLOCATE' */
25 #define FREEPAGE_MAGIC_INUSE 0x414c4c4f43415445ULL
26
/* Protects the free list, the page counters, and PageInfo reference counts. */
Spinlock pallocLock;
uint64_t totalPages;	/* Pages ever handed to the allocator via PAlloc_AddRegion. */
uint64_t freePages;	/* Pages currently sitting on the free list. */

/*
 * Header stored inside every page on the free list.  The magic value
 * distinguishes free pages from allocated ones and lets debug builds
 * catch double frees (see PAllocFreePage).
 */
typedef struct FreePage
{
    uint64_t magic;
    LIST_ENTRY(FreePage) entries;
} FreePage;

/*
 * Per-physical-page metadata, indexed by physical page number
 * (see PAllocGetInfo).
 */
typedef struct PageInfo
{
    uint64_t refCount;	/* 0 = free; pages outside managed regions are pinned at 1. */
} PageInfo;

XMem *pageInfoXMem;	/* XMem region backing pageInfoTable after PAlloc_LateInit. */
PageInfo *pageInfoTable;	/* One PageInfo per physical page. */
uint64_t pageInfoLength;	/* Byte length of pageInfoTable, page aligned. */
LIST_HEAD(FreeListHead, FreePage) freeList;	/* List of free physical pages. */
46
47 /*
48 * Initializes the page allocator
49 */
50 void
PAlloc_Init()51 PAlloc_Init()
52 {
53 totalPages = 0;
54 freePages = 0;
55
56 Spinlock_Init(&pallocLock, "PAlloc Lock", SPINLOCK_TYPE_NORMAL);
57
58 LIST_INIT(&freeList);
59 pageInfoXMem = NULL;
60 pageInfoTable = NULL;
61 }
62
63 /**
64 * PAlloc_LateInit --
65 *
66 * The late init call is made after the page tables are initialized using a
 * small boot memory region (2nd 16MBs). This is where we initialize the XMem
68 * region that represents the PageInfo array, and map memory into it.
69 */
70 void
PAlloc_LateInit()71 PAlloc_LateInit()
72 {
73 void *pageInfoOld = pageInfoTable;
74
75 pageInfoXMem = XMem_New();
76 if (!XMem_Allocate(pageInfoXMem, pageInfoLength)) {
77 Panic("Cannot back pageInfoTable!");
78 }
79
80 pageInfoTable = (PageInfo *)XMem_GetBase(pageInfoXMem);
81 memcpy(pageInfoTable, pageInfoOld, pageInfoLength);
82
83 // Free old pages
84 }
85
86 /**
87 * PAlloc_AddRegion --
88 *
89 * Add a physical memory region to the page allocator.
90 */
void
PAlloc_AddRegion(uintptr_t start, uintptr_t len)
{
    uintptr_t i;
    FreePage *pg;

    /* Both ends of the region must be page aligned. */
    if ((start % PGSIZE) != 0)
	Panic("Region start is not page aligned!");
    if ((len % PGSIZE) != 0)
	Panic("Region length is not page aligned!");

    /*
     * PageInfo table isn't initialized on the first call to this function. We
     * must allocate a temporary table that will be copied into the XMem region
     * inside PAlloc_LateInit.
     *
     * Note that the PageInfo table is invalid for regions that are not added
     * to the free list such as MMIO regions.
     */
    if (pageInfoTable == NULL) {
	// Physical Address Offsets
	// NOTE(review): 'start' appears to be a direct-map virtual address;
	// DMVA2PA recovers the physical address — confirm with callers.
	uintptr_t base = (uintptr_t)DMVA2PA(start);
	uintptr_t end = base + len;

	/*
	 * Size the table to cover every physical page up to 'end', and
	 * carve its backing store out of the head of this region.
	 */
	pageInfoLength = ROUNDUP(end / PGSIZE * sizeof(PageInfo), PGSIZE);
	pageInfoTable = (PageInfo *)start;

	start += pageInfoLength;
	len -= pageInfoLength;

	/* Pages below this region (early boot memory) are pinned as in-use. */
	for (i = 0; i < (base / PGSIZE); i++) {
	    pageInfoTable[i].refCount = 1;
	}
	/* Pages inside the region start out free... */
	for (i = (base / PGSIZE); i < (end / PGSIZE); i++) {
	    pageInfoTable[i].refCount = 0;
	}
	/* ...except the pages now occupied by the PageInfo table itself. */
	for (i = 0; i < (pageInfoLength / PGSIZE); i++) {
	    pageInfoTable[i + (base / PGSIZE)].refCount = 1;
	}
    } else {
	/*
	 * Only the first call to AddRegion should occur before the XMem region
	 * is initialized.
	 */

	ASSERT(pageInfoXMem != NULL);

	uintptr_t base = (uintptr_t)DMVA2PA(start);
	uintptr_t end = base + len;

	/* Grow the XMem-backed table to cover the new region's pages. */
	uintptr_t newLength = ROUNDUP(end / PGSIZE * sizeof(PageInfo), PGSIZE);

	if (!XMem_Allocate(pageInfoXMem, newLength))
	    Panic("Cannot allocate XMem region!");

	// Initialize new pages
	for (i = (base / PGSIZE); i < (end / PGSIZE); i++) {
	    pageInfoTable[i].refCount = 0;
	}
    }

    /* Stamp each page with the free magic and push it onto the free list. */
    Spinlock_Lock(&pallocLock);
    for (i = 0; i < len; i += PGSIZE)
    {
	pg = (void *)(start + i);
	pg->magic = FREEPAGE_MAGIC_FREE;

	totalPages++;
	freePages++;

	LIST_INSERT_HEAD(&freeList, pg, entries);
    }
    Spinlock_Unlock(&pallocLock);
}
165
166 /**
167 * PAllocGetInfo --
168 *
169 * Lookup the PageInfo structure for a given physical address.
170 */
171 static inline PageInfo *
PAllocGetInfo(void * pg)172 PAllocGetInfo(void *pg)
173 {
174 uintptr_t entry = (uintptr_t)DMVA2PA(pg) / PGSIZE;
175 return &pageInfoTable[entry];
176 }
177
178 /**
179 * PAlloc_AllocPage --
180 *
181 * Allocate a physical page and return the page's address in the Kernel's ident
182 * mapped memory region.
183 *
184 * @retval NULL if no memory is available.
185 * @return Newly allocated physical page.
186 */
187 void *
PAlloc_AllocPage()188 PAlloc_AllocPage()
189 {
190 PageInfo *info;
191 FreePage *pg;
192
193 Spinlock_Lock(&pallocLock);
194 pg = LIST_FIRST(&freeList);
195 ASSERT(pg != NULL);
196 LIST_REMOVE(pg, entries);
197
198 ASSERT(pg->magic == FREEPAGE_MAGIC_FREE);
199
200 info = PAllocGetInfo(pg);
201 ASSERT(info != NULL);
202 ASSERT(info->refCount == 0);
203 info->refCount++;
204
205 pg->magic = FREEPAGE_MAGIC_INUSE;
206
207 freePages--;
208 Spinlock_Unlock(&pallocLock);
209
210 memset(pg, 0, PGSIZE);
211
212 return (void *)pg;
213 }
214
215 /**
216 * PAllocFreePage --
217 *
218 * Free a page.
219 */
220 static void
PAllocFreePage(void * region)221 PAllocFreePage(void *region)
222 {
223 FreePage *pg = (FreePage *)region;
224
225 ASSERT(((uintptr_t)region % PGSIZE) == 0);
226
227 LIST_INSERT_HEAD(&freeList, pg, entries);
228
229 #ifndef NDEBUG
230 // Application can write this magic, but for
231 // debug builds we can use this as a double free check.
232 ASSERT(pg->magic != FREEPAGE_MAGIC_FREE);
233
234 PageInfo *info = PAllocGetInfo(pg);
235 ASSERT(info->refCount == 0);
236 #endif
237
238 pg->magic = FREEPAGE_MAGIC_FREE;
239 freePages++;
240 }
241
242 /**
243 * PAlloc_Retain --
244 *
245 * Increment the reference count for a physical page.
246 */
247 void
PAlloc_Retain(void * pg)248 PAlloc_Retain(void *pg)
249 {
250 PageInfo *info = PAllocGetInfo(pg);
251
252 Spinlock_Lock(&pallocLock);
253 ASSERT(info->refCount != 0);
254 info->refCount++;
255 Spinlock_Unlock(&pallocLock);
256 }
257
258 /**
259 * PAlloc_Release --
260 *
 * Decrement the reference count for a physical page. If the reference count
262 * is zero the page will be freed.
263 */
264 void
PAlloc_Release(void * pg)265 PAlloc_Release(void *pg)
266 {
267 PageInfo *info = PAllocGetInfo(pg);
268
269 Spinlock_Lock(&pallocLock);
270 ASSERT(info->refCount != 0);
271 info->refCount--;
272 if (info->refCount == 0)
273 PAllocFreePage(pg);
274 Spinlock_Unlock(&pallocLock);
275 }
276
277 static void
Debug_PAllocStats(int argc,const char * argv[])278 Debug_PAllocStats(int argc, const char *argv[])
279 {
280 kprintf("Total Pages: %llu\n", totalPages);
281 kprintf("Allocated Pages: %llu\n", totalPages - freePages);
282 kprintf("Free Pages: %llu\n", freePages);
283 }
284
285 REGISTER_DBGCMD(pallocstats, "Page allocator statistics", Debug_PAllocStats);
286
287 static void
Debug_PAllocDump(int argc,const char * argv[])288 Debug_PAllocDump(int argc, const char *argv[])
289 {
290 struct FreePage *it;
291
292 LIST_FOREACH(it, &freeList, entries) {
293 if (it->magic != FREEPAGE_MAGIC_FREE)
294 kprintf("Magic Corrupted! (%lx)\n", it->magic);
295 kprintf("Free %lx\n", (uintptr_t)it);
296 }
297 }
298
299 REGISTER_DBGCMD(pallocdump, "Dump page allocator's free list", Debug_PAllocDump);
300
301