Annotation of prex-old/sys/mem/page.c, Revision 1.1.1.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2006, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * page.c - physical page allocator
32: */
33:
/*
 * This is a simple list-based page allocator.
 *
 * What should happen when free pages are exhausted?  If the system
 * could simply stop with panic() here, much of the error checking
 * elsewhere in the kernel would be unnecessary and the kernel code
 * could be simpler.  In general, however, the kernel must not stop
 * when a page allocation fails; it should return an error and keep
 * running.  If memory runs short during boot time, the kernel and
 * drivers may still use panic() in that case.
 */
46:
47: #include <kernel.h>
48: #include <page.h>
49: #include <sched.h>
50:
51: /*
52: * page_block is put on the head of the first page of
53: * each free block.
54: */
/*
 * A page_block header is stored at the start of the first page of
 * each free block; the free memory itself holds the list links.
 */
struct page_block {
	struct page_block *next;	/* next free block (address order) */
	struct page_block *prev;	/* previous free block */
	size_t size;			/* size of this block in bytes */
};

static struct page_block page_head;	/* sentinel: head of the free list */

static size_t total_bytes;	/* bytes managed by this allocator */
static size_t used_bytes;	/* bytes currently allocated/reserved */
65:
66: /*
67: * page_alloc - allocate continuous pages of the specified size.
68: * @size: number of bytes to allocate
69: *
70: * This routine returns the physical address of a new free page block,
71: * or returns NULL on failure. The requested size is automatically
72: * round up to the page boundary.
73: * The allocated memory is _not_ filled with 0.
74: */
75: void *
76: page_alloc(size_t size)
77: {
78: struct page_block *blk, *tmp;
79:
80: ASSERT(size != 0);
81:
82: sched_lock();
83:
84: /*
85: * Find the free block that has enough size.
86: */
87: size = (size_t)PAGE_ALIGN(size);
88: blk = &page_head;
89: do {
90: blk = blk->next;
91: if (blk == &page_head) {
92: sched_unlock();
93: printk("page_alloc: out of memory\n");
94: return NULL; /* Not found. */
95: }
96: } while (blk->size < size);
97:
98: /*
99: * If found block size is exactly same with requested,
100: * just remove it from a free list. Otherwise, the
101: * found block is divided into two and first half is
102: * used for allocation.
103: */
104: if (blk->size == size) {
105: blk->prev->next = blk->next;
106: blk->next->prev = blk->prev;
107: } else {
108: tmp = (struct page_block *)((u_long)blk + size);
109: tmp->size = blk->size - size;
110: tmp->prev = blk->prev;
111: tmp->next = blk->next;
112: blk->prev->next = tmp;
113: blk->next->prev = tmp;
114: }
115: used_bytes += size;
116: sched_unlock();
117:
118: return virt_to_phys(blk);
119: }
120:
121: /*
122: * Free page block.
123: *
124: * This allocator does not maintain the size of allocated page block.
125: * The caller must provide the size information of the block.
126: */
127: void
128: page_free(void *addr, size_t size)
129: {
130: struct page_block *blk, *prev;
131:
132: ASSERT(addr != NULL);
133: ASSERT(size != 0);
134:
135: sched_lock();
136:
137: size = (size_t)PAGE_ALIGN(size);
138: blk = phys_to_virt(addr);
139:
140: /*
141: * Find the target position in list.
142: */
143: for (prev = &page_head; prev->next < blk; prev = prev->next) {
144: if (prev->next == &page_head)
145: break;
146: }
147: #ifdef DEBUG
148: if (prev != &page_head)
149: ASSERT((u_long)prev + prev->size <= (u_long)blk);
150: if (prev->next != &page_head)
151: ASSERT((u_long)blk + size <= (u_long)prev->next);
152: #endif /* DEBUG */
153:
154: /*
155: * Insert new block into list.
156: */
157: blk->size = size;
158: blk->prev = prev;
159: blk->next = prev->next;
160: prev->next->prev = blk;
161: prev->next = blk;
162:
163: /*
164: * If the adjoining block is free, it combines and
165: * is made on block.
166: */
167: if (blk->next != &page_head &&
168: ((u_long)blk + blk->size) == (u_long)blk->next) {
169: blk->size += blk->next->size;
170: blk->next = blk->next->next;
171: blk->next->prev = blk;
172: }
173: if (blk->prev != &page_head &&
174: (u_long)blk->prev + blk->prev->size == (u_long)blk) {
175: blk->prev->size += blk->size;
176: blk->prev->next = blk->next;
177: blk->next->prev = blk->prev;
178: }
179: used_bytes -= size;
180: sched_unlock();
181: }
182:
183: /*
184: * The function to reserve pages in specific address.
185: * Return 0 on success, or -1 on failure
186: */
187: int
188: page_reserve(void *addr, size_t size)
189: {
190: struct page_block *blk, *tmp;
191: u_long end;
192:
193: if (size == 0)
194: return 0;
195:
196: addr = phys_to_virt(addr);
197: end = PAGE_ALIGN((u_long)addr + size);
198: addr = (void *)PAGE_TRUNC(addr);
199: size = (size_t)(end - (u_long)addr);
200:
201: /*
202: * Find the block which includes specified block.
203: */
204: blk = page_head.next;
205: for (;;) {
206: if (blk == &page_head)
207: panic("page_reserve");
208: if ((u_long)blk <= (u_long)addr
209: && end <= (u_long)blk + blk->size)
210: break;
211: blk = blk->next;
212: }
213: if ((u_long)blk == (u_long)addr && blk->size == size) {
214: /*
215: * Unlink the block from free list.
216: */
217: blk->prev->next = blk->next;
218: blk->next->prev = blk->prev;
219: } else {
220: /*
221: * Split this block.
222: */
223: if ((u_long)blk + blk->size != end) {
224: tmp = (struct page_block *)end;
225: tmp->size = (size_t)((u_long)blk + blk->size - end);
226: tmp->next = blk->next;
227: tmp->prev = blk;
228:
229: blk->size -= tmp->size;
230: blk->next->prev = tmp;
231: blk->next = tmp;
232: }
233: if ((u_long)blk == (u_long)addr) {
234: blk->prev->next = blk->next;
235: blk->next->prev = blk->prev;
236: } else
237: blk->size = (size_t)((u_long)addr - (u_long)blk);
238: }
239: used_bytes += size;
240: return 0;
241: }
242:
243: void
244: page_info(size_t *total, size_t *free)
245: {
246:
247: *total = total_bytes;
248: *free = total_bytes - used_bytes;
249: }
250:
#if defined(DEBUG) && defined(CONFIG_KDUMP)
/*
 * page_dump - print the free list and the reserved regions.
 *
 * Debug builds only.  NOTE(review): the %08x/%d specifiers assume
 * 32-bit addresses and sizes — verify on wider targets.
 */
void
page_dump(void)
{
	struct page_block *b;
	void *pa;
	struct mem_map *m;
	struct module *mod;
	int i;

	printk("Page dump:\n");
	printk(" free pages:\n");
	printk(" start end size\n");
	printk(" -------- -------- --------\n");

	/* do/while: the sentinel itself is printed if the list is empty,
	 * matching the traversal order of the circular free list. */
	b = page_head.next;
	do {
		pa = virt_to_phys(b);
		printk(" %08x - %08x %8x\n", pa, (u_long)pa + b->size,
		    b->size);
		b = b->next;
	} while (b != &page_head);
	printk(" used=%dK free=%dK total=%dK\n\n",
	    used_bytes / 1024, (total_bytes - used_bytes) / 1024,
	    total_bytes / 1024);

	mod = (struct module *)&boot_info->kernel;
	printk(" kernel: %08x - %08x (%dK)\n",
	    mod->phys, mod->phys + mod->size, mod->size / 1024);

	mod = (struct module *)&boot_info->driver;
	printk(" driver: %08x - %08x (%dK)\n",
	    mod->phys, mod->phys + mod->size, mod->size / 1024);

	for (i = 0; i < NRESMEM; i++) {
		m = &boot_info->reserved[i];
		if (m->size != 0) {
			printk(" reserved: %08x - %08x (%dK)\n",
			    m->start, m->start + m->size,
			    m->size / 1024);
		}
	}
#ifdef CONFIG_RAMDISK
	m = (struct mem_map *)&boot_info->ram_disk;
	printk(" RAM disk: %08x - %08x (%dK)\n",
	    m->start, m->start + m->size, m->size / 1024);
#endif
}
#endif
300:
301: /*
302: * Initialize page allocator.
303: * page_init() must be called prior to other memory manager's
304: * initializations.
305: */
306: void
307: page_init(void)
308: {
309: struct page_block *blk;
310: struct mem_map *mem;
311: int i;
312:
313: printk("Memory: base=%x size=%dK\n", boot_info->main_mem.start,
314: boot_info->main_mem.size / 1024);
315:
316: /*
317: * First, create one block containing all memory pages.
318: */
319: blk = (struct page_block *)boot_info->main_mem.start;
320: blk = phys_to_virt(blk);
321: blk->size = boot_info->main_mem.size;
322: if (blk->size == 0)
323: panic("page_init: no pages");
324: blk->prev = blk->next = &page_head;
325: page_head.next = page_head.prev = blk;
326:
327: /*
328: * Then, the system reserved pages are marked as a used block.
329: */
330: for (i = 0; i < NRESMEM; i++) {
331: mem = &boot_info->reserved[i];
332: if (mem->size != 0)
333: page_reserve((void *)mem->start, mem->size);
334: }
335: total_bytes = boot_info->main_mem.size - used_bytes;
336: used_bytes = 0;
337:
338: /*
339: * Reserve pages for all boot modules.
340: */
341: mem = &boot_info->modules;
342: page_reserve((void *)mem->start, mem->size);
343: }
CVSweb