/* prex/sys/mem/vm_nommu.c — Revision 1.1.1.1 (from CVS annotation export) */
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2006, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * vm_nommu.c - virtual memory alloctor for no MMU systems
32: */
33:
34: /*
35: * When the platform does not support memory management unit (MMU)
36: * all virtual memories are mapped to the physical memory. So, the
37: * memory space is shared among all tasks and kernel.
38: *
39: * Important: The lists of regions are not sorted by address.
40: */
41:
42: #include <kernel.h>
43: #include <kmem.h>
44: #include <thread.h>
45: #include <page.h>
46: #include <task.h>
47: #include <sched.h>
48: #include <vm.h>
49:
50: /* forward declarations */
51: static struct region *region_create(struct region *, void *, size_t);
52: static void region_delete(struct region *, struct region *);
53: static struct region *region_find(struct region *, void *, size_t);
54: static void region_free(struct region *, struct region *);
55: static void region_init(struct region *);
56: static int do_allocate(vm_map_t, void **, size_t, int);
57: static int do_free(vm_map_t, void *);
58: static int do_attribute(vm_map_t, void *, int);
59: static int do_map(vm_map_t, void *, size_t, void **);
60:
61:
62: /* vm mapping for kernel task */
63: static struct vm_map kern_map;
64:
65: /**
66: * vm_allocate - allocate zero-filled memory for specified address
67: *
68: * If "anywhere" argument is true, the "addr" argument will be
69: * ignored. In this case, the address of free space will be
70: * found automatically.
71: *
72: * The allocated area has writable, user-access attribute by
73: * default. The "addr" and "size" argument will be adjusted
74: * to page boundary.
75: */
76: int
77: vm_allocate(task_t task, void **addr, size_t size, int anywhere)
78: {
79: int err;
80: void *uaddr;
81:
82: sched_lock();
83:
84: if (!task_valid(task)) {
85: err = ESRCH;
86: goto out;
87: }
88: if (task != cur_task() && !task_capable(CAP_MEMORY)) {
89: err = EPERM;
90: goto out;
91: }
92: if (umem_copyin(addr, &uaddr, sizeof(void *))) {
93: err = EFAULT;
94: goto out;
95: }
96: if (anywhere == 0 && !user_area(*addr)) {
97: err = EACCES;
98: goto out;
99: }
100:
101: err = do_allocate(task->map, &uaddr, size, anywhere);
102: if (err == 0) {
103: if (umem_copyout(&uaddr, addr, sizeof(void *)))
104: err = EFAULT;
105: }
106: out:
107: sched_unlock();
108: return err;
109: }
110:
111: static int
112: do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
113: {
114: struct region *reg;
115: char *start, *end;
116:
117: if (size == 0)
118: return EINVAL;
119:
120: /*
121: * Allocate region, and reserve pages for it.
122: */
123: if (anywhere) {
124: size = (size_t)PAGE_ALIGN(size);
125: if ((start = page_alloc(size)) == 0)
126: return ENOMEM;
127: } else {
128: start = (char *)PAGE_TRUNC(*addr);
129: end = (char *)PAGE_ALIGN(start + size);
130: size = (size_t)(end - start);
131:
132: if (page_reserve(start, size))
133: return EINVAL;
134: }
135: reg = region_create(&map->head, start, size);
136: if (reg == NULL) {
137: page_free(start, size);
138: return ENOMEM;
139: }
140: reg->flags = REG_READ | REG_WRITE;
141:
142: /* Zero fill */
143: memset(start, 0, size);
144: *addr = reg->addr;
145: return 0;
146: }
147:
148: /*
149: * Deallocate memory region for specified address.
150: *
151: * The "addr" argument points to a memory region previously
152: * allocated through a call to vm_allocate() or vm_map(). The
153: * number of bytes freed is the number of bytes of the
154: * allocated region. If one of the region of previous and
155: * next are free, it combines with them, and larger free
156: * region is created.
157: */
158: int
159: vm_free(task_t task, void *addr)
160: {
161: int err;
162:
163: sched_lock();
164: if (!task_valid(task)) {
165: err = ESRCH;
166: goto out;
167: }
168: if (task != cur_task() && !task_capable(CAP_MEMORY)) {
169: err = EPERM;
170: goto out;
171: }
172: if (!user_area(addr)) {
173: err = EFAULT;
174: goto out;
175: }
176:
177: err = do_free(task->map, addr);
178: out:
179: sched_unlock();
180: return err;
181: }
182:
183: static int
184: do_free(vm_map_t map, void *addr)
185: {
186: struct region *reg;
187:
188: addr = (void *)PAGE_TRUNC(addr);
189:
190: /*
191: * Find the target region.
192: */
193: reg = region_find(&map->head, addr, 1);
194: if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE))
195: return EINVAL; /* not allocated */
196:
197: /*
198: * Free pages if it is not shared and mapped.
199: */
200: if (!(reg->flags & REG_SHARED) && !(reg->flags & REG_MAPPED))
201: page_free(reg->addr, reg->size);
202:
203: region_free(&map->head, reg);
204: return 0;
205: }
206:
207: /*
208: * Change attribute of specified virtual address.
209: *
210: * The "addr" argument points to a memory region previously
211: * allocated through a call to vm_allocate(). The attribute
212: * type can be chosen a combination of VMA_READ, VMA_WRITE.
213: * Note: VMA_EXEC is not supported, yet.
214: */
215: int
216: vm_attribute(task_t task, void *addr, int attr)
217: {
218: int err;
219:
220: sched_lock();
221: if (attr == 0 || attr & ~(VMA_READ | VMA_WRITE)) {
222: err = EINVAL;
223: goto out;
224: }
225: if (!task_valid(task)) {
226: err = ESRCH;
227: goto out;
228: }
229: if (task != cur_task() && !task_capable(CAP_MEMORY)) {
230: err = EPERM;
231: goto out;
232: }
233: if (!user_area(addr)) {
234: err = EFAULT;
235: goto out;
236: }
237:
238: err = do_attribute(task->map, addr, attr);
239: out:
240: sched_unlock();
241: return err;
242: }
243:
244: static int
245: do_attribute(vm_map_t map, void *addr, int attr)
246: {
247: struct region *reg;
248: int new_flags = 0;
249:
250: addr = (void *)PAGE_TRUNC(addr);
251:
252: /*
253: * Find the target region.
254: */
255: reg = region_find(&map->head, addr, 1);
256: if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE)) {
257: return EINVAL; /* not allocated */
258: }
259: /*
260: * The attribute of the mapped or shared region can not be changed.
261: */
262: if ((reg->flags & REG_MAPPED) || (reg->flags & REG_SHARED))
263: return EINVAL;
264:
265: /*
266: * Check new and old flag.
267: */
268: if (reg->flags & REG_WRITE) {
269: if (!(attr & VMA_WRITE))
270: new_flags = REG_READ;
271: } else {
272: if (attr & VMA_WRITE)
273: new_flags = REG_READ | REG_WRITE;
274: }
275: if (new_flags == 0)
276: return 0; /* same attribute */
277: reg->flags = new_flags;
278: return 0;
279: }
280:
281: /**
282: * vm_map - map another task's memory to current task.
283: *
284: * Note: This routine does not support mapping to the specific
285: * address.
286: */
287: int
288: vm_map(task_t target, void *addr, size_t size, void **alloc)
289: {
290: int err;
291:
292: sched_lock();
293: if (!task_valid(target)) {
294: err = ESRCH;
295: goto out;
296: }
297: if (target == cur_task()) {
298: err = EINVAL;
299: goto out;
300: }
301: if (!task_capable(CAP_MEMORY)) {
302: err = EPERM;
303: goto out;
304: }
305: if (!user_area(addr)) {
306: err = EFAULT;
307: goto out;
308: }
309:
310: err = do_map(target->map, addr, size, alloc);
311: out:
312: sched_unlock();
313: return err;
314: }
315:
316: static int
317: do_map(vm_map_t map, void *addr, size_t size, void **alloc)
318: {
319: vm_map_t curmap;
320: task_t self;
321: char *start, *end;
322: struct region *reg, *tgt;
323: void *tmp;
324:
325: if (size == 0)
326: return EINVAL;
327:
328: /* check fault */
329: tmp = NULL;
330: if (umem_copyout(&tmp, alloc, sizeof(void *)))
331: return EFAULT;
332:
333: start = (char *)PAGE_TRUNC(addr);
334: end = (char *)PAGE_ALIGN((char *)addr + size);
335: size = (size_t)(end - start);
336:
337: /*
338: * Find the region that includes target address
339: */
340: reg = region_find(&map->head, start, size);
341: if (reg == NULL || (reg->flags & REG_FREE))
342: return EINVAL; /* not allocated */
343: tgt = reg;
344:
345: /*
346: * Create new region to map
347: */
348: self = cur_task();
349: curmap = self->map;
350: reg = region_create(&curmap->head, start, size);
351: if (reg == NULL)
352: return ENOMEM;
353: reg->flags = tgt->flags | REG_MAPPED;
354:
355: umem_copyout(&addr, alloc, sizeof(void *));
356: return 0;
357: }
358:
359: /*
360: * Create new virtual memory space.
361: * No memory is inherited.
362: * Must be called with scheduler locked.
363: */
364: vm_map_t
365: vm_create(void)
366: {
367: vm_map_t map;
368:
369: /* Allocate new map structure */
370: if ((map = kmem_alloc(sizeof(struct vm_map))) == NULL)
371: return NULL;
372:
373: map->refcnt = 1;
374: region_init(&map->head);
375: return map;
376: }
377:
378: /*
379: * Terminate specified virtual memory space.
380: * This is called when task is terminated.
381: */
382: void
383: vm_terminate(vm_map_t map)
384: {
385: struct region *reg, *tmp;
386:
387: if (--map->refcnt >= 1)
388: return;
389:
390: sched_lock();
391: reg = &map->head;
392: do {
393: if (reg->flags != REG_FREE) {
394: /* Free region if it is not shared and mapped */
395: if (!(reg->flags & REG_SHARED) &&
396: !(reg->flags & REG_MAPPED)) {
397: page_free(reg->addr, reg->size);
398: }
399: }
400: tmp = reg;
401: reg = reg->next;
402: region_delete(&map->head, tmp);
403: } while (reg != &map->head);
404:
405: kmem_free(map);
406: sched_unlock();
407: }
408:
409: /*
410: * Duplicate specified virtual memory space.
411: */
412: vm_map_t
413: vm_fork(vm_map_t org_map)
414: {
415: /*
416: * This function is not supported with no MMU system.
417: */
418: return NULL;
419: }
420:
421: /*
422: * Switch VM mapping.
423: */
424: void
425: vm_switch(vm_map_t map)
426: {
427: }
428:
429: /*
430: * Increment reference count of VM mapping.
431: */
432: int
433: vm_reference(vm_map_t map)
434: {
435:
436: map->refcnt++;
437: return 0;
438: }
439:
440: /*
441: * Translate virtual address of current task to physical address.
442: * Returns physical address on success, or NULL if no mapped memory.
443: */
444: void *
445: vm_translate(void *addr, size_t size)
446: {
447:
448: return addr;
449: }
450:
451: /*
452: * Reserve specific area for boot tasks.
453: */
454: static int
455: do_reserve(vm_map_t map, void **addr, size_t size)
456: {
457: struct region *reg;
458: char *start, *end;
459:
460: if (size == 0)
461: return EINVAL;
462:
463: start = (char *)PAGE_TRUNC(*addr);
464: end = (char *)PAGE_ALIGN(start + size);
465: size = (size_t)(end - start);
466:
467: reg = region_create(&map->head, start, size);
468: if (reg == NULL)
469: return ENOMEM;
470: reg->flags = REG_READ | REG_WRITE;
471: *addr = reg->addr;
472: return 0;
473: }
474:
475: /*
476: * Setup task image for boot task. (NOMMU version)
477: * Return 0 on success, -1 on failure.
478: *
479: * Note: We assume that the task images are already copied to
480: * the proper address by a boot loader.
481: */
482: int
483: vm_load(vm_map_t map, struct module *mod, void **stack)
484: {
485: void *base;
486: size_t size;
487:
488: DPRINTF(("Loading task:\'%s\'\n", mod->name));
489:
490: /*
491: * Reserve text & data area
492: */
493: base = (void *)mod->text;
494: size = mod->textsz + mod->datasz + mod->bsssz;
495: if (do_reserve(map, &base, size))
496: return -1;
497: if (mod->bsssz != 0)
498: memset((void *)(mod->data + mod->datasz), 0, mod->bsssz);
499:
500: /*
501: * Create stack
502: */
503: if (do_allocate(map, stack, USTACK_SIZE, 1))
504: return -1;
505: return 0;
506: }
507:
508: /*
509: * Create new free region after the specified region.
510: * Returns region on success, or NULL on failure.
511: */
512: static struct region *
513: region_create(struct region *prev, void *addr, size_t size)
514: {
515: struct region *reg;
516:
517: if ((reg = kmem_alloc(sizeof(struct region))) == NULL)
518: return NULL;
519:
520: reg->addr = addr;
521: reg->size = size;
522: reg->flags = REG_FREE;
523: reg->sh_next = reg->sh_prev = reg;
524:
525: reg->next = prev->next;
526: reg->prev = prev;
527: prev->next->prev = reg;
528: prev->next = reg;
529: return reg;
530: }
531:
532: /*
533: * Delete specified region
534: */
535: static void
536: region_delete(struct region *head, struct region *reg)
537: {
538:
539: /*
540: * If it is shared region, unlink from shared list.
541: */
542: if (reg->flags & REG_SHARED) {
543: reg->sh_prev->sh_next = reg->sh_next;
544: reg->sh_next->sh_prev = reg->sh_prev;
545: if (reg->sh_prev == reg->sh_next)
546: reg->sh_prev->flags &= ~REG_SHARED;
547: }
548: if (head != reg)
549: kmem_free(reg);
550: }
551:
552: /*
553: * Find the region at the specified area.
554: */
555: static struct region *
556: region_find(struct region *head, void *addr, size_t size)
557: {
558: struct region *reg;
559:
560: reg = head;
561: do {
562: if (reg->addr <= addr &&
563: (char *)reg->addr + reg->size >= (char *)addr + size) {
564: return reg;
565: }
566: reg = reg->next;
567: } while (reg != head);
568: return NULL;
569: }
570:
571: /*
572: * Free specified region
573: */
574: static void
575: region_free(struct region *head, struct region *reg)
576: {
577: ASSERT(reg->flags != REG_FREE);
578:
579: /*
580: * If it is shared region, unlink from shared list.
581: */
582: if (reg->flags & REG_SHARED) {
583: reg->sh_prev->sh_next = reg->sh_next;
584: reg->sh_next->sh_prev = reg->sh_prev;
585: if (reg->sh_prev == reg->sh_next)
586: reg->sh_prev->flags &= ~REG_SHARED;
587: }
588: reg->prev->next = reg->next;
589: reg->next->prev = reg->prev;
590: kmem_free(reg);
591: }
592:
593: /*
594: * Initialize region
595: */
596: static void
597: region_init(struct region *reg)
598: {
599:
600: reg->next = reg->prev = reg;
601: reg->sh_next = reg->sh_prev = reg;
602: reg->addr = NULL;
603: reg->size = 0;
604: reg->flags = REG_FREE;
605: }
606:
607: #ifdef DEBUG
608: static void
609: vm_dump_one(task_t task)
610: {
611: vm_map_t map;
612: struct region *reg;
613: char flags[6];
614: size_t total = 0;
615:
616: printf("task=%x map=%x name=%s\n", task, task->map,
617: task->name ? task->name : "no name");
618: printf(" region virtual size flags\n");
619: printf(" -------- -------- -------- -----\n");
620:
621: map = task->map;
622: reg = &map->head;
623: do {
624: if (reg->flags != REG_FREE) {
625: strlcpy(flags, "-----", 6);
626: if (reg->flags & REG_READ)
627: flags[0] = 'R';
628: if (reg->flags & REG_WRITE)
629: flags[1] = 'W';
630: if (reg->flags & REG_EXEC)
631: flags[2] = 'E';
632: if (reg->flags & REG_SHARED)
633: flags[3] = 'S';
634: if (reg->flags & REG_MAPPED)
635: flags[4] = 'M';
636:
637: printf(" %08x %08x %08x %s\n", reg,
638: reg->addr, reg->size, flags);
639: if ((reg->flags & REG_MAPPED) == 0)
640: total += reg->size;
641: }
642: reg = reg->next;
643: } while (reg != &map->head); /* Process all regions */
644: printf(" *total=%dK bytes\n\n", total / 1024);
645: }
646:
647: void
648: vm_dump(void)
649: {
650: list_t n;
651: task_t task;
652:
653: printf("\nVM dump:\n");
654: n = list_first(&kern_task.link);
655: while (n != &kern_task.link) {
656: task = list_entry(n, struct task, link);
657: vm_dump_one(task);
658: n = list_next(n);
659: }
660: }
661: #endif
662:
663: void
664: vm_init(void)
665: {
666:
667: region_init(&kern_map.head);
668: kern_task.map = &kern_map;
669: }