/* Annotation of prex-old/sys/mem/vm_nommu.c, Revision 1.1 */
1.1 ! nbrk 1: /*-
! 2: * Copyright (c) 2005-2006, Kohsuke Ohtani
! 3: * All rights reserved.
! 4: *
! 5: * Redistribution and use in source and binary forms, with or without
! 6: * modification, are permitted provided that the following conditions
! 7: * are met:
! 8: * 1. Redistributions of source code must retain the above copyright
! 9: * notice, this list of conditions and the following disclaimer.
! 10: * 2. Redistributions in binary form must reproduce the above copyright
! 11: * notice, this list of conditions and the following disclaimer in the
! 12: * documentation and/or other materials provided with the distribution.
! 13: * 3. Neither the name of the author nor the names of any co-contributors
! 14: * may be used to endorse or promote products derived from this software
! 15: * without specific prior written permission.
! 16: *
! 17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
! 18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
! 19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
! 20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
! 21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
! 22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
! 23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
! 24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
! 25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
! 26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
! 27: * SUCH DAMAGE.
! 28: */
! 29:
! 30: /*
! 31: * vm_nommu.c - virtual memory functions for no MMU systems
! 32: */
! 33:
! 34: /*
! 35: * When the platform does not support memory management unit (MMU)
! 36: * all virtual memories are mapped to the physical memory. So, the
! 37: * memory space is shared among all tasks and kernel.
! 38: *
! 39: * Important: The lists of regions are not sorted by address.
! 40: */
! 41:
! 42: #include <kernel.h>
! 43: #include <kmem.h>
! 44: #include <thread.h>
! 45: #include <page.h>
! 46: #include <task.h>
! 47: #include <sched.h>
! 48: #include <vm.h>
! 49:
! 50: #ifdef CONFIG_VMTRACE
! 51: static void vm_error(const char *, int);
! 52: #define LOG(x) printk x
! 53: #define CHK(fn,x) do { if (x) vm_error(fn, x); } while (0)
! 54: #else
! 55: #define LOG(x)
! 56: #define CHK(fn,x)
! 57: #endif
! 58:
! 59: /* forward declarations */
! 60: static struct region *region_create(struct region *, u_long, size_t);
! 61: static void region_delete(struct region *, struct region *);
! 62: static struct region *region_find(struct region *, u_long, size_t);
! 63: static void region_free(struct region *, struct region *);
! 64: static void region_init(struct region *);
! 65: static int do_allocate(vm_map_t, void **, size_t, int);
! 66: static int do_free(vm_map_t, void *);
! 67: static int do_attribute(vm_map_t, void *, int);
! 68: static int do_map(vm_map_t, void *, size_t, void **);
! 69:
! 70: /* vm mapping for kernel task */
! 71: static struct vm_map kern_map;
! 72:
! 73: /**
! 74: * vm_allocate - allocate zero-filled memory for specified address
! 75: * @task: task id to allocate memory
! 76: * @addr: required address. set an allocated address in return.
! 77: * @size: allocation size
! 78: * @anywhere: if it is true, the "addr" argument will be ignored.
! 79: * In this case, the address of free space will be found
! 80: * automatically.
! 81: *
! 82: * The allocated area has writable, user-access attribute by default.
! 83: * The "addr" and "size" argument will be adjusted to page boundary.
! 84: */
! 85: int
! 86: vm_allocate(task_t task, void **addr, size_t size, int anywhere)
! 87: {
! 88: int err;
! 89: void *uaddr;
! 90:
! 91: LOG(("vm_aloc: task=%s addr=%x size=%x anywhere=%d\n",
! 92: task->name ? task->name : "no name", *addr, size, anywhere));
! 93:
! 94: sched_lock();
! 95:
! 96: if (!task_valid(task)) {
! 97: err = ESRCH;
! 98: } else if (task != cur_task() && !task_capable(CAP_MEMORY)) {
! 99: err = EPERM;
! 100: } else if (umem_copyin(addr, &uaddr, sizeof(void *))) {
! 101: err = EFAULT;
! 102: } else if (anywhere == 0 && !user_area(*addr)) {
! 103: err = EACCES;
! 104: } else {
! 105: err = do_allocate(task->map, &uaddr, size, anywhere);
! 106: if (err == 0) {
! 107: if (umem_copyout(&uaddr, addr, sizeof(void *)))
! 108: err = EFAULT;
! 109: }
! 110: }
! 111: sched_unlock();
! 112: CHK("vm_allocate", err);
! 113: return err;
! 114: }
! 115:
/*
 * do_allocate - worker for vm_allocate(); no access checks here.
 *
 * @map:      target VM map (per-task region list)
 * @addr:     in: requested address (ignored when anywhere != 0),
 *            out: page-truncated address actually allocated
 * @size:     requested size; rounded up to a whole page multiple
 * @anywhere: nonzero to let the page allocator choose the address
 *
 * Returns 0 on success, EINVAL or ENOMEM on failure.
 * Caller holds the scheduler lock.
 */
static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	u_long start, end;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region, and reserve pages for it.
	 */
	if (anywhere) {
		/* Any free physical range will do; page_alloc picks it. */
		size = (size_t)PAGE_ALIGN(size);
		if ((start = (u_long)page_alloc(size)) == 0)
			return ENOMEM;
	} else {
		/* Fixed request: round out to page boundaries and reserve
		 * exactly that physical range. */
		start = PAGE_TRUNC(*addr);
		end = PAGE_ALIGN(start + size);
		size = (size_t)(end - start);

		if (page_reserve((void *)start, size))
			return EINVAL;
	}
	/* Record the allocation in the task's region list. */
	reg = region_create(&map->head, start, size);
	if (reg == NULL) {
		/* Roll back the page allocation/reservation above. */
		page_free((void *)start, size);
		return ENOMEM;
	}
	reg->flags = REG_READ | REG_WRITE;

	/* Zero fill */
	memset((void *)start, 0, size);
	*addr = (void *)reg->addr;
	return 0;
}
! 152:
! 153: /*
! 154: * Deallocate memory region for specified address.
! 155: *
! 156: * The "addr" argument points to a memory region previously
! 157: * allocated through a call to vm_allocate() or vm_map(). The number
! 158: * of bytes freed is the number of bytes of the allocated region.
! 159: * If one of the region of previous and next are free, it combines
! 160: * with them, and larger free region is created.
! 161: */
! 162: int
! 163: vm_free(task_t task, void *addr)
! 164: {
! 165: int err;
! 166:
! 167: LOG(("vm_free: task=%s addr=%x\n",
! 168: task->name ? task->name : "no name", addr));
! 169:
! 170: sched_lock();
! 171: if (!task_valid(task)) {
! 172: err = ESRCH;
! 173: } else if (task != cur_task() && !task_capable(CAP_MEMORY)) {
! 174: err = EPERM;
! 175: } else if (!user_area(addr)) {
! 176: err = EFAULT;
! 177: } else {
! 178: err = do_free(task->map, addr);
! 179: }
! 180: sched_unlock();
! 181: CHK("vm_free", err);
! 182: return err;
! 183: }
! 184:
/*
 * do_free - worker for vm_free(); no access checks here.
 *
 * @addr is truncated to its page; it must match the exact start of an
 * allocated (non-free) region, otherwise EINVAL is returned.
 * Caller holds the scheduler lock.
 */
static int
do_free(vm_map_t map, void *addr)
{
	struct region *reg;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, (u_long)addr, 1);
	if (reg == NULL || reg->addr != (u_long)addr ||
	    (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */

	/*
	 * Free pages if it is not shared and mapped.
	 * (Shared/mapped pages are still referenced by another region.)
	 */
	if (!(reg->flags & REG_SHARED) && !(reg->flags & REG_MAPPED))
		page_free((void *)reg->addr, reg->size);

	region_free(&map->head, reg);
	return 0;
}
! 209:
! 210: /*
! 211: * Change attribute of specified virtual address.
! 212: *
! 213: * The "addr" argument points to a memory region previously allocated
! 214: * through a call to vm_allocate(). The attribute type can be chosen
! 215: * a combination of VMA_READ, VMA_WRITE.
! 216: * Note: VMA_EXEC is not supported, yet.
! 217: */
! 218: int
! 219: vm_attribute(task_t task, void *addr, int attr)
! 220: {
! 221: int err;
! 222:
! 223: LOG(("vm_attr: task=%s addr=%x attr=%x\n",
! 224: task->name ? task->name : "no name", addr, attr));
! 225:
! 226: sched_lock();
! 227: if (attr == 0 || attr & ~(VMA_READ | VMA_WRITE)) {
! 228: err = EINVAL;
! 229: } else if (!task_valid(task)) {
! 230: err = ESRCH;
! 231: } else if (task != cur_task() && !task_capable(CAP_MEMORY)) {
! 232: err = EPERM;
! 233: } else if (!user_area(addr)) {
! 234: err = EFAULT;
! 235: } else {
! 236: err = do_attribute(task->map, addr, attr);
! 237: }
! 238: sched_unlock();
! 239: CHK("vm_attribute", err);
! 240: return err;
! 241: }
! 242:
/*
 * do_attribute - worker for vm_attribute(); no access checks here.
 *
 * @addr is truncated to its page and must match the exact start of an
 * allocated region.  Shared or mapped regions can not be changed.
 * Caller holds the scheduler lock.
 */
static int
do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *reg;
	int new_flags = 0;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, (u_long)addr, 1);
	if (reg == NULL || reg->addr != (u_long)addr ||
	    (reg->flags & REG_FREE)) {
		return EINVAL;	/* not allocated */
	}
	/*
	 * The attribute of the mapped or shared region can not be changed.
	 */
	if ((reg->flags & REG_MAPPED) || (reg->flags & REG_SHARED))
		return EINVAL;

	/*
	 * Compute the new flags; regions are always readable, so the
	 * only transition that matters is adding/removing REG_WRITE.
	 */
	if (reg->flags & REG_WRITE) {
		if (!(attr & VMA_WRITE))
			new_flags = REG_READ;
	} else {
		if (attr & VMA_WRITE)
			new_flags = REG_READ | REG_WRITE;
	}
	if (new_flags == 0)
		return 0;	/* same attribute */
	reg->flags = new_flags;
	return 0;
}
! 280:
! 281: /**
! 282: * vm_map - map another task's memory to current task.
! 283: * @target: memory owner
! 284: * @addr: target address
! 285: * @size: map size
! 286: * @alloc: map address returned
! 287: *
! 288: * Note: This routine does not support mapping to the specific address.
! 289: */
! 290: int
! 291: vm_map(task_t target, void *addr, size_t size, void **alloc)
! 292: {
! 293: int err;
! 294:
! 295: LOG(("vm_map : task=%s addr=%x size=%x\n",
! 296: target->name ? target->name : "no name", addr, size));
! 297:
! 298: sched_lock();
! 299: if (!task_valid(target)) {
! 300: err = ESRCH;
! 301: } else if (target == cur_task()) {
! 302: err = EINVAL;
! 303: } else if (!task_capable(CAP_MEMORY)) {
! 304: err = EPERM;
! 305: } else if (!user_area(addr)) {
! 306: err = EFAULT;
! 307: } else {
! 308: err = do_map(target->map, addr, size, alloc);
! 309: }
! 310: sched_unlock();
! 311: CHK("vm_map", err);
! 312: return err;
! 313: }
! 314:
/*
 * do_map - worker for vm_map(); no access checks here.
 *
 * On a no-MMU system all memory is identity mapped, so "mapping" is
 * just creating a region entry in the current task's map that covers
 * the target pages; the original address is returned unchanged.
 * Caller holds the scheduler lock.
 */
static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	vm_map_t curmap;
	u_long start, end;
	struct region *reg, *tgt;
	void *tmp;

	if (size == 0)
		return EINVAL;

	/* check fault: probe the output buffer before doing any work */
	tmp = NULL;
	if (umem_copyout(&tmp, alloc, sizeof(void *)))
		return EFAULT;

	start = PAGE_TRUNC(addr);
	end = PAGE_ALIGN((u_long)addr + size);
	size = (size_t)(end - start);

	/*
	 * Find the region that includes target address
	 */
	reg = region_find(&map->head, start, size);
	if (reg == NULL || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */
	tgt = reg;

	/*
	 * Create new region to map
	 */
	curmap = cur_task()->map;
	reg = region_create(&curmap->head, start, size);
	if (reg == NULL)
		return ENOMEM;
	reg->flags = tgt->flags | REG_MAPPED;

	/* Return value is the caller's original (unaligned) address.
	 * Result of this copyout is ignored; the buffer was already
	 * probed successfully above. */
	umem_copyout(&addr, alloc, sizeof(void *));
	return 0;
}
! 355:
! 356: /*
! 357: * Create new virtual memory space.
! 358: * No memory is inherited.
! 359: * Must be called with scheduler locked.
! 360: */
! 361: vm_map_t
! 362: vm_create(void)
! 363: {
! 364: vm_map_t map;
! 365:
! 366: /* Allocate new map structure */
! 367: if ((map = kmem_alloc(sizeof(struct vm_map))) == NULL)
! 368: return NULL;
! 369:
! 370: map->ref_count = 1;
! 371: region_init(&map->head);
! 372: return map;
! 373: }
! 374:
/*
 * vm_terminate - drop a reference to a virtual memory space and
 * destroy it when the last reference goes away.
 * This is called when task is terminated.
 */
void
vm_terminate(vm_map_t map)
{
	struct region *reg, *tmp;

	/* Still referenced elsewhere; nothing to tear down yet. */
	if (--map->ref_count >= 1)
		return;

	sched_lock();
	reg = &map->head;
	do {
		if (reg->flags != REG_FREE) {
			/* Free region if it is not shared and mapped */
			if (!(reg->flags & REG_SHARED) &&
			    !(reg->flags & REG_MAPPED)) {
				page_free((void *)reg->addr, reg->size);
			}
		}
		/* Advance before deleting: region_delete frees the node
		 * (except for the embedded list head). */
		tmp = reg;
		reg = reg->next;
		region_delete(&map->head, tmp);
	} while (reg != &map->head);

	kmem_free(map);
	sched_unlock();
}
! 405:
! 406: /*
! 407: * Duplicate specified virtual memory space.
! 408: */
! 409: vm_map_t
! 410: vm_fork(vm_map_t org_map)
! 411: {
! 412: /*
! 413: * This function is not supported with no MMU system.
! 414: */
! 415: return NULL;
! 416: }
! 417:
! 418: /*
! 419: * Switch VM mapping.
! 420: */
! 421: void
! 422: vm_switch(vm_map_t map)
! 423: {
! 424: }
! 425:
! 426: /*
! 427: * Increment reference count of VM mapping.
! 428: */
! 429: int
! 430: vm_reference(vm_map_t map)
! 431: {
! 432:
! 433: map->ref_count++;
! 434: return 0;
! 435: }
! 436:
! 437: /*
! 438: * Translate virtual address of current task to physical address.
! 439: * Returns physical address on success, or NULL if no mapped memory.
! 440: */
! 441: void *
! 442: vm_translate(void *addr, size_t size)
! 443: {
! 444: return addr;
! 445: }
! 446:
! 447: /*
! 448: * Check if specified access can be allowed.
! 449: * return 0 on success, or EFAULT on failure.
! 450: */
! 451: int
! 452: vm_access(void *addr, size_t size, int type)
! 453: {
! 454: u_long start, end;
! 455: int err;
! 456: char tmp;
! 457:
! 458: ASSERT(size);
! 459: start = (u_long)addr;
! 460: end = (u_long)addr + size - 1;
! 461: if ((err = umem_copyin((void *)start, &tmp, 1)))
! 462: return EFAULT;
! 463: if (type == VMA_WRITE) {
! 464: if ((err = umem_copyout(&tmp, (void *)start, 1)))
! 465: return EFAULT;
! 466: }
! 467: if ((err = umem_copyin((void *)end, &tmp, 1)))
! 468: return EFAULT;
! 469: if (type == VMA_WRITE) {
! 470: if ((err = umem_copyout(&tmp, (void *)end, 1)))
! 471: return EFAULT;
! 472: }
! 473: return 0;
! 474: }
! 475:
! 476: /*
! 477: * Reserve specific area for boot tasks.
! 478: */
! 479: static int
! 480: do_reserve(vm_map_t map, void **addr, size_t size)
! 481: {
! 482: struct region *reg;
! 483: u_long start, end;
! 484:
! 485: if (size == 0)
! 486: return EINVAL;
! 487:
! 488: start = PAGE_TRUNC(*addr);
! 489: end = PAGE_ALIGN(start + size);
! 490: size = (size_t)(end - start);
! 491:
! 492: reg = region_create(&map->head, start, size);
! 493: if (reg == NULL)
! 494: return ENOMEM;
! 495: reg->flags = REG_READ | REG_WRITE;
! 496: *addr = (void *)reg->addr;
! 497: return 0;
! 498: }
! 499:
/*
 * vm_load - set up the task image for a boot task. (NOMMU version)
 * Return 0 on success, -1 on failure.
 *
 * Note: We assume that the task images are already copied to
 * the proper address by a boot loader, so only the address ranges
 * are reserved here and BSS is cleared.
 */
int
vm_load(vm_map_t map, struct module *m, void **stack)
{
	void *base;
	size_t size;

	printk("Loading task:\'%s\'\n", m->name);

	/*
	 * Reserve text & data area.
	 * Assumes text, data and bss are laid out contiguously starting
	 * at m->text — TODO confirm against the boot loader layout.
	 */
	base = (void *)m->text;
	size = m->textsz + m->datasz + m->bsssz;
	if (do_reserve(map, &base, size))
		return -1;
	/* BSS is not in the image; clear it explicitly. */
	if (m->bsssz != 0)
		memset((void *)(m->data + m->datasz), 0, m->bsssz);

	/*
	 * Create stack
	 */
	if (do_allocate(map, stack, USTACK_SIZE, 1))
		return -1;
	return 0;
}
! 532:
! 533: /*
! 534: * Create new free region after the specified region.
! 535: * Returns region on success, or NULL on failure.
! 536: */
! 537: static struct region *
! 538: region_create(struct region *prev, u_long addr, size_t size)
! 539: {
! 540: struct region *reg;
! 541:
! 542: if ((reg = kmem_alloc(sizeof(struct region))) == NULL)
! 543: return NULL;
! 544:
! 545: reg->addr = addr;
! 546: reg->size = size;
! 547: reg->flags = REG_FREE;
! 548: reg->sh_next = reg->sh_prev = reg;
! 549:
! 550: reg->next = prev->next;
! 551: reg->prev = prev;
! 552: prev->next->prev = reg;
! 553: prev->next = reg;
! 554: return reg;
! 555: }
! 556:
/*
 * region_delete - release a region during map teardown.
 *
 * Unlinks the region from its shared list (if any) and frees the
 * node unless it is the list head, which is embedded in struct
 * vm_map and freed together with the map.
 * Note: the main prev/next links are NOT unlinked here; this is
 * only used while destroying the whole list in vm_terminate().
 */
static void
region_delete(struct region *head, struct region *reg)
{

	/*
	 * If it is shared region, unlink from shared list.
	 */
	if (reg->flags & REG_SHARED) {
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		/* Only one sharer left: it is no longer shared. */
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
	}
	if (head != reg)
		kmem_free(reg);
}
! 576:
! 577: /*
! 578: * Find the region at the specified area.
! 579: */
! 580: static struct region *
! 581: region_find(struct region *head, u_long addr, size_t size)
! 582: {
! 583: struct region *reg;
! 584:
! 585: reg = head;
! 586: do {
! 587: if (reg->addr <= addr &&
! 588: reg->addr + reg->size >= addr + size) {
! 589: return reg;
! 590: }
! 591: reg = reg->next;
! 592: } while (reg != head);
! 593: return NULL;
! 594: }
! 595:
/*
 * region_free - remove a region from its map and free the node.
 *
 * Unlinks from the shared list (if shared) and from the main region
 * list, then releases the node.  Must not be called on a region that
 * is already free.
 */
static void
region_free(struct region *head, struct region *reg)
{
	ASSERT(reg->flags != REG_FREE);

	/*
	 * If it is shared region, unlink from shared list.
	 */
	if (reg->flags & REG_SHARED) {
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		/* Only one sharer left: it is no longer shared. */
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
	}
	reg->prev->next = reg->next;
	reg->next->prev = reg->prev;
	kmem_free(reg);
}
! 617:
! 618: /*
! 619: * Initialize region
! 620: */
! 621: static void
! 622: region_init(struct region *reg)
! 623: {
! 624:
! 625: reg->next = reg->prev = reg;
! 626: reg->sh_next = reg->sh_prev = reg;
! 627: reg->addr = 0;
! 628: reg->size = 0;
! 629: reg->flags = REG_FREE;
! 630: }
! 631:
! 632: #if defined(DEBUG) && defined(CONFIG_KDUMP)
! 633: void
! 634: vm_dump_one(task_t task)
! 635: {
! 636: vm_map_t map;
! 637: struct region *reg;
! 638: char flags[6];
! 639: u_long total = 0;
! 640:
! 641: printk("task=%x map=%x name=%s\n", task, task->map,
! 642: task->name ? task->name : "no name");
! 643: printk(" region virtual size flags\n");
! 644: printk(" -------- -------- -------- -----\n");
! 645:
! 646: map = task->map;
! 647: reg = &map->head;
! 648: do {
! 649: if (reg->flags != REG_FREE) {
! 650: strlcpy(flags, "-----", 6);
! 651: if (reg->flags & REG_READ)
! 652: flags[0] = 'R';
! 653: if (reg->flags & REG_WRITE)
! 654: flags[1] = 'W';
! 655: if (reg->flags & REG_EXEC)
! 656: flags[2] = 'E';
! 657: if (reg->flags & REG_SHARED)
! 658: flags[3] = 'S';
! 659: if (reg->flags & REG_MAPPED)
! 660: flags[4] = 'M';
! 661:
! 662: printk(" %08x %08x %08x %s\n", reg,
! 663: reg->addr, reg->size, flags);
! 664: if ((reg->flags & REG_MAPPED) == 0)
! 665: total += reg->size;
! 666: }
! 667: reg = reg->next;
! 668: } while (reg != &map->head); /* Process all regions */
! 669: printk(" *total=%dK bytes\n\n", total / 1024);
! 670: }
! 671:
! 672: void
! 673: vm_dump(void)
! 674: {
! 675: list_t n;
! 676: task_t task;
! 677:
! 678: printk("\nVM dump:\n");
! 679: n = &kern_task.link;
! 680: do {
! 681: task = list_entry(n, struct task, link);
! 682: ASSERT(task_valid(task));
! 683: vm_dump_one(task);
! 684: n = list_next(n);
! 685: } while (n != &kern_task.link);
! 686: }
! 687: #endif
! 688:
! 689:
! 690: #ifdef CONFIG_VMTRACE
/*
 * vm_error - log a failing VM call for the CONFIG_VMTRACE CHK() macro.
 */
static void
vm_error(const char *funcname, int error)
{
	printk("Error!!: %s returns err=%x\n", funcname, error);
}
! 696: #endif
! 697:
! 698: void
! 699: vm_init(void)
! 700: {
! 701:
! 702: region_init(&kern_map.head);
! 703: kern_task.map = &kern_map;
! 704: }
/* CVSweb */