Annotation of prex-old/sys/kern/device.c, Revision 1.1.1.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2007, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * device.c - device I/O support routines
32: */
33:
34: /*
35: * The device_* system calls are interfaces for user mode applications
36: * to access the specific device object which is handled by the related
37: * device driver. A device driver is an execution module different from
38: * a kernel on Prex. The routines in this file have the following role
39: * to handle the device I/O.
40: *
41: * - Manage the name space for device objects.
42: * - Forward user I/O requests to the drivers after checking parameters.
43: *
44: * The driver module(s) and kernel are dynamically linked at system boot.
45: */
46:
47: #include <kernel.h>
48: #include <irq.h>
49: #include <page.h>
50: #include <kmem.h>
51: #include <task.h>
52: #include <timer.h>
53: #include <sched.h>
54: #include <exception.h>
55: #include <vm.h>
56: #include <device.h>
57: #include <system.h>
58:
59: /* forward declarations */
60: static device_t device_create(struct devio *, const char *, int);
61: static int device_destroy(device_t);
62: static int device_broadcast(int, int);
63: static void machine_bootinfo(struct boot_info **);
64: static void machine__reset(void);
65: static void machine__idle(void);
66: static int task__capable(cap_t cap);
67: static void *phys__to_virt(void *);
68: static void *virt__to_phys(void *);
69:
70: #ifndef DEBUG
71: static void nosys(void);
72: #undef printk
73: #define printk nosys
74:
75: #undef panic
76: #define panic machine_reset
77: #endif
78:
/* Generic function-pointer type used for entries in the DKI table. */
typedef void (*dkifn_t)(void);

/* Cast any kernel service routine to the generic DKI entry type. */
#define DKIENT(func) (dkifn_t)(func)

/*
 * Driver-Kernel Interface (DKI)
 *
 * Jump table of kernel services exported to the dynamically
 * linked driver module. Drivers invoke these services by table
 * position, so the index of each entry is part of the driver
 * ABI: entries must never be reordered or removed, only
 * appended at the end.
 */
static const dkifn_t driver_service[] = {
	/* 0 */ DKIENT(device_create),
	/* 1 */ DKIENT(device_destroy),
	/* 2 */ DKIENT(device_broadcast),
	/* 3 */ DKIENT(umem_copyin),
	/* 4 */ DKIENT(umem_copyout),
	/* 5 */ DKIENT(umem_strnlen),
	/* 6 */ DKIENT(kmem_alloc),
	/* 7 */ DKIENT(kmem_free),
	/* 8 */ DKIENT(kmem_map),
	/* 9 */ DKIENT(page_alloc),
	/* 10 */ DKIENT(page_free),
	/* 11 */ DKIENT(page_reserve),
	/* 12 */ DKIENT(irq_attach),
	/* 13 */ DKIENT(irq_detach),
	/* 14 */ DKIENT(irq_lock),
	/* 15 */ DKIENT(irq_unlock),
	/* 16 */ DKIENT(timer_callout),
	/* 17 */ DKIENT(timer_stop),
	/* 18 */ DKIENT(timer_delay),
	/* 19 */ DKIENT(timer_count),
	/* 20 */ DKIENT(timer_hook),
	/* 21 */ DKIENT(sched_lock),
	/* 22 */ DKIENT(sched_unlock),
	/* 23 */ DKIENT(sched_tsleep),
	/* 24 */ DKIENT(sched_wakeup),
	/* 25 */ DKIENT(sched_dpc),
	/* 26 */ DKIENT(task__capable),
	/* 27 */ DKIENT(exception_post),
	/* 28 */ DKIENT(machine_bootinfo),
	/* 29 */ DKIENT(machine__reset),
	/* 30 */ DKIENT(machine__idle),
	/* 31 */ DKIENT(phys__to_virt),
	/* 32 */ DKIENT(virt__to_phys),
	/* 33 */ DKIENT(debug_attach),
	/* 34 */ DKIENT(debug_dump),
	/* 35 */ DKIENT(printk),	/* nosys stub in non-DEBUG builds */
	/* 36 */ DKIENT(panic),		/* machine_reset in non-DEBUG builds */
};
125:
126: static struct list device_list; /* list of the device objects */
127:
128: /*
129: * Increment reference count on an active device.
130: * This routine checks whether the specified device is valid.
131: * It returns 0 on success, or -1 on failure.
132: */
133: static int
134: device_hold(device_t dev)
135: {
136: int err = -1;
137:
138: sched_lock();
139: if (device_valid(dev)) {
140: dev->ref_count++;
141: err = 0;
142: }
143: sched_unlock();
144: return err;
145: }
146:
147: /*
148: * Decrement the reference count on a device. If the reference
149: * count becomes zero, we can release the resource for the
150: * target device. Assumes the device is already validated by caller.
151: */
152: static void
153: device_release(device_t dev)
154: {
155:
156: sched_lock();
157: if (--dev->ref_count == 0) {
158: list_remove(&dev->link);
159: kmem_free(dev);
160: }
161: sched_unlock();
162: }
163:
164: /*
165: * Look up a device object by device name.
166: * Return device ID on success, or NULL on failure.
167: * This must be called with scheduler locked.
168: */
169: static device_t
170: device_lookup(const char *name)
171: {
172: list_t head, n;
173: device_t dev;
174:
175: if (name == NULL)
176: return NULL;
177:
178: head = &device_list;
179: for (n = list_first(head); n != head; n = list_next(n)) {
180: dev = list_entry(n, struct device, link);
181: if (!strncmp(dev->name, name, MAXDEVNAME))
182: return dev;
183: }
184: return NULL;
185: }
186:
187: /*
188: * device_create - create new device object.
189: * @io: pointer to device I/O routines
190: * @name: string for device name
191: * @flags: flags for device object. (ex. block or character)
192: *
193: * A device object is created by the device driver to provide
194: * I/O services to applications.
195: * Returns device ID on success, or 0 on failure.
196: */
197: static device_t
198: device_create(struct devio *io, const char *name, int flags)
199: {
200: device_t dev;
201: size_t len;
202:
203: ASSERT(irq_level == 0);
204:
205: len = strnlen(name, MAXDEVNAME);
206: if (len == 0 || len >= MAXDEVNAME) /* Invalid name? */
207: return 0;
208:
209: sched_lock();
210: if ((dev = device_lookup(name)) != NULL) {
211: /*
212: * Error - the device name is already used.
213: */
214: sched_unlock();
215: return 0;
216: }
217: if ((dev = kmem_alloc(sizeof(struct device))) == NULL) {
218: sched_unlock();
219: return 0;
220: }
221: strlcpy(dev->name, name, len + 1);
222: dev->devio = io;
223: dev->flags = flags;
224: dev->ref_count = 1;
225: dev->magic = DEVICE_MAGIC;
226: list_insert(&device_list, &dev->link);
227: sched_unlock();
228: return dev;
229: }
230:
/*
 * Destroy a device object. If some other threads still refer to
 * the target device, the destroy operation will be pending
 * until its reference count becomes 0.
 */
236: static int
237: device_destroy(device_t dev)
238: {
239: int err = 0;
240:
241: ASSERT(irq_level == 0);
242:
243: sched_lock();
244: if (device_valid(dev))
245: device_release(dev);
246: else
247: err = ENODEV;
248: sched_unlock();
249: return err;
250: }
251:
252: /*
253: * device_open - open the specified device.
254: * @name: device name (null-terminated)
255: * @mode: open mode. (like O_RDONLY etc.)
256: * @devp: device handle of opened device to be returned.
257: *
258: * Even if the target driver does not have an open routine, this
259: * function does not return an error. By using this mechanism, an
260: * application can check whether the specific device exists or not.
261: * The open mode should be handled by an each device driver if it
262: * is needed.
263: */
264: int
265: device_open(const char *name, int mode, device_t *devp)
266: {
267: char str[MAXDEVNAME];
268: device_t dev;
269: size_t len;
270: int err = 0;
271:
272: if (!task_capable(CAP_DEVIO))
273: return EPERM;
274:
275: if (umem_strnlen(name, MAXDEVNAME, &len))
276: return EFAULT;
277: if (len == 0)
278: return ENOENT;
279: if (len >= MAXDEVNAME)
280: return ENAMETOOLONG;
281:
282: if (umem_copyin((void *)name, str, len + 1))
283: return EFAULT;
284:
285: sched_lock();
286: if ((dev = device_lookup(str)) == NULL) {
287: sched_unlock();
288: return ENXIO;
289: }
290: device_hold(dev);
291: sched_unlock();
292:
293: if (dev->devio->open != NULL)
294: err = (dev->devio->open)(dev, mode);
295:
296: if (!err)
297: err = umem_copyout(&dev, devp, sizeof(device_t));
298: device_release(dev);
299: return err;
300: }
301:
302: /*
303: * device_close - close a device.
304: *
305: * Even if the target driver does not have close routine,
306: * this function does not return any errors.
307: */
308: int
309: device_close(device_t dev)
310: {
311: int err = 0;
312:
313: if (!task_capable(CAP_DEVIO))
314: return EPERM;
315:
316: if (device_hold(dev))
317: return ENODEV;
318:
319: if (dev->devio->close != NULL)
320: err = (dev->devio->close)(dev);
321:
322: device_release(dev);
323: return err;
324: }
325:
326: /*
327: * device_read - read from a device.
328: * @dev: device id
329: * @buf: pointer to read buffer
330: * @nbyte: number of bytes to read. actual read count is set in return.
331: * @blkno: block number (for block device)
332: *
333: * Note: The size of one block is device dependent.
334: */
335: int
336: device_read(device_t dev, void *buf, size_t *nbyte, int blkno)
337: {
338: size_t count;
339: int err;
340:
341: if (!task_capable(CAP_DEVIO))
342: return EPERM;
343:
344: if (device_hold(dev))
345: return ENODEV;
346:
347: if (dev->devio->read == NULL) {
348: device_release(dev);
349: return EBADF;
350: }
351: if (umem_copyin(nbyte, &count, sizeof(u_long)) ||
352: vm_access(buf, count, VMA_WRITE)) {
353: device_release(dev);
354: return EFAULT;
355: }
356: err = (dev->devio->read)(dev, buf, &count, blkno);
357: if (err == 0)
358: err = umem_copyout(&count, nbyte, sizeof(u_long));
359: device_release(dev);
360: return err;
361: }
362:
363: /*
364: * device_write - write to a device.
365: * @dev: device id
366: * @buf: pointer to write buffer
367: * @nbyte: number of bytes to write. actual write count is set in return.
368: * @blkno: block number (for block device)
369: */
370: int
371: device_write(device_t dev, void *buf, size_t *nbyte, int blkno)
372: {
373: size_t count;
374: int err;
375:
376: if (!task_capable(CAP_DEVIO))
377: return EPERM;
378:
379: if (device_hold(dev))
380: return ENODEV;
381:
382: if (dev->devio->write == NULL) {
383: device_release(dev);
384: return EBADF;
385: }
386: if (umem_copyin(nbyte, &count, sizeof(u_long)) ||
387: vm_access(buf, count, VMA_READ)) {
388: device_release(dev);
389: return EFAULT;
390: }
391: err = (dev->devio->write)(dev, buf, &count, blkno);
392: if (err == 0)
393: err = umem_copyout(&count, nbyte, sizeof(u_long));
394:
395: device_release(dev);
396: return err;
397: }
398:
/*
 * device_ioctl - I/O control request.
 * @dev: device id
 * @cmd: command
 * @arg: argument
 *
 * A command and an argument are completely device dependent.
 * If argument type is pointer, the driver routine must validate
 * the pointer address.
 */
409: int
410: device_ioctl(device_t dev, int cmd, u_long arg)
411: {
412: int err;
413:
414: if (!task_capable(CAP_DEVIO))
415: return EPERM;
416:
417: if (device_hold(dev))
418: return ENODEV;
419:
420: err = EBADF;
421: if (dev->devio->ioctl != NULL)
422: err = (dev->devio->ioctl)(dev, cmd, arg);
423:
424: device_release(dev);
425: return err;
426: }
427:
428: /*
429: * device_broadcast - broadcast an event to all device objects.
430: * @event: event code
431: * @force: true to ignore the return value from driver.
432: *
433: * If force argument is true, a kernel will continue event
434: * notification even if some driver returns error. In this case,
435: * this routine returns EIO error if at least one driver returns
436: * an error.
437: *
438: * If force argument is false, a kernel stops the event processing
439: * when at least one driver returns an error. In this case,
440: * device_broadcast will return the error code which is returned
441: * by the driver.
442: */
443: static int
444: device_broadcast(int event, int force)
445: {
446: device_t dev;
447: list_t head, n;
448: int err, ret = 0;
449:
450: sched_lock();
451: head = &device_list;
452:
453: #ifdef DEBUG
454: printk("Broadcasting device event:%d\n", event);
455: #endif
456: for (n = list_first(head); n != head; n = list_next(n)) {
457: dev = list_entry(n, struct device, link);
458: if (dev->devio->event == NULL)
459: continue;
460:
461: err = (dev->devio->event)(event);
462: if (err) {
463: if (force)
464: ret = EIO;
465: else {
466: ret = err;
467: break;
468: }
469: }
470: }
471: sched_unlock();
472: return ret;
473: }
474:
475: /*
476: * Return device information (for devfs).
477: */
478: int
479: device_info(struct info_device *info)
480: {
481: u_long index, target = info->cookie;
482: device_t dev;
483: struct devio *io;
484: list_t head, n;
485:
486: sched_lock();
487:
488: index = 0;
489: head = &device_list;
490: for (n = list_first(head); n != head; n = list_next(n), index++) {
491: dev = list_entry(n, struct device, link);
492: io = dev->devio;
493: if (index == target)
494: break;
495: }
496: if (n == head) {
497: sched_unlock();
498: return ESRCH;
499: }
500: info->id = dev;
501: info->flags = dev->flags;
502: strlcpy(info->name, dev->name, MAXDEVNAME);
503:
504: sched_unlock();
505: return 0;
506: }
507:
#if defined(DEBUG) && defined(CONFIG_KDUMP)
/*
 * Dump a table of every registered device and its I/O entry
 * points to the kernel console. Debug builds only.
 */
void
device_dump(void)
{
	device_t dev;
	struct devio *io;
	list_t head, n;

	printk("Device dump:\n");
	printk(" device open close read write ioctl "
	       "event name\n");
	printk(" -------- -------- -------- -------- -------- -------- "
	       "-------- ------------\n");

	head = &device_list;
	for (n = list_first(head); n != head; n = list_next(n)) {
		dev = list_entry(n, struct device, link);
		io = dev->devio;
		printk(" %08x %08x %08x %08x %08x %08x %08x %s\n",
		       dev, io->open, io->close, io->read, io->write,
		       io->ioctl, io->event, dev->name);
	}
}
#endif
532:
#ifndef DEBUG
/*
 * Empty stub substituted for printk in non-DEBUG builds
 * (via the #define at the top of this file).
 */
static void
nosys(void)
{
}
#endif
539:
/*
 * Check the capability of the current task.
 *
 * Thin DKI wrapper so the driver module can call task_capable()
 * through the service table.
 */
static int
task__capable(cap_t cap)
{

	return task_capable(cap);
}
549:
/*
 * Return boot information
 *
 * Stores the kernel's boot_info pointer into *info for the
 * calling driver. info must be non-NULL (asserted).
 */
static void
machine_bootinfo(struct boot_info **info)
{
	ASSERT(info != NULL);

	*info = boot_info;
}
560:
/* DKI wrapper: reset the machine on behalf of a driver. */
static void
machine__reset(void)
{

	machine_reset();
}
567:
/* DKI wrapper: put the machine into its idle state. */
static void
machine__idle(void)
{

	machine_idle();
}
574:
/*
 * Address translation (physical -> virtual)
 *
 * DKI wrapper around the kernel's phys_to_virt() mapping.
 */
static void *
phys__to_virt(void *phys)
{

	return phys_to_virt(phys);
}
584:
/*
 * Address translation (virtual -> physical)
 *
 * DKI wrapper around the kernel's virt_to_phys() mapping.
 */
static void *
virt__to_phys(void *virt)
{

	return virt_to_phys(virt);
}
594:
595: /*
596: * Initialize device driver module.
597: */
598: void
599: device_init(void)
600: {
601: struct module *m;
602: void (*drv_entry)(const dkifn_t *);
603:
604: list_init(&device_list);
605:
606: m = &boot_info->driver;
607: if (m == NULL)
608: return;
609:
610: drv_entry = (void (*)(const dkifn_t *))m->entry;
611: if (drv_entry == NULL)
612: return;
613: /*
614: * Call all driver initialization functions.
615: */
616: drv_entry(driver_service);
617: }
CVSweb