/*	$OpenBSD: mpi.c,v 1.86 2007/06/12 19:29:23 thib Exp $ */

/*
 * Copyright (c) 2005, 2006 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/mpireg.h>
#include <dev/ic/mpivar.h>

#ifdef MPI_DEBUG
uint32_t	mpi_debug = 0
/*		| MPI_D_CMD */
/*		| MPI_D_INTR */
/*		| MPI_D_MISC */
/*		| MPI_D_DMA */
/*		| MPI_D_IOCTL */
/*		| MPI_D_RW */
/*		| MPI_D_MEM */
/*		| MPI_D_CCB */
/*		| MPI_D_PPR */
/*		| MPI_D_RAID */
/*		| MPI_D_EVT */
		;
#endif

struct cfdriver mpi_cd = {
	NULL, "mpi", DV_DULL
};

int			mpi_scsi_cmd(struct scsi_xfer *);
void			mpi_scsi_cmd_done(struct mpi_ccb *);
void			mpi_minphys(struct buf *bp);
int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
			    int, struct proc *);

struct scsi_adapter mpi_switch = {
	mpi_scsi_cmd, mpi_minphys, NULL, NULL, mpi_scsi_ioctl
};

struct scsi_device mpi_dev = {
	NULL, NULL, NULL, NULL
};

struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
void			mpi_dmamem_free(struct mpi_softc *,
			    struct mpi_dmamem *);
int			mpi_alloc_ccbs(struct mpi_softc *);
struct mpi_ccb		*mpi_get_ccb(struct mpi_softc *);
void			mpi_put_ccb(struct mpi_softc *, struct mpi_ccb *);
int			mpi_alloc_replies(struct mpi_softc *);
void			mpi_push_replies(struct mpi_softc *);

void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
int			mpi_complete(struct mpi_softc *, struct mpi_ccb *, int);
int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
int			mpi_reply(struct mpi_softc *, u_int32_t);

void			mpi_squash_ppr(struct mpi_softc *);
void			mpi_run_ppr(struct mpi_softc *);
int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
			    struct mpi_cfg_raid_physdisk *, int, int, int);
int			mpi_inq(struct mpi_softc *, u_int16_t, int);

void			mpi_timeout_xs(void *);
int			mpi_load_xs(struct mpi_ccb *);

u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);
int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);

int			mpi_init(struct mpi_softc *);
int			mpi_reset_soft(struct mpi_softc *);
int			mpi_reset_hard(struct mpi_softc *);

int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
int			mpi_handshake_recv_dword(struct mpi_softc *,
			    u_int32_t *);
int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);

void			mpi_empty_done(struct mpi_ccb *);

int			mpi_iocinit(struct mpi_softc *);
int			mpi_iocfacts(struct mpi_softc *);
int			mpi_portfacts(struct mpi_softc *);
int			mpi_portenable(struct mpi_softc *);
void			mpi_get_raid(struct mpi_softc *);
int			mpi_fwupload(struct mpi_softc *);

int			mpi_eventnotify(struct mpi_softc *);
void			mpi_eventnotify_done(struct mpi_ccb *);
void			mpi_eventack(struct mpi_softc *,
			    struct mpi_msg_event_reply *);
void			mpi_eventack_done(struct mpi_ccb *);
void			mpi_evt_sas(void *, void *);

int			mpi_cfg_header(struct mpi_softc *, u_int8_t, u_int8_t,
			    u_int32_t, struct mpi_cfg_hdr *);
int			mpi_cfg_page(struct mpi_softc *, u_int32_t,
			    struct mpi_cfg_hdr *, int, void *, size_t);

#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
#define sizeofa(s)		(sizeof(s) / sizeof((s)[0]))

#define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
#define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
#define mpi_read_intr(s)	mpi_read((s), MPI_INTR_STATUS)
#define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
#define mpi_pop_reply(s)	mpi_read((s), MPI_REPLY_QUEUE)
#define mpi_push_reply(s, v)	mpi_write((s), MPI_REPLY_QUEUE, (v))

#define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_DOORBELL, 0)
#define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_IOCDOORBELL, 0)

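/*
 * Bring the IOC up from scratch.  The ordering matters: the IOC has to be
 * reset and queried for its facts (which size the ccb and reply pools)
 * before the pools are allocated, and iocinit has to be sent before the
 * doorbell will report the operational state.  Interrupts stay masked
 * until attach is done, so all of the early commands complete by polling.
 */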
int
mpi_attach(struct mpi_softc *sc)
{
	struct scsibus_attach_args	saa;
	struct mpi_ccb			*ccb;

	printf("\n");

	/* disable interrupts */
	mpi_write(sc, MPI_INTR_MASK,
	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);

	if (mpi_init(sc) != 0) {
		printf("%s: unable to initialise\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_iocfacts(sc) != 0) {
		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_alloc_ccbs(sc) != 0) {
		/* error already printed */
		return (1);
	}

	if (mpi_alloc_replies(sc) != 0) {
		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
		goto free_ccbs;
	}

	if (mpi_iocinit(sc) != 0) {
		printf("%s: unable to send iocinit\n", DEVNAME(sc));
		goto free_ccbs;
	}

	/* spin until we're operational */
	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_OPER) != 0) {
		printf("%s: state: 0x%08x\n", DEVNAME(sc),
		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
		printf("%s: operational state timeout\n", DEVNAME(sc));
		goto free_ccbs;
	}

	mpi_push_replies(sc);

	if (mpi_portfacts(sc) != 0) {
		printf("%s: unable to get portfacts\n", DEVNAME(sc));
		goto free_replies;
	}

#ifdef notyet
	if (mpi_eventnotify(sc) != 0) {
		printf("%s: unable to enable event notification\n",
		    DEVNAME(sc));
		goto free_replies;
	}
#endif

	if (mpi_portenable(sc) != 0) {
		printf("%s: unable to enable port\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_fwupload(sc) != 0) {
		printf("%s: unable to upload firmware\n", DEVNAME(sc));
		goto free_replies;
	}

	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
		mpi_squash_ppr(sc);

	/* we should be good to go now, attach scsibus */
	sc->sc_link.device = &mpi_dev;
	sc->sc_link.adapter = &mpi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = sc->sc_target;
	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
	sc->sc_link.openings = sc->sc_maxcmds / sc->sc_buswidth;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	/* config_found() returns the scsibus attached to us */
	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);

	/* get raid pages */
	mpi_get_raid(sc);

	/* do domain validation */
	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
		mpi_run_ppr(sc);

	/* enable interrupts */
	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);

	return (0);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
	    0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
	mpi_dmamem_free(sc, sc->sc_replies);
free_ccbs:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpi_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF);

	return (1);
}

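/*
 * Clear the requested transfer settings in SPI device page 1 for every
 * target so the bus starts out async and narrow.  mpi_run_ppr() then
 * negotiates each target back up from this known state.
 */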
void
mpi_squash_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_dev_pg1	page;
	int				i;

	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));

	for (i = 0; i < sc->sc_buswidth; i++) {
		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
		    1, i, &hdr) != 0)
			return;

		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
			return;

		DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x "
		    "req_offset: 0x%02x req_period: 0x%02x "
		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
		    page.req_params1, page.req_offset, page.req_period,
		    page.req_params2, letoh32(page.configuration));

		page.req_params1 = 0x0;
		page.req_offset = 0x0;
		page.req_period = 0x0;
		page.req_params2 = 0x0;
		page.configuration = htole32(0x0);

		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
			return;
	}
}

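/*
 * Do domain validation: negotiate transfer settings for every real target
 * on the SPI port, and then for every RAID physical disk hidden behind a
 * volume.  EAGAIN from mpi_ppr() means the device rejected the requested
 * rate, so the next slower set of parameters is tried.
 */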
void
mpi_run_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_port_pg0	port_pg;
	struct mpi_cfg_ioc_pg3		*physdisk_pg;
	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
	size_t				pagelen;
	struct scsi_link		*link;
	int				i, tries;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
	    &hdr) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
		    DEVNAME(sc));
		return;
	}

	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
		    DEVNAME(sc));
		return;
	}

	for (i = 0; i < sc->sc_buswidth; i++) {
		link = sc->sc_scsibus->sc_link[i][0];
		if (link == NULL)
			continue;

		/* do not ppr volumes */
		if (link->flags & SDEV_VIRTUAL)
			continue;

		tries = 0;
		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
		    port_pg.max_offset, tries) == EAGAIN)
			tries++;
	}

	if ((sc->sc_flags & MPI_F_RAID) == 0)
		return;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
	    &hdr) != 0) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "fetch ioc pg 3 header\n", DEVNAME(sc));
		return;
	}

	pagelen = hdr.page_length * 4; /* dwords to bytes */
	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
	if (physdisk_pg == NULL) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "allocate ioc pg 3\n", DEVNAME(sc));
		return;
	}
	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);

	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "fetch ioc page 3\n", DEVNAME(sc));
		goto out;
	}

	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc),
	    physdisk_pg->no_phys_disks);

	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
		physdisk = &physdisk_list[i];

		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d "
		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
		    physdisk->phys_disk_num);

		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
			continue;

		tries = 0;
		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
		    port_pg.max_offset, tries) == EAGAIN)
			tries++;
	}

out:
	free(physdisk_pg, M_TEMP);
}

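/*
 * Negotiate transfer settings with one device: write the requested
 * period/offset into SPI device page 1, force a renegotiation on the wire
 * with an INQUIRY, then read the negotiated result back out of page 0.
 * "try" steps down a ladder of U320, U160 and U80 parameters; a fourth
 * attempt gives up with EIO.
 */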
int
mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
    struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
{
	struct mpi_cfg_hdr		hdr0, hdr1;
	struct mpi_cfg_spi_dev_pg0	pg0;
	struct mpi_cfg_spi_dev_pg1	pg1;
	u_int32_t			address;
	int				id;
	int				raid = 0;

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
	    (link != NULL) ? link->quirks : 0);

	if (try >= 3)
		return (EIO);

	if (physdisk == NULL) {
		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
			return (EIO);

		address = link->target;
		id = link->target;
	} else {
		raid = 1;
		address = (physdisk->phys_disk_bus << 8) |
		    (physdisk->phys_disk_id);
		id = physdisk->phys_disk_num;
	}

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
	    address, &hdr0) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
		    DEVNAME(sc));
		return (EIO);
	}

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
	    address, &hdr1) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

#ifdef MPI_DEBUG
	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
#endif

	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	pg1.req_params1 = 0;
	pg1.req_offset = offset;
	pg1.req_period = period;
	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;

	if (raid || !(link->quirks & SDEV_NOSYNC)) {
		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;

		switch (try) {
		case 0: /* U320 */
			break;
		case 1: /* U160 */
			pg1.req_period = 0x09;
			break;
		case 2: /* U80 */
			pg1.req_period = 0x0a;
			break;
		}

		if (pg1.req_period < 0x09) {
			/* Ultra320: enable QAS & PACKETIZED */
			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
		}
		if (pg1.req_period < 0xa) {
			/* >= Ultra160: enable dual xfers */
			pg1.req_params1 |=
			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
		}
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	if (mpi_inq(sc, id, raid) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
		    "target %d\n", DEVNAME(sc), id);
		return (EIO);
	}

	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
		    "inquiry\n", DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));

	if (!(letoh32(pg0.information) & 0x07) && (try == 0)) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
		    DEVNAME(sc));
		return (EAGAIN);
	}

	if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
		    DEVNAME(sc));
		return (EAGAIN);
	}

	if (letoh32(pg0.information) & 0x0e) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
		    DEVNAME(sc), letoh32(pg0.information));
		return (EAGAIN);
	}

	switch (pg0.neg_period) {
	case 0x08:
		period = 160;
		break;
	case 0x09:
		period = 80;
		break;
	case 0x0a:
		period = 40;
		break;
	case 0x0b:
		period = 20;
		break;
	case 0x0c:
		period = 10;
		break;
	default:
		period = 0;
		break;
	}

	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
	    id, period ? "Sync" : "Async", period,
	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
	    pg0.neg_offset,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);

	return (0);
}

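/*
 * Fire a raw INQUIRY at a target, bypassing the scsi midlayer, purely to
 * make the renegotiation happen on the wire.  The io message, sge,
 * inquiry data and sense buffer are all carved out of the one request
 * frame, so no separate dma load is needed.
 */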
int
mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
{
	struct mpi_ccb			*ccb;
	struct scsi_inquiry		inq;
	struct {
		struct mpi_msg_scsi_io		io;
		struct mpi_sge			sge;
		struct scsi_inquiry_data	inqbuf;
		struct scsi_sense_data		sense;
	} __packed			*bundle;
	struct mpi_msg_scsi_io		*io;
	struct mpi_sge			*sge;
	u_int64_t			addr;

	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));

	bzero(&inq, sizeof(inq));
	inq.opcode = INQUIRY;
	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);

	ccb = mpi_get_ccb(sc);
	if (ccb == NULL)
		return (1);

	ccb->ccb_done = mpi_empty_done;

	bundle = ccb->ccb_cmd;
	io = &bundle->io;
	sge = &bundle->sge;

	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
	    MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = target;

	io->cdb_length = sizeof(inq);
	io->sense_buf_len = sizeof(struct scsi_sense_data);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	io->msg_context = htole32(ccb->ccb_id);

	/*
	 * always lun 0
	 * io->lun[0] = htobe16(link->lun);
	 */

	io->direction = MPI_SCSIIO_DIR_READ;
	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;

	bcopy(&inq, io->cdb, sizeof(inq));

	io->data_length = htole32(sizeof(struct scsi_inquiry_data));

	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));

	sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
	    (u_int32_t)sizeof(inq));

	addr = ccb->ccb_cmd_dva +
	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle);
	sge->sg_hi_addr = htole32((u_int32_t)(addr >> 32));
	sge->sg_lo_addr = htole32((u_int32_t)addr);

	if (mpi_poll(sc, ccb, 5000) != 0)
		return (1);

	if (ccb->ccb_rcb != NULL)
		mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);

	mpi_put_ccb(sc, ccb);

	return (0);
}

void
mpi_detach(struct mpi_softc *sc)
{

}

int
mpi_intr(void *arg)
{
	struct mpi_softc		*sc = arg;
	u_int32_t			reg;
	int				rv = 0;

	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
		mpi_reply(sc, reg);
		rv = 1;
	}

	return (rv);
}

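/*
 * Handle one word popped off the reply fifo.  With MPI_REPLY_QUEUE_ADDRESS
 * set, the word holds the dma address of a reply frame (shifted right one
 * bit), which is mapped back to its rcb; otherwise it is a context reply
 * carrying our ccb id directly.  Either way the owning ccb's done handler
 * is run.
 */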
int
mpi_reply(struct mpi_softc *sc, u_int32_t reg)
{
	struct mpi_ccb			*ccb;
	struct mpi_rcb			*rcb = NULL;
	struct mpi_msg_reply		*reply = NULL;
	u_int32_t			reply_dva;
	int				id;
	int				i;

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);

	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
		bus_dmamap_sync(sc->sc_dmat,
		    MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE,
		    BUS_DMASYNC_POSTREAD);

		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;

		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
		    MPI_REPLY_SIZE;
		rcb = &sc->sc_rcbs[i];
		reply = rcb->rcb_reply;

		id = letoh32(reply->msg_context);

		bus_dmamap_sync(sc->sc_dmat,
		    MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE,
		    BUS_DMASYNC_PREREAD);
	} else {
		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
		case MPI_REPLY_QUEUE_TYPE_INIT:
			id = reg & MPI_REPLY_QUEUE_CONTEXT;
			break;

		default:
			panic("%s: unsupported context reply\n",
			    DEVNAME(sc));
		}
	}

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
	    DEVNAME(sc), id, reply);

	ccb = &sc->sc_ccbs[id];

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	ccb->ccb_state = MPI_CCB_READY;
	ccb->ccb_rcb = rcb;

	ccb->ccb_done(ccb);

	return (id);
}

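/*
 * The usual bus_dma four-step (create, alloc, map, load) rolled into one
 * helper that hands back a single contiguous, zeroed dma buffer.  The
 * gotos unwind the steps in reverse order on failure.
 */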
struct mpi_dmamem *
mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
{
	struct mpi_dmamem		*mdm;
	int				nsegs;

	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT);
	if (mdm == NULL)
		return (NULL);

	bzero(mdm, sizeof(struct mpi_dmamem));
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
	    DEVNAME(sc), size, mdm, mdm->mdm_map, nsegs, &mdm->mdm_seg,
	    mdm->mdm_kva);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF);

	return (NULL);
}

void
mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
{
	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF);
}

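/*
 * Carve sc_maxcmds request frames out of a single dmamem allocation and
 * wrap each one in a ccb.  A ccb's id doubles as the msg_context handed
 * to the firmware, so context replies map straight back to sc_ccbs[id].
 */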
int
mpi_alloc_ccbs(struct mpi_softc *sc)
{
	struct mpi_ccb			*ccb;
	u_int8_t			*cmd;
	int				i;

	TAILQ_INIT(&sc->sc_ccb_free);

	sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds,
	    M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}
	bzero(sc->sc_ccbs, sizeof(struct mpi_ccb) * sc->sc_maxcmds);

	sc->sc_requests = mpi_dmamem_alloc(sc,
	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPI_DMA_KVA(sc->sc_requests);
	bzero(cmd, MPI_REQUEST_SIZE * sc->sc_maxcmds);

	for (i = 0; i < sc->sc_maxcmds; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    sc->sc_max_sgl_len, MAXPHYS, 0, 0,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = MPI_REQUEST_SIZE * i;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpi_put_ccb(sc, ccb);
	}

	return (0);

free_maps:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpi_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF);

	return (1);
}

struct mpi_ccb *
mpi_get_ccb(struct mpi_softc *sc)
{
	struct mpi_ccb			*ccb;

	ccb = TAILQ_FIRST(&sc->sc_ccb_free);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb == NULL\n", DEVNAME(sc));
		return (NULL);
	}

	TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link);

	ccb->ccb_state = MPI_CCB_READY;

	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %#x\n", DEVNAME(sc), ccb);

	return (ccb);
}

void
mpi_put_ccb(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %#x\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPI_CCB_FREE;
	ccb->ccb_xs = NULL;
	ccb->ccb_done = NULL;
	bzero(ccb->ccb_cmd, MPI_REQUEST_SIZE);
	TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link);
}

int
mpi_alloc_replies(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));

	sc->sc_rcbs = malloc(MPI_REPLY_COUNT * sizeof(struct mpi_rcb),
	    M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (sc->sc_rcbs == NULL)
		return (1);

	sc->sc_replies = mpi_dmamem_alloc(sc, PAGE_SIZE);
	if (sc->sc_replies == NULL) {
		free(sc->sc_rcbs, M_DEVBUF);
		return (1);
	}

	return (0);
}

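/*
 * Post every reply frame in the PAGE_SIZE reply buffer to the IOC by
 * writing the frames' dma addresses to the reply free fifo.
 */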
void
mpi_push_replies(struct mpi_softc *sc)
{
	struct mpi_rcb			*rcb;
	char				*kva = MPI_DMA_KVA(sc->sc_replies);
	int				i;

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
	    0, PAGE_SIZE, BUS_DMASYNC_PREREAD);

	for (i = 0; i < MPI_REPLY_COUNT; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
		    MPI_REPLY_SIZE * i;
		mpi_push_reply(sc, rcb->rcb_reply_dva);
	}
}

void
mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPI_CCB_QUEUED;
	mpi_write(sc, MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
}

int
mpi_complete(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
{
	u_int32_t			reg;
	int				id = -1;

	DNPRINTF(MPI_D_INTR, "%s: mpi_complete timeout %d\n", DEVNAME(sc),
	    timeout);

	do {
		reg = mpi_pop_reply(sc);
		if (reg == 0xffffffff) {
			if (timeout-- == 0)
				return (1);

			delay(1000);
			continue;
		}

		id = mpi_reply(sc, reg);

	} while (ccb->ccb_id != id);

	return (0);
}

int
mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
{
	int				error;
	int				s;

	DNPRINTF(MPI_D_CMD, "%s: mpi_poll\n", DEVNAME(sc));

	s = splbio();
	mpi_start(sc, ccb);
	error = mpi_complete(sc, ccb, timeout);
	splx(s);

	return (error);
}

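/*
 * Entry point from the scsi midlayer.  cdbs that do not fit the request
 * frame are failed with synthetic ILLEGAL REQUEST sense; everything else
 * is packed into a ccb and either polled to completion (SCSI_POLL) or
 * queued to the IOC.
 */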
int
mpi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct mpi_softc		*sc = link->adapter_softc;
	struct mpi_ccb			*ccb;
	struct mpi_ccb_bundle		*mcb;
	struct mpi_msg_scsi_io		*io;
	int				s;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPI_CDB_LEN) {
		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		s = splbio();
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}

	s = splbio();
	ccb = mpi_get_ccb(sc);
	splx(s);
	if (ccb == NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}
	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_id, xs->flags);

	ccb->ccb_xs = xs;
	ccb->ccb_done = mpi_scsi_cmd_done;

	mcb = ccb->ccb_cmd;
	io = &mcb->mcb_io;

	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = link->target;

	io->cdb_length = xs->cmdlen;
	io->sense_buf_len = sizeof(xs->sense);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	io->msg_context = htole32(ccb->ccb_id);

	io->lun[0] = htobe16(link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPI_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPI_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPI_SCSIIO_DIR_NONE;
		break;
	}

	if (link->quirks & SDEV_NOTAGS)
		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
	else
		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;

	bcopy(xs->cmd, io->cdb, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));

	if (mpi_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		mpi_put_ccb(sc, ccb);
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}

	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);

	if (xs->flags & SCSI_POLL) {
		if (mpi_poll(sc, ccb, xs->timeout) != 0)
			xs->error = XS_DRIVER_STUFFUP;
		return (COMPLETE);
	}

	s = splbio();
	mpi_start(sc, ccb);
	splx(s);
	return (SUCCESSFULLY_QUEUED);
}

void
mpi_scsi_cmd_done(struct mpi_ccb *ccb)
{
	struct mpi_softc		*sc = ccb->ccb_sc;
	struct scsi_xfer		*xs = ccb->ccb_xs;
	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
	bus_dmamap_t			dmap = ccb->ccb_dmamap;
	struct mpi_msg_scsi_io_error	*sie;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	/* timeout_del */
	xs->error = XS_NOERROR;
	xs->resid = 0;
	xs->flags |= ITSDONE;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		mpi_put_ccb(sc, ccb);
		scsi_done(xs);
		return;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
	    sie->msg_length, sie->function);
	DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d "
	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
	    sie->sense_buf_len, sie->msg_flags);
	DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->msg_context));
	DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, letoh16(sie->ioc_status));
	DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->ioc_loginfo));
	DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
	    letoh32(sie->transfer_count));
	DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
	    letoh32(sie->sense_count));
	DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->response_info));
	DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc),
	    letoh16(sie->tag));

	xs->status = sie->scsi_status;
	switch (letoh16(sie->ioc_status)) {
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - letoh32(sie->transfer_count);
		if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
		bcopy(&mcb->mcb_sense, &xs->sense, sizeof(xs->sense));

	DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
	mpi_put_ccb(sc, ccb);
	scsi_done(xs);
}

void
mpi_timeout_xs(void *arg)
{
	/* XXX */
}

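/*
 * Build the scatter-gather list for an xfer.  As many 64-bit "simple"
 * elements as fit go directly after the scsi_io message in the request
 * frame; if the dma map has more segments than that, the last in-frame
 * slot becomes a "chain" element pointing further into the same request
 * frame, where the next run of elements lives, and so on until every
 * segment is described.
 */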
int
mpi_load_xs(struct mpi_ccb *ccb)
{
	struct mpi_softc		*sc = ccb->ccb_sc;
	struct scsi_xfer		*xs = ccb->ccb_xs;
	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
	struct mpi_sge			*sge, *nsge = &mcb->mcb_sgl[0];
	struct mpi_sge			*ce = NULL, *nce;
	u_int64_t			ce_dva;
	bus_dmamap_t			dmap = ccb->ccb_dmamap;
	u_int32_t			addr, flags;
	int				i, error;

	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPI_SGE_FL_DIR_OUT;

	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
		io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {

		if (nsge == ce) {
			nsge++;
			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);

			DNPRINTF(MPI_D_DMA, "%s: - 0x%08x 0x%08x 0x%08x\n",
			    DEVNAME(sc), sge->sg_hdr,
			    sge->sg_hi_addr, sge->sg_lo_addr);

			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
				nce = &nsge[sc->sc_chain_len - 1];
				addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4;
				addr = addr << 16 |
				    sizeof(struct mpi_sge) * sc->sc_chain_len;
			} else {
				nce = NULL;
				addr = sizeof(struct mpi_sge) *
				    (dmap->dm_nsegs - i);
			}

			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
			    MPI_SGE_FL_SIZE_64 | addr);

			ce_dva = ccb->ccb_cmd_dva +
			    ((u_int8_t *)nsge - (u_int8_t *)mcb);

			addr = (u_int32_t)(ce_dva >> 32);
			ce->sg_hi_addr = htole32(addr);
			addr = (u_int32_t)ce_dva;
			ce->sg_lo_addr = htole32(addr);

			DNPRINTF(MPI_D_DMA, "%s: ce: 0x%08x 0x%08x 0x%08x\n",
			    DEVNAME(sc), ce->sg_hdr, ce->sg_hi_addr,
			    ce->sg_lo_addr);

			ce = nce;
		}

		DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc),
		    i, dmap->dm_segs[i].ds_len,
		    (u_int64_t)dmap->dm_segs[i].ds_addr);

		sge = nsge;

		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		addr = (u_int32_t)((u_int64_t)dmap->dm_segs[i].ds_addr >> 32);
		sge->sg_hi_addr = htole32(addr);
		addr = (u_int32_t)dmap->dm_segs[i].ds_addr;
		sge->sg_lo_addr = htole32(addr);

		DNPRINTF(MPI_D_DMA, "%s: %d: 0x%08x 0x%08x 0x%08x\n",
		    DEVNAME(sc), i, sge->sg_hdr, sge->sg_hi_addr,
		    sge->sg_lo_addr);

		nsge = sge + 1;
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
	    MPI_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

void
mpi_minphys(struct buf *bp)
{
	/* XXX */
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}

int
mpi_scsi_ioctl(struct scsi_link *a, u_long b, caddr_t c, int d, struct proc *e)
{
	return (ENOTTY);
}

u_int32_t
mpi_read(struct mpi_softc *sc, bus_size_t r)
{
	u_int32_t			rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);

	return (rv);
}

void
mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

int
mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int				i;

	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 10000; i++) {
		if ((mpi_read(sc, r) & mask) == target)
			return (0);
		delay(1000);
	}

	return (1);
}

int
mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int				i;

	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 10000; i++) {
		if ((mpi_read(sc, r) & mask) != target)
			return (0);
		delay(1000);
	}

	return (1);
}

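/*
 * Push the IOC towards the READY state: an operational or faulted IOC is
 * soft reset (hard reset if that fails), an IOC still coming out of reset
 * is simply waited on, and a handful of rounds of this are tried before
 * giving up.
 */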
int
mpi_init(struct mpi_softc *sc)
{
	u_int32_t			db;
	int				i;

	/* spin until the IOC leaves the RESET state */
	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpi_read_db(sc);
	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPI_DOORBELL_STATE) {
		case MPI_DOORBELL_STATE_READY:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPI_DOORBELL_STATE_OPER:
		case MPI_DOORBELL_STATE_FAULT:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
			    "reset\n", DEVNAME(sc));
			if (mpi_reset_soft(sc) != 0)
				mpi_reset_hard(sc);
			break;

		case MPI_DOORBELL_STATE_RESET:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
			    MPI_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpi_read_db(sc);
	}

	return (1);
}

int
mpi_reset_soft(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));

	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	mpi_write_db(sc,
	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
		return (1);

	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_READY) != 0)
		return (1);

	return (0);
}

int
mpi_reset_hard(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));

	/* enable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);

	/* reset ioc */
	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);

	delay(10000);

	/* disable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);

	/* restore pci bits? */

	/* firmware bits? */
	return (0);
}

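/*
 * The doorbell handshake: requests that have to go out before the request
 * queues exist (iocfacts, iocinit) are banged through the doorbell
 * register one dword at a time, with an interrupt-status ack after each
 * write.  Replies come back through the same register in 16-bit halves
 * (see mpi_handshake_recv_dword()).
 */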
int
mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
{
	u_int32_t			*query = buf;
	int				i;

	/* make sure the doorbell is not in use. */
	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
		mpi_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
	    MPI_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpi_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpi_write_db(sc, htole32(query[i]));
		if (mpi_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}

int
mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
{
	u_int16_t			*words = (u_int16_t *)dword;
	int				i;

	for (i = 0; i < 2; i++) {
		if (mpi_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
		mpi_write_intr(sc, 0);
	}

	return (0);
}

int
mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
{
	struct mpi_msg_reply		*reply = buf;
	u_int32_t			*dbuf = buf, dummy;
	int				i;

	/* get the first dword so we can read the length out of the header. */
	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	return (0);
}

void
mpi_empty_done(struct mpi_ccb *ccb)
{
	/* nothing to do */
}

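/*
 * Fetch the IOC's view of the world over the doorbell and size the driver
 * from it: sc_maxcmds comes from global_credits, the bus width from
 * max_devices, and the sgl geometry from the request frame size (see the
 * calculations at the bottom of the function).
 */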
int
mpi_iocfacts(struct mpi_softc *sc)
{
	struct mpi_msg_iocfacts_request	ifq;
	struct mpi_msg_iocfacts_reply	ifp;

	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));

	bzero(&ifq, sizeof(ifq));
	bzero(&ifp, sizeof(ifp));

	ifq.function = MPI_FUNCTION_IOC_FACTS;
	ifq.chain_offset = 0;
	ifq.msg_flags = 0;
	ifq.msg_context = htole32(0xdeadbeef);

	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n",
	    DEVNAME(sc), ifp.function, ifp.msg_length,
	    ifp.msg_version_maj, ifp.msg_version_min);
	DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x "
	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
	    ifp.ioc_number, ifp.header_version_maj,
	    ifp.header_version_min);
	DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.msg_context));
	DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n",
	    DEVNAME(sc), letoh16(ifp.ioc_status),
	    letoh16(ifp.ioc_exceptions));
	DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x "
	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
	DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n",
	    DEVNAME(sc), letoh16(ifp.request_frame_size),
	    letoh16(ifp.reply_queue_depth));
	DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc),
	    letoh16(ifp.product_id));
	DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_host_mfa_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d "
	    "global_credits: %d\n",
	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
	    letoh16(ifp.global_credits));
	DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_sense_buffer_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n",
	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
	    letoh16(ifp.current_reply_frame_size));
	DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc),
	    letoh32(ifp.fw_image_size));
	DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_capabilities));
	DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x "
	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
	    ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev);
	DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n",
	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
	DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x "
	    "addr 0x%08x %08x\n", DEVNAME(sc),
	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
	    letoh32(ifp.host_page_buffer_sge.sg_hi_addr),
	    letoh32(ifp.host_page_buffer_sge.sg_lo_addr));

	sc->sc_maxcmds = letoh16(ifp.global_credits);
	sc->sc_maxchdepth = ifp.max_chain_depth;
	sc->sc_ioc_number = ifp.ioc_number;
	if (sc->sc_flags & MPI_F_SPI)
		sc->sc_buswidth = 16;
	else
		sc->sc_buswidth =
		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
		sc->sc_fw_len = letoh32(ifp.fw_image_size);

	/*
	 * you can fit sg elements on the end of the io cmd if they fit in the
	 * request frame size.
	 */
	sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) -
	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
	DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc),
	    sc->sc_first_sgl_len);

	sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) /
	    sizeof(struct mpi_sge);
	DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc),
	    sc->sc_chain_len);

	/* the sgl tailing the io cmd loses an entry to the chain element. */
	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
	/* the sgl chains lose an entry for each chain element */
	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
	    sc->sc_chain_len;
	DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc),
	    sc->sc_max_sgl_len);

	/* XXX we're ignoring the max chain depth */

	return (0);
}

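/*
 * Tell the IOC about the host: bus/device limits, the reply frame size,
 * and the upper 32 bits of the request, sense and reply dma areas.  Like
 * iocfacts this travels over the doorbell handshake because the request
 * queues are not usable yet.
 */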
int
mpi_iocinit(struct mpi_softc *sc)
{
	struct mpi_msg_iocinit_request	iiq;
	struct mpi_msg_iocinit_reply	iip;
	u_int32_t			hi_addr;

	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));

	bzero(&iiq, sizeof(iiq));
	bzero(&iip, sizeof(iip));

	iiq.function = MPI_FUNCTION_IOC_INIT;
	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;

	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
	iiq.max_buses = 1;

	iiq.msg_context = htole32(0xd00fd00f);

	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);

	hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_requests) >> 32);
	iiq.host_mfa_hi_addr = htole32(hi_addr);
	iiq.sense_buffer_hi_addr = htole32(hi_addr);

	hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_replies) >> 32);
	iiq.reply_fifo_host_signalling_addr = htole32(hi_addr);

	iiq.msg_version_maj = 0x01;
	iiq.msg_version_min = 0x02;

	iiq.hdr_version_unit = 0x0d;
	iiq.hdr_version_dev = 0x00;

	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d "
	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
	    iip.max_buses, iip.max_devices, iip.flags);
	DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.msg_context));
	DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(iip.ioc_status));
	DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.ioc_loginfo));

	return (0);
}

int
mpi_portfacts(struct mpi_softc *sc)
{
	struct mpi_ccb			*ccb;
	struct mpi_msg_portfacts_request *pfq;
	volatile struct mpi_msg_portfacts_reply *pfp;
	int				s, rv = 1;

	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));

	s = splbio();
	ccb = mpi_get_ccb(sc);
	splx(s);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpi_empty_done;
	pfq = ccb->ccb_cmd;

	pfq->function = MPI_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->msg_context = htole32(ccb->ccb_id);

	if (mpi_poll(sc, ccb, 50000) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}
	pfp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n",
	    DEVNAME(sc), pfp->function, pfp->msg_length);
	DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n",
	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
	DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(pfp->msg_context));
	DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(pfp->ioc_status));
	DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(pfp->ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n",
	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
	DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n",
	    DEVNAME(sc), letoh16(pfp->protocol_flags),
	    letoh16(pfp->port_scsi_id));
	DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d "
	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
	    letoh16(pfp->max_persistent_ids),
	    letoh16(pfp->max_posted_cmd_buffers));
	DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", DEVNAME(sc),
	    letoh16(pfp->max_lan_buckets));

	sc->sc_porttype = pfp->port_type;
	sc->sc_target = letoh16(pfp->port_scsi_id);

	mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
	rv = 0;
err:
	mpi_put_ccb(sc, ccb);

	return (rv);
}

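/*
 * Ask the IOC to send asynchronous event notifications.  The ccb is not
 * returned to the pool when the reply arrives: the firmware keeps the
 * message context and replies to it again for every subsequent event
 * (see the MPI_EVENT_FLAGS_REPLY_KEPT handling in the done handler).
 * This path is still #ifdef notyet in mpi_attach().
 */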
int
mpi_eventnotify(struct mpi_softc *sc)
{
	struct mpi_ccb			*ccb;
	struct mpi_msg_event_request	*enq;
	int				s;

	s = splbio();
	ccb = mpi_get_ccb(sc);
	splx(s);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	ccb->ccb_done = mpi_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
	enq->chain_offset = 0;
	enq->event_switch = MPI_EVENT_SWITCH_ON;
	enq->msg_context = htole32(ccb->ccb_id);

	mpi_start(sc, ccb);
	return (0);
}

void
mpi_eventnotify_done(struct mpi_ccb *ccb)
{
	struct mpi_softc		*sc = ccb->ccb_sc;
	struct mpi_msg_event_reply	*enp = ccb->ccb_rcb->rcb_reply;
	int				deferred = 0;

	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));

	DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d "
	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
	    letoh16(enp->data_length));
	DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n",
	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
	DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->msg_context));
	DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(enp->ioc_status));
	DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->ioc_loginfo));
	DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->event));
	DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->event_context));

	switch (letoh32(enp->event)) {
	/* ignore these */
	case MPI_EVENT_EVENT_CHANGE:
	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		if (sc->sc_scsibus == NULL)
			break;

		if (scsi_task(mpi_evt_sas, sc, ccb->ccb_rcb, 0) != 0) {
			printf("%s: unable to run SAS device status change\n",
			    DEVNAME(sc));
			break;
		}
		deferred = 1;
		break;

	default:
		printf("%s: unhandled event 0x%02x\n", DEVNAME(sc),
		    letoh32(enp->event));
		break;
	}

	if (!deferred) {
		if (enp->ack_required)
			mpi_eventack(sc, enp);
		mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
	}

1930: if ((enp->msg_flags & MPI_EVENT_FLAGS_REPLY_KEPT) == 0) {
1931: 		/* XXX this shouldn't happen until shutdown */
1932: mpi_put_ccb(sc, ccb);
1933: }
1934: }
1935:
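/*
 * Runs via scsi_task() in process context.  The event payload (a
 * struct mpi_evt_sas_change) sits immediately after the event reply
 * header in the reply frame.  Added targets are probed, unresponsive
 * ones are forcibly detached, and the reply is returned to the IOC,
 * with an ack if one was requested, once the work is done.
 */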
1936: void
1937: mpi_evt_sas(void *xsc, void *arg)
1938: {
1939: struct mpi_softc *sc = xsc;
1940: struct mpi_rcb *rcb = arg;
1941: struct mpi_msg_event_reply *enp = rcb->rcb_reply;
1942: struct mpi_evt_sas_change *ch;
1943: u_int8_t *data;
1944: int s;
1945:
1946: data = rcb->rcb_reply;
1947: data += sizeof(struct mpi_msg_event_reply);
1948: ch = (struct mpi_evt_sas_change *)data;
1949:
1950: 	if (ch->bus != 0)
1951: 		goto done;
1952:
1953: switch (ch->reason) {
1954: case MPI_EVT_SASCH_REASON_ADDED:
1955: case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
1956: scsi_probe_target(sc->sc_scsibus, ch->target);
1957: break;
1958:
1959: case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
1960: scsi_detach_target(sc->sc_scsibus, ch->target, DETACH_FORCE);
1961: break;
1962:
1963: case MPI_EVT_SASCH_REASON_SMART_DATA:
1964: case MPI_EVT_SASCH_REASON_UNSUPPORTED:
1965: case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
1966: break;
1967: default:
1968: printf("%s: unknown reason for SAS device status change: "
1969: "0x%02x\n", DEVNAME(sc), ch->reason);
1970: break;
1971: }
1972: done:
1973: 	s = splbio();
1974: mpi_push_reply(sc, rcb->rcb_reply_dva);
1975: if (enp->ack_required)
1976: mpi_eventack(sc, enp);
1977: splx(s);
1978: }
1979:
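/*
 * Acknowledge an event the IOC flagged as ack_required.  event and
 * event_context are copied straight out of the reply, so they are
 * already little-endian and need no further swapping.
 */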
1980: void
1981: mpi_eventack(struct mpi_softc *sc, struct mpi_msg_event_reply *enp)
1982: {
1983: struct mpi_ccb *ccb;
1984: struct mpi_msg_eventack_request *eaq;
1985:
1986: ccb = mpi_get_ccb(sc);
1987: if (ccb == NULL) {
1988: DNPRINTF(MPI_D_EVT, "%s: mpi_eventack ccb_get\n", DEVNAME(sc));
1989: return;
1990: }
1991:
1992: ccb->ccb_done = mpi_eventack_done;
1993: eaq = ccb->ccb_cmd;
1994:
1995: eaq->function = MPI_FUNCTION_EVENT_ACK;
1996: eaq->msg_context = htole32(ccb->ccb_id);
1997:
1998: eaq->event = enp->event;
1999: eaq->event_context = enp->event_context;
2000:
2001: mpi_start(sc, ccb);
2003: }
2004:
2005: void
2006: mpi_eventack_done(struct mpi_ccb *ccb)
2007: {
2008: struct mpi_softc *sc = ccb->ccb_sc;
2009:
2010: DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2011:
2012: mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2013: mpi_put_ccb(sc, ccb);
2014: }
2015:
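/*
 * Bring port 0 online so the IOC starts discovering and reporting
 * devices.  Polled synchronously like the rest of the setup path;
 * note that the reply's ioc_status is not checked here, and that
 * "mpi_msg_portenable_repy" (sic) is the struct tag as spelled in
 * mpireg.h.
 */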
2016: int
2017: mpi_portenable(struct mpi_softc *sc)
2018: {
2019: struct mpi_ccb *ccb;
2020: struct mpi_msg_portenable_request *peq;
2021: struct mpi_msg_portenable_repy *pep;
2022: int s;
2023:
2024: DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2025:
2026: s = splbio();
2027: ccb = mpi_get_ccb(sc);
2028: splx(s);
2029: if (ccb == NULL) {
2030: DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2031: DEVNAME(sc));
2032: return (1);
2033: }
2034:
2035: ccb->ccb_done = mpi_empty_done;
2036: peq = ccb->ccb_cmd;
2037:
2038: peq->function = MPI_FUNCTION_PORT_ENABLE;
2039: peq->port_number = 0;
2040: peq->msg_context = htole32(ccb->ccb_id);
2041:
2042: if (mpi_poll(sc, ccb, 50000) != 0) {
2043: DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2044: return (1);
2045: }
2046:
2047: if (ccb->ccb_rcb == NULL) {
2048: DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2049: DEVNAME(sc));
2050: return (1);
2051: }
2052: pep = ccb->ccb_rcb->rcb_reply;
2053:
2054: mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2055: mpi_put_ccb(sc, ccb);
2056:
2057: return (0);
2058: }
2059:
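/*
 * Fetch a copy of the IOC's firmware image into host memory.  A DMA
 * buffer of sc_fw_len bytes (zero when no upload is needed, making
 * this a no-op) is described to the FW_UPLOAD command by a single
 * 64-bit simple SGE.  The buffer is kept for the life of the driver,
 * presumably so the image can be handed back to IOCs that require a
 * re-download after a hard reset.
 */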
2060: int
2061: mpi_fwupload(struct mpi_softc *sc)
2062: {
2063: struct mpi_ccb *ccb;
2064: struct {
2065: struct mpi_msg_fwupload_request req;
2066: struct mpi_sge sge;
2067: } __packed *bundle;
2068: struct mpi_msg_fwupload_reply *upp;
2069: u_int64_t addr;
2070: int s;
2071: int rv = 0;
2072:
2073: if (sc->sc_fw_len == 0)
2074: return (0);
2075:
2076: DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2077:
2078: sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2079: if (sc->sc_fw == NULL) {
2080: DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2081: DEVNAME(sc), sc->sc_fw_len);
2082: return (1);
2083: }
2084:
2085: s = splbio();
2086: ccb = mpi_get_ccb(sc);
2087: splx(s);
2088: if (ccb == NULL) {
2089: DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2090: DEVNAME(sc));
2091: goto err;
2092: }
2093:
2094: ccb->ccb_done = mpi_empty_done;
2095: bundle = ccb->ccb_cmd;
2096:
2097: bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2098: bundle->req.msg_context = htole32(ccb->ccb_id);
2099:
2100: bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2101:
2102: bundle->req.tce.details_length = 12;
2103: bundle->req.tce.image_size = htole32(sc->sc_fw_len);
2104:
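	/*
	 * One simple SGE covers the whole image: a 64-bit address,
	 * flagged as the last element, end of buffer and end of list,
	 * with the image length in the low bits of the header word.
	 */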
2105: bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2106: MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2107: MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2108: addr = MPI_DMA_DVA(sc->sc_fw);
2109: bundle->sge.sg_hi_addr = htole32((u_int32_t)(addr >> 32));
2110: bundle->sge.sg_lo_addr = htole32((u_int32_t)addr);
2111:
2112: if (mpi_poll(sc, ccb, 50000) != 0) {
2113: 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc));
2114: goto err;
2115: }
2116:
2117: if (ccb->ccb_rcb == NULL)
2118: panic("%s: unable to do fw upload\n", DEVNAME(sc));
2119: upp = ccb->ccb_rcb->rcb_reply;
2120:
2121: if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2122: rv = 1;
2123:
2124: mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2125: mpi_put_ccb(sc, ccb);
2126:
2127: return (rv);
2128:
2129: err:
2130: mpi_dmamem_free(sc, sc->sc_fw);
2131: return (1);
2132: }
2133:
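/*
 * Discover RAID volumes via IOC configuration page 2.  The header is
 * fetched first to size the page, the whole page is read into a
 * temporary buffer, and every active volume owned by this IOC on bus 0
 * gets its scsi_link marked SDEV_VIRTUAL so the midlayer knows the
 * target is a logical volume rather than a physical disk.
 */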
2134: void
2135: mpi_get_raid(struct mpi_softc *sc)
2136: {
2137: struct mpi_cfg_hdr hdr;
2138: struct mpi_cfg_ioc_pg2 *vol_page;
2139: struct mpi_cfg_raid_vol *vol_list, *vol;
2140: size_t pagelen;
2141: u_int32_t capabilities;
2142: struct scsi_link *link;
2143: int i;
2144:
2145: DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2146:
2147: if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2148: DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header"
2149: 	    " for IOC page 2\n", DEVNAME(sc));
2150: return;
2151: }
2152:
2153: pagelen = hdr.page_length * 4; /* dwords to bytes */
2154: vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2155: if (vol_page == NULL) {
2156: DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2157: "space for ioc config page 2\n", DEVNAME(sc));
2158: return;
2159: }
2160: vol_list = (struct mpi_cfg_raid_vol *)(vol_page + 1);
2161:
2162: if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2163: DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2164: "page 2\n", DEVNAME(sc));
2165: goto out;
2166: }
2167:
2168: capabilities = letoh32(vol_page->capabilities);
2169:
2170: 	DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x%08x\n", DEVNAME(sc),
2171: 	    capabilities);
2172: DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d "
2173: "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2174: vol_page->active_vols, vol_page->max_vols,
2175: vol_page->active_physdisks, vol_page->max_physdisks);
2176:
2177: 	/* don't walk the volume list if the IOC has no RAID capability */
2178: if (capabilities == 0xdeadbeef) {
2179: printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2180: goto out;
2181: }
2182:
2183: if ((capabilities & MPI_CFG_IOC_2_CAPABILITIES_RAID) == 0 ||
2184: (vol_page->active_vols == 0))
2185: goto out;
2186:
2187: sc->sc_flags |= MPI_F_RAID;
2188:
2189: for (i = 0; i < vol_page->active_vols; i++) {
2190: vol = &vol_list[i];
2191:
2192: DNPRINTF(MPI_D_RAID, "%s: id: %d bus: %d ioc: %d pg: %d\n",
2193: DEVNAME(sc), vol->vol_id, vol->vol_bus, vol->vol_ioc,
2194: vol->vol_page);
2195: DNPRINTF(MPI_D_RAID, "%s: type: 0x%02x flags: 0x%02x\n",
2196: DEVNAME(sc), vol->vol_type, vol->flags);
2197:
2198: if (vol->vol_ioc != sc->sc_ioc_number || vol->vol_bus != 0)
2199: continue;
2200:
2201: link = sc->sc_scsibus->sc_link[vol->vol_id][0];
2202: if (link == NULL)
2203: continue;
2204:
2205: link->flags |= SDEV_VIRTUAL;
2206: }
2207:
2208: out:
2209: free(vol_page, M_TEMP);
2210: }
2211:
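/*
 * Step one of the two-step configuration protocol: the PAGE_HEADER
 * action fetches a page's version/length/number/type, which callers
 * hand back to mpi_cfg_page() to size and perform the actual read or
 * write.  No data SGE is needed; the header travels in the reply
 * frame itself.
 *
 * Typical use, mirroring mpi_get_raid() above (sketch only):
 *
 *	struct mpi_cfg_hdr hdr;
 *
 *	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0,
 *	    &hdr) != 0)
 *		return;
 *	pagelen = hdr.page_length * 4;		(dwords to bytes)
 *	... allocate pagelen bytes at page ...
 *	if (mpi_cfg_page(sc, 0, &hdr, 1, page, pagelen) != 0)
 *		...
 */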
2212: int
2213: mpi_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2214: u_int32_t address, struct mpi_cfg_hdr *hdr)
2215: {
2216: struct mpi_ccb *ccb;
2217: struct mpi_msg_config_request *cq;
2218: struct mpi_msg_config_reply *cp;
2219: int rv = 0;
2220: int s;
2221:
2222: DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header type: %#x number: %x "
2223: "address: %d\n", DEVNAME(sc), type, number, address);
2224:
2225: s = splbio();
2226: ccb = mpi_get_ccb(sc);
2227: splx(s);
2228: if (ccb == NULL) {
2229: DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2230: DEVNAME(sc));
2231: return (1);
2232: }
2233:
2234: ccb->ccb_done = mpi_empty_done;
2235: cq = ccb->ccb_cmd;
2236:
2237: cq->function = MPI_FUNCTION_CONFIG;
2238: cq->msg_context = htole32(ccb->ccb_id);
2239:
2240: cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2241:
2242: cq->config_header.page_number = number;
2243: cq->config_header.page_type = type;
2244: cq->page_address = htole32(address);
2245: cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2246: MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2247:
2248: if (mpi_poll(sc, ccb, 50000) != 0) {
2249: DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc));
2250: return (1);
2251: }
2252:
2253: if (ccb->ccb_rcb == NULL)
2254: panic("%s: unable to fetch config header\n", DEVNAME(sc));
2255: cp = ccb->ccb_rcb->rcb_reply;
2256:
2257: DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
2258: "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2259: DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2260: "msg_flags: 0x%02x\n", DEVNAME(sc),
2261: letoh16(cp->ext_page_length), cp->ext_page_type,
2262: cp->msg_flags);
2263: DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2264: letoh32(cp->msg_context));
2265: DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2266: letoh16(cp->ioc_status));
2267: DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2268: letoh32(cp->ioc_loginfo));
2269: DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2270: "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2271: cp->config_header.page_version,
2272: cp->config_header.page_length,
2273: cp->config_header.page_number,
2274: cp->config_header.page_type);
2275:
2276: if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2277: rv = 1;
2278: else
2279: *hdr = cp->config_header;
2280:
2281: mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2282: mpi_put_ccb(sc, ccb);
2283:
2284: return (rv);
2285: }
2286:
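/*
 * Step two: read or write the "current" copy of the configuration page
 * described by a header from mpi_cfg_header().  The page data is
 * bounced through the unused tail of the request frame, which is why
 * len may not exceed MPI_REQUEST_SIZE less the request header yet must
 * cover at least the page_length the IOC reported.  The read flag
 * selects both the action (READ_CURRENT vs. WRITE_CURRENT) and the SGE
 * data direction.
 */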
2287: int
2288: mpi_cfg_page(struct mpi_softc *sc, u_int32_t address, struct mpi_cfg_hdr *hdr,
2289: int read, void *page, size_t len)
2290: {
2291: struct mpi_ccb *ccb;
2292: struct mpi_msg_config_request *cq;
2293: struct mpi_msg_config_reply *cp;
2294: u_int64_t dva;
2295: char *kva;
2296: int rv = 0;
2297: int s;
2298:
2299: DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2300: DEVNAME(sc), address, read, hdr->page_type);
2301:
2302: if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2303: len < hdr->page_length * 4)
2304: return (1);
2305:
2306: s = splbio();
2307: ccb = mpi_get_ccb(sc);
2308: splx(s);
2309: if (ccb == NULL) {
2310: DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2311: return (1);
2312: }
2313:
2314: ccb->ccb_done = mpi_empty_done;
2315: cq = ccb->ccb_cmd;
2316:
2317: cq->function = MPI_FUNCTION_CONFIG;
2318: cq->msg_context = htole32(ccb->ccb_id);
2319:
2320: cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2321: MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2322:
2323: cq->config_header = *hdr;
2324: cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2325: cq->page_address = htole32(address);
2326: cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
2327: MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2328: (hdr->page_length * 4) |
2329: (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2330:
2331: /* bounce the page via the request space to avoid more bus_dma games */
2332: dva = ccb->ccb_cmd_dva + sizeof(struct mpi_msg_config_request);
2333:
2334: cq->page_buffer.sg_hi_addr = htole32((u_int32_t)(dva >> 32));
2335: cq->page_buffer.sg_lo_addr = htole32((u_int32_t)dva);
2336:
2337: kva = ccb->ccb_cmd;
2338: kva += sizeof(struct mpi_msg_config_request);
2339: if (!read)
2340: bcopy(page, kva, len);
2341:
2342: if (mpi_poll(sc, ccb, 50000) != 0) {
2343: DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page poll\n", DEVNAME(sc));
2344: return (1);
2345: }
2346:
2347: if (ccb->ccb_rcb == NULL) {
2348: mpi_put_ccb(sc, ccb);
2349: return (1);
2350: }
2351: cp = ccb->ccb_rcb->rcb_reply;
2352:
2353: DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
2354: "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2355: DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2356: "msg_flags: 0x%02x\n", DEVNAME(sc),
2357: letoh16(cp->ext_page_length), cp->ext_page_type,
2358: cp->msg_flags);
2359: DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2360: letoh32(cp->msg_context));
2361: DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2362: letoh16(cp->ioc_status));
2363: DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2364: letoh32(cp->ioc_loginfo));
2365: DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2366: "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2367: cp->config_header.page_version,
2368: cp->config_header.page_length,
2369: cp->config_header.page_number,
2370: cp->config_header.page_type);
2371:
2372: if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2373: rv = 1;
2374: else if (read)
2375: bcopy(kva, page, len);
2376:
2377: mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);
2378: mpi_put_ccb(sc, ccb);
2379:
2380: return (rv);
2381: }