/*	$NetBSD: vioscsi.c,v 1.8 2016/10/04 18:23:24 jdolecek Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.8 2016/10/04 18:23:24 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

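/*
 * One request/response pair for a single SCSI command.  vr_req and
 * vr_res are shared with the device through the vr_control DMA map;
 * the data buffer, if any, is mapped separately through vr_data.
 */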
struct vioscsi_req {
	struct virtio_scsi_req_hdr vr_req;
	struct virtio_scsi_res_hdr vr_res;
	struct scsipi_xfer *vr_xs;
	bus_dmamap_t vr_control;
	bus_dmamap_t vr_data;
};

struct vioscsi_softc {
	device_t sc_dev;
	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	struct virtqueue sc_vqs[3];
	struct vioscsi_req *sc_reqs;
	bus_dma_segment_t sc_reqs_segs[1];

	uint32_t sc_seg_max;
};

/*
 * Each request uses at least two segments - one for the header
 * and one for the status.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int vioscsi_match(device_t, cfdata_t, void *);
static void vioscsi_attach(device_t, device_t, void *);

static int vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int vioscsi_vq_done(struct virtqueue *);
static void vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void vioscsi_req_put(struct vioscsi_softc *, struct vioscsi_req *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;
	return 0;
}

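/*
 * Attach: negotiate features, read the device configuration, allocate
 * the three virtqueues and the per-slot request structures, then hand
 * the channel over to scsipi.
 */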
static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	uint32_t features;
	char buf[256];
	int rv;

	if (vsc->sc_child != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	vsc->sc_flags = 0;

	features = virtio_negotiate_features(vsc, 0);
	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint16_t max_channel = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %zu\n", i);
			return;
		}
		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[2].vq_num;
	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
		return;

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = max_channel;
	adapt->adapt_openings = cmd_per_lun;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = max_target;
	chan->chan_nluns = max_lun;
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}

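/*
 * Map the scsipi transfer control bits onto bus_dma(9) flags: the
 * transfer direction, whether the caller may sleep, and the matching
 * pre/post sync operations.
 */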
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

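/*
 * scsipi entry point.  Translates a scsipi_xfer into a virtio-scsi
 * request: encode the LUN, pick a task attribute from the tag type,
 * copy in the CDB, map the data buffer, and enqueue the descriptors.
 */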
static void
vioscsi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
	/*
	 * This can happen when we run out of queue slots.
	 */
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = (intptr_t)vr;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		vioscsi_req_put(sc, vr);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

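	/*
	 * Descriptor chain layout: the device-readable request header
	 * first, then the data buffer for a write, then the
	 * device-writable response header, then the data buffer for a
	 * read.
	 */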
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

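	/*
	 * Polled transfer: invoke the interrupt handler by hand until
	 * vioscsi_req_done() detaches the xfer from the request, or
	 * roughly one second (1000 iterations of 1ms) has passed.
	 */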
	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}

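/*
 * Completion of one request: sync the DMA maps, translate the virtio
 * response code into a scsipi error, and hand the xfer back to scsipi.
 */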
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		/*
		 * The device's status byte and residual count are only
		 * meaningful for a completed command, so copy them here
		 * rather than unconditionally after the switch, which
		 * would clobber the values set in the other cases.
		 */
		xs->status = vr->vr_res.status;
		xs->resid = vr->vr_res.residual;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		DPRINTF(("%s: bad target\n", __func__));
		memset(sense, 0, sizeof(*sense));
		sense->response_code = 0x70;
		sense->flags = SKEY_ILLEGAL_REQUEST;
		xs->error = XS_SENSE;
		xs->status = 0;
		xs->resid = 0;
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

	vr->vr_xs = NULL;
	vioscsi_req_put(sc, vr);
	scsipi_done(xs);
}

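/*
 * Virtqueue interrupt handler: drain all completed slots.  Returns
 * nonzero if at least one request was completed.
 */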
static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(vsc->sc_child);
	int ret = 0;

	DPRINTF(("%s: enter\n", __func__));

	for (;;) {
		int r, slot;
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));
		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
		ret = 1;
	}

	DPRINTF(("%s: exit %d\n", __func__, ret));

	return ret;
}

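/*
 * Claim a free slot in the request virtqueue and set up the DMA maps
 * for its vioscsi_req.  The control map only spans the part of the
 * structure the device sees, i.e. everything up to vr_xs, which is
 * why offsetof(struct vioscsi_req, vr_xs) is used as its size.
 */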
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		goto err1;
	}
	vr = &sc->sc_reqs[slot];

	vr->vr_req.id = slot;
	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;

	r = bus_dmamap_create(vsc->sc_dmat,
	    offsetof(struct vioscsi_req, vr_xs), 1,
	    offsetof(struct vioscsi_req, vr_xs), 0,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create ctrl error %d\n", __func__, r));
		goto err2;
	}
	r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
	    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_create data error %d\n", __func__, r));
		goto err3;
	}
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
	    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
	    BUS_DMA_NOWAIT);
	if (r != 0) {
		DPRINTF(("%s: bus_dmamap_load ctrl error %d\n", __func__, r));
		goto err4;
	}

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;

err4:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
err3:
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
err2:
	virtio_enqueue_abort(vsc, vq, slot);
err1:
	return NULL;
}

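/*
 * Release a request slot: tear down its DMA maps and return the
 * descriptors to the virtqueue.
 */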
static void
vioscsi_req_put(struct vioscsi_softc *sc, struct vioscsi_req *vr)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot = vr - sc->sc_reqs;

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);

	virtio_dequeue_commit(vsc, vq, slot);
}

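/*
 * Allocate one contiguous chunk of DMA-safe memory holding a
 * vioscsi_req for every slot in the request virtqueue.
 */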
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize, uint32_t seg_max)
{
	size_t allocsize;
	int r, rsegs;
	void *vaddr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return 1;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return 1;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	return 0;
}