/*	$NetBSD: vioscsi.c,v 1.10 2017/03/13 20:47:38 jdolecek Exp $	*/
/*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/

/*
 * Copyright (c) 2013 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.10 2017/03/13 20:47:38 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/buf.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/vioscsireg.h>
#include <dev/pci/virtiovar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsiconf.h>

#ifdef VIOSCSI_DEBUG
static int vioscsi_debug = 1;
#define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
#else
#define DPRINTF(f) ((void)0)
#endif

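/*
 * Per-request state.  vr_req and vr_res sit at the front of the structure
 * so that a single "control" DMA map covering the first
 * offsetof(struct vioscsi_req, vr_xs) bytes spans both the header the
 * device reads and the response it writes; vr_data maps the SCSI payload
 * separately.
 */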
struct vioscsi_req {
	struct virtio_scsi_req_hdr vr_req;
	struct virtio_scsi_res_hdr vr_res;
	struct scsipi_xfer *vr_xs;
	bus_dmamap_t vr_control;
	bus_dmamap_t vr_data;
};

struct vioscsi_softc {
	device_t sc_dev;
	struct scsipi_adapter sc_adapter;
	struct scsipi_channel sc_channel;

	struct virtqueue sc_vqs[3];
#define VIOSCSI_VQ_CONTROL	0
#define VIOSCSI_VQ_EVENT	1
#define VIOSCSI_VQ_REQUEST	2

	struct vioscsi_req *sc_reqs;
	int sc_nreqs;
	bus_dma_segment_t sc_reqs_segs[1];

	u_int32_t sc_seg_max;
};

/*
 * Each request uses at least two segments: one for the request header
 * and one for the response.
 */
#define VIRTIO_SCSI_MIN_SEGMENTS 2

static int vioscsi_match(device_t, cfdata_t, void *);
static void vioscsi_attach(device_t, device_t, void *);

static int vioscsi_alloc_reqs(struct vioscsi_softc *,
    struct virtio_softc *, int, uint32_t);
static void vioscsi_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int vioscsi_vq_done(struct virtqueue *);
static void vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
    struct vioscsi_req *);
static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
static void vioscsi_req_put(struct vioscsi_softc *, struct vioscsi_req *);

static const char *const vioscsi_vq_names[] = {
	"control",
	"event",
	"request",
};

CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
    vioscsi_match, vioscsi_attach, NULL, NULL);

static int
vioscsi_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
		return 1;
	return 0;
}

static void
vioscsi_attach(device_t parent, device_t self, void *aux)
{
	struct vioscsi_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	uint32_t features;
	char buf[256];
	int rv;

	if (vsc->sc_child != NULL) {
		aprint_error(": parent %s already has a child\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_vqs = sc->sc_vqs;
	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
	vsc->sc_config_change = NULL;
	vsc->sc_intrhand = virtio_vq_intr;
	vsc->sc_flags = 0;

	features = virtio_negotiate_features(vsc, 0);
	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");

	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);

	uint32_t seg_max = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_SEG_MAX);

	uint16_t max_target = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_TARGET);

	uint16_t max_channel = virtio_read_device_config_2(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);

	uint32_t max_lun = virtio_read_device_config_4(vsc,
	    VIRTIO_SCSI_CONFIG_MAX_LUN);

	sc->sc_seg_max = seg_max;

	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "failed to allocate virtqueue %zu\n", i);
			return;
		}

		if (i == VIOSCSI_VQ_REQUEST)
			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
	}

	int qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
		return;

	virtio_start_vq_intr(vsc, &sc->sc_vqs[VIOSCSI_VQ_REQUEST]);

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = max_channel;
	adapt->adapt_openings = cmd_per_lun;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = vioscsi_scsipi_request;
	adapt->adapt_minphys = minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = max_target;
	chan->chan_nluns = max_lun;
	chan->chan_id = 0;
	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;

	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}

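/*
 * Map scsipi xfer control bits onto bus_dma(9) flags and sync operations:
 * XS_CTL_DATA_IN is a device-to-host transfer (a DMA read), everything
 * else is treated as host-to-device (a DMA write).
 */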
#define XS2DMA(xs) \
    ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
    (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
    BUS_DMA_STREAMING)

#define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)

#define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)

static void
vioscsi_scsipi_request(struct scsipi_channel *chan,
    scsipi_adapter_req_t request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	switch (request) {
	case ADAPTER_REQ_RUN_XFER:
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	default:
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	/*
	 * vioscsi_req_get() returns NULL when we run out of queue slots.
	 */
	vr = vioscsi_req_get(sc);
	if (vr == NULL) {
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		return;
	}

	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | ((periph->periph_lun >> 8) & 0x3F);
	req->lun[3] = periph->periph_lun & 0xFF;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	/* tag */
	switch (XS_CTL_TAGTYPE(xs)) {
	case XS_CTL_HEAD_TAG:
		req->task_attr = VIRTIO_SCSI_S_HEAD;
		break;

#if 0	/* XXX */
	case XS_CTL_ACA_TAG:
		req->task_attr = VIRTIO_SCSI_S_ACA;
		break;
#endif

	case XS_CTL_ORDERED_TAG:
		req->task_attr = VIRTIO_SCSI_S_ORDERED;
		break;

	case XS_CTL_SIMPLE_TAG:
	default:
		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}
	req->id = slot;

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		vioscsi_req_put(sc, vr);
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

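	/*
	 * Enqueue the descriptors in the order the device expects them:
	 * device-readable buffers (request header, then data-out) before
	 * device-writable buffers (response header, then data-in).
	 */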
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}
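
/*
 * Complete one request: sync the DMA maps, translate the virtio-scsi
 * response into scsipi terms, then release the slot and notify the
 * midlayer via scsipi_done().
 */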
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		/*
		 * Only trust the returned status and residual when the
		 * request actually completed; the other cases set them
		 * explicitly below.
		 */
		xs->status = vr->vr_res.status;
		xs->resid = vr->vr_res.residual;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		DPRINTF(("%s: bad target\n", __func__));
		memset(sense, 0, sizeof(*sense));
		sense->response_code = 0x70;
		sense->flags = SKEY_ILLEGAL_REQUEST;
		xs->error = XS_SENSE;
		xs->status = 0;
		xs->resid = 0;
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

	vr->vr_xs = NULL;
	vioscsi_req_put(sc, vr);
	scsipi_done(xs);
}
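
/*
 * Interrupt handler for the request virtqueue: drain every completed
 * request from the used ring.  Returns nonzero if any were processed.
 */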
static int
vioscsi_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioscsi_softc *sc = device_private(vsc->sc_child);
	int ret = 0;

	DPRINTF(("%s: enter\n", __func__));

	for (;;) {
		int r, slot;
		r = virtio_dequeue(vsc, vq, &slot, NULL);
		if (r != 0)
			break;

		DPRINTF(("%s: slot=%d\n", __func__, slot));
		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
		ret = 1;
	}

	DPRINTF(("%s: exit %d\n", __func__, ret));

	return ret;
}
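
/*
 * Reserve a descriptor slot in the request virtqueue and return the
 * vioscsi_req that corresponds to it, or NULL when the ring is full.
 */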
static struct vioscsi_req *
vioscsi_req_get(struct vioscsi_softc *sc)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	struct vioscsi_req *vr;
	int r, slot;

	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
		DPRINTF(("%s: virtio_enqueue_prep error %d\n", __func__, r));
		return NULL;
	}
	KASSERT(slot < sc->sc_nreqs);
	vr = &sc->sc_reqs[slot];

	vr->vr_req.id = slot;
	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	return vr;
}
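
/*
 * Release a request: unload its data map and hand the descriptor slot
 * back to the virtqueue.
 */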
static void
vioscsi_req_put(struct vioscsi_softc *sc, struct vioscsi_req *vr)
{
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];
	int slot = vr - sc->sc_reqs;

	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));

	bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);

	virtio_dequeue_commit(vsc, vq, slot);
}
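
/*
 * Allocate a single physically contiguous DMA area holding qsize request
 * structures, then create the per-request DMA maps: a small "control" map
 * loaded over the header portion of each request, and a data map with up
 * to seg_max segments for the SCSI payload.
 */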
static int
vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    int qsize, uint32_t seg_max)
{
	size_t allocsize;
	int r, rsegs, slot;
	void *vaddr;
	struct vioscsi_req *vr;

	allocsize = qsize * sizeof(struct vioscsi_req);
	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
		    allocsize, r);
		return r;
	}
	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
		return r;
	}
	memset(vaddr, 0, allocsize);

	sc->sc_reqs = vaddr;
	sc->sc_nreqs = qsize;

	/* Prepare maps for the requests */
	for (slot = 0; slot < qsize; slot++) {
		vr = &sc->sc_reqs[slot];

		/*
		 * The control map covers only the header portion of the
		 * request structure, i.e. vr_req followed by vr_res.
		 */
		r = bus_dmamap_create(vsc->sc_dmat,
		    offsetof(struct vioscsi_req, vr_xs), 1,
		    offsetof(struct vioscsi_req, vr_xs), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_create data failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}

		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
		    BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: bus_dmamap_load ctrl failed, error %d\n",
			    __func__, r);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	/* Clean up every slot, including slot 0 and the one that failed. */
	for (; slot >= 0; slot--) {
		vr = &sc->sc_reqs[slot];

		if (vr->vr_control) {
			/* this will also unload the mapping if loaded */
			bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
			vr->vr_control = NULL;
		}

		if (vr->vr_data) {
			bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
			vr->vr_data = NULL;
		}
	}

	bus_dmamem_unmap(vsc->sc_dmat, vaddr, allocsize);
	bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);

	return r;
}