/* $NetBSD: udsir.c,v 1.3 2016/07/07 06:55:42 msaitoh Exp $ */

/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by David Sainty <David.Sainty@dtsp.co.nz>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: udsir.c,v 1.3 2016/07/07 06:55:42 msaitoh Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdevs.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#include <dev/ir/ir.h>
#include <dev/ir/irdaio.h>
#include <dev/ir/irframevar.h>
#include <dev/ir/sir.h>

#ifdef UDSIR_DEBUG
#define DPRINTFN(n,x)	if (udsirdebug > (n)) printf x
int udsirdebug = 0;
#else
#define DPRINTFN(n,x)
#endif

/* Max size with framing. */
#define MAX_UDSIR_OUTPUT_FRAME	(2 * IRDA_MAX_FRAME_SIZE + IRDA_MAX_EBOFS + 4)
struct udsir_softc {
	device_t sc_dev;
	struct usbd_device *sc_udev;
	struct usbd_interface *sc_iface;

	uint8_t *sc_ur_buf;		/* Unencapsulated frame */
	u_int sc_ur_framelen;

	uint8_t *sc_rd_buf;		/* Raw incoming data stream */
	int sc_rd_maxpsz;
	size_t sc_rd_index;
	int sc_rd_addr;
	struct usbd_pipe *sc_rd_pipe;
	struct usbd_xfer *sc_rd_xfer;
	u_int sc_rd_count;
	int sc_rd_readinprogress;
	int sc_rd_expectdataticks;
	u_char sc_rd_err;
	struct framestate sc_framestate;
	struct lwp *sc_thread;
	struct selinfo sc_rd_sel;

	uint8_t *sc_wr_buf;
	int sc_wr_maxpsz;
	int sc_wr_addr;
	int sc_wr_stalewrite;
	struct usbd_xfer *sc_wr_xfer;
	struct usbd_pipe *sc_wr_pipe;
	struct selinfo sc_wr_sel;

	enum {
		udir_input,	/* Receiving data */
		udir_output,	/* Transmitting data */
		udir_stalled,	/* Error preventing data flow */
		udir_idle	/* Neither receiving nor transmitting */
	} sc_direction;

	device_t sc_child;
	struct irda_params sc_params;

	int sc_refcnt;
	char sc_closing;
	char sc_dying;
};

/* True if we cannot safely read data from the device */
#define UDSIR_BLOCK_RX_DATA(sc)	((sc)->sc_ur_framelen != 0)

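/* Timeout for the synchronous interrupt-out write transfer, in milliseconds */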
#define UDSIR_WR_TIMEOUT 200

static int udsir_match(device_t, cfdata_t, void *);
static void udsir_attach(device_t, device_t, void *);
static int udsir_detach(device_t, int);
static void udsir_childdet(device_t, device_t);
static int udsir_activate(device_t, enum devact);

static int udsir_open(void *, int, int, struct lwp *);
static int udsir_close(void *, int, int, struct lwp *);
static int udsir_read(void *, struct uio *, int);
static int udsir_write(void *, struct uio *, int);
static int udsir_poll(void *, int, struct lwp *);
static int udsir_kqfilter(void *, struct knote *);
static int udsir_set_params(void *, struct irda_params *);
static int udsir_get_speeds(void *, int *);
static int udsir_get_turnarounds(void *, int *);

static void filt_udsirrdetach(struct knote *);
static int filt_udsirread(struct knote *, long);
static void filt_udsirwdetach(struct knote *);
static int filt_udsirwrite(struct knote *, long);

static void udsir_thread(void *);

#ifdef UDSIR_DEBUG
static void udsir_dumpdata(uint8_t const *, size_t, char const *);
#endif
static int deframe_rd_ur(struct udsir_softc *);
static void udsir_periodic(struct udsir_softc *);
static void udsir_rd_cb(struct usbd_xfer *, void *, usbd_status);
static usbd_status udsir_start_read(struct udsir_softc *);

CFATTACH_DECL2_NEW(udsir, sizeof(struct udsir_softc),
    udsir_match, udsir_attach, udsir_detach,
    udsir_activate, NULL, udsir_childdet);

static struct irframe_methods const udsir_methods = {
	udsir_open, udsir_close, udsir_read, udsir_write, udsir_poll,
	udsir_kqfilter, udsir_set_params, udsir_get_speeds,
	udsir_get_turnarounds,
};

static int
udsir_match(device_t parent, cfdata_t match, void *aux)
{
	struct usbif_attach_arg *uiaa = aux;

	DPRINTFN(50, ("udsir_match\n"));

	if (uiaa->uiaa_vendor == USB_VENDOR_KINGSUN &&
	    uiaa->uiaa_product == USB_PRODUCT_KINGSUN_IRDA)
		return UMATCH_VENDOR_PRODUCT;

	return UMATCH_NONE;
}

static void
udsir_attach(device_t parent, device_t self, void *aux)
{
	struct udsir_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *dev = uiaa->uiaa_device;
	struct usbd_interface *iface = uiaa->uiaa_iface;
	char *devinfop;
	usb_endpoint_descriptor_t *ed;
	uint8_t epcount;
	int i;
	struct ir_attach_args ia;

	DPRINTFN(10, ("udsir_attach: sc=%p\n", sc));

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_udev = dev;
	sc->sc_iface = iface;

	epcount = 0;
	(void)usbd_endpoint_count(iface, &epcount);

	sc->sc_rd_addr = -1;
	sc->sc_wr_addr = -1;
	for (i = 0; i < epcount; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			aprint_error_dev(self, "couldn't get ep %d\n", i);
			return;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_rd_addr = ed->bEndpointAddress;
			sc->sc_rd_maxpsz = UGETW(ed->wMaxPacketSize);
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_wr_addr = ed->bEndpointAddress;
			sc->sc_wr_maxpsz = UGETW(ed->wMaxPacketSize);
		}
	}
	if (sc->sc_rd_addr == -1 || sc->sc_wr_addr == -1) {
		aprint_error_dev(self, "missing endpoint\n");
		return;
	}

	DPRINTFN(10, ("udsir_attach: %p\n", sc->sc_udev));

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	ia.ia_type = IR_TYPE_IRFRAME;
	ia.ia_methods = &udsir_methods;
	ia.ia_handle = sc;

	sc->sc_child = config_found(self, &ia, ir_print);
	selinit(&sc->sc_rd_sel);
	selinit(&sc->sc_wr_sel);

	return;
}

static int
udsir_detach(device_t self, int flags)
{
	struct udsir_softc *sc = device_private(self);
	int s;
	int rv = 0;

	DPRINTFN(0, ("udsir_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_closing = sc->sc_dying = 1;

	wakeup(&sc->sc_thread);

	while (sc->sc_thread != NULL)
		tsleep(&sc->sc_closing, PWAIT, "usircl", 0);

	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	if (sc->sc_rd_pipe != NULL) {
		usbd_abort_pipe(sc->sc_rd_pipe);
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_abort_pipe(sc->sc_wr_pipe);
	}
	if (sc->sc_rd_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_rd_xfer);
		sc->sc_rd_xfer = NULL;
		sc->sc_rd_buf = NULL;
	}
	if (sc->sc_wr_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_wr_xfer);
		sc->sc_wr_xfer = NULL;
		sc->sc_wr_buf = NULL;
	}
	/* Close pipes. */
	if (sc->sc_rd_pipe != NULL) {
		usbd_close_pipe(sc->sc_rd_pipe);
		sc->sc_rd_pipe = NULL;
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_close_pipe(sc->sc_wr_pipe);
		sc->sc_wr_pipe = NULL;
	}
	wakeup(&sc->sc_ur_framelen);
	wakeup(&sc->sc_wr_buf);

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wait for processes to go away. */
		usb_detach_waitold(sc->sc_dev);
	}
	splx(s);

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	seldestroy(&sc->sc_rd_sel);
	seldestroy(&sc->sc_wr_sel);

	return rv;
}

static void
udsir_childdet(device_t self, device_t child)
{
	struct udsir_softc *sc = device_private(self);

	KASSERT(sc->sc_child == child);
	sc->sc_child = NULL;
}

static int
udsir_activate(device_t self, enum devact act)
{
	struct udsir_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

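/*
 * Open the device: open the interrupt IN/OUT pipes, create the transfer
 * buffers and the deframing buffer, reset the receive state, and start
 * the polling kthread that drives periodic reads.
 */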
/* ARGSUSED */
static int
udsir_open(void *h, int flag, int mode, struct lwp *l)
{
	struct udsir_softc *sc = h;
	int error;
	usbd_status err;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	err = usbd_open_pipe(sc->sc_iface, sc->sc_rd_addr, 0, &sc->sc_rd_pipe);
	if (err != USBD_NORMAL_COMPLETION) {
		error = EIO;
		goto bad1;
	}
	err = usbd_open_pipe(sc->sc_iface, sc->sc_wr_addr, 0, &sc->sc_wr_pipe);
	if (err != USBD_NORMAL_COMPLETION) {
		error = EIO;
		goto bad2;
	}
	error = usbd_create_xfer(sc->sc_rd_pipe, sc->sc_rd_maxpsz,
	    USBD_SHORT_XFER_OK, 0, &sc->sc_rd_xfer);
	if (error)
		goto bad3;

	error = usbd_create_xfer(sc->sc_wr_pipe, IRDA_MAX_FRAME_SIZE,
	    USBD_FORCE_SHORT_XFER, 0, &sc->sc_wr_xfer);
	if (error)
		goto bad4;

	sc->sc_rd_buf = usbd_get_buffer(sc->sc_rd_xfer);
	sc->sc_wr_buf = usbd_get_buffer(sc->sc_wr_xfer);

	sc->sc_ur_buf = kmem_alloc(IRDA_MAX_FRAME_SIZE, KM_SLEEP);
	if (sc->sc_ur_buf == NULL) {
		error = ENOMEM;
		goto bad5;
	}

	sc->sc_rd_index = sc->sc_rd_count = 0;
	sc->sc_closing = 0;
	sc->sc_rd_readinprogress = 0;
	sc->sc_rd_expectdataticks = 0;
	sc->sc_ur_framelen = 0;
	sc->sc_rd_err = 0;
	sc->sc_wr_stalewrite = 0;
	sc->sc_direction = udir_idle;
	sc->sc_params.speed = 0;
	sc->sc_params.ebofs = 0;
	sc->sc_params.maxsize = min(sc->sc_rd_maxpsz, sc->sc_wr_maxpsz);

	deframe_init(&sc->sc_framestate, sc->sc_ur_buf, IRDA_MAX_FRAME_SIZE);

	/* Increment reference for thread */
	sc->sc_refcnt++;

	error = kthread_create(PRI_NONE, 0, NULL, udsir_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev));
	if (error) {
		sc->sc_refcnt--;
		goto bad5;
	}

	return 0;

 bad5:
	usbd_destroy_xfer(sc->sc_wr_xfer);
	sc->sc_wr_xfer = NULL;
 bad4:
	usbd_destroy_xfer(sc->sc_rd_xfer);
	sc->sc_rd_xfer = NULL;
 bad3:
	usbd_close_pipe(sc->sc_wr_pipe);
	sc->sc_wr_pipe = NULL;
 bad2:
	usbd_close_pipe(sc->sc_rd_pipe);
	sc->sc_rd_pipe = NULL;
 bad1:
	return error;
}

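/*
 * Close the device: signal the polling kthread to exit and wait for it,
 * then abort and tear down the pipes and transfers and free the frame
 * buffer.  The reference count keeps a concurrent detach waiting until
 * we are done.
 */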
/* ARGSUSED */
static int
udsir_close(void *h, int flag, int mode, struct lwp *l)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	sc->sc_refcnt++;

	sc->sc_rd_readinprogress = 1;
	sc->sc_closing = 1;

	wakeup(&sc->sc_thread);

	while (sc->sc_thread != NULL)
		tsleep(&sc->sc_closing, PWAIT, "usircl", 0);

	if (sc->sc_rd_pipe != NULL) {
		usbd_abort_pipe(sc->sc_rd_pipe);
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_abort_pipe(sc->sc_wr_pipe);
	}
	if (sc->sc_rd_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_rd_xfer);
		sc->sc_rd_xfer = NULL;
		sc->sc_rd_buf = NULL;
	}
	if (sc->sc_wr_xfer != NULL) {
		usbd_destroy_xfer(sc->sc_wr_xfer);
		sc->sc_wr_xfer = NULL;
		sc->sc_wr_buf = NULL;
	}
	if (sc->sc_rd_pipe != NULL) {
		usbd_close_pipe(sc->sc_rd_pipe);
		sc->sc_rd_pipe = NULL;
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_close_pipe(sc->sc_wr_pipe);
		sc->sc_wr_pipe = NULL;
	}
	if (sc->sc_ur_buf != NULL) {
		kmem_free(sc->sc_ur_buf, IRDA_MAX_FRAME_SIZE);
		sc->sc_ur_buf = NULL;
	}

	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	return 0;
}

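/*
 * Read a single deframed IrDA frame.  Sleeps until the deframer has
 * produced a complete frame, then copies it out to the caller in one
 * uiomove() and releases the frame buffer for further reception.
 */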
/* ARGSUSED */
static int
udsir_read(void *h, struct uio *uio, int flag)
{
	struct udsir_softc *sc = h;
	int s;
	int error;
	u_int uframelen;

	DPRINTFN(1, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

#ifdef DIAGNOSTIC
	if (sc->sc_rd_buf == NULL)
		return EINVAL;
#endif

	sc->sc_refcnt++;

	if (!sc->sc_rd_readinprogress && !UDSIR_BLOCK_RX_DATA(sc))
		/* Possibly wake up polling thread */
		wakeup(&sc->sc_thread);

	do {
		s = splusb();
		while (sc->sc_ur_framelen == 0) {
			DPRINTFN(5, ("%s: calling tsleep()\n", __func__));
			error = tsleep(&sc->sc_ur_framelen, PZERO | PCATCH,
			    "usirrd", 0);
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				splx(s);
				DPRINTFN(0, ("%s: tsleep() = %d\n",
				    __func__, error));
				goto ret;
			}
		}
		splx(s);

		uframelen = sc->sc_ur_framelen;
		DPRINTFN(1, ("%s: sc=%p framelen=%u, hdr=0x%02x\n",
		    __func__, sc, uframelen, sc->sc_ur_buf[0]));
		if (uframelen > uio->uio_resid)
			error = EINVAL;
		else
			error = uiomove(sc->sc_ur_buf, uframelen, uio);
		sc->sc_ur_framelen = 0;

		if (deframe_rd_ur(sc) == 0 && uframelen > 0) {
			/*
			 * Need to wait for another read to obtain a
			 * complete frame...  If we also obtained
			 * actual data, wake up the possibly sleeping
			 * thread immediately...
			 */
			wakeup(&sc->sc_thread);
		}
	} while (uframelen == 0);

	DPRINTFN(1, ("%s: return %d\n", __func__, error));

 ret:
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);
	return error;
}

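/*
 * Write one IrDA frame.  The payload is SIR-encapsulated into the USB
 * transfer buffer with irda_sir_frame() and sent synchronously on the
 * interrupt OUT pipe.  If the device may be receiving, wait until the
 * polling thread has brought the direction back to idle or output.
 */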
/* ARGSUSED */
static int
udsir_write(void *h, struct uio *uio, int flag)
{
	struct udsir_softc *sc = h;
	usbd_status err;
	uint32_t wrlen;
	int error, sirlength;
	uint8_t *wrbuf;
	int s;

	DPRINTFN(1, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

#ifdef DIAGNOSTIC
	if (sc->sc_wr_buf == NULL)
		return EINVAL;
#endif

	wrlen = uio->uio_resid;
	if (wrlen > sc->sc_wr_maxpsz)
		return EINVAL;

	sc->sc_refcnt++;

	if (!UDSIR_BLOCK_RX_DATA(sc)) {
		/*
		 * If reads are not blocked, determine what action we
		 * should potentially take...
		 */
		if (sc->sc_direction == udir_output) {
			/*
			 * If the last operation was an output, wait for the
			 * polling thread to check for incoming data.
			 */
			sc->sc_wr_stalewrite = 1;
			wakeup(&sc->sc_thread);
		} else if (!sc->sc_rd_readinprogress &&
		    (sc->sc_direction == udir_idle ||
		    sc->sc_direction == udir_input)) {
			/* If idle, check for input before outputting */
			udsir_start_read(sc);
		}
	}

	s = splusb();
	while (sc->sc_wr_stalewrite ||
	    (sc->sc_direction != udir_output &&
	    sc->sc_direction != udir_idle)) {
		DPRINTFN(5, ("%s: sc=%p stalewrite=%d direction=%d, "
		    "calling tsleep()\n",
		    __func__, sc, sc->sc_wr_stalewrite,
		    sc->sc_direction));
		error = tsleep(&sc->sc_wr_buf, PZERO | PCATCH, "usirwr", 0);
		if (sc->sc_dying)
			error = EIO;
		if (error) {
			splx(s);
			DPRINTFN(0, ("%s: tsleep() = %d\n", __func__, error));
			goto ret;
		}
	}
	splx(s);

	wrbuf = sc->sc_wr_buf;

	sirlength = irda_sir_frame(wrbuf, MAX_UDSIR_OUTPUT_FRAME,
	    uio, sc->sc_params.ebofs);
	if (sirlength < 0)
		error = -sirlength;
	else {
		uint32_t btlen;

		DPRINTFN(1, ("%s: transfer %u bytes\n",
		    __func__, (unsigned int)wrlen));

		btlen = sirlength;

		sc->sc_direction = udir_output;

#ifdef UDSIR_DEBUG
		if (udsirdebug >= 20)
			udsir_dumpdata(wrbuf, btlen, __func__);
#endif

		err = usbd_intr_transfer(sc->sc_wr_xfer, sc->sc_wr_pipe,
		    USBD_FORCE_SHORT_XFER, UDSIR_WR_TIMEOUT,
		    wrbuf, &btlen);
		DPRINTFN(2, ("%s: err=%d\n", __func__, err));
		if (err != USBD_NORMAL_COMPLETION) {
			if (err == USBD_INTERRUPTED)
				error = EINTR;
			else if (err == USBD_TIMEOUT)
				error = ETIMEDOUT;
			else
				error = EIO;
		} else
			error = 0;
	}

 ret:
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	DPRINTFN(1, ("%s: sc=%p done\n", __func__, sc));
	return error;
}

static int
udsir_poll(void *h, int events, struct lwp *l)
{
	struct udsir_softc *sc = h;
	int revents = 0;

	DPRINTFN(1, ("%s: sc=%p\n", __func__, sc));

	if (events & (POLLOUT | POLLWRNORM)) {
		if (sc->sc_direction != udir_input)
			revents |= events & (POLLOUT | POLLWRNORM);
		else {
			DPRINTFN(2, ("%s: recording write select\n", __func__));
			selrecord(l, &sc->sc_wr_sel);
		}
	}

	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_ur_framelen != 0) {
			DPRINTFN(2, ("%s: have data\n", __func__));
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			DPRINTFN(2, ("%s: recording read select\n", __func__));
			selrecord(l, &sc->sc_rd_sel);
		}
	}

	return revents;
}

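/*
 * kqueue support: the read filter fires once a complete deframed frame
 * is available, the write filter whenever the device is not actively
 * receiving.
 */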
static const struct filterops udsirread_filtops =
	{ 1, NULL, filt_udsirrdetach, filt_udsirread };
static const struct filterops udsirwrite_filtops =
	{ 1, NULL, filt_udsirwdetach, filt_udsirwrite };

static int
udsir_kqfilter(void *h, struct knote *kn)
{
	struct udsir_softc *sc = h;
	struct klist *klist;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_rd_sel.sel_klist;
		kn->kn_fop = &udsirread_filtops;
		break;
	case EVFILT_WRITE:
		klist = &sc->sc_wr_sel.sel_klist;
		kn->kn_fop = &udsirwrite_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;

	s = splusb();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}

static int
udsir_set_params(void *h, struct irda_params *p)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p, speed=%d ebofs=%d maxsize=%d\n",
	    __func__, sc, p->speed, p->ebofs, p->maxsize));

	if (sc->sc_dying)
		return EIO;

	if (p->speed != 9600)
		return EINVAL;

	if (p->maxsize != sc->sc_params.maxsize) {
		if (p->maxsize > min(sc->sc_rd_maxpsz, sc->sc_wr_maxpsz))
			return EINVAL;
		sc->sc_params.maxsize = p->maxsize;
	}

	sc->sc_params = *p;

	return 0;
}

static int
udsir_get_speeds(void *h, int *speeds)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

	/* Support only 9600bps now. */
	*speeds = IRDA_SPEED_9600;

	return 0;
}

static int
udsir_get_turnarounds(void *h, int *turnarounds)
{
	struct udsir_softc *sc = h;

	DPRINTFN(0, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

	/*
	 * Documentation is on the light side with respect to
	 * turnaround time for this device.
	 */
	*turnarounds = IRDA_TURNT_10000;

	return 0;
}

static void
filt_udsirrdetach(struct knote *kn)
{
	struct udsir_softc *sc = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sc->sc_rd_sel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

/* ARGSUSED */
static int
filt_udsirread(struct knote *kn, long hint)
{
	struct udsir_softc *sc = kn->kn_hook;

	kn->kn_data = sc->sc_ur_framelen;
	return (kn->kn_data > 0);
}

static void
filt_udsirwdetach(struct knote *kn)
{
	struct udsir_softc *sc = kn->kn_hook;
	int s;

	s = splusb();
	SLIST_REMOVE(&sc->sc_wr_sel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

/* ARGSUSED */
static int
filt_udsirwrite(struct knote *kn, long hint)
{
	struct udsir_softc *sc = kn->kn_hook;

	kn->kn_data = 0;
	return (sc->sc_direction != udir_input);
}

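/*
 * Polling kthread.  While the device is open it periodically calls
 * udsir_periodic() to start interrupt-in reads and to retire stale
 * writes, sleeping for hz/10 between polls or until it is woken
 * explicitly.  It exits when sc_closing is set.
 */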
static void
udsir_thread(void *arg)
{
	struct udsir_softc *sc = arg;
	int error;

	DPRINTFN(20, ("%s: starting polling thread\n", __func__));

	while (!sc->sc_closing) {
		if (!sc->sc_rd_readinprogress && !UDSIR_BLOCK_RX_DATA(sc))
			udsir_periodic(sc);

		if (!sc->sc_closing) {
			error = tsleep(&sc->sc_thread, PWAIT, "udsir", hz / 10);
			if (error == EWOULDBLOCK &&
			    sc->sc_rd_expectdataticks > 0)
				/*
				 * After a timeout decrement the tick
				 * counter within which time we expect
				 * data to arrive if we are receiving
				 * data...
				 */
				sc->sc_rd_expectdataticks--;
		}
	}

	DPRINTFN(20, ("%s: exiting polling thread\n", __func__));

	sc->sc_thread = NULL;

	wakeup(&sc->sc_closing);

	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	kthread_exit(0);
}

#ifdef UDSIR_DEBUG
static void
udsir_dumpdata(uint8_t const *data, size_t dlen, char const *desc)
{
	size_t bdindex;

	printf("%s: (%lx)", desc, (unsigned long)dlen);
	for (bdindex = 0; bdindex < dlen; bdindex++)
		printf(" %02x", (unsigned int)data[bdindex]);
	printf("\n");
}
#endif

/* Returns 0 if more data required, 1 if a complete frame was extracted */
static int
deframe_rd_ur(struct udsir_softc *sc)
{

	if (sc->sc_rd_index == 0) {
		KASSERT(sc->sc_rd_count == sc->sc_rd_maxpsz);
		/* valid count */
		sc->sc_rd_count = sc->sc_rd_buf[sc->sc_rd_index++] + 1;
		KASSERT(sc->sc_rd_count < sc->sc_rd_maxpsz);
	}

	while (sc->sc_rd_index < sc->sc_rd_count) {
		uint8_t const *buf;
		size_t buflen;
		enum frameresult fresult;

		buf = &sc->sc_rd_buf[sc->sc_rd_index];
		buflen = sc->sc_rd_count - sc->sc_rd_index;

		fresult = deframe_process(&sc->sc_framestate, &buf, &buflen);

		sc->sc_rd_index = sc->sc_rd_count - buflen;

		DPRINTFN(1, ("%s: result=%d\n", __func__, (int)fresult));

		switch (fresult) {
		case FR_IDLE:
		case FR_INPROGRESS:
		case FR_FRAMEBADFCS:
		case FR_FRAMEMALFORMED:
		case FR_BUFFEROVERRUN:
			break;
		case FR_FRAMEOK:
			sc->sc_ur_framelen = sc->sc_framestate.bufindex;
			wakeup(&sc->sc_ur_framelen); /* XXX should use flag */
			selnotify(&sc->sc_rd_sel, 0, 0);
			return 1;
		}
	}

	/* Reset indices into USB-side buffer */
	sc->sc_rd_index = sc->sc_rd_count = 0;

	return 0;
}

/*
 * Direction transitions:
 *
 * udsir_periodic() can switch the direction from:
 *
 *	output -> idle
 *	output -> stalled
 *	stalled -> idle
 *	idle -> input
 *
 * udsir_rd_cb() can switch the direction from:
 *
 *	input -> stalled
 *	input -> idle
 *
 * udsir_write() can switch the direction from:
 *
 *	idle -> output
 */
static void
udsir_periodic(struct udsir_softc *sc)
{

	DPRINTFN(60, ("%s: direction = %d\n", __func__, sc->sc_direction));

	if (sc->sc_wr_stalewrite && sc->sc_direction == udir_idle) {
		/*
		 * In a stale write case, we need to check if the
		 * write has completed.  Once that has happened, the
		 * write is no longer stale.
		 *
		 * But note that we may immediately start a read poll...
		 */
		sc->sc_wr_stalewrite = 0;
		wakeup(&sc->sc_wr_buf);
	}

	if (!sc->sc_rd_readinprogress &&
	    (sc->sc_direction == udir_idle ||
	    sc->sc_direction == udir_input))
		/* Do a read poll if appropriate... */
		udsir_start_read(sc);
}

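/*
 * Completion callback for the interrupt-in transfer.  Records how much
 * data arrived (or flags a receive error), feeds it to the deframer and,
 * once reception has gone quiet or a full frame is pending, switches the
 * direction back to idle so writers can make progress.
 */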
static void
udsir_rd_cb(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct udsir_softc *sc = priv;
	uint32_t size;

	DPRINTFN(60, ("%s: sc=%p\n", __func__, sc));

	/* Read is no longer in progress */
	sc->sc_rd_readinprogress = 0;

	if (status == USBD_CANCELLED || sc->sc_closing) /* this is normal */
		return;
	if (status) {
		size = 0;
		sc->sc_rd_err = 1;

		if (sc->sc_direction == udir_input ||
		    sc->sc_direction == udir_idle) {
			/*
			 * Receive error, probably need to clear error
			 * condition.
			 */
			sc->sc_direction = udir_stalled;
		}
	} else
		usbd_get_xfer_status(xfer, NULL, NULL, &size, NULL);

	sc->sc_rd_index = 0;
	sc->sc_rd_count = size;

	DPRINTFN(((size > 0 || sc->sc_rd_err != 0) ? 20 : 60),
	    ("%s: sc=%p size=%u, err=%d\n",
	    __func__, sc, size, sc->sc_rd_err));

#ifdef UDSIR_DEBUG
	if (udsirdebug >= 20 && size > 0)
		udsir_dumpdata(sc->sc_rd_buf, size, __func__);
#endif

	if (deframe_rd_ur(sc) == 0) {
		if (!deframe_isclear(&sc->sc_framestate) && size == 0 &&
		    sc->sc_rd_expectdataticks == 0) {
			/*
			 * Expected data, but didn't get it
			 * within expected time...
			 */
			DPRINTFN(5, ("%s: incoming packet timeout\n",
			    __func__));
			deframe_clear(&sc->sc_framestate);
		} else if (size > 0) {
			/*
			 * If we also received actual data, reset the
			 * data read timeout and wake up the possibly
			 * sleeping thread...
			 */
			sc->sc_rd_expectdataticks = 2;
			wakeup(&sc->sc_thread);
		}
	}

	/*
	 * Check if incoming data has stopped, or that we cannot
	 * safely read any more data.  In the case of the latter we
	 * must switch to idle so that a write will not block...
	 */
	if (sc->sc_direction == udir_input &&
	    ((size == 0 && sc->sc_rd_expectdataticks == 0) ||
	    UDSIR_BLOCK_RX_DATA(sc))) {
		DPRINTFN(8, ("%s: idling on packet timeout, "
		    "complete frame, or no data\n", __func__));
		sc->sc_direction = udir_idle;

		/* Wake up for possible output */
		wakeup(&sc->sc_wr_buf);
		selnotify(&sc->sc_wr_sel, 0, 0);
	}
}

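/*
 * Queue an interrupt-in transfer unless reception is currently blocked
 * (an unconsumed frame is pending) or the deframer just completed a
 * frame.  Clears a pending endpoint stall left over from a previous
 * receive error before restarting.
 */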
static usbd_status
udsir_start_read(struct udsir_softc *sc)
{
	usbd_status err;

	DPRINTFN(60, ("%s: sc=%p, size=%d\n", __func__, sc, sc->sc_rd_maxpsz));

	if (sc->sc_dying)
		return USBD_IOERROR;

	if (UDSIR_BLOCK_RX_DATA(sc) || deframe_rd_ur(sc)) {
		/*
		 * Can't start reading just yet.  Since we aren't
		 * going to start a read, have to switch direction to
		 * idle.
		 */
		sc->sc_direction = udir_idle;
		return USBD_NORMAL_COMPLETION;
	}

	/* Starting a read... */
	sc->sc_rd_readinprogress = 1;
	sc->sc_direction = udir_input;

	if (sc->sc_rd_err) {
		sc->sc_rd_err = 0;
		DPRINTFN(0, ("%s: clear stall\n", __func__));
		usbd_clear_endpoint_stall(sc->sc_rd_pipe);
	}

	usbd_setup_xfer(sc->sc_rd_xfer, sc, sc->sc_rd_buf, sc->sc_rd_maxpsz,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, udsir_rd_cb);
	err = usbd_transfer(sc->sc_rd_xfer);
	if (err != USBD_IN_PROGRESS) {
		DPRINTFN(0, ("%s: err=%d\n", __func__, (int)err));
		return err;
	}
	return USBD_NORMAL_COMPLETION;
}