@@ -324,61 +324,14 @@ static int uvcg_video_usb_req_queue(struct uvc_video *video,
 	return 0;
 }
 
-/*
- * Must only be called from uvcg_video_enable - since after that we only want to
- * queue requests to the endpoint from the uvc_video_complete complete handler.
- * This function is needed in order to 'kick start' the flow of requests from
- * gadget driver to the usb controller.
- */
-static void uvc_video_ep_queue_initial_requests(struct uvc_video *video)
-{
-	struct usb_request *req = NULL;
-	unsigned long flags = 0;
-	unsigned int count = 0;
-	int ret = 0;
-
-	/*
-	 * We only queue half of the free list since we still want to have
-	 * some free usb_requests in the free list for the video_pump async_wq
-	 * thread to encode uvc buffers into. Otherwise we could get into a
-	 * situation where the free list does not have any usb requests to
-	 * encode into - we always end up queueing 0 length requests to the
-	 * end point.
-	 */
-	unsigned int half_list_size = video->uvc_num_requests / 2;
-
-	spin_lock_irqsave(&video->req_lock, flags);
-	/*
-	 * Take these requests off the free list and queue them all to the
-	 * endpoint. Since we queue 0 length requests with the req_lock held,
-	 * there isn't any 'data' race involved here with the complete handler.
-	 */
-	while (count < half_list_size) {
-		req = list_first_entry(&video->req_free, struct usb_request,
-				       list);
-		list_del(&req->list);
-		req->length = 0;
-		ret = uvcg_video_ep_queue(video, req);
-		if (ret < 0) {
-			uvcg_queue_cancel(&video->queue, 0);
-			break;
-		}
-		count++;
-	}
-	spin_unlock_irqrestore(&video->req_lock, flags);
-}
-
 static void
 uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct uvc_request *ureq = req->context;
 	struct uvc_video *video = ureq->video;
 	struct uvc_video_queue *queue = &video->queue;
 	struct uvc_buffer *last_buf;
-	struct usb_request *to_queue = req;
 	unsigned long flags;
-	bool is_bulk = video->max_payload_size;
-	int ret = 0;
 
 	spin_lock_irqsave(&video->req_lock, flags);
 	atomic_dec(&video->queued);
@@ -441,65 +394,85 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
 		return;
 	}
 
+	list_add_tail(&req->list, &video->req_free);
 	/*
-	 * Here we check whether any request is available in the ready
-	 * list. If it is, queue it to the ep and add the current
-	 * usb_request to the req_free list - for video_pump to fill in.
-	 * Otherwise, just use the current usb_request to queue a 0
-	 * length request to the ep. Since we always add to the req_free
-	 * list if we dequeue from the ready list, there will never
-	 * be a situation where the req_free list is completely out of
-	 * requests and cannot recover.
+	 * Queue work to the wq as well since it is possible that a
+	 * buffer may not have been completely encoded with the set of
+	 * in-flight usb requests for which the complete callbacks are
+	 * firing.
+	 * In that case, if we do not queue work to the worker thread,
+	 * the buffer will never be marked as complete - and therefore
+	 * not be returned to userspace. As a result, the
+	 * dequeue -> queue -> dequeue flow of uvc buffers will not
+	 * happen. Since there is a new free request, wake up the pump.
 	 */
-	to_queue->length = 0;
-	if (!list_empty(&video->req_ready)) {
-		to_queue = list_first_entry(&video->req_ready,
-					    struct usb_request, list);
-		list_del(&to_queue->list);
-		list_add_tail(&req->list, &video->req_free);
-		/*
-		 * Queue work to the wq as well since it is possible that a
-		 * buffer may not have been completely encoded with the set of
-		 * in-flight usb requests for whih the complete callbacks are
-		 * firing.
-		 * In that case, if we do not queue work to the worker thread,
-		 * the buffer will never be marked as complete - and therefore
-		 * not be returned to userpsace. As a result,
-		 * dequeue -> queue -> dequeue flow of uvc buffers will not
-		 * happen.
-		 */
-		queue_work(video->async_wq, &video->pump);
-	} else if (atomic_read(&video->queued) > UVCG_REQ_MAX_ZERO_COUNT) {
-		list_add_tail(&to_queue->list, &video->req_free);
-		/*
-		 * There is a new free request - wake up the pump.
-		 */
-		queue_work(video->async_wq, &video->pump);
+	queue_work(video->async_wq, &video->pump);
 
-		spin_unlock_irqrestore(&video->req_lock, flags);
+	spin_unlock_irqrestore(&video->req_lock, flags);
 
-		return;
-	}
-	/*
-	 * Queue to the endpoint. The actual queueing to ep will
-	 * only happen on one thread - the async_wq for bulk endpoints
-	 * and this thread for isoc endpoints.
-	 */
-	ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
-	if (ret < 0) {
+	kthread_queue_work(video->kworker, &video->hw_submit);
+}
+
+static void uvcg_video_hw_submit(struct kthread_work *work)
+{
+	struct uvc_video *video = container_of(work, struct uvc_video, hw_submit);
+	bool is_bulk = video->max_payload_size;
+	unsigned long flags;
+	struct usb_request *req;
+	int ret = 0;
+
+	while (true) {
+		if (!video->ep->enabled)
+			return;
+		spin_lock_irqsave(&video->req_lock, flags);
 		/*
-		 * Endpoint error, but the stream is still enabled.
-		 * Put request back in req_free for it to be cleaned
-		 * up later.
+		 * Here we check whether any request is available in the ready
+		 * list. If it is, queue it to the ep and add the current
+		 * usb_request to the req_free list - for video_pump to fill in.
+		 * Otherwise, just use the current usb_request to queue a 0
+		 * length request to the ep. Since we always add to the req_free
+		 * list if we dequeue from the ready list, there will never
+		 * be a situation where the req_free list is completely out of
+		 * requests and cannot recover.
 		 */
-		list_add_tail(&to_queue->list, &video->req_free);
+		if (!list_empty(&video->req_ready)) {
+			req = list_first_entry(&video->req_ready,
+					       struct usb_request, list);
+		} else {
+			if (list_empty(&video->req_free) ||
+			    (atomic_read(&video->queued) > UVCG_REQ_MAX_ZERO_COUNT)) {
+				spin_unlock_irqrestore(&video->req_lock, flags);
+
+				return;
+			}
+			req = list_first_entry(&video->req_free, struct usb_request,
+					       list);
+			req->length = 0;
+		}
+		list_del(&req->list);
+
 		/*
-		 * There is a new free request - wake up the pump.
+		 * Queue to the endpoint. The actual queueing to ep will
+		 * only happen on one thread - the async_wq for bulk endpoints
+		 * and this thread for isoc endpoints.
 		 */
-		queue_work(video->async_wq, &video->pump);
+		ret = uvcg_video_usb_req_queue(video, req, !is_bulk);
+		if (ret < 0) {
+			/*
+			 * Endpoint error, but the stream is still enabled.
+			 * Put request back in req_free for it to be cleaned
+			 * up later.
+			 */
+			list_add_tail(&req->list, &video->req_free);
+			/*
+			 * There is a new free request - wake up the pump.
+			 */
+			queue_work(video->async_wq, &video->pump);
+
+		}
+
+		spin_unlock_irqrestore(&video->req_lock, flags);
+	}
 }
 
-	spin_unlock_irqrestore(&video->req_lock, flags);
-}
-
 static int
static int
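Note on the new submission path: uvcg_video_hw_submit() drains the ready list first and only falls back to zero-length keep-alive requests while the in-flight count has not exceeded UVCG_REQ_MAX_ZERO_COUNT. A minimal sketch of that per-iteration selection policy, with the req_lock locking and the list_del() elided for readability (the helper name select_next_request is hypothetical, not part of the patch):

static struct usb_request *select_next_request(struct uvc_video *video)
{
	/* Prefer a fully encoded request from the ready list. */
	if (!list_empty(&video->req_ready))
		return list_first_entry(&video->req_ready,
					struct usb_request, list);

	/*
	 * Otherwise fall back to the free list; the caller sets
	 * req->length = 0 so this becomes a zero-length keep-alive.
	 * Stop once too many requests are already queued to the ep.
	 */
	if (list_empty(&video->req_free) ||
	    atomic_read(&video->queued) > UVCG_REQ_MAX_ZERO_COUNT)
		return NULL;	/* nothing to submit this round */

	return list_first_entry(&video->req_free,
				struct usb_request, list);
}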
@@ -771,7 +744,7 @@ int uvcg_video_enable(struct uvc_video *video)
 
 	atomic_set(&video->queued, 0);
 
-	uvc_video_ep_queue_initial_requests(video);
+	kthread_queue_work(video->kworker, &video->hw_submit);
 	queue_work(video->async_wq, &video->pump);
 
 	return ret;
@@ -794,6 +767,17 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
 	if (!video->async_wq)
 		return -EINVAL;
 
+	/* Allocate a kthread for asynchronous hw submit handler. */
+	video->kworker = kthread_create_worker(0, "UVCG");
+	if (IS_ERR(video->kworker)) {
+		uvcg_err(&video->uvc->func, "failed to create UVCG kworker\n");
+		return PTR_ERR(video->kworker);
+	}
+
+	kthread_init_work(&video->hw_submit, uvcg_video_hw_submit);
+
+	sched_set_fifo(video->kworker->task);
+
 	video->uvc = uvc;
 	video->fcc = V4L2_PIX_FMT_YUYV;
 	video->bpp = 16;
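For context, the plumbing added to uvcg_video_init() follows the standard kthread_worker pattern: create a dedicated worker thread once, bind a work item to its handler, and queue that item from any context (here, the USB request completion callback). A minimal, self-contained sketch of the same pattern under the same APIs (the my_dev names are hypothetical, error handling abbreviated):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct my_dev {
	struct kthread_worker *kworker;
	struct kthread_work hw_submit;
};

static void my_hw_submit(struct kthread_work *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, hw_submit);

	/* Submit pending requests to the hardware using @dev here. */
	(void)dev;
}

static int my_dev_init(struct my_dev *dev)
{
	/* One dedicated thread; queued work items run on it sequentially. */
	dev->kworker = kthread_create_worker(0, "my_dev");
	if (IS_ERR(dev->kworker))
		return PTR_ERR(dev->kworker);

	kthread_init_work(&dev->hw_submit, my_hw_submit);

	/* Give the worker real-time FIFO priority, as the patch does. */
	sched_set_fifo(dev->kworker->task);

	return 0;
}

/* Safe from atomic context, e.g. a USB completion callback. */
static void my_dev_kick(struct my_dev *dev)
{
	kthread_queue_work(dev->kworker, &dev->hw_submit);
}

Because every hw_submit invocation runs on the one kworker thread, submissions to the endpoint are serialized by construction, which is what lets uvc_video_complete() hand the actual queueing off instead of calling uvcg_video_usb_req_queue() from the completion path.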