diff --git a/mjpg-streamer-experimental/plugins/input_uvc/input_uvc.c b/mjpg-streamer-experimental/plugins/input_uvc/input_uvc.c
index b8c6fc94..4378612c 100644
--- a/mjpg-streamer-experimental/plugins/input_uvc/input_uvc.c
+++ b/mjpg-streamer-experimental/plugins/input_uvc/input_uvc.c
@@ -74,6 +74,8 @@ static unsigned int every = 1;
 static int wantTimestamp = 0;
 static struct timeval timestamp;
 static int softfps = -1;
+static unsigned int timeout = 5;
+static unsigned int dv_timings = 0;
 
 static const struct {
     const char * k;
@@ -211,6 +213,8 @@ int input_init(input_parameter *param, int id)
         {"cb", required_argument, 0, 0},
         {"timestamp", no_argument, 0, 0},
         {"softfps", required_argument, 0, 0},
+        {"timeout", required_argument, 0, 0},
+        {"dv_timings", no_argument, 0, 0},
         {0, 0, 0, 0}
     };
 
@@ -373,6 +377,14 @@ int input_init(input_parameter *param, int id)
         case 40:
            softfps = atoi(optarg);
            break;
+        case 41:
+            DBG("case 41\n");
+            timeout = MAX(atoi(optarg), 1);
+            break;
+        case 42:
+            DBG("case 42\n");
+            dv_timings = 1;
+            break;
         default:
             DBG("default case\n");
             help();
@@ -430,6 +442,7 @@ int input_init(input_parameter *param, int id)
     DBG("vdIn pn: %d\n", id);
     /* open video device and prepare data structure */
+    pctx->videoIn->dv_timings = dv_timings;
     if(init_videoIn(pctx->videoIn, dev, width, height,
                     fps, format, 1, pctx->pglobal, id, tvnorm) < 0) {
         IPRINT("init_VideoIn failed\n");
         closelog();
@@ -531,6 +544,8 @@ void help(void)
            " [-timestamp ]..........: Populate frame timestamp with system time\n" \
            " [-softfps] ............: Drop frames to try and achieve this fps\n" \
            "                          set your camera to its maximum fps to avoid stuttering\n" \
+           " [-timeout] ............: Timeout for device querying (seconds)\n" \
+           " [-dv_timings] .........: Enable DV timings querying and event processing\n" \
            " ---------------------------------------------------------------\n");
 
     fprintf(stderr, "\n"\
@@ -648,102 +663,165 @@ void *cam_thread(void *arg)
         pcontext->videoIn->frame_period_time = 1000/softfps;
     }
 
+    if (video_enable(pcontext->videoIn)) {
+        IPRINT("Can\'t enable video in first time\n");
+        goto endloop;
+    }
+
     while(!pglobal->stop) {
         while(pcontext->videoIn->streamingState == STREAMING_PAUSED) {
             usleep(1); // maybe not the best way so FIXME
         }
 
-        /* grab a frame */
-        if(uvcGrab(pcontext->videoIn) < 0) {
-            IPRINT("Error grabbing frames\n");
-            exit(EXIT_FAILURE);
-        }
+        fd_set rd_fds; // for capture
+        fd_set ex_fds; // for capture
+        fd_set wr_fds; // for output
 
-        if ( every_count < every - 1 ) {
-            DBG("dropping %d frame for every=%d\n", every_count + 1, every);
-            ++every_count;
-            continue;
-        } else {
-            every_count = 0;
-        }
+        FD_ZERO(&rd_fds);
+        FD_SET(pcontext->videoIn->fd, &rd_fds);
 
-        //DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->tmpbytesused, pcontext->id);
-
-        /*
-         * Workaround for broken, corrupted frames:
-         * Under low light conditions corrupted frames may get captured.
-         * The good thing is such frames are quite small compared to the regular pictures.
-         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
-         * corrupted frames are smaller.
-         */
-        if(pcontext->videoIn->tmpbytesused < minimum_size) {
-            DBG("dropping too small frame, assuming it as broken\n");
-            continue;
-        }
+        FD_ZERO(&ex_fds);
+        FD_SET(pcontext->videoIn->fd, &ex_fds);
 
-        // Overwrite timestamp (e.g. where camera is providing 0 values)
-        // Do it here so that this timestamp can be used in frameskipping
-        if(wantTimestamp)
-        {
-            gettimeofday(&timestamp, NULL);
-            pcontext->videoIn->tmptimestamp = timestamp;
-        }
+        FD_ZERO(&wr_fds);
+        FD_SET(pcontext->videoIn->fd, &wr_fds);
 
-        // use software frame dropping on low fps
-        if (pcontext->videoIn->soft_framedrop == 1) {
-            unsigned long last = pglobal->in[pcontext->id].timestamp.tv_sec * 1000 +
-                                 (pglobal->in[pcontext->id].timestamp.tv_usec/1000); // convert to ms
-            unsigned long current = pcontext->videoIn->tmptimestamp.tv_sec * 1000 +
-                                    pcontext->videoIn->tmptimestamp.tv_usec/1000; // convert to ms
+        struct timeval tv;
+        tv.tv_sec = timeout;
+        tv.tv_usec = 0;
 
-            // if the requested time did not esplashed skip the frame
-            if ((current - last) < pcontext->videoIn->frame_period_time) {
-                DBG("Last frame taken %d ms ago so drop it\n", (current - last));
+        int sel = select(pcontext->videoIn->fd + 1, &rd_fds, &wr_fds, &ex_fds, &tv);
+        DBG("select() = %d\n", sel);
+
+        if (sel < 0) {
+            if (errno == EINTR) {
+                continue;
+            }
+            perror("select() error");
+            goto endloop;
+        } else if (sel == 0) {
+            IPRINT("select() timeout\n");
+            if (dv_timings) {
+                if (setResolution(pcontext->videoIn, pcontext->videoIn->width, pcontext->videoIn->height) < 0) {
+                    goto endloop;
+                }
                 continue;
+            } else {
+                goto endloop;
             }
-            DBG("Lagg: %ld\n", (current - last) - pcontext->videoIn->frame_period_time);
         }
 
-        /* copy JPG picture to global buffer */
-        pthread_mutex_lock(&pglobal->in[pcontext->id].db);
+        if (FD_ISSET(pcontext->videoIn->fd, &rd_fds)) {
+            DBG("Grabbing a frame...\n");
+            /* grab a frame */
+            if(uvcGrab(pcontext->videoIn) < 0) {
+                IPRINT("Error grabbing frames\n");
+                goto endloop;
+            }
 
-        /*
-         * If capturing in YUV mode convert to JPEG now.
-         * This compression requires many CPU cycles, so try to avoid YUV format.
-         * Getting JPEGs straight from the webcam, is one of the major advantages of
-         * Linux-UVC compatible devices.
-         */
-        #ifndef NO_LIBJPEG
-        if ((pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) ||
-            (pcontext->videoIn->formatIn == V4L2_PIX_FMT_UYVY) ||
-            (pcontext->videoIn->formatIn == V4L2_PIX_FMT_RGB565) ) {
-            DBG("compressing frame from input: %d\n", (int)pcontext->id);
-            pglobal->in[pcontext->id].size = compress_image_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, quality);
-            /* copy this frame's timestamp to user space */
-            pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
-        } else {
-        #endif
-            DBG("copying frame from input: %d\n", (int)pcontext->id);
-            pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);
-            /* copy this frame's timestamp to user space */
-            pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
-        #ifndef NO_LIBJPEG
-        }
-        #endif
+            if ( every_count < every - 1 ) {
+                DBG("dropping %d frame for every=%d\n", every_count + 1, every);
+                ++every_count;
+                goto other_select_handlers;
+            } else {
+                every_count = 0;
+            }
+
+            //DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->tmpbytesused, pcontext->id);
+
+            /*
+             * Workaround for broken, corrupted frames:
+             * Under low light conditions corrupted frames may get captured.
+             * The good thing is such frames are quite small compared to the regular pictures.
+             * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
+             * corrupted frames are smaller.
+             */
+            if(pcontext->videoIn->tmpbytesused < minimum_size) {
+                DBG("dropping too small frame, assuming it as broken\n");
+                goto other_select_handlers;
+            }
+
+            // Overwrite timestamp (e.g. where camera is providing 0 values)
+            // Do it here so that this timestamp can be used in frameskipping
+            if(wantTimestamp)
+            {
+                gettimeofday(&timestamp, NULL);
+                pcontext->videoIn->tmptimestamp = timestamp;
+            }
+
+            // use software frame dropping on low fps
+            if (pcontext->videoIn->soft_framedrop == 1) {
+                unsigned long last = pglobal->in[pcontext->id].timestamp.tv_sec * 1000 +
+                                     (pglobal->in[pcontext->id].timestamp.tv_usec/1000); // convert to ms
+                unsigned long current = pcontext->videoIn->tmptimestamp.tv_sec * 1000 +
+                                        pcontext->videoIn->tmptimestamp.tv_usec/1000; // convert to ms
+
+                // if the requested time did not esplashed skip the frame
+                if ((current - last) < pcontext->videoIn->frame_period_time) {
+                    DBG("Last frame taken %d ms ago so drop it\n", (current - last));
+                    goto other_select_handlers;
+                }
+                DBG("Lagg: %ld\n", (current - last) - pcontext->videoIn->frame_period_time);
+            }
+
+            /* copy JPG picture to global buffer */
+            pthread_mutex_lock(&pglobal->in[pcontext->id].db);
+
+            /*
+             * If capturing in YUV mode convert to JPEG now.
+             * This compression requires many CPU cycles, so try to avoid YUV format.
+             * Getting JPEGs straight from the webcam, is one of the major advantages of
+             * Linux-UVC compatible devices.
+             */
+            #ifndef NO_LIBJPEG
+            if ((pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) ||
+                (pcontext->videoIn->formatIn == V4L2_PIX_FMT_UYVY) ||
+                (pcontext->videoIn->formatIn == V4L2_PIX_FMT_RGB565) ) {
+                DBG("compressing frame from input: %d\n", (int)pcontext->id);
+                pglobal->in[pcontext->id].size = compress_image_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, quality);
+                /* copy this frame's timestamp to user space */
+                pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
+            } else {
+            #endif
+                DBG("copying frame from input: %d\n", (int)pcontext->id);
+                pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);
+                /* copy this frame's timestamp to user space */
+                pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
+            #ifndef NO_LIBJPEG
+            }
+            #endif
 
 #if 0
-        /* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
-        if((prev_size - global->size)*(prev_size - global->size) > 4 * 1024 * 1024) {
-            DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
-        }
-        prev_size = global->size;
+            /* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
+            if((prev_size - global->size)*(prev_size - global->size) > 4 * 1024 * 1024) {
+                DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
+            }
+            prev_size = global->size;
 #endif
 
-        /* signal fresh_frame */
-        pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
-        pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
+            /* signal fresh_frame */
+            pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
+            pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
+        }
+
+other_select_handlers:
+
+        if (dv_timings) {
+            if (FD_ISSET(pcontext->videoIn->fd, &wr_fds)) {
+                IPRINT("Writing?!\n");
+            }
+
+            if (FD_ISSET(pcontext->videoIn->fd, &ex_fds)) {
+                IPRINT("FD exception\n");
+                if (video_handle_event(pcontext->videoIn) < 0) {
+                    goto endloop;
+                }
+            }
+        }
     }
 
+endloop:
+
     DBG("leaving input thread, calling cleanup function now\n");
     pthread_cleanup_pop(1);
diff --git a/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.c b/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.c
index 57da971f..45fae16f 100644
--- a/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.c
+++ b/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.c
@@ -87,6 +87,8 @@ int xioctl(int fd, int IOCTL_X, void *arg)
 }
 
 static int init_v4l2(struct vdIn *vd);
+static int init_framebuffer(struct vdIn *vd);
+static void free_framebuffer(struct vdIn *vd);
 
 int init_videoIn(struct vdIn *vd, char *device, int width,
                  int height, int fps, int format, int grabmethod, globals *pglobal, int id, v4l2_std_id vstd)
@@ -114,9 +116,9 @@ int init_videoIn(struct vdIn *vd, char *device, int width,
     vd->vstd = vstd;
     vd->grabmethod = grabmethod;
     vd->soft_framedrop = 0;
+
     if(init_v4l2(vd) < 0) {
-        fprintf(stderr, " Init v4L2 failed !! exit fatal \n");
-        goto error;;
+        goto error;
     }
 
     // getting the name of the input source
@@ -210,38 +212,13 @@ int init_videoIn(struct vdIn *vd, char *device, int width,
         }
     }
 
-    /* alloc a temp buffer to reconstruct the pict */
-    vd->framesizeIn = (vd->width * vd->height << 1);
-    switch(vd->formatIn) {
-    case V4L2_PIX_FMT_JPEG:
-        // Fall-through intentional
-    case V4L2_PIX_FMT_MJPEG: // in JPG mode the frame size is varies at every frame, so we allocate a bit bigger buffer
-        vd->tmpbuffer = (unsigned char *) calloc(1, (size_t) vd->framesizeIn);
-        if(!vd->tmpbuffer)
-            goto error;
-        vd->framebuffer =
-            (unsigned char *) calloc(1, (size_t) vd->width * (vd->height + 8) * 2);
-        break;
-    case V4L2_PIX_FMT_RGB565: // buffer allocation for non varies on frame size formats
-    case V4L2_PIX_FMT_YUYV:
-    case V4L2_PIX_FMT_UYVY:
-        vd->framebuffer =
-            (unsigned char *) calloc(1, (size_t) vd->framesizeIn);
-        break;
-        vd->framebuffer =
-            (unsigned char *) calloc(1, (size_t) vd->framesizeIn);
-        break;
-    default:
-        fprintf(stderr, " should never arrive exit fatal !!\n");
+    if (init_framebuffer(vd) < 0) {
         goto error;
-        break;
-    }
-
-    if(!vd->framebuffer)
-        goto error;
+    }
 
     return 0;
 error:
+    free_framebuffer(vd);
     free(pglobal->in[id].in_parameters);
     free(vd->videodevice);
     free(vd->status);
@@ -250,6 +227,41 @@ int init_videoIn(struct vdIn *vd, char *device, int width,
     return -1;
 }
 
+static int init_framebuffer(struct vdIn *vd) {
+    /* alloc a temp buffer to reconstruct the pict */
+    vd->framesizeIn = (vd->width * vd->height << 1);
+    switch (vd->formatIn) {
+    case V4L2_PIX_FMT_JPEG:
+        // Fall-through intentional
+    case V4L2_PIX_FMT_MJPEG: // in JPG mode the frame size is varies at every frame, so we allocate a bit bigger buffer
+        vd->tmpbuffer = (unsigned char *) calloc(1, (size_t) vd->framesizeIn);
+        if(!vd->tmpbuffer)
+            return -1;
+        vd->framebuffer =
+            (unsigned char *) calloc(1, (size_t) vd->width * (vd->height + 8) * 2);
+        break;
+    case V4L2_PIX_FMT_RGB565: // buffer allocation for non varies on frame size formats
+    case V4L2_PIX_FMT_YUYV:
+    case V4L2_PIX_FMT_UYVY:
+        vd->framebuffer =
+            (unsigned char *) calloc(1, (size_t) vd->framesizeIn);
+        break;
+    default:
+        fprintf(stderr, " Unknown vd->formatIn\n");
+        return -1;
+    }
+    return -!vd->framebuffer;
+}
+
+static void free_framebuffer(struct vdIn *vd) {
+    if (vd->tmpbuffer) {
+        free(vd->tmpbuffer);
+    }
+    vd->tmpbuffer = NULL;
+    free(vd->framebuffer);
+    vd->framebuffer = NULL;
+}
+
 static int init_v4l2(struct vdIn *vd)
 {
     int i;
@@ -292,6 +304,19 @@ static int init_v4l2(struct vdIn *vd)
         }
     }
 
+    if (vd->dv_timings) {
+        if (video_set_dv_timings(vd)) {
+            goto fatal;
+        }
+
+        struct v4l2_event_subscription sub;
+        memset(&sub, 0, sizeof(sub));
+        sub.type = V4L2_EVENT_SOURCE_CHANGE;
+        if (ioctl(vd->fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
+            IPRINT("Can\'t subscribe to V4L2_EVENT_SOURCE_CHANGE: %s\n", strerror(errno));
+        }
+    }
+
     /*
      * set format in
      */
@@ -450,11 +475,12 @@ static int init_v4l2(struct vdIn *vd)
     }
 
     return 0;
 fatal:
+    fprintf(stderr, "Init v4L2 failed !! exit fatal\n");
     return -1;
 }
 
-static int video_enable(struct vdIn *vd)
+int video_enable(struct vdIn *vd)
 {
     int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
     int ret;
@@ -483,6 +509,54 @@ static int video_disable(struct vdIn *vd, streaming_state disabledState)
     return 0;
 }
 
+int video_set_dv_timings(struct vdIn *vd)
+{
+    struct v4l2_dv_timings timings;
+    v4l2_std_id std;
+
+    memset(&timings, 0, sizeof(timings));
+    if (xioctl(vd->fd, VIDIOC_QUERY_DV_TIMINGS, &timings) >= 0) {
+        IPRINT("QUERY_DV_TIMINGS returned %ux%u pixclk %llu\n", timings.bt.width, timings.bt.height, timings.bt.pixelclock);
+        // Can read DV timings, so set them.
+        if (xioctl(vd->fd, VIDIOC_S_DV_TIMINGS, &timings) < 0) {
+            perror("Failed to set DV timings");
+            return -1;
+        } else {
+            vd->width = timings.bt.width;
+            vd->height = timings.bt.height;
+        }
+    } else {
+        memset(&std, 0, sizeof(std));
+        if (xioctl(vd->fd, VIDIOC_QUERYSTD, &std) >= 0) {
+            // Can read standard, so set it.
+            if (xioctl(vd->fd, VIDIOC_S_STD, &std) < 0) {
+                perror("Failed to set standard");
+                return -1;
+            }
+        }
+    }
+    return 0;
+}
+
+int video_handle_event(struct vdIn *vd)
+{
+    struct v4l2_event ev;
+    if (!ioctl(vd->fd, VIDIOC_DQEVENT, &ev)) {
+        switch (ev.type) {
+        case V4L2_EVENT_SOURCE_CHANGE:
+            IPRINT("V4L2_EVENT_SOURCE_CHANGE: Source changed\n");
+            if (setResolution(vd, vd->width, vd->height) < 0) {
+                return -1;
+            }
+            break;
+        case V4L2_EVENT_EOS:
+            IPRINT("V4L2_EVENT_EOS\n");
+            break;
+        }
+    }
+    return 0;
+}
+
 /******************************************************************************
 Description.:
 Input Value.:
@@ -609,11 +683,7 @@ int close_v4l2(struct vdIn *vd)
 {
     if(vd->streamingState == STREAMING_ON)
         video_disable(vd, STREAMING_OFF);
-    if(vd->tmpbuffer)
-        free(vd->tmpbuffer);
-    vd->tmpbuffer = NULL;
-    free(vd->framebuffer);
-    vd->framebuffer = NULL;
+    free_framebuffer(vd);
     free(vd->videodevice);
     free(vd->status);
     free(vd->pictName);
@@ -889,35 +959,40 @@ void control_readed(struct vdIn *vd, struct v4l2_queryctrl *ctrl, globals *pglob
  */
 int setResolution(struct vdIn *vd, int width, int height)
 {
-    int ret;
-
     DBG("setResolution(%d, %d)\n", width, height);
-    vd->streamingState = STREAMING_PAUSED;
-    if(video_disable(vd, STREAMING_PAUSED) == 0) { // do streamoff
-        DBG("Unmap buffers\n");
-        int i;
-        for(i = 0; i < NB_BUFFER; i++)
-            munmap(vd->mem[i], vd->buf.length);
+    if (video_disable(vd, STREAMING_PAUSED) < 0) {
+        IPRINT("Unable to disable streaming\n");
+        return -1;
+    }
 
-        if(CLOSE_VIDEO(vd->fd) == 0) {
-            DBG("Device closed successfully\n");
-        }
+    DBG("Unmap buffers\n");
+    for (int i = 0; i < NB_BUFFER; i++) {
+        munmap(vd->mem[i], vd->buf.length);
+    }
 
-        vd->width = width;
-        vd->height = height;
-        if(init_v4l2(vd) < 0) {
-            fprintf(stderr, " Init v4L2 failed !! exit fatal \n");
-            return -1;
-        } else {
-            DBG("reinit done\n");
-            video_enable(vd);
-            return 0;
-        }
-    } else {
-        DBG("Unable to disable streaming\n");
+    if (CLOSE_VIDEO(vd->fd) == 0) {
+        DBG("Device closed successfully\n");
+    }
+
+    vd->width = width;
+    vd->height = height;
+    if (init_v4l2(vd) < 0) {
+        return -1;
+    }
+
+    free_framebuffer(vd);
+    if (init_framebuffer(vd) < 0) {
+        IPRINT("Can\'t reallocate framebuffer\n");
         return -1;
     }
-    return ret;
+
+    DBG("Resolution changed to %dx%d , enabling the video...\n", width, height);
+    if (video_enable(vd) < 0) {
+        IPRINT("Can\'t RE-enable the video after setResolution(%dx%d)", width, height);
+        return -1;
+    }
+
+    return 0;
 }
 
 /*
diff --git a/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.h b/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.h
index 9602129d..df4d3dfe 100644
--- a/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.h
+++ b/mjpg-streamer-experimental/plugins/input_uvc/v4l2uvc.h
@@ -114,6 +114,7 @@ struct vdIn {
     v4l2_std_id vstd;
     unsigned long frame_period_time; // in ms
     unsigned char soft_framedrop;
+    unsigned int dv_timings;
 };
 
 /* optional initial settings */
@@ -154,6 +155,10 @@ int memcpy_picture(unsigned char *out, unsigned char *buf, int size);
 int uvcGrab(struct vdIn *vd);
 int close_v4l2(struct vdIn *vd);
 
+int video_enable(struct vdIn *vd);
+int video_set_dv_timings(struct vdIn *vd);
+int video_handle_event(struct vdIn *vd);
+
 int v4l2GetControl(struct vdIn *vd, int control);
 int v4l2SetControl(struct vdIn *vd, int control, int value, int plugin_number, globals *pglobal);
 int v4l2UpControl(struct vdIn *vd, int control);