Merged
228 changes: 153 additions & 75 deletions mjpg-streamer-experimental/plugins/input_uvc/input_uvc.c
@@ -74,6 +74,8 @@ static unsigned int every = 1;
static int wantTimestamp = 0;
static struct timeval timestamp;
static int softfps = -1;
static unsigned int timeout = 5;
Owner
Can timeout be disabled by default? To keep current behaviour.

Contributor Author
@mdevaev mdevaev Sep 12, 2018
The current behaviour is totally broken. You don't have any way to know that something went wrong while the program is running. If the connection to the device is broken, it will never be restored and the fd will be "dead". Mjpg-streamer will hang forever and you won't know why. If you have a timeout, you can at least restart the program.

For example, if mjpg-streamer runs inside a camera firmware, self-monitoring can restart the stream in case of a problem, provided this timeout exists. With the old behaviour this is impossible; mjpg-streamer will just remain frozen and broken.

Do you really want to keep this behavior?
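
A minimal sketch of the watchdog pattern being argued for here, assuming a hypothetical dev_fd already opened on the V4L2 device (an illustration, not the PR's actual code): a bounded select() lets the caller notice when the fd stops delivering frames, while passing NULL as the timeout argument reproduces the old block-forever behaviour.

#include <errno.h>
#include <stdio.h>
#include <sys/select.h>

/* Wait up to timeout_sec for the device fd to become readable.
 * Returns 1 when a frame can be read, 0 on timeout (the "dead fd" case
 * the -timeout option is meant to catch), -1 on a real error. */
static int wait_for_frame(int dev_fd, unsigned int timeout_sec)
{
    for (;;) {
        fd_set rd_fds;
        struct timeval tv;

        FD_ZERO(&rd_fds);
        FD_SET(dev_fd, &rd_fds);
        tv.tv_sec = timeout_sec;
        tv.tv_usec = 0;

        int sel = select(dev_fd + 1, &rd_fds, NULL, NULL, &tv);
        if (sel < 0) {
            if (errno == EINTR)
                continue;             /* interrupted by a signal: retry */
            perror("select()");
            return -1;
        }
        return (sel > 0) ? 1 : 0;     /* 0 == timed out, device looks dead */
    }
}

On timeout the caller can tear the stream down and re-initialize it (or simply exit so a supervisor restarts the process), which is exactly the recovery path a plain blocking read never reaches.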

static unsigned int dv_timings = 0;

static const struct {
const char * k;
@@ -211,6 +213,8 @@ int input_init(input_parameter *param, int id)
{"cb", required_argument, 0, 0},
{"timestamp", no_argument, 0, 0},
{"softfps", required_argument, 0, 0},
{"timeout", required_argument, 0, 0},
{"dv_timings", no_argument, 0, 0},
{0, 0, 0, 0}
};

@@ -373,6 +377,14 @@ int input_init(input_parameter *param, int id)
case 40:
softfps = atoi(optarg);
break;
case 41:
DBG("case 41\n");
timeout = MAX(atoi(optarg), 1);
break;
case 42:
DBG("case 42\n");
dv_timings = 1;
break;
default:
DBG("default case\n");
help();
@@ -430,6 +442,7 @@ int input_init(input_parameter *param, int id)

DBG("vdIn pn: %d\n", id);
/* open video device and prepare data structure */
pctx->videoIn->dv_timings = dv_timings;
if(init_videoIn(pctx->videoIn, dev, width, height, fps, format, 1, pctx->pglobal, id, tvnorm) < 0) {
IPRINT("init_VideoIn failed\n");
closelog();
@@ -531,6 +544,8 @@ void help(void)
" [-timestamp ]..........: Populate frame timestamp with system time\n" \
" [-softfps] ............: Drop frames to try and achieve this fps\n" \
" set your camera to its maximum fps to avoid stuttering\n" \
" [-timeout] ............: Timeout for device querying (seconds)\n" \
" [-dv_timings] .........: Enable DV timings querying and event processing\n" \
" ---------------------------------------------------------------\n");

fprintf(stderr, "\n"\
@@ -648,102 +663,165 @@ void *cam_thread(void *arg)
pcontext->videoIn->frame_period_time = 1000/softfps;
}

if (video_enable(pcontext->videoIn)) {
IPRINT("Can\'t enable video the first time\n");
goto endloop;
}

while(!pglobal->stop) {
while(pcontext->videoIn->streamingState == STREAMING_PAUSED) {
usleep(1); // maybe not the best way so FIXME
}

/* grab a frame */
if(uvcGrab(pcontext->videoIn) < 0) {
IPRINT("Error grabbing frames\n");
exit(EXIT_FAILURE);
}
fd_set rd_fds; // for capture
fd_set ex_fds; // for capture
fd_set wr_fds; // for output

if ( every_count < every - 1 ) {
DBG("dropping %d frame for every=%d\n", every_count + 1, every);
++every_count;
continue;
} else {
every_count = 0;
}
FD_ZERO(&rd_fds);
FD_SET(pcontext->videoIn->fd, &rd_fds);

//DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->tmpbytesused, pcontext->id);

/*
* Workaround for broken, corrupted frames:
* Under low light conditions corrupted frames may get captured.
* The good thing is such frames are quite small compared to the regular pictures.
* For example a VGA (640x480) webcam picture is normally >= 8kByte large,
* corrupted frames are smaller.
*/
if(pcontext->videoIn->tmpbytesused < minimum_size) {
DBG("dropping too small frame, assuming it as broken\n");
continue;
}
FD_ZERO(&ex_fds);
FD_SET(pcontext->videoIn->fd, &ex_fds);

// Overwrite timestamp (e.g. where camera is providing 0 values)
// Do it here so that this timestamp can be used in frameskipping
if(wantTimestamp)
{
gettimeofday(&timestamp, NULL);
pcontext->videoIn->tmptimestamp = timestamp;
}
FD_ZERO(&wr_fds);
FD_SET(pcontext->videoIn->fd, &wr_fds);

// use software frame dropping on low fps
if (pcontext->videoIn->soft_framedrop == 1) {
unsigned long last = pglobal->in[pcontext->id].timestamp.tv_sec * 1000 +
(pglobal->in[pcontext->id].timestamp.tv_usec/1000); // convert to ms
unsigned long current = pcontext->videoIn->tmptimestamp.tv_sec * 1000 +
pcontext->videoIn->tmptimestamp.tv_usec/1000; // convert to ms
struct timeval tv;
tv.tv_sec = timeout;
tv.tv_usec = 0;

// if the requested time has not elapsed, skip the frame
if ((current - last) < pcontext->videoIn->frame_period_time) {
DBG("Last frame taken %d ms ago so drop it\n", (current - last));
int sel = select(pcontext->videoIn->fd + 1, &rd_fds, &wr_fds, &ex_fds, &tv);
DBG("select() = %d\n", sel);

if (sel < 0) {
if (errno == EINTR) {
continue;
}
perror("select() error");
goto endloop;
} else if (sel == 0) {
IPRINT("select() timeout\n");
if (dv_timings) {
if (setResolution(pcontext->videoIn, pcontext->videoIn->width, pcontext->videoIn->height) < 0) {
goto endloop;
}
continue;
} else {
goto endloop;
}
DBG("Lagg: %ld\n", (current - last) - pcontext->videoIn->frame_period_time);
}

/* copy JPG picture to global buffer */
pthread_mutex_lock(&pglobal->in[pcontext->id].db);
if (FD_ISSET(pcontext->videoIn->fd, &rd_fds)) {
DBG("Grabbing a frame...\n");
/* grab a frame */
if(uvcGrab(pcontext->videoIn) < 0) {
IPRINT("Error grabbing frames\n");
goto endloop;
}

/*
* If capturing in YUV mode convert to JPEG now.
* This compression requires many CPU cycles, so try to avoid YUV format.
* Getting JPEGs straight from the webcam, is one of the major advantages of
* Linux-UVC compatible devices.
*/
#ifndef NO_LIBJPEG
if ((pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_UYVY) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_RGB565) ) {
DBG("compressing frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = compress_image_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, quality);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
} else {
#endif
DBG("copying frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
#ifndef NO_LIBJPEG
}
#endif
if ( every_count < every - 1 ) {
DBG("dropping %d frame for every=%d\n", every_count + 1, every);
++every_count;
goto other_select_handlers;
} else {
every_count = 0;
}

//DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->tmpbytesused, pcontext->id);

/*
* Workaround for broken, corrupted frames:
* Under low light conditions corrupted frames may get captured.
* The good thing is such frames are quite small compared to the regular pictures.
* For example a VGA (640x480) webcam picture is normally >= 8kByte large,
* corrupted frames are smaller.
*/
if(pcontext->videoIn->tmpbytesused < minimum_size) {
DBG("dropping too small frame, assuming it as broken\n");
goto other_select_handlers;
}

// Overwrite timestamp (e.g. where camera is providing 0 values)
// Do it here so that this timestamp can be used in frameskipping
if(wantTimestamp)
{
gettimeofday(&timestamp, NULL);
pcontext->videoIn->tmptimestamp = timestamp;
}

// use software frame dropping on low fps
if (pcontext->videoIn->soft_framedrop == 1) {
unsigned long last = pglobal->in[pcontext->id].timestamp.tv_sec * 1000 +
(pglobal->in[pcontext->id].timestamp.tv_usec/1000); // convert to ms
unsigned long current = pcontext->videoIn->tmptimestamp.tv_sec * 1000 +
pcontext->videoIn->tmptimestamp.tv_usec/1000; // convert to ms

// if the requested time has not elapsed, skip the frame
if ((current - last) < pcontext->videoIn->frame_period_time) {
DBG("Last frame taken %d ms ago so drop it\n", (current - last));
goto other_select_handlers;
}
DBG("Lagg: %ld\n", (current - last) - pcontext->videoIn->frame_period_time);
}

/* copy JPG picture to global buffer */
pthread_mutex_lock(&pglobal->in[pcontext->id].db);

/*
* If capturing in YUV mode convert to JPEG now.
* This compression requires many CPU cycles, so try to avoid YUV format.
* Getting JPEGs straight from the webcam, is one of the major advantages of
* Linux-UVC compatible devices.
*/
#ifndef NO_LIBJPEG
if ((pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_UYVY) ||
(pcontext->videoIn->formatIn == V4L2_PIX_FMT_RGB565) ) {
DBG("compressing frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = compress_image_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, quality);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
} else {
#endif
DBG("copying frame from input: %d\n", (int)pcontext->id);
pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);
/* copy this frame's timestamp to user space */
pglobal->in[pcontext->id].timestamp = pcontext->videoIn->tmptimestamp;
#ifndef NO_LIBJPEG
}
#endif

#if 0
/* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
if((prev_size - global->size)*(prev_size - global->size) > 4 * 1024 * 1024) {
DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
}
prev_size = global->size;
/* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
if((prev_size - global->size)*(prev_size - global->size) > 4 * 1024 * 1024) {
DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
}
prev_size = global->size;
#endif

/* signal fresh_frame */
pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
/* signal fresh_frame */
pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
}

other_select_handlers:
Owner
Not crazy about the multiple gotos, but I guess this code wasn't very well structured to start with and I can't see a quick way to fix it!

Contributor Author
@mdevaev mdevaev Sep 12, 2018
I think it's an old problem of this function. I didn't want to modify the existing code too much and end up with a giant patch, because I saw that you are working on something in this function. I plan to investigate automatic resolution changes and continue implementing the best possible support for the B101, and maybe I will have to refactor then. It seems to me that it is better to do that in a separate PR.

Contributor Author
I refactored this code and moved event processing into a separate function.
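
As a rough sketch of that kind of split (hypothetical helper names; in the plugin they would wrap uvcGrab(), setResolution() and video_handle_event()), the select() result handling could be dispatched from one flat function instead of branching through several goto labels:

#include <errno.h>
#include <sys/select.h>

/* Hypothetical helpers, declared only to show the shape of the refactor. */
enum loop_action { LOOP_CONTINUE, LOOP_STOP };
enum loop_action handle_frame(int fd);      /* grab + compress + publish */
enum loop_action handle_timeout(int fd);    /* dv_timings resolution re-query */
enum loop_action handle_exception(int fd);  /* V4L2 event processing */

/* Dispatch one select() result; the capture loop just acts on the return value. */
static enum loop_action dispatch_select(int fd, int sel,
                                        fd_set *rd_fds, fd_set *ex_fds)
{
    if (sel < 0)
        return (errno == EINTR) ? LOOP_CONTINUE : LOOP_STOP;

    if (sel == 0)                           /* select() timeout */
        return handle_timeout(fd);

    if (FD_ISSET(fd, rd_fds)) {
        enum loop_action a = handle_frame(fd);
        if (a != LOOP_CONTINUE)
            return a;
    }

    if (FD_ISSET(fd, ex_fds))
        return handle_exception(fd);

    return LOOP_CONTINUE;
}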


if (dv_timings) {
if (FD_ISSET(pcontext->videoIn->fd, &wr_fds)) {
IPRINT("Writing?!\n");
}

if (FD_ISSET(pcontext->videoIn->fd, &ex_fds)) {
IPRINT("FD exception\n");
if (video_handle_event(pcontext->videoIn) < 0) {
goto endloop;
}
}
}
}

endloop:

DBG("leaving input thread, calling cleanup function now\n");
pthread_cleanup_pop(1);
