@@ -2251,9 +2251,11 @@ static bool waitee_running(struct thread_data *me)
 static void run_threads(struct sk_out *sk_out)
 {
 	struct thread_data *td;
-	unsigned int i, todo, nr_running, nr_started;
+	struct timespec last_finish_time;
+	unsigned int i, todo, nr_running, nr_started, prev_nr_running;
 	uint64_t m_rate, t_rate;
 	uint64_t spent;
+	uint64_t per_job_spent;
 
 	if (fio_gtod_offload && fio_start_gtod_thread())
 		return;
@@ -2294,6 +2296,7 @@ static void run_threads(struct sk_out *sk_out)
 	todo = thread_number;
 	nr_running = 0;
 	nr_started = 0;
+	prev_nr_running = 0;
 	m_rate = t_rate = 0;
 
 	for_each_td(td, i) {
@@ -2338,6 +2341,7 @@ static void run_threads(struct sk_out *sk_out)
 	fio_idle_prof_start();
 
 	set_genesis_time();
+	fio_gettime(&last_finish_time, NULL);
 
 	while (todo) {
 		struct thread_data *map[REAL_MAX_JOBS];
@@ -2380,6 +2384,13 @@ static void run_threads(struct sk_out *sk_out)
 				continue;
 			}
 
+			if (td->o.stonewall && td->o.per_job_start_delay) {
+				per_job_spent = utime_since_now(&last_finish_time);
+
+				if (td->o.per_job_start_delay > per_job_spent)
+					continue;
+			}
+
 			init_disk_util(td);
 
 			td->rusage_sem = fio_sem_init(FIO_SEM_LOCKED);
@@ -2498,7 +2509,12 @@ static void run_threads(struct sk_out *sk_out)
 			fio_sem_up(td->sem);
 		}
 
+		prev_nr_running = nr_running;
 		reap_threads(&nr_running, &t_rate, &m_rate);
+		if (nr_running == prev_nr_running - 1) {
+			//sequential job has finished get new base time
+			fio_gettime(&last_finish_time, NULL);
+		}
 
 		if (todo)
 			do_usleep(100000);
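For reference, below is a minimal standalone sketch (not fio code) of the timing pattern the hunks above add to run_threads(): record a base time when the previous stonewalled job finishes, then let the next job start only after the configured per-job delay, in microseconds, has elapsed. It models fio's fio_gettime()/utime_since_now() helpers with clock_gettime(), and the 500000-microsecond delay is a stand-in for td->o.per_job_start_delay.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Microseconds elapsed since *base, analogous to fio's utime_since_now(). */
static uint64_t usecs_since(const struct timespec *base)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (uint64_t)((now.tv_sec - base->tv_sec) * 1000000LL +
			  (now.tv_nsec - base->tv_nsec) / 1000);
}

int main(void)
{
	struct timespec last_finish_time;
	uint64_t per_job_start_delay = 500000;	/* hypothetical 0.5s delay */
	struct timespec pause = { .tv_sec = 0, .tv_nsec = 100000000 };

	/* Base time; in the patch this is fio_gettime(&last_finish_time, NULL). */
	clock_gettime(CLOCK_MONOTONIC, &last_finish_time);

	/*
	 * Like run_threads(): while the delay has not elapsed, skip starting
	 * the job and sleep briefly, mirroring continue + do_usleep(100000).
	 */
	while (per_job_start_delay > usecs_since(&last_finish_time))
		nanosleep(&pause, NULL);

	printf("%llu usec elapsed, next sequential job may start\n",
	       (unsigned long long)usecs_since(&last_finish_time));
	return 0;
}

Build with a plain "cc sketch.c"; very old glibc versions may additionally need -lrt for clock_gettime().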