 from loguru import logger
 from requests.exceptions import RequestException
 
+from lightx2v.server.metrics import monitor_cli
 from lightx2v.utils.envs import *
 from lightx2v.utils.generate_task_id import generate_task_id
 from lightx2v.utils.memory_profiler import peak_memory_decorator
 from lightx2v.utils.profiler import *
+from lightx2v.utils.metrics_profiler import MetricsProfilingContext
 from lightx2v.utils.utils import save_to_video, vae_to_comfyui_image
 
 from .base_runner import BaseRunner
@@ -161,6 +163,8 @@ def read_image_input(self, img_path):
             img_ori = img_path
         else:
             img_ori = Image.open(img_path).convert("RGB")
+        width, height = img_ori.size
+        monitor_cli.lightx2v_input_image_len.observe(width * height)
         img = TF.to_tensor(img_ori).sub_(0.5).div_(0.5).unsqueeze(0).cuda()
         return img, img_ori
@@ -243,18 +247,21 @@ def run_main(self, total_steps=None):
         for segment_idx in range(self.video_segment_num):
             logger.info(f"🔄 start segment {segment_idx + 1}/{self.video_segment_num}")
             with ProfilingContext4DebugL1(f"segment end2end {segment_idx + 1}/{self.video_segment_num}"):
-                self.check_stop()
-                # 1. default do nothing
-                self.init_run_segment(segment_idx)
-                # 2. main inference loop
-                latents = self.run_segment(total_steps=total_steps)
-                # 3. vae decoder
-                self.gen_video = self.run_vae_decoder(latents)
-                # 4. default do nothing
-                self.end_run_segment(segment_idx)
+                with MetricsProfilingContext(monitor_cli.lightx2v_run_pre_step_dit_duration, labels=[segment_idx + 1,
+                                             self.video_segment_num]):
+                    self.check_stop()
+                    # 1. default do nothing
+                    self.init_run_segment(segment_idx)
+                    # 2. main inference loop
+                    latents = self.run_segment(total_steps=total_steps)
+                    # 3. vae decoder
+                    self.gen_video = self.run_vae_decoder(latents)
+                    # 4. default do nothing
+                    self.end_run_segment(segment_idx)
         self.end_run()
 
     @ProfilingContext4DebugL1("Run VAE Decoder")
+    @MetricsProfilingContext(monitor_cli.lightx2v_run_vae_decode_duration, labels=["DefaultRunner"])
     def run_vae_decoder(self, latents):
         if self.config.get("lazy_load", False) or self.config.get("unload_modules", False):
             self.vae_decoder = self.load_vae_decoder()
@@ -309,7 +316,9 @@ def process_images_after_vae_decoder(self, save_video=True):
             return {"video": self.gen_video}
         return {"video": None}
 
+    @MetricsProfilingContext(monitor_cli.lightx2v_worker_request_duration, labels=["DefaultRunner"])
     def run_pipeline(self, save_video=True):
+        monitor_cli.lightx2v_worker_request_count.inc()
         if self.config["use_prompt_enhancer"]:
             self.config["prompt_enhanced"] = self.post_prompt_enhancer()
@@ -321,4 +330,5 @@ def run_pipeline(self, save_video=True):
         torch.cuda.empty_cache()
         gc.collect()
 
+        monitor_cli.lightx2v_worker_request_success.inc()
         return gen_video
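
The diff relies on two pieces of instrumentation that are not shown here: the `monitor_cli` metric registry and the `MetricsProfilingContext` helper, which is used both as a context manager (around the per-segment loop) and as a decorator (on `run_vae_decoder` and `run_pipeline`). Below is a minimal sketch, assuming Prometheus-style metric objects with `.labels()`, `.observe()`, and `.inc()`; it is an illustration only, not the actual `lightx2v.utils.metrics_profiler` or `lightx2v.server.metrics` implementation.

```python
# Sketch (assumed, not the actual lightx2v code) of a timing helper usable
# both as a context manager and as a decorator, reporting elapsed wall-clock
# seconds to a Prometheus-style histogram metric.
import time
from contextlib import ContextDecorator


class MetricsProfilingContext(ContextDecorator):
    def __init__(self, metric, labels=None):
        self.metric = metric    # e.g. a prometheus_client.Histogram
        self.labels = labels    # optional label values passed to .labels()
        self._start = None

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = time.perf_counter() - self._start
        target = self.metric.labels(*self.labels) if self.labels else self.metric
        target.observe(elapsed)
        return False  # never swallow exceptions raised in the timed block


# Hypothetical metric definitions (the real ones live in lightx2v.server.metrics):
# from prometheus_client import Counter, Histogram
# lightx2v_worker_request_count = Counter(
#     "lightx2v_worker_request_count", "Total pipeline runs started")
# lightx2v_run_vae_decode_duration = Histogram(
#     "lightx2v_run_vae_decode_duration", "VAE decode time (s)", ["runner"])
```

Inheriting from `contextlib.ContextDecorator` is what allows the same class to be applied with `@` to a whole method and with `with` around an inner block, as the diff does in `run_main` and on `run_vae_decoder`.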