diff --git a/records/090525_SkipMLPBlocks/07e7ae76-b7d0-4481-b149-01e7d81b5ad4.txt b/records/090525_SkipMLPBlocks/07e7ae76-b7d0-4481-b149-01e7d81b5ad4.txt new file mode 100644 index 000000000..6abacd0c3 --- /dev/null +++ b/records/090525_SkipMLPBlocks/07e7ae76-b7d0-4481-b149-01e7d81b5ad4.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, 
None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < 
M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = 
C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
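+
+    For reference, the orthogonalization computed by the Triton kernels above is
+    equivalent to this plain-PyTorch sketch of the quintic Newton-Schulz iteration
+    (illustrative only: tall-matrix transposition is omitted, and the actual run
+    uses newton_schulz_triton):
+
+        a, b, c = (3.4445, -4.7750, 2.0315)
+        X = G.bfloat16() / (G.norm(dim=(-2, -1), keepdim=True) + 1e-7)
+        for _ in range(5):
+            A = X @ X.mT           # ns_line_1
+            B = b * A + c * A @ A  # ns_line_2
+            X = a * X + B @ X      # ns_line_3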
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
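+        # worked example: next_multiple_of_n(50257, n=128) == 50304, so the embedding
+        # and lm_head dims divide evenly into 128-wide tiles; the 47 padding rows are
+        # never produced by the tokenizer.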
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of position {cur}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. 
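+                # note: the unconsumed tail of the exhausted shard is dropped here;
+                # the fresh BOSFinder then scans the next shard from its first BOS.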
+ tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1705 # number of iterations to run + cooldown_frac: int = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. 
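+
+# Token accounting for reference (illustrative, not used below): with world_size=8,
+# grad_accum_steps = 8 // 8 = 1, so each rank consumes
+#   args.train_batch_size // (grad_accum_steps * world_size) = 2048*24*8 // 8 = 49152
+# training tokens per micro-step, packed by the data loader into BOS-aligned
+# sequences of at most args.train_max_seq_len = 128*16 = 2048 tokens.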
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
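+
+# Illustrative helper (defined but never called): samples the LR multiplier and
+# attention-window multiplier over training, per get_lr/get_ws above. The LR
+# multiplier stays at 1.0 until x = 1 - cooldown_frac = 0.55 of training, then
+# decays linearly toward 0.1; ws steps through (3, 7, 11) at the third-points.
+def _schedule_preview(num_points: int = 5):
+    steps = [int(i / num_points * args.num_iterations) for i in range(num_points)]
+    return [(step, get_lr(step), get_ws(step)) for step in steps]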
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:45:09 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 130W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 42C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 34C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 72774 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 72775 C /usr/bin/python3 610MiB | +| 0 N/A N/A 72776 C /usr/bin/python3 610MiB | +| 0 N/A N/A 72777 C /usr/bin/python3 610MiB | +| 0 N/A N/A 72778 C /usr/bin/python3 610MiB | +| 0 N/A N/A 72779 C /usr/bin/python3 610MiB | +| 0 N/A N/A 72780 C /usr/bin/python3 610MiB | +| 0 N/A N/A 72781 C /usr/bin/python3 610MiB | +| 1 N/A N/A 72775 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 72776 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 72777 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 72778 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 72779 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 72780 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 72781 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1705 train_time:390ms step_avg:390.50ms +step:2/1705 train_time:411ms step_avg:205.49ms +step:3/1705 train_time:481ms step_avg:160.24ms +step:4/1705 train_time:571ms step_avg:142.85ms +step:5/1705 
train_time:664ms step_avg:132.73ms +step:6/1705 train_time:756ms step_avg:126.05ms +step:7/1705 train_time:848ms step_avg:121.18ms +step:8/1705 train_time:940ms step_avg:117.56ms +step:9/1705 train_time:1033ms step_avg:114.77ms +step:10/1705 train_time:1125ms step_avg:112.53ms +step:11/1705 train_time:1217ms step_avg:110.68ms +step:12/1705 train_time:1311ms step_avg:109.24ms +step:13/1705 train_time:1407ms step_avg:108.25ms +step:14/1705 train_time:1503ms step_avg:107.35ms +step:15/1705 train_time:1596ms step_avg:106.38ms +step:16/1705 train_time:1688ms step_avg:105.52ms +step:17/1705 train_time:1782ms step_avg:104.82ms +step:18/1705 train_time:1875ms step_avg:104.15ms +step:19/1705 train_time:1967ms step_avg:103.53ms +step:20/1705 train_time:2060ms step_avg:102.98ms +step:21/1705 train_time:2153ms step_avg:102.50ms +step:22/1705 train_time:2246ms step_avg:102.09ms +step:23/1705 train_time:2340ms step_avg:101.73ms +step:24/1705 train_time:2433ms step_avg:101.38ms +step:25/1705 train_time:2527ms step_avg:101.09ms +step:26/1705 train_time:2620ms step_avg:100.78ms +step:27/1705 train_time:2713ms step_avg:100.47ms +step:28/1705 train_time:2807ms step_avg:100.24ms +step:29/1705 train_time:2900ms step_avg:99.99ms +step:30/1705 train_time:2992ms step_avg:99.74ms +step:31/1705 train_time:3085ms step_avg:99.51ms +step:32/1705 train_time:3178ms step_avg:99.31ms +step:33/1705 train_time:3271ms step_avg:99.12ms +step:34/1705 train_time:3364ms step_avg:98.95ms +step:35/1705 train_time:3458ms step_avg:98.79ms +step:36/1705 train_time:3550ms step_avg:98.62ms +step:37/1705 train_time:3644ms step_avg:98.49ms +step:38/1705 train_time:3737ms step_avg:98.35ms +step:39/1705 train_time:3830ms step_avg:98.21ms +step:40/1705 train_time:3924ms step_avg:98.10ms +step:41/1705 train_time:4017ms step_avg:97.98ms +step:42/1705 train_time:4110ms step_avg:97.85ms +step:43/1705 train_time:4204ms step_avg:97.76ms +step:44/1705 train_time:4297ms step_avg:97.65ms +step:45/1705 train_time:4389ms step_avg:97.54ms +step:46/1705 train_time:4483ms step_avg:97.45ms +step:47/1705 train_time:4576ms step_avg:97.36ms +step:48/1705 train_time:4669ms step_avg:97.27ms +step:49/1705 train_time:4763ms step_avg:97.20ms +step:50/1705 train_time:4856ms step_avg:97.13ms +step:51/1705 train_time:4950ms step_avg:97.05ms +step:52/1705 train_time:5044ms step_avg:96.99ms +step:53/1705 train_time:5137ms step_avg:96.92ms +step:54/1705 train_time:5230ms step_avg:96.85ms +step:55/1705 train_time:5323ms step_avg:96.78ms +step:56/1705 train_time:5415ms step_avg:96.70ms +step:57/1705 train_time:5509ms step_avg:96.64ms +step:58/1705 train_time:5602ms step_avg:96.59ms +step:59/1705 train_time:5695ms step_avg:96.52ms +step:60/1705 train_time:5788ms step_avg:96.47ms +step:61/1705 train_time:5882ms step_avg:96.42ms +step:62/1705 train_time:5975ms step_avg:96.36ms +step:63/1705 train_time:6068ms step_avg:96.32ms +step:64/1705 train_time:6162ms step_avg:96.28ms +step:65/1705 train_time:6256ms step_avg:96.24ms +step:66/1705 train_time:6348ms step_avg:96.19ms +step:67/1705 train_time:6442ms step_avg:96.14ms +step:68/1705 train_time:6535ms step_avg:96.10ms +step:69/1705 train_time:6628ms step_avg:96.05ms +step:70/1705 train_time:6721ms step_avg:96.01ms +step:71/1705 train_time:6814ms step_avg:95.97ms +step:72/1705 train_time:6907ms step_avg:95.93ms +step:73/1705 train_time:7002ms step_avg:95.91ms +step:74/1705 train_time:7094ms step_avg:95.87ms +step:75/1705 train_time:7188ms step_avg:95.84ms +step:76/1705 train_time:7281ms step_avg:95.80ms +step:77/1705 
train_time:7374ms step_avg:95.77ms +step:78/1705 train_time:7467ms step_avg:95.74ms +step:79/1705 train_time:7560ms step_avg:95.70ms +step:80/1705 train_time:7653ms step_avg:95.66ms +step:81/1705 train_time:7746ms step_avg:95.63ms +step:82/1705 train_time:7839ms step_avg:95.59ms +step:83/1705 train_time:7931ms step_avg:95.56ms +step:84/1705 train_time:8026ms step_avg:95.54ms +step:85/1705 train_time:8119ms step_avg:95.51ms +step:86/1705 train_time:8211ms step_avg:95.48ms +step:87/1705 train_time:8305ms step_avg:95.46ms +step:88/1705 train_time:8397ms step_avg:95.42ms +step:89/1705 train_time:8490ms step_avg:95.40ms +step:90/1705 train_time:8584ms step_avg:95.38ms +step:91/1705 train_time:8677ms step_avg:95.35ms +step:92/1705 train_time:8770ms step_avg:95.33ms +step:93/1705 train_time:8863ms step_avg:95.30ms +step:94/1705 train_time:8956ms step_avg:95.27ms +step:95/1705 train_time:9049ms step_avg:95.25ms +step:96/1705 train_time:9142ms step_avg:95.23ms +step:97/1705 train_time:9235ms step_avg:95.21ms +step:98/1705 train_time:9328ms step_avg:95.18ms +step:99/1705 train_time:9421ms step_avg:95.16ms +step:100/1705 train_time:9514ms step_avg:95.14ms +step:101/1705 train_time:9608ms step_avg:95.13ms +step:102/1705 train_time:9701ms step_avg:95.10ms +step:103/1705 train_time:9793ms step_avg:95.08ms +step:104/1705 train_time:9887ms step_avg:95.07ms +step:105/1705 train_time:9980ms step_avg:95.05ms +step:106/1705 train_time:10073ms step_avg:95.03ms +step:107/1705 train_time:10167ms step_avg:95.02ms +step:108/1705 train_time:10260ms step_avg:95.00ms +step:109/1705 train_time:10352ms step_avg:94.97ms +step:110/1705 train_time:10445ms step_avg:94.96ms +step:111/1705 train_time:10537ms step_avg:94.93ms +step:112/1705 train_time:10630ms step_avg:94.91ms +step:113/1705 train_time:10723ms step_avg:94.90ms +step:114/1705 train_time:10816ms step_avg:94.88ms +step:115/1705 train_time:10909ms step_avg:94.86ms +step:116/1705 train_time:11002ms step_avg:94.85ms +step:117/1705 train_time:11095ms step_avg:94.83ms +step:118/1705 train_time:11189ms step_avg:94.82ms +step:119/1705 train_time:11282ms step_avg:94.81ms +step:120/1705 train_time:11374ms step_avg:94.78ms +step:121/1705 train_time:11467ms step_avg:94.77ms +step:122/1705 train_time:11560ms step_avg:94.75ms +step:123/1705 train_time:11652ms step_avg:94.73ms +step:124/1705 train_time:11745ms step_avg:94.72ms +step:125/1705 train_time:11839ms step_avg:94.71ms +step:125/1705 val_loss:4.2939 train_time:11931ms step_avg:95.45ms +step:126/1705 train_time:11957ms step_avg:94.89ms +step:127/1705 train_time:12031ms step_avg:94.73ms +step:128/1705 train_time:12133ms step_avg:94.79ms +step:129/1705 train_time:12229ms step_avg:94.80ms +step:130/1705 train_time:12323ms step_avg:94.79ms +step:131/1705 train_time:12415ms step_avg:94.77ms +step:132/1705 train_time:12507ms step_avg:94.75ms +step:133/1705 train_time:12599ms step_avg:94.73ms +step:134/1705 train_time:12690ms step_avg:94.70ms +step:135/1705 train_time:12783ms step_avg:94.69ms +step:136/1705 train_time:12875ms step_avg:94.67ms +step:137/1705 train_time:12968ms step_avg:94.66ms +step:138/1705 train_time:13064ms step_avg:94.67ms +step:139/1705 train_time:13160ms step_avg:94.67ms +step:140/1705 train_time:13253ms step_avg:94.67ms +step:141/1705 train_time:13346ms step_avg:94.66ms +step:142/1705 train_time:13440ms step_avg:94.64ms +step:143/1705 train_time:13532ms step_avg:94.63ms +step:144/1705 train_time:13624ms step_avg:94.61ms +step:145/1705 train_time:13716ms step_avg:94.60ms +step:146/1705 train_time:13808ms 
step_avg:94.58ms +step:147/1705 train_time:13901ms step_avg:94.57ms +step:148/1705 train_time:13993ms step_avg:94.55ms +step:149/1705 train_time:14087ms step_avg:94.55ms +step:150/1705 train_time:14183ms step_avg:94.55ms +step:151/1705 train_time:14276ms step_avg:94.54ms +step:152/1705 train_time:14369ms step_avg:94.53ms +step:153/1705 train_time:14462ms step_avg:94.52ms +step:154/1705 train_time:14555ms step_avg:94.51ms +step:155/1705 train_time:14647ms step_avg:94.50ms +step:156/1705 train_time:14740ms step_avg:94.49ms +step:157/1705 train_time:14832ms step_avg:94.47ms +step:158/1705 train_time:14926ms step_avg:94.47ms +step:159/1705 train_time:15018ms step_avg:94.45ms +step:160/1705 train_time:15111ms step_avg:94.44ms +step:161/1705 train_time:15205ms step_avg:94.44ms +step:162/1705 train_time:15298ms step_avg:94.43ms +step:163/1705 train_time:15390ms step_avg:94.42ms +step:164/1705 train_time:15484ms step_avg:94.42ms +step:165/1705 train_time:15577ms step_avg:94.41ms +step:166/1705 train_time:15670ms step_avg:94.40ms +step:167/1705 train_time:15763ms step_avg:94.39ms +step:168/1705 train_time:15855ms step_avg:94.38ms +step:169/1705 train_time:15948ms step_avg:94.37ms +step:170/1705 train_time:16041ms step_avg:94.36ms +step:171/1705 train_time:16133ms step_avg:94.35ms +step:172/1705 train_time:16228ms step_avg:94.35ms +step:173/1705 train_time:16321ms step_avg:94.34ms +step:174/1705 train_time:16415ms step_avg:94.34ms +step:175/1705 train_time:16508ms step_avg:94.33ms +step:176/1705 train_time:16601ms step_avg:94.33ms +step:177/1705 train_time:16694ms step_avg:94.31ms +step:178/1705 train_time:16787ms step_avg:94.31ms +step:179/1705 train_time:16881ms step_avg:94.30ms +step:180/1705 train_time:16974ms step_avg:94.30ms +step:181/1705 train_time:17067ms step_avg:94.29ms +step:182/1705 train_time:17160ms step_avg:94.28ms +step:183/1705 train_time:17252ms step_avg:94.27ms +step:184/1705 train_time:17345ms step_avg:94.27ms +step:185/1705 train_time:17439ms step_avg:94.26ms +step:186/1705 train_time:17532ms step_avg:94.26ms +step:187/1705 train_time:17625ms step_avg:94.25ms +step:188/1705 train_time:17718ms step_avg:94.24ms +step:189/1705 train_time:17810ms step_avg:94.23ms +step:190/1705 train_time:17905ms step_avg:94.24ms +step:191/1705 train_time:17998ms step_avg:94.23ms +step:192/1705 train_time:18091ms step_avg:94.22ms +step:193/1705 train_time:18185ms step_avg:94.22ms +step:194/1705 train_time:18278ms step_avg:94.22ms +step:195/1705 train_time:18370ms step_avg:94.21ms +step:196/1705 train_time:18464ms step_avg:94.20ms +step:197/1705 train_time:18557ms step_avg:94.20ms +step:198/1705 train_time:18650ms step_avg:94.19ms +step:199/1705 train_time:18743ms step_avg:94.19ms +step:200/1705 train_time:18836ms step_avg:94.18ms +step:201/1705 train_time:18928ms step_avg:94.17ms +step:202/1705 train_time:19021ms step_avg:94.16ms +step:203/1705 train_time:19114ms step_avg:94.16ms +step:204/1705 train_time:19207ms step_avg:94.15ms +step:205/1705 train_time:19301ms step_avg:94.15ms +step:206/1705 train_time:19394ms step_avg:94.15ms +step:207/1705 train_time:19487ms step_avg:94.14ms +step:208/1705 train_time:19582ms step_avg:94.14ms +step:209/1705 train_time:19675ms step_avg:94.14ms +step:210/1705 train_time:19768ms step_avg:94.13ms +step:211/1705 train_time:19862ms step_avg:94.13ms +step:212/1705 train_time:19954ms step_avg:94.12ms +step:213/1705 train_time:20336ms step_avg:95.47ms +step:214/1705 train_time:20405ms step_avg:95.35ms +step:215/1705 train_time:20496ms step_avg:95.33ms +step:216/1705 
train_time:20588ms step_avg:95.31ms
+step:217/1705 train_time:20680ms step_avg:95.30ms
+step:218/1705 train_time:20771ms step_avg:95.28ms
+step:219/1705 train_time:20864ms step_avg:95.27ms
+step:220/1705 train_time:20956ms step_avg:95.26ms
+step:221/1705 train_time:21048ms step_avg:95.24ms
+step:222/1705 train_time:21140ms step_avg:95.23ms
+step:223/1705 train_time:21233ms step_avg:95.21ms
+step:224/1705 train_time:21328ms step_avg:95.22ms
+step:225/1705 train_time:21424ms step_avg:95.22ms
+step:226/1705 train_time:21517ms step_avg:95.21ms
+step:227/1705 train_time:21610ms step_avg:95.20ms
+step:228/1705 train_time:21703ms step_avg:95.19ms
+step:229/1705 train_time:21795ms step_avg:95.17ms
+step:230/1705 train_time:21887ms step_avg:95.16ms
+step:231/1705 train_time:21979ms step_avg:95.15ms
+step:232/1705 train_time:22071ms step_avg:95.13ms
+step:233/1705 train_time:22164ms step_avg:95.13ms
+step:234/1705 train_time:22258ms step_avg:95.12ms
+step:235/1705 train_time:22350ms step_avg:95.11ms
+step:236/1705 train_time:22445ms step_avg:95.11ms
+step:237/1705 train_time:22538ms step_avg:95.10ms
+step:238/1705 train_time:22631ms step_avg:95.09ms
+step:239/1705 train_time:22724ms step_avg:95.08ms
+step:240/1705 train_time:22816ms step_avg:95.07ms
+step:241/1705 train_time:22908ms step_avg:95.06ms
+step:242/1705 train_time:23002ms step_avg:95.05ms
+step:243/1705 train_time:23094ms step_avg:95.04ms
+step:244/1705 train_time:23186ms step_avg:95.03ms
+step:245/1705 train_time:23279ms step_avg:95.02ms
+step:246/1705 train_time:23371ms step_avg:95.01ms
+step:247/1705 train_time:23465ms step_avg:95.00ms
+step:248/1705 train_time:23558ms step_avg:94.99ms
+step:249/1705 train_time:23652ms step_avg:94.99ms
+step:250/1705 train_time:23745ms step_avg:94.98ms
+step:250/1705 val_loss:3.9797 train_time:23838ms step_avg:95.35ms
+step:251/1705 train_time:23859ms step_avg:95.06ms
+step:252/1705 train_time:23936ms step_avg:94.98ms
+step:253/1705 train_time:24034ms step_avg:95.00ms
+step:254/1705 train_time:24127ms step_avg:94.99ms
+step:255/1705 train_time:24219ms step_avg:94.98ms
+step:256/1705 train_time:24311ms step_avg:94.97ms
+step:257/1705 train_time:24403ms step_avg:94.95ms
+step:258/1705 train_time:24495ms step_avg:94.94ms
+step:259/1705 train_time:24586ms step_avg:94.93ms
+step:260/1705 train_time:24679ms step_avg:94.92ms
+step:261/1705 train_time:24770ms step_avg:94.91ms
+step:262/1705 train_time:24864ms step_avg:94.90ms
+step:263/1705 train_time:24960ms step_avg:94.91ms
+step:264/1705 train_time:25054ms step_avg:94.90ms
+step:265/1705 train_time:25147ms step_avg:94.89ms
+step:266/1705 train_time:25240ms step_avg:94.89ms
+step:267/1705 train_time:25332ms step_avg:94.88ms
+step:268/1705 train_time:25424ms step_avg:94.87ms
+step:269/1705 train_time:25517ms step_avg:94.86ms
+step:270/1705 train_time:25609ms step_avg:94.85ms
+step:271/1705 train_time:25701ms step_avg:94.84ms
+step:272/1705 train_time:25794ms step_avg:94.83ms
+step:273/1705 train_time:25887ms step_avg:94.82ms
+step:274/1705 train_time:25981ms step_avg:94.82ms
+step:275/1705 train_time:26075ms step_avg:94.82ms
+step:276/1705 train_time:26168ms step_avg:94.81ms
+step:277/1705 train_time:26261ms step_avg:94.80ms
+step:278/1705 train_time:26353ms step_avg:94.80ms
+step:279/1705 train_time:26446ms step_avg:94.79ms
+step:280/1705 train_time:26539ms step_avg:94.78ms
+step:281/1705 train_time:26632ms step_avg:94.78ms
+step:282/1705 train_time:26724ms step_avg:94.77ms
+step:283/1705 train_time:26817ms step_avg:94.76ms
+step:284/1705 train_time:26910ms step_avg:94.75ms
+step:285/1705 train_time:27004ms step_avg:94.75ms
+step:286/1705 train_time:27097ms step_avg:94.74ms
+step:287/1705 train_time:27189ms step_avg:94.74ms
+step:288/1705 train_time:27282ms step_avg:94.73ms
+step:289/1705 train_time:27375ms step_avg:94.72ms
+step:290/1705 train_time:27467ms step_avg:94.71ms
+step:291/1705 train_time:27560ms step_avg:94.71ms
+step:292/1705 train_time:27652ms step_avg:94.70ms
+step:293/1705 train_time:27745ms step_avg:94.69ms
+step:294/1705 train_time:27838ms step_avg:94.69ms
+step:295/1705 train_time:27931ms step_avg:94.68ms
+step:296/1705 train_time:28024ms step_avg:94.68ms
+step:297/1705 train_time:28118ms step_avg:94.67ms
+step:298/1705 train_time:28210ms step_avg:94.66ms
+step:299/1705 train_time:28303ms step_avg:94.66ms
+step:300/1705 train_time:28396ms step_avg:94.65ms
+step:301/1705 train_time:28488ms step_avg:94.64ms
+step:302/1705 train_time:28581ms step_avg:94.64ms
+step:303/1705 train_time:28674ms step_avg:94.63ms
+step:304/1705 train_time:28766ms step_avg:94.63ms
+step:305/1705 train_time:28859ms step_avg:94.62ms
+step:306/1705 train_time:28952ms step_avg:94.61ms
+step:307/1705 train_time:29044ms step_avg:94.61ms
+step:308/1705 train_time:29137ms step_avg:94.60ms
+step:309/1705 train_time:29230ms step_avg:94.59ms
+step:310/1705 train_time:29323ms step_avg:94.59ms
+step:311/1705 train_time:29415ms step_avg:94.58ms
+step:312/1705 train_time:29507ms step_avg:94.57ms
+step:313/1705 train_time:29600ms step_avg:94.57ms
+step:314/1705 train_time:29693ms step_avg:94.56ms
+step:315/1705 train_time:29786ms step_avg:94.56ms
+step:316/1705 train_time:29879ms step_avg:94.55ms
+step:317/1705 train_time:29972ms step_avg:94.55ms
+step:318/1705 train_time:30065ms step_avg:94.54ms
+step:319/1705 train_time:30157ms step_avg:94.54ms
+step:320/1705 train_time:30250ms step_avg:94.53ms
+step:321/1705 train_time:30343ms step_avg:94.53ms
+step:322/1705 train_time:30436ms step_avg:94.52ms
+step:323/1705 train_time:30528ms step_avg:94.51ms
+step:324/1705 train_time:30621ms step_avg:94.51ms
+step:325/1705 train_time:30714ms step_avg:94.50ms
+step:326/1705 train_time:30806ms step_avg:94.50ms
+step:327/1705 train_time:30899ms step_avg:94.49ms
+step:328/1705 train_time:30993ms step_avg:94.49ms
+step:329/1705 train_time:31085ms step_avg:94.48ms
+step:330/1705 train_time:31178ms step_avg:94.48ms
+step:331/1705 train_time:31271ms step_avg:94.47ms
+step:332/1705 train_time:31364ms step_avg:94.47ms
+step:333/1705 train_time:31456ms step_avg:94.46ms
+step:334/1705 train_time:31549ms step_avg:94.46ms
+step:335/1705 train_time:31643ms step_avg:94.46ms
+step:336/1705 train_time:31736ms step_avg:94.45ms
+step:337/1705 train_time:31828ms step_avg:94.45ms
+step:338/1705 train_time:31921ms step_avg:94.44ms
+step:339/1705 train_time:32014ms step_avg:94.44ms
+step:340/1705 train_time:32107ms step_avg:94.43ms
+step:341/1705 train_time:32200ms step_avg:94.43ms
+step:342/1705 train_time:32293ms step_avg:94.42ms
+step:343/1705 train_time:32385ms step_avg:94.42ms
+step:344/1705 train_time:32478ms step_avg:94.41ms
+step:345/1705 train_time:32570ms step_avg:94.41ms
+step:346/1705 train_time:32663ms step_avg:94.40ms
+step:347/1705 train_time:32756ms step_avg:94.40ms
+step:348/1705 train_time:32848ms step_avg:94.39ms
+step:349/1705 train_time:32942ms step_avg:94.39ms
+step:350/1705 train_time:33034ms step_avg:94.38ms
+step:351/1705 train_time:33126ms step_avg:94.38ms
+step:352/1705 train_time:33219ms step_avg:94.37ms
+step:353/1705 train_time:33313ms step_avg:94.37ms
+step:354/1705 train_time:33405ms step_avg:94.36ms
+step:355/1705 train_time:33498ms step_avg:94.36ms
+step:356/1705 train_time:33591ms step_avg:94.36ms
+step:357/1705 train_time:33684ms step_avg:94.35ms
+step:358/1705 train_time:33777ms step_avg:94.35ms
+step:359/1705 train_time:33870ms step_avg:94.35ms
+step:360/1705 train_time:33964ms step_avg:94.34ms
+step:361/1705 train_time:34058ms step_avg:94.34ms
+step:362/1705 train_time:34150ms step_avg:94.34ms
+step:363/1705 train_time:34243ms step_avg:94.33ms
+step:364/1705 train_time:34336ms step_avg:94.33ms
+step:365/1705 train_time:34428ms step_avg:94.32ms
+step:366/1705 train_time:34522ms step_avg:94.32ms
+step:367/1705 train_time:34614ms step_avg:94.32ms
+step:368/1705 train_time:34707ms step_avg:94.31ms
+step:369/1705 train_time:34800ms step_avg:94.31ms
+step:370/1705 train_time:34893ms step_avg:94.31ms
+step:371/1705 train_time:34986ms step_avg:94.30ms
+step:372/1705 train_time:35080ms step_avg:94.30ms
+step:373/1705 train_time:35173ms step_avg:94.30ms
+step:374/1705 train_time:35265ms step_avg:94.29ms
+step:375/1705 train_time:35359ms step_avg:94.29ms
+step:375/1705 val_loss:3.8236 train_time:35452ms step_avg:94.54ms
+step:376/1705 train_time:35474ms step_avg:94.34ms
+step:377/1705 train_time:35550ms step_avg:94.30ms
+step:378/1705 train_time:35647ms step_avg:94.30ms
+step:379/1705 train_time:35740ms step_avg:94.30ms
+step:380/1705 train_time:35832ms step_avg:94.30ms
+step:381/1705 train_time:35924ms step_avg:94.29ms
+step:382/1705 train_time:36016ms step_avg:94.28ms
+step:383/1705 train_time:36108ms step_avg:94.28ms
+step:384/1705 train_time:36201ms step_avg:94.27ms
+step:385/1705 train_time:36293ms step_avg:94.27ms
+step:386/1705 train_time:36386ms step_avg:94.26ms
+step:387/1705 train_time:36481ms step_avg:94.27ms
+step:388/1705 train_time:36577ms step_avg:94.27ms
+step:389/1705 train_time:36670ms step_avg:94.27ms
+step:390/1705 train_time:36763ms step_avg:94.26ms
+step:391/1705 train_time:36856ms step_avg:94.26ms
+step:392/1705 train_time:36948ms step_avg:94.25ms
+step:393/1705 train_time:37040ms step_avg:94.25ms
+step:394/1705 train_time:37132ms step_avg:94.24ms
+step:395/1705 train_time:37224ms step_avg:94.24ms
+step:396/1705 train_time:37317ms step_avg:94.23ms
+step:397/1705 train_time:37409ms step_avg:94.23ms
+step:398/1705 train_time:37504ms step_avg:94.23ms
+step:399/1705 train_time:37598ms step_avg:94.23ms
+step:400/1705 train_time:37691ms step_avg:94.23ms
+step:401/1705 train_time:37784ms step_avg:94.22ms
+step:402/1705 train_time:37877ms step_avg:94.22ms
+step:403/1705 train_time:37969ms step_avg:94.22ms
+step:404/1705 train_time:38061ms step_avg:94.21ms
+step:405/1705 train_time:38153ms step_avg:94.21ms
+step:406/1705 train_time:38245ms step_avg:94.20ms
+step:407/1705 train_time:38339ms step_avg:94.20ms
+step:408/1705 train_time:38431ms step_avg:94.19ms
+step:409/1705 train_time:38525ms step_avg:94.19ms
+step:410/1705 train_time:38619ms step_avg:94.19ms
+step:411/1705 train_time:38712ms step_avg:94.19ms
+step:412/1705 train_time:38805ms step_avg:94.19ms
+step:413/1705 train_time:38900ms step_avg:94.19ms
+step:414/1705 train_time:38992ms step_avg:94.18ms
+step:415/1705 train_time:39084ms step_avg:94.18ms
+step:416/1705 train_time:39177ms step_avg:94.18ms
+step:417/1705 train_time:39269ms step_avg:94.17ms
+step:418/1705 train_time:39362ms step_avg:94.17ms
+step:419/1705 train_time:39455ms step_avg:94.16ms
+step:420/1705 train_time:39547ms step_avg:94.16ms
+step:421/1705 train_time:39641ms step_avg:94.16ms
+step:422/1705 train_time:39735ms step_avg:94.16ms
+step:423/1705 train_time:39828ms step_avg:94.16ms
+step:424/1705 train_time:39921ms step_avg:94.15ms
+step:425/1705 train_time:40274ms step_avg:94.76ms
+step:426/1705 train_time:40343ms step_avg:94.70ms
+step:427/1705 train_time:40435ms step_avg:94.69ms
+step:428/1705 train_time:40526ms step_avg:94.69ms
+step:429/1705 train_time:40618ms step_avg:94.68ms
+step:430/1705 train_time:40710ms step_avg:94.67ms
+step:431/1705 train_time:40802ms step_avg:94.67ms
+step:432/1705 train_time:40894ms step_avg:94.66ms
+step:433/1705 train_time:40986ms step_avg:94.66ms
+step:434/1705 train_time:41079ms step_avg:94.65ms
+step:435/1705 train_time:41172ms step_avg:94.65ms
+step:436/1705 train_time:41269ms step_avg:94.65ms
+step:437/1705 train_time:41363ms step_avg:94.65ms
+step:438/1705 train_time:41457ms step_avg:94.65ms
+step:439/1705 train_time:41549ms step_avg:94.65ms
+step:440/1705 train_time:41642ms step_avg:94.64ms
+step:441/1705 train_time:41734ms step_avg:94.63ms
+step:442/1705 train_time:41826ms step_avg:94.63ms
+step:443/1705 train_time:41918ms step_avg:94.62ms
+step:444/1705 train_time:42010ms step_avg:94.62ms
+step:445/1705 train_time:42103ms step_avg:94.61ms
+step:446/1705 train_time:42197ms step_avg:94.61ms
+step:447/1705 train_time:42290ms step_avg:94.61ms
+step:448/1705 train_time:42384ms step_avg:94.61ms
+step:449/1705 train_time:42478ms step_avg:94.60ms
+step:450/1705 train_time:42570ms step_avg:94.60ms
+step:451/1705 train_time:42663ms step_avg:94.60ms
+step:452/1705 train_time:42756ms step_avg:94.59ms
+step:453/1705 train_time:42848ms step_avg:94.59ms
+step:454/1705 train_time:42940ms step_avg:94.58ms
+step:455/1705 train_time:43033ms step_avg:94.58ms
+step:456/1705 train_time:43126ms step_avg:94.57ms
+step:457/1705 train_time:43221ms step_avg:94.57ms
+step:458/1705 train_time:43314ms step_avg:94.57ms
+step:459/1705 train_time:43407ms step_avg:94.57ms
+step:460/1705 train_time:43501ms step_avg:94.57ms
+step:461/1705 train_time:43594ms step_avg:94.56ms
+step:462/1705 train_time:43686ms step_avg:94.56ms
+step:463/1705 train_time:43779ms step_avg:94.55ms
+step:464/1705 train_time:43871ms step_avg:94.55ms
+step:465/1705 train_time:43964ms step_avg:94.55ms
+step:466/1705 train_time:44057ms step_avg:94.54ms
+step:467/1705 train_time:44150ms step_avg:94.54ms
+step:468/1705 train_time:44243ms step_avg:94.54ms
+step:469/1705 train_time:44336ms step_avg:94.53ms
+step:470/1705 train_time:44430ms step_avg:94.53ms
+step:471/1705 train_time:44525ms step_avg:94.53ms
+step:472/1705 train_time:44618ms step_avg:94.53ms
+step:473/1705 train_time:44710ms step_avg:94.52ms
+step:474/1705 train_time:44803ms step_avg:94.52ms
+step:475/1705 train_time:44896ms step_avg:94.52ms
+step:476/1705 train_time:44988ms step_avg:94.51ms
+step:477/1705 train_time:45081ms step_avg:94.51ms
+step:478/1705 train_time:45174ms step_avg:94.51ms
+step:479/1705 train_time:45266ms step_avg:94.50ms
+step:480/1705 train_time:45360ms step_avg:94.50ms
+step:481/1705 train_time:45453ms step_avg:94.50ms
+step:482/1705 train_time:45546ms step_avg:94.49ms
+step:483/1705 train_time:45640ms step_avg:94.49ms
+step:484/1705 train_time:45733ms step_avg:94.49ms
+step:485/1705 train_time:45826ms step_avg:94.49ms
+step:486/1705 train_time:45919ms step_avg:94.48ms
+step:487/1705 train_time:46012ms step_avg:94.48ms
+step:488/1705 train_time:46104ms step_avg:94.48ms
+step:489/1705 train_time:46197ms step_avg:94.47ms
+step:490/1705 train_time:46290ms step_avg:94.47ms
+step:491/1705 train_time:46383ms step_avg:94.47ms
+step:492/1705 train_time:46477ms step_avg:94.47ms
+step:493/1705 train_time:46571ms step_avg:94.46ms
+step:494/1705 train_time:46664ms step_avg:94.46ms
+step:495/1705 train_time:46758ms step_avg:94.46ms
+step:496/1705 train_time:46851ms step_avg:94.46ms
+step:497/1705 train_time:46944ms step_avg:94.45ms
+step:498/1705 train_time:47037ms step_avg:94.45ms
+step:499/1705 train_time:47131ms step_avg:94.45ms
+step:500/1705 train_time:47224ms step_avg:94.45ms
+step:500/1705 val_loss:3.7233 train_time:47317ms step_avg:94.63ms
+step:501/1705 train_time:47339ms step_avg:94.49ms
+step:502/1705 train_time:47416ms step_avg:94.45ms
+step:503/1705 train_time:47514ms step_avg:94.46ms
+step:504/1705 train_time:47608ms step_avg:94.46ms
+step:505/1705 train_time:47700ms step_avg:94.45ms
+step:506/1705 train_time:47792ms step_avg:94.45ms
+step:507/1705 train_time:47883ms step_avg:94.44ms
+step:508/1705 train_time:47976ms step_avg:94.44ms
+step:509/1705 train_time:48067ms step_avg:94.43ms
+step:510/1705 train_time:48159ms step_avg:94.43ms
+step:511/1705 train_time:48252ms step_avg:94.43ms
+step:512/1705 train_time:48347ms step_avg:94.43ms
+step:513/1705 train_time:48444ms step_avg:94.43ms
+step:514/1705 train_time:48538ms step_avg:94.43ms
+step:515/1705 train_time:48631ms step_avg:94.43ms
+step:516/1705 train_time:48724ms step_avg:94.43ms
+step:517/1705 train_time:48816ms step_avg:94.42ms
+step:518/1705 train_time:48909ms step_avg:94.42ms
+step:519/1705 train_time:49000ms step_avg:94.41ms
+step:520/1705 train_time:49093ms step_avg:94.41ms
+step:521/1705 train_time:49185ms step_avg:94.40ms
+step:522/1705 train_time:49279ms step_avg:94.40ms
+step:523/1705 train_time:49373ms step_avg:94.40ms
+step:524/1705 train_time:49467ms step_avg:94.40ms
+step:525/1705 train_time:49561ms step_avg:94.40ms
+step:526/1705 train_time:49656ms step_avg:94.40ms
+step:527/1705 train_time:49749ms step_avg:94.40ms
+step:528/1705 train_time:49841ms step_avg:94.40ms
+step:529/1705 train_time:49934ms step_avg:94.39ms
+step:530/1705 train_time:50026ms step_avg:94.39ms
+step:531/1705 train_time:50118ms step_avg:94.38ms
+step:532/1705 train_time:50211ms step_avg:94.38ms
+step:533/1705 train_time:50304ms step_avg:94.38ms
+step:534/1705 train_time:50398ms step_avg:94.38ms
+step:535/1705 train_time:50491ms step_avg:94.38ms
+step:536/1705 train_time:50584ms step_avg:94.37ms
+step:537/1705 train_time:50678ms step_avg:94.37ms
+step:538/1705 train_time:50771ms step_avg:94.37ms
+step:539/1705 train_time:50864ms step_avg:94.37ms
+step:540/1705 train_time:50956ms step_avg:94.36ms
+step:541/1705 train_time:51049ms step_avg:94.36ms
+step:542/1705 train_time:51142ms step_avg:94.36ms
+step:543/1705 train_time:51235ms step_avg:94.36ms
+step:544/1705 train_time:51329ms step_avg:94.35ms
+step:545/1705 train_time:51421ms step_avg:94.35ms
+step:546/1705 train_time:51515ms step_avg:94.35ms
+step:547/1705 train_time:51608ms step_avg:94.35ms
+step:548/1705 train_time:51701ms step_avg:94.34ms
+step:549/1705 train_time:51795ms step_avg:94.34ms
+step:550/1705 train_time:51888ms step_avg:94.34ms
+step:551/1705 train_time:51980ms step_avg:94.34ms
+step:552/1705 train_time:52073ms step_avg:94.33ms
+step:553/1705 train_time:52165ms step_avg:94.33ms
+step:554/1705 train_time:52258ms step_avg:94.33ms
+step:555/1705 train_time:52351ms step_avg:94.33ms
+step:556/1705 train_time:52444ms step_avg:94.32ms
+step:557/1705 train_time:52537ms step_avg:94.32ms
+step:558/1705 train_time:52631ms step_avg:94.32ms
+step:559/1705 train_time:52723ms step_avg:94.32ms
+step:560/1705 train_time:52816ms step_avg:94.32ms
+step:561/1705 train_time:52910ms step_avg:94.31ms
+step:562/1705 train_time:53003ms step_avg:94.31ms
+step:563/1705 train_time:53095ms step_avg:94.31ms
+step:564/1705 train_time:53188ms step_avg:94.30ms
+step:565/1705 train_time:53280ms step_avg:94.30ms
+step:566/1705 train_time:53374ms step_avg:94.30ms
+step:567/1705 train_time:53467ms step_avg:94.30ms
+step:568/1705 train_time:53560ms step_avg:94.30ms
+step:569/1705 train_time:53653ms step_avg:94.29ms
+step:570/1705 train_time:53747ms step_avg:94.29ms
+step:571/1705 train_time:53841ms step_avg:94.29ms
+step:572/1705 train_time:53935ms step_avg:94.29ms
+step:573/1705 train_time:54029ms step_avg:94.29ms
+step:574/1705 train_time:54123ms step_avg:94.29ms
+step:575/1705 train_time:54218ms step_avg:94.29ms
+step:576/1705 train_time:54313ms step_avg:94.29ms
+step:577/1705 train_time:54408ms step_avg:94.29ms
+step:578/1705 train_time:54500ms step_avg:94.29ms
+step:579/1705 train_time:54595ms step_avg:94.29ms
+step:580/1705 train_time:54690ms step_avg:94.29ms
+step:581/1705 train_time:54784ms step_avg:94.29ms
+step:582/1705 train_time:54879ms step_avg:94.29ms
+step:583/1705 train_time:54973ms step_avg:94.29ms
+step:584/1705 train_time:55067ms step_avg:94.29ms
+step:585/1705 train_time:55161ms step_avg:94.29ms
+step:586/1705 train_time:55255ms step_avg:94.29ms
+step:587/1705 train_time:55351ms step_avg:94.29ms
+step:588/1705 train_time:55444ms step_avg:94.29ms
+step:589/1705 train_time:55539ms step_avg:94.29ms
+step:590/1705 train_time:55634ms step_avg:94.29ms
+step:591/1705 train_time:55729ms step_avg:94.30ms
+step:592/1705 train_time:55824ms step_avg:94.30ms
+step:593/1705 train_time:55918ms step_avg:94.30ms
+step:594/1705 train_time:56013ms step_avg:94.30ms
+step:595/1705 train_time:56106ms step_avg:94.30ms
+step:596/1705 train_time:56200ms step_avg:94.29ms
+step:597/1705 train_time:56294ms step_avg:94.30ms
+step:598/1705 train_time:56389ms step_avg:94.30ms
+step:599/1705 train_time:56483ms step_avg:94.29ms
+step:600/1705 train_time:56578ms step_avg:94.30ms
+step:601/1705 train_time:56672ms step_avg:94.30ms
+step:602/1705 train_time:56767ms step_avg:94.30ms
+step:603/1705 train_time:56861ms step_avg:94.30ms
+step:604/1705 train_time:56956ms step_avg:94.30ms
+step:605/1705 train_time:57050ms step_avg:94.30ms
+step:606/1705 train_time:57144ms step_avg:94.30ms
+step:607/1705 train_time:57238ms step_avg:94.30ms
+step:608/1705 train_time:57334ms step_avg:94.30ms
+step:609/1705 train_time:57429ms step_avg:94.30ms
+step:610/1705 train_time:57523ms step_avg:94.30ms
+step:611/1705 train_time:57617ms step_avg:94.30ms
+step:612/1705 train_time:57712ms step_avg:94.30ms
+step:613/1705 train_time:57806ms step_avg:94.30ms
+step:614/1705 train_time:57900ms step_avg:94.30ms
+step:615/1705 train_time:57995ms step_avg:94.30ms
+step:616/1705 train_time:58089ms step_avg:94.30ms
+step:617/1705 train_time:58182ms step_avg:94.30ms
+step:618/1705 train_time:58276ms step_avg:94.30ms
+step:619/1705 train_time:58371ms step_avg:94.30ms
+step:620/1705 train_time:58466ms step_avg:94.30ms
+step:621/1705 train_time:58559ms step_avg:94.30ms
+step:622/1705 train_time:58654ms step_avg:94.30ms
+step:623/1705 train_time:58750ms step_avg:94.30ms
+step:624/1705 train_time:58843ms step_avg:94.30ms
+step:625/1705 train_time:58937ms step_avg:94.30ms
+step:625/1705 val_loss:3.6227 train_time:59033ms step_avg:94.45ms
+step:626/1705 train_time:59055ms step_avg:94.34ms
+step:627/1705 train_time:59138ms step_avg:94.32ms
+step:628/1705 train_time:59237ms step_avg:94.33ms
+step:629/1705 train_time:59331ms step_avg:94.33ms
+step:630/1705 train_time:59424ms step_avg:94.32ms
+step:631/1705 train_time:59517ms step_avg:94.32ms
+step:632/1705 train_time:59610ms step_avg:94.32ms
+step:633/1705 train_time:59703ms step_avg:94.32ms
+step:634/1705 train_time:59797ms step_avg:94.32ms
+step:635/1705 train_time:59890ms step_avg:94.32ms
+step:636/1705 train_time:59986ms step_avg:94.32ms
+step:637/1705 train_time:60082ms step_avg:94.32ms
+step:638/1705 train_time:60179ms step_avg:94.32ms
+step:639/1705 train_time:60551ms step_avg:94.76ms
+step:640/1705 train_time:60639ms step_avg:94.75ms
+step:641/1705 train_time:60732ms step_avg:94.75ms
+step:642/1705 train_time:60825ms step_avg:94.74ms
+step:643/1705 train_time:60918ms step_avg:94.74ms
+step:644/1705 train_time:61012ms step_avg:94.74ms
+step:645/1705 train_time:61105ms step_avg:94.74ms
+step:646/1705 train_time:61198ms step_avg:94.73ms
+step:647/1705 train_time:61291ms step_avg:94.73ms
+step:648/1705 train_time:61384ms step_avg:94.73ms
+step:649/1705 train_time:61483ms step_avg:94.73ms
+step:650/1705 train_time:61580ms step_avg:94.74ms
+step:651/1705 train_time:61677ms step_avg:94.74ms
+step:652/1705 train_time:61771ms step_avg:94.74ms
+step:653/1705 train_time:61865ms step_avg:94.74ms
+step:654/1705 train_time:61959ms step_avg:94.74ms
+step:655/1705 train_time:62053ms step_avg:94.74ms
+step:656/1705 train_time:62145ms step_avg:94.73ms
+step:657/1705 train_time:62239ms step_avg:94.73ms
+step:658/1705 train_time:62332ms step_avg:94.73ms
+step:659/1705 train_time:62426ms step_avg:94.73ms
+step:660/1705 train_time:62521ms step_avg:94.73ms
+step:661/1705 train_time:62617ms step_avg:94.73ms
+step:662/1705 train_time:62713ms step_avg:94.73ms
+step:663/1705 train_time:62807ms step_avg:94.73ms
+step:664/1705 train_time:62901ms step_avg:94.73ms
+step:665/1705 train_time:62996ms step_avg:94.73ms
+step:666/1705 train_time:63090ms step_avg:94.73ms
+step:667/1705 train_time:63183ms step_avg:94.73ms
+step:668/1705 train_time:63277ms step_avg:94.73ms
+step:669/1705 train_time:63372ms step_avg:94.73ms
+step:670/1705 train_time:63466ms step_avg:94.73ms
+step:671/1705 train_time:63561ms step_avg:94.73ms
+step:672/1705 train_time:63656ms step_avg:94.73ms
+step:673/1705 train_time:63752ms step_avg:94.73ms
+step:674/1705 train_time:63846ms step_avg:94.73ms
+step:675/1705 train_time:63941ms step_avg:94.73ms
+step:676/1705 train_time:64036ms step_avg:94.73ms
+step:677/1705 train_time:64129ms step_avg:94.73ms
+step:678/1705 train_time:64223ms step_avg:94.72ms
+step:679/1705 train_time:64317ms step_avg:94.72ms
+step:680/1705 train_time:64411ms step_avg:94.72ms
+step:681/1705 train_time:64506ms step_avg:94.72ms
+step:682/1705 train_time:64600ms step_avg:94.72ms
+step:683/1705 train_time:64696ms step_avg:94.72ms
+step:684/1705 train_time:64791ms step_avg:94.72ms
+step:685/1705 train_time:64884ms step_avg:94.72ms
+step:686/1705 train_time:64979ms step_avg:94.72ms
+step:687/1705 train_time:65073ms step_avg:94.72ms
+step:688/1705 train_time:65168ms step_avg:94.72ms
+step:689/1705 train_time:65261ms step_avg:94.72ms
+step:690/1705 train_time:65355ms step_avg:94.72ms
+step:691/1705 train_time:65450ms step_avg:94.72ms
+step:692/1705 train_time:65543ms step_avg:94.72ms
+step:693/1705 train_time:65638ms step_avg:94.72ms
+step:694/1705 train_time:65733ms step_avg:94.72ms
+step:695/1705 train_time:65829ms step_avg:94.72ms
+step:696/1705 train_time:65922ms step_avg:94.72ms
+step:697/1705 train_time:66017ms step_avg:94.72ms
+step:698/1705 train_time:66111ms step_avg:94.71ms
+step:699/1705 train_time:66205ms step_avg:94.71ms
+step:700/1705 train_time:66299ms step_avg:94.71ms
+step:701/1705 train_time:66394ms step_avg:94.71ms
+step:702/1705 train_time:66488ms step_avg:94.71ms
+step:703/1705 train_time:66581ms step_avg:94.71ms
+step:704/1705 train_time:66677ms step_avg:94.71ms
+step:705/1705 train_time:66772ms step_avg:94.71ms
+step:706/1705 train_time:66866ms step_avg:94.71ms
+step:707/1705 train_time:66960ms step_avg:94.71ms
+step:708/1705 train_time:67055ms step_avg:94.71ms
+step:709/1705 train_time:67149ms step_avg:94.71ms
+step:710/1705 train_time:67243ms step_avg:94.71ms
+step:711/1705 train_time:67337ms step_avg:94.71ms
+step:712/1705 train_time:67431ms step_avg:94.71ms
+step:713/1705 train_time:67524ms step_avg:94.70ms
+step:714/1705 train_time:67619ms step_avg:94.70ms
+step:715/1705 train_time:67714ms step_avg:94.70ms
+step:716/1705 train_time:67809ms step_avg:94.70ms
+step:717/1705 train_time:67903ms step_avg:94.70ms
+step:718/1705 train_time:67997ms step_avg:94.70ms
+step:719/1705 train_time:68093ms step_avg:94.70ms
+step:720/1705 train_time:68187ms step_avg:94.70ms
+step:721/1705 train_time:68280ms step_avg:94.70ms
+step:722/1705 train_time:68375ms step_avg:94.70ms
+step:723/1705 train_time:68469ms step_avg:94.70ms
+step:724/1705 train_time:68563ms step_avg:94.70ms
+step:725/1705 train_time:68657ms step_avg:94.70ms
+step:726/1705 train_time:68752ms step_avg:94.70ms
+step:727/1705 train_time:68846ms step_avg:94.70ms
+step:728/1705 train_time:68940ms step_avg:94.70ms
+step:729/1705 train_time:69035ms step_avg:94.70ms
+step:730/1705 train_time:69130ms step_avg:94.70ms
+step:731/1705 train_time:69224ms step_avg:94.70ms
+step:732/1705 train_time:69318ms step_avg:94.70ms
+step:733/1705 train_time:69413ms step_avg:94.70ms
+step:734/1705 train_time:69507ms step_avg:94.70ms
+step:735/1705 train_time:69601ms step_avg:94.70ms
+step:736/1705 train_time:69696ms step_avg:94.70ms
+step:737/1705 train_time:69791ms step_avg:94.70ms
+step:738/1705 train_time:69885ms step_avg:94.69ms
+step:739/1705 train_time:69978ms step_avg:94.69ms
+step:740/1705 train_time:70073ms step_avg:94.69ms
+step:741/1705 train_time:70168ms step_avg:94.69ms
+step:742/1705 train_time:70261ms step_avg:94.69ms
+step:743/1705 train_time:70357ms step_avg:94.69ms
+step:744/1705 train_time:70452ms step_avg:94.69ms
+step:745/1705 train_time:70547ms step_avg:94.69ms
+step:746/1705 train_time:70640ms step_avg:94.69ms
+step:747/1705 train_time:70735ms step_avg:94.69ms
+step:748/1705 train_time:70829ms step_avg:94.69ms
+step:749/1705 train_time:70923ms step_avg:94.69ms
+step:750/1705 train_time:71017ms step_avg:94.69ms
+step:750/1705 val_loss:3.5689 train_time:71113ms step_avg:94.82ms
+step:751/1705 train_time:71134ms step_avg:94.72ms
+step:752/1705 train_time:71213ms step_avg:94.70ms
+step:753/1705 train_time:71314ms step_avg:94.71ms
+step:754/1705 train_time:71409ms step_avg:94.71ms
+step:755/1705 train_time:71503ms step_avg:94.71ms
+step:756/1705 train_time:71596ms step_avg:94.70ms
+step:757/1705 train_time:71689ms step_avg:94.70ms
+step:758/1705 train_time:71782ms step_avg:94.70ms
+step:759/1705 train_time:71875ms step_avg:94.70ms
+step:760/1705 train_time:71969ms step_avg:94.70ms
+step:761/1705 train_time:72062ms step_avg:94.69ms
+step:762/1705 train_time:72159ms step_avg:94.70ms
+step:763/1705 train_time:72256ms step_avg:94.70ms
+step:764/1705 train_time:72353ms step_avg:94.70ms
+step:765/1705 train_time:72448ms step_avg:94.70ms
+step:766/1705 train_time:72541ms step_avg:94.70ms
+step:767/1705 train_time:72636ms step_avg:94.70ms
+step:768/1705 train_time:72730ms step_avg:94.70ms
+step:769/1705 train_time:72822ms step_avg:94.70ms
+step:770/1705 train_time:72917ms step_avg:94.70ms
+step:771/1705 train_time:73010ms step_avg:94.70ms
+step:772/1705 train_time:73104ms step_avg:94.69ms
+step:773/1705 train_time:73199ms step_avg:94.70ms
+step:774/1705 train_time:73296ms step_avg:94.70ms
+step:775/1705 train_time:73392ms step_avg:94.70ms
+step:776/1705 train_time:73486ms step_avg:94.70ms
+step:777/1705 train_time:73579ms step_avg:94.70ms
+step:778/1705 train_time:73674ms step_avg:94.70ms
+step:779/1705 train_time:73768ms step_avg:94.70ms
+step:780/1705 train_time:73861ms step_avg:94.69ms
+step:781/1705 train_time:73955ms step_avg:94.69ms
+step:782/1705 train_time:74049ms step_avg:94.69ms
+step:783/1705 train_time:74143ms step_avg:94.69ms
+step:784/1705 train_time:74239ms step_avg:94.69ms
+step:785/1705 train_time:74336ms step_avg:94.70ms
+step:786/1705 train_time:74431ms step_avg:94.70ms
+step:787/1705 train_time:74525ms step_avg:94.70ms
+step:788/1705 train_time:74619ms step_avg:94.69ms
+step:789/1705 train_time:74714ms step_avg:94.69ms
+step:790/1705 train_time:74808ms step_avg:94.69ms
+step:791/1705 train_time:74901ms step_avg:94.69ms
+step:792/1705 train_time:74995ms step_avg:94.69ms
+step:793/1705 train_time:75089ms step_avg:94.69ms
+step:794/1705 train_time:75183ms step_avg:94.69ms
+step:795/1705 train_time:75278ms step_avg:94.69ms
+step:796/1705 train_time:75373ms step_avg:94.69ms
+step:797/1705 train_time:75467ms step_avg:94.69ms
+step:798/1705 train_time:75562ms step_avg:94.69ms
+step:799/1705 train_time:75656ms step_avg:94.69ms
+step:800/1705 train_time:75751ms step_avg:94.69ms
+step:801/1705 train_time:75844ms step_avg:94.69ms
+step:802/1705 train_time:75938ms step_avg:94.69ms
+step:803/1705 train_time:76033ms step_avg:94.69ms
+step:804/1705 train_time:76127ms step_avg:94.69ms
+step:805/1705 train_time:76221ms step_avg:94.68ms
+step:806/1705 train_time:76316ms step_avg:94.69ms
+step:807/1705 train_time:76411ms step_avg:94.69ms
+step:808/1705 train_time:76507ms step_avg:94.69ms
+step:809/1705 train_time:76601ms step_avg:94.69ms
+step:810/1705 train_time:76695ms step_avg:94.69ms
+step:811/1705 train_time:76789ms step_avg:94.68ms
+step:812/1705 train_time:76882ms step_avg:94.68ms
+step:813/1705 train_time:76976ms step_avg:94.68ms
+step:814/1705 train_time:77071ms step_avg:94.68ms
+step:815/1705 train_time:77165ms step_avg:94.68ms
+step:816/1705 train_time:77259ms step_avg:94.68ms
+step:817/1705 train_time:77356ms step_avg:94.68ms
+step:818/1705 train_time:77450ms step_avg:94.68ms
+step:819/1705 train_time:77545ms step_avg:94.68ms
+step:820/1705 train_time:77640ms step_avg:94.68ms
+step:821/1705 train_time:77735ms step_avg:94.68ms
+step:822/1705 train_time:77829ms step_avg:94.68ms
+step:823/1705 train_time:77923ms step_avg:94.68ms
+step:824/1705 train_time:78018ms step_avg:94.68ms
+step:825/1705 train_time:78113ms step_avg:94.68ms
+step:826/1705 train_time:78207ms step_avg:94.68ms
+step:827/1705 train_time:78301ms step_avg:94.68ms
+step:828/1705 train_time:78397ms step_avg:94.68ms
+step:829/1705 train_time:78492ms step_avg:94.68ms
+step:830/1705 train_time:78586ms step_avg:94.68ms
+step:831/1705 train_time:78680ms step_avg:94.68ms
+step:832/1705 train_time:78775ms step_avg:94.68ms
+step:833/1705 train_time:78869ms step_avg:94.68ms
+step:834/1705 train_time:78962ms step_avg:94.68ms
+step:835/1705 train_time:79057ms step_avg:94.68ms
+step:836/1705 train_time:79151ms step_avg:94.68ms
+step:837/1705 train_time:79246ms step_avg:94.68ms
+step:838/1705 train_time:79339ms step_avg:94.68ms
+step:839/1705 train_time:79434ms step_avg:94.68ms
+step:840/1705 train_time:79530ms step_avg:94.68ms
+step:841/1705 train_time:79624ms step_avg:94.68ms
+step:842/1705 train_time:79718ms step_avg:94.68ms
+step:843/1705 train_time:79813ms step_avg:94.68ms
+step:844/1705 train_time:79907ms step_avg:94.68ms
+step:845/1705 train_time:80002ms step_avg:94.68ms
+step:846/1705 train_time:80096ms step_avg:94.68ms
+step:847/1705 train_time:80190ms step_avg:94.68ms
+step:848/1705 train_time:80284ms step_avg:94.67ms
+step:849/1705 train_time:80379ms step_avg:94.67ms
+step:850/1705 train_time:80474ms step_avg:94.68ms
+step:851/1705 train_time:80742ms step_avg:94.88ms
+step:852/1705 train_time:80823ms step_avg:94.86ms
+step:853/1705 train_time:80916ms step_avg:94.86ms
+step:854/1705 train_time:81009ms step_avg:94.86ms
+step:855/1705 train_time:81102ms step_avg:94.86ms
+step:856/1705 train_time:81196ms step_avg:94.86ms
+step:857/1705 train_time:81289ms step_avg:94.85ms
+step:858/1705 train_time:81383ms step_avg:94.85ms
+step:859/1705 train_time:81476ms step_avg:94.85ms
+step:860/1705 train_time:81570ms step_avg:94.85ms
+step:861/1705 train_time:81668ms step_avg:94.85ms
+step:862/1705 train_time:81764ms step_avg:94.85ms
+step:863/1705 train_time:81858ms step_avg:94.85ms
+step:864/1705 train_time:81953ms step_avg:94.85ms
+step:865/1705 train_time:82047ms step_avg:94.85ms
+step:866/1705 train_time:82141ms step_avg:94.85ms
+step:867/1705 train_time:82235ms step_avg:94.85ms
+step:868/1705 train_time:82328ms step_avg:94.85ms
+step:869/1705 train_time:82421ms step_avg:94.85ms
+step:870/1705 train_time:82517ms step_avg:94.85ms
+step:871/1705 train_time:82611ms step_avg:94.85ms
+step:872/1705 train_time:82707ms step_avg:94.85ms
+step:873/1705 train_time:82802ms step_avg:94.85ms
+step:874/1705 train_time:82897ms step_avg:94.85ms
+step:875/1705 train_time:82992ms step_avg:94.85ms
+step:875/1705 val_loss:3.5262 train_time:83086ms step_avg:94.96ms
+step:876/1705 train_time:83108ms step_avg:94.87ms
+step:877/1705 train_time:83186ms step_avg:94.85ms
+step:878/1705 train_time:83283ms step_avg:94.86ms
+step:879/1705 train_time:83378ms step_avg:94.86ms
+step:880/1705 train_time:83473ms step_avg:94.86ms
+step:881/1705 train_time:83566ms step_avg:94.85ms
+step:882/1705 train_time:83659ms step_avg:94.85ms
+step:883/1705 train_time:83754ms step_avg:94.85ms
+step:884/1705 train_time:83847ms step_avg:94.85ms
+step:885/1705 train_time:83941ms step_avg:94.85ms
+step:886/1705 train_time:84036ms step_avg:94.85ms
+step:887/1705 train_time:84133ms step_avg:94.85ms
+step:888/1705 train_time:84229ms step_avg:94.85ms
+step:889/1705 train_time:84326ms step_avg:94.86ms
+step:890/1705 train_time:84421ms step_avg:94.85ms
+step:891/1705 train_time:84515ms step_avg:94.85ms
+step:892/1705 train_time:84608ms step_avg:94.85ms
+step:893/1705 train_time:84702ms step_avg:94.85ms
+step:894/1705 train_time:84796ms step_avg:94.85ms
+step:895/1705 train_time:84889ms step_avg:94.85ms
+step:896/1705 train_time:84983ms step_avg:94.85ms
+step:897/1705 train_time:85078ms step_avg:94.85ms
+step:898/1705 train_time:85174ms step_avg:94.85ms
+step:899/1705 train_time:85270ms step_avg:94.85ms
+step:900/1705 train_time:85364ms step_avg:94.85ms
+step:901/1705 train_time:85458ms step_avg:94.85ms
+step:902/1705 train_time:85552ms step_avg:94.85ms
+step:903/1705 train_time:85646ms step_avg:94.85ms
+step:904/1705 train_time:85740ms step_avg:94.84ms
+step:905/1705 train_time:85834ms step_avg:94.84ms
+step:906/1705 train_time:85927ms step_avg:94.84ms
+step:907/1705 train_time:86022ms step_avg:94.84ms
+step:908/1705 train_time:86116ms step_avg:94.84ms
+step:909/1705 train_time:86212ms step_avg:94.84ms
+step:910/1705 train_time:86307ms step_avg:94.84ms
+step:911/1705 train_time:86401ms step_avg:94.84ms
+step:912/1705 train_time:86496ms step_avg:94.84ms
+step:913/1705 train_time:86590ms step_avg:94.84ms
+step:914/1705 train_time:86683ms step_avg:94.84ms
+step:915/1705 train_time:86778ms step_avg:94.84ms
+step:916/1705 train_time:86872ms step_avg:94.84ms
+step:917/1705 train_time:86966ms step_avg:94.84ms
+step:918/1705 train_time:87060ms step_avg:94.84ms
+step:919/1705 train_time:87155ms step_avg:94.84ms
+step:920/1705 train_time:87250ms step_avg:94.84ms
+step:921/1705 train_time:87344ms step_avg:94.84ms
+step:922/1705 train_time:87439ms step_avg:94.84ms
+step:923/1705 train_time:87534ms step_avg:94.84ms
+step:924/1705 train_time:87628ms step_avg:94.84ms
+step:925/1705 train_time:87722ms step_avg:94.83ms
+step:926/1705 train_time:87816ms step_avg:94.83ms
+step:927/1705 train_time:87911ms step_avg:94.83ms
+step:928/1705 train_time:88005ms step_avg:94.83ms
+step:929/1705 train_time:88099ms step_avg:94.83ms
+step:930/1705 train_time:88193ms step_avg:94.83ms
+step:931/1705 train_time:88287ms step_avg:94.83ms
+step:932/1705 train_time:88382ms step_avg:94.83ms
+step:933/1705 train_time:88476ms step_avg:94.83ms
+step:934/1705 train_time:88571ms step_avg:94.83ms
+step:935/1705 train_time:88664ms step_avg:94.83ms
+step:936/1705 train_time:88758ms step_avg:94.83ms
+step:937/1705 train_time:88853ms step_avg:94.83ms
+step:938/1705 train_time:88947ms step_avg:94.83ms
+step:939/1705 train_time:89041ms step_avg:94.83ms
+step:940/1705 train_time:89137ms step_avg:94.83ms
+step:941/1705 train_time:89231ms step_avg:94.83ms
+step:942/1705 train_time:89324ms step_avg:94.82ms
+step:943/1705 train_time:89419ms step_avg:94.82ms
+step:944/1705 train_time:89514ms step_avg:94.82ms
+step:945/1705 train_time:89610ms step_avg:94.83ms
+step:946/1705 train_time:89704ms step_avg:94.82ms
+step:947/1705 train_time:89798ms step_avg:94.82ms
+step:948/1705 train_time:89894ms step_avg:94.82ms
+step:949/1705 train_time:89988ms step_avg:94.82ms
+step:950/1705 train_time:90082ms step_avg:94.82ms
+step:951/1705 train_time:90177ms step_avg:94.82ms
+step:952/1705 train_time:90271ms step_avg:94.82ms
+step:953/1705 train_time:90365ms step_avg:94.82ms
+step:954/1705 train_time:90460ms step_avg:94.82ms
+step:955/1705 train_time:90555ms step_avg:94.82ms
+step:956/1705 train_time:90649ms step_avg:94.82ms
+step:957/1705 train_time:90743ms step_avg:94.82ms
+step:958/1705 train_time:90838ms step_avg:94.82ms
+step:959/1705 train_time:90933ms step_avg:94.82ms
+step:960/1705 train_time:91026ms step_avg:94.82ms
+step:961/1705 train_time:91121ms step_avg:94.82ms
+step:962/1705 train_time:91216ms step_avg:94.82ms
+step:963/1705 train_time:91310ms step_avg:94.82ms
+step:964/1705 train_time:91404ms step_avg:94.82ms
+step:965/1705 train_time:91498ms step_avg:94.82ms
+step:966/1705 train_time:91592ms step_avg:94.82ms
+step:967/1705 train_time:91686ms step_avg:94.82ms
+step:968/1705 train_time:91780ms step_avg:94.81ms
+step:969/1705 train_time:91876ms step_avg:94.82ms
+step:970/1705 train_time:91971ms step_avg:94.82ms
+step:971/1705 train_time:92065ms step_avg:94.81ms
+step:972/1705 train_time:92159ms step_avg:94.81ms
+step:973/1705 train_time:92254ms step_avg:94.81ms
+step:974/1705 train_time:92348ms step_avg:94.81ms
+step:975/1705 train_time:92442ms step_avg:94.81ms
+step:976/1705 train_time:92537ms step_avg:94.81ms
+step:977/1705 train_time:92631ms step_avg:94.81ms
+step:978/1705 train_time:92725ms step_avg:94.81ms
+step:979/1705 train_time:92820ms step_avg:94.81ms
+step:980/1705 train_time:92915ms step_avg:94.81ms
+step:981/1705 train_time:93009ms step_avg:94.81ms
+step:982/1705 train_time:93103ms step_avg:94.81ms
+step:983/1705 train_time:93198ms step_avg:94.81ms
+step:984/1705 train_time:93293ms step_avg:94.81ms
+step:985/1705 train_time:93387ms step_avg:94.81ms
+step:986/1705 train_time:93481ms step_avg:94.81ms
+step:987/1705 train_time:93576ms step_avg:94.81ms
+step:988/1705 train_time:93670ms step_avg:94.81ms
+step:989/1705 train_time:93765ms step_avg:94.81ms
+step:990/1705 train_time:93860ms step_avg:94.81ms
+step:991/1705 train_time:93955ms step_avg:94.81ms
+step:992/1705 train_time:94049ms step_avg:94.81ms
+step:993/1705 train_time:94143ms step_avg:94.81ms
+step:994/1705 train_time:94237ms step_avg:94.81ms
+step:995/1705 train_time:94331ms step_avg:94.81ms
+step:996/1705 train_time:94425ms step_avg:94.80ms
+step:997/1705 train_time:94520ms step_avg:94.80ms
+step:998/1705 train_time:94615ms step_avg:94.80ms
+step:999/1705 train_time:94710ms step_avg:94.80ms
+step:1000/1705 train_time:94804ms step_avg:94.80ms
+step:1000/1705 val_loss:3.4882 train_time:94899ms step_avg:94.90ms
+step:1001/1705 train_time:94920ms step_avg:94.83ms
+step:1002/1705 train_time:94999ms step_avg:94.81ms
+step:1003/1705 train_time:95099ms step_avg:94.81ms
+step:1004/1705 train_time:95194ms step_avg:94.81ms
+step:1005/1705 train_time:95287ms step_avg:94.81ms
+step:1006/1705 train_time:95380ms step_avg:94.81ms
+step:1007/1705 train_time:95474ms step_avg:94.81ms
+step:1008/1705 train_time:95568ms step_avg:94.81ms
+step:1009/1705 train_time:95661ms step_avg:94.81ms
+step:1010/1705 train_time:95754ms step_avg:94.81ms
+step:1011/1705 train_time:95851ms step_avg:94.81ms
+step:1012/1705 train_time:95948ms step_avg:94.81ms
+step:1013/1705 train_time:96044ms step_avg:94.81ms
+step:1014/1705 train_time:96139ms step_avg:94.81ms
+step:1015/1705 train_time:96234ms step_avg:94.81ms
+step:1016/1705 train_time:96329ms step_avg:94.81ms
+step:1017/1705 train_time:96422ms step_avg:94.81ms
+step:1018/1705 train_time:96516ms step_avg:94.81ms
+step:1019/1705 train_time:96609ms step_avg:94.81ms
+step:1020/1705 train_time:96703ms step_avg:94.81ms
+step:1021/1705 train_time:96797ms step_avg:94.81ms
+step:1022/1705 train_time:96893ms step_avg:94.81ms
+step:1023/1705 train_time:96991ms step_avg:94.81ms
+step:1024/1705 train_time:97086ms step_avg:94.81ms
+step:1025/1705 train_time:97180ms step_avg:94.81ms
+step:1026/1705 train_time:97274ms step_avg:94.81ms
+step:1027/1705 train_time:97368ms step_avg:94.81ms
+step:1028/1705 train_time:97462ms step_avg:94.81ms
+step:1029/1705 train_time:97556ms step_avg:94.81ms
+step:1030/1705 train_time:97649ms step_avg:94.81ms
+step:1031/1705 train_time:97744ms step_avg:94.80ms
+step:1032/1705 train_time:97838ms step_avg:94.80ms
+step:1033/1705 train_time:97934ms step_avg:94.81ms
+step:1034/1705 train_time:98030ms step_avg:94.81ms
+step:1035/1705 train_time:98125ms step_avg:94.81ms
+step:1036/1705 train_time:98219ms step_avg:94.81ms
+step:1037/1705 train_time:98315ms step_avg:94.81ms
+step:1038/1705 train_time:98410ms step_avg:94.81ms
+step:1039/1705 train_time:98504ms step_avg:94.81ms
+step:1040/1705 train_time:98597ms step_avg:94.81ms
+step:1041/1705 train_time:98692ms step_avg:94.80ms
+step:1042/1705 train_time:98786ms step_avg:94.80ms
+step:1043/1705 train_time:98882ms step_avg:94.81ms
+step:1044/1705 train_time:98977ms step_avg:94.81ms
+step:1045/1705 train_time:99073ms step_avg:94.81ms
+step:1046/1705 train_time:99167ms step_avg:94.81ms
+step:1047/1705 train_time:99261ms step_avg:94.81ms
+step:1048/1705 train_time:99356ms step_avg:94.80ms
+step:1049/1705 train_time:99451ms step_avg:94.81ms
+step:1050/1705 train_time:99545ms step_avg:94.80ms
+step:1051/1705 train_time:99639ms step_avg:94.80ms
+step:1052/1705 train_time:99733ms step_avg:94.80ms
+step:1053/1705 train_time:99828ms step_avg:94.80ms
+step:1054/1705 train_time:99922ms step_avg:94.80ms
+step:1055/1705 train_time:100017ms step_avg:94.80ms
+step:1056/1705 train_time:100113ms step_avg:94.80ms
+step:1057/1705 train_time:100208ms step_avg:94.80ms
+step:1058/1705 train_time:100301ms step_avg:94.80ms
+step:1059/1705 train_time:100396ms step_avg:94.80ms
+step:1060/1705 train_time:100492ms step_avg:94.80ms
+step:1061/1705 train_time:100586ms step_avg:94.80ms
+step:1062/1705 train_time:100909ms step_avg:95.02ms
+step:1063/1705 train_time:101017ms step_avg:95.03ms
+step:1064/1705 train_time:101110ms step_avg:95.03ms
+step:1065/1705 train_time:101203ms step_avg:95.03ms
+step:1066/1705 train_time:101297ms step_avg:95.03ms
+step:1067/1705 train_time:101392ms step_avg:95.03ms
+step:1068/1705 train_time:101485ms step_avg:95.02ms
+step:1069/1705 train_time:101578ms step_avg:95.02ms
+step:1070/1705 train_time:101672ms step_avg:95.02ms
+step:1071/1705 train_time:101766ms step_avg:95.02ms
+step:1072/1705 train_time:101860ms step_avg:95.02ms
+step:1073/1705 train_time:101959ms step_avg:95.02ms
+step:1074/1705 train_time:102057ms step_avg:95.03ms
+step:1075/1705 train_time:102152ms step_avg:95.02ms
+step:1076/1705 train_time:102246ms step_avg:95.02ms
+step:1077/1705 train_time:102340ms step_avg:95.02ms
+step:1078/1705 train_time:102434ms step_avg:95.02ms
+step:1079/1705 train_time:102528ms step_avg:95.02ms
+step:1080/1705 train_time:102621ms step_avg:95.02ms
+step:1081/1705 train_time:102715ms step_avg:95.02ms
+step:1082/1705 train_time:102809ms step_avg:95.02ms
+step:1083/1705 train_time:102903ms step_avg:95.02ms
+step:1084/1705 train_time:102998ms step_avg:95.02ms
+step:1085/1705 train_time:103095ms step_avg:95.02ms
+step:1086/1705 train_time:103191ms step_avg:95.02ms
+step:1087/1705 train_time:103286ms step_avg:95.02ms
+step:1088/1705 train_time:103379ms step_avg:95.02ms
+step:1089/1705 train_time:103473ms step_avg:95.02ms
+step:1090/1705 train_time:103567ms step_avg:95.02ms
+step:1091/1705 train_time:103660ms step_avg:95.01ms
+step:1092/1705 train_time:103754ms step_avg:95.01ms
+step:1093/1705 train_time:103849ms step_avg:95.01ms
+step:1094/1705 train_time:103943ms step_avg:95.01ms
+step:1095/1705 train_time:104037ms step_avg:95.01ms
+step:1096/1705 train_time:104132ms step_avg:95.01ms
+step:1097/1705 train_time:104227ms step_avg:95.01ms
+step:1098/1705 train_time:104321ms step_avg:95.01ms
+step:1099/1705 train_time:104415ms step_avg:95.01ms
+step:1100/1705 train_time:104510ms step_avg:95.01ms
+step:1101/1705 train_time:104604ms step_avg:95.01ms
+step:1102/1705 train_time:104698ms step_avg:95.01ms
+step:1103/1705 train_time:104792ms step_avg:95.01ms
+step:1104/1705 train_time:104887ms step_avg:95.01ms
+step:1105/1705 train_time:104980ms step_avg:95.00ms
+step:1106/1705 train_time:105075ms step_avg:95.00ms
+step:1107/1705 train_time:105171ms step_avg:95.01ms
+step:1108/1705 train_time:105265ms step_avg:95.00ms
+step:1109/1705 train_time:105359ms step_avg:95.00ms
+step:1110/1705 train_time:105454ms step_avg:95.00ms
+step:1111/1705 train_time:105549ms step_avg:95.00ms
+step:1112/1705 train_time:105643ms step_avg:95.00ms
+step:1113/1705 train_time:105738ms step_avg:95.00ms
+step:1114/1705 train_time:105833ms step_avg:95.00ms
+step:1115/1705 train_time:105928ms step_avg:95.00ms
+step:1116/1705 train_time:106021ms step_avg:95.00ms
+step:1117/1705 train_time:106116ms step_avg:95.00ms
+step:1118/1705 train_time:106212ms step_avg:95.00ms
+step:1119/1705 train_time:106307ms step_avg:95.00ms
+step:1120/1705 train_time:106401ms step_avg:95.00ms
+step:1121/1705 train_time:106496ms step_avg:95.00ms
+step:1122/1705 train_time:106590ms step_avg:95.00ms
+step:1123/1705 train_time:106685ms step_avg:95.00ms
+step:1124/1705 train_time:106778ms step_avg:95.00ms
+step:1125/1705 train_time:106874ms step_avg:95.00ms
+step:1125/1705 val_loss:3.4409 train_time:106969ms step_avg:95.08ms
+step:1126/1705 train_time:106990ms step_avg:95.02ms
+step:1127/1705 train_time:107068ms step_avg:95.00ms
+step:1128/1705 train_time:107166ms step_avg:95.01ms
+step:1129/1705 train_time:107261ms step_avg:95.01ms
+step:1130/1705 train_time:107355ms step_avg:95.00ms
+step:1131/1705 train_time:107449ms step_avg:95.00ms
+step:1132/1705 train_time:107543ms step_avg:95.00ms
+step:1133/1705 train_time:107636ms step_avg:95.00ms
+step:1134/1705 train_time:107730ms step_avg:95.00ms
+step:1135/1705 train_time:107823ms step_avg:95.00ms
+step:1136/1705 train_time:107917ms step_avg:95.00ms
+step:1137/1705 train_time:108015ms step_avg:95.00ms
+step:1138/1705 train_time:108111ms step_avg:95.00ms
+step:1139/1705 train_time:108207ms step_avg:95.00ms
+step:1140/1705 train_time:108302ms step_avg:95.00ms
+step:1141/1705 train_time:108396ms step_avg:95.00ms
+step:1142/1705 train_time:108491ms step_avg:95.00ms
+step:1143/1705 train_time:108586ms step_avg:95.00ms
+step:1144/1705 train_time:108680ms step_avg:95.00ms
+step:1145/1705 train_time:108775ms step_avg:95.00ms
+step:1146/1705 train_time:108870ms step_avg:95.00ms
+step:1147/1705 train_time:108965ms step_avg:95.00ms
+step:1148/1705 train_time:109060ms step_avg:95.00ms
+step:1149/1705 train_time:109157ms step_avg:95.00ms
+step:1150/1705 train_time:109253ms step_avg:95.00ms
+step:1151/1705 train_time:109348ms step_avg:95.00ms
+step:1152/1705 train_time:109443ms step_avg:95.00ms
+step:1153/1705 train_time:109537ms step_avg:95.00ms
+step:1154/1705 train_time:109632ms step_avg:95.00ms
+step:1155/1705 train_time:109727ms step_avg:95.00ms
+step:1156/1705 train_time:109823ms step_avg:95.00ms
+step:1157/1705 train_time:109917ms step_avg:95.00ms
+step:1158/1705 train_time:110014ms step_avg:95.00ms
+step:1159/1705 train_time:110110ms step_avg:95.00ms
+step:1160/1705 train_time:110206ms step_avg:95.01ms
+step:1161/1705 train_time:110301ms step_avg:95.01ms
+step:1162/1705 train_time:110397ms step_avg:95.01ms
+step:1163/1705 train_time:110493ms step_avg:95.01ms
+step:1164/1705 train_time:110587ms step_avg:95.01ms
+step:1165/1705 train_time:110681ms step_avg:95.01ms
+step:1166/1705 train_time:110776ms step_avg:95.01ms
+step:1167/1705 train_time:110872ms step_avg:95.01ms
+step:1168/1705 train_time:110967ms step_avg:95.01ms
+step:1169/1705 train_time:111062ms step_avg:95.01ms
+step:1170/1705 train_time:111157ms step_avg:95.01ms
+step:1171/1705 train_time:111255ms step_avg:95.01ms
+step:1172/1705 train_time:111352ms step_avg:95.01ms
+step:1173/1705 train_time:111449ms step_avg:95.01ms
+step:1174/1705 train_time:111544ms step_avg:95.01ms
+step:1175/1705 train_time:111639ms step_avg:95.01ms
+step:1176/1705 train_time:111734ms step_avg:95.01ms
+step:1177/1705 train_time:111830ms step_avg:95.01ms
+step:1178/1705 train_time:111925ms step_avg:95.01ms
+step:1179/1705 train_time:112019ms step_avg:95.01ms
+step:1180/1705 train_time:112116ms step_avg:95.01ms
+step:1181/1705 train_time:112212ms step_avg:95.01ms
+step:1182/1705 train_time:112307ms step_avg:95.01ms
+step:1183/1705 train_time:112402ms step_avg:95.01ms
+step:1184/1705 train_time:112497ms step_avg:95.01ms
+step:1185/1705 train_time:112592ms step_avg:95.01ms
+step:1186/1705 train_time:112687ms step_avg:95.01ms
+step:1187/1705 train_time:112782ms step_avg:95.01ms
+step:1188/1705 train_time:112877ms step_avg:95.01ms
+step:1189/1705 train_time:112973ms step_avg:95.02ms
+step:1190/1705 train_time:113069ms step_avg:95.02ms
+step:1191/1705 train_time:113164ms step_avg:95.02ms
+step:1192/1705 train_time:113259ms step_avg:95.02ms
+step:1193/1705 train_time:113356ms step_avg:95.02ms
+step:1194/1705 train_time:113453ms step_avg:95.02ms
+step:1195/1705 train_time:113548ms step_avg:95.02ms
+step:1196/1705 train_time:113644ms step_avg:95.02ms
+step:1197/1705 train_time:113738ms step_avg:95.02ms
+step:1198/1705 train_time:113834ms step_avg:95.02ms
+step:1199/1705 train_time:113930ms step_avg:95.02ms
+step:1200/1705 train_time:114026ms step_avg:95.02ms
+step:1201/1705 train_time:114122ms step_avg:95.02ms
+step:1202/1705 train_time:114217ms step_avg:95.02ms
+step:1203/1705 train_time:114313ms step_avg:95.02ms
+step:1204/1705 train_time:114408ms step_avg:95.02ms
+step:1205/1705 train_time:114503ms step_avg:95.02ms
+step:1206/1705 train_time:114598ms step_avg:95.02ms
+step:1207/1705 train_time:114694ms step_avg:95.02ms
+step:1208/1705 train_time:114788ms step_avg:95.02ms
+step:1209/1705 train_time:114883ms step_avg:95.02ms
+step:1210/1705 train_time:114978ms step_avg:95.02ms
+step:1211/1705 train_time:115074ms step_avg:95.02ms
+step:1212/1705 train_time:115170ms step_avg:95.02ms
+step:1213/1705 train_time:115265ms step_avg:95.02ms
+step:1214/1705 train_time:115359ms step_avg:95.02ms
+step:1215/1705 train_time:115454ms step_avg:95.02ms
+step:1216/1705 train_time:115551ms step_avg:95.03ms
+step:1217/1705 train_time:115647ms step_avg:95.03ms
+step:1218/1705 train_time:115742ms step_avg:95.03ms
+step:1219/1705 train_time:115836ms step_avg:95.03ms
+step:1220/1705 train_time:115932ms step_avg:95.03ms
+step:1221/1705 train_time:116027ms step_avg:95.03ms
+step:1222/1705 train_time:116122ms step_avg:95.03ms
+step:1223/1705 train_time:116217ms step_avg:95.03ms
+step:1224/1705 train_time:116312ms step_avg:95.03ms
+step:1225/1705 train_time:116408ms step_avg:95.03ms
+step:1226/1705 train_time:116503ms step_avg:95.03ms
+step:1227/1705 train_time:116598ms step_avg:95.03ms
+step:1228/1705 train_time:116694ms step_avg:95.03ms
+step:1229/1705 train_time:116790ms step_avg:95.03ms
+step:1230/1705 train_time:116884ms step_avg:95.03ms
+step:1231/1705 train_time:116980ms step_avg:95.03ms
+step:1232/1705 train_time:117075ms step_avg:95.03ms
+step:1233/1705 train_time:117171ms step_avg:95.03ms
+step:1234/1705 train_time:117267ms step_avg:95.03ms
+step:1235/1705 train_time:117361ms step_avg:95.03ms
+step:1236/1705 train_time:117456ms step_avg:95.03ms
+step:1237/1705 train_time:117552ms step_avg:95.03ms
+step:1238/1705 train_time:117647ms step_avg:95.03ms
+step:1239/1705 train_time:117742ms step_avg:95.03ms
+step:1240/1705 train_time:117837ms step_avg:95.03ms
+step:1241/1705 train_time:117933ms step_avg:95.03ms
+step:1242/1705 train_time:118028ms step_avg:95.03ms
+step:1243/1705 train_time:118124ms step_avg:95.03ms
+step:1244/1705 train_time:118218ms step_avg:95.03ms
+step:1245/1705 train_time:118314ms step_avg:95.03ms
+step:1246/1705 train_time:118409ms step_avg:95.03ms
+step:1247/1705 train_time:118504ms step_avg:95.03ms
+step:1248/1705 train_time:118599ms step_avg:95.03ms
+step:1249/1705 train_time:118695ms step_avg:95.03ms
+step:1250/1705 train_time:118790ms step_avg:95.03ms
+step:1250/1705 val_loss:3.3920 train_time:118886ms step_avg:95.11ms
+step:1251/1705 train_time:118908ms step_avg:95.05ms
+step:1252/1705 train_time:118988ms step_avg:95.04ms
+step:1253/1705 train_time:119083ms step_avg:95.04ms
+step:1254/1705 train_time:119179ms step_avg:95.04ms
+step:1255/1705 train_time:119273ms step_avg:95.04ms
+step:1256/1705 train_time:119367ms step_avg:95.04ms
+step:1257/1705 train_time:119461ms step_avg:95.04ms
+step:1258/1705 train_time:119556ms step_avg:95.04ms
+step:1259/1705 train_time:119650ms step_avg:95.04ms
+step:1260/1705 train_time:119743ms step_avg:95.03ms
+step:1261/1705 train_time:119843ms step_avg:95.04ms
+step:1262/1705 train_time:119942ms step_avg:95.04ms
+step:1263/1705 train_time:120038ms step_avg:95.04ms
+step:1264/1705 train_time:120134ms step_avg:95.04ms
+step:1265/1705 train_time:120229ms step_avg:95.04ms
+step:1266/1705 train_time:120323ms step_avg:95.04ms
+step:1267/1705 train_time:120418ms step_avg:95.04ms
+step:1268/1705 train_time:120512ms step_avg:95.04ms
+step:1269/1705 train_time:120606ms step_avg:95.04ms
+step:1270/1705 train_time:120700ms step_avg:95.04ms
+step:1271/1705 train_time:120797ms step_avg:95.04ms
+step:1272/1705 train_time:120894ms step_avg:95.04ms
+step:1273/1705 train_time:120990ms step_avg:95.04ms
+step:1274/1705 train_time:121361ms step_avg:95.26ms
+step:1275/1705 train_time:121445ms step_avg:95.25ms
+step:1276/1705 train_time:121539ms step_avg:95.25ms
+step:1277/1705 train_time:121633ms step_avg:95.25ms
+step:1278/1705 train_time:121727ms step_avg:95.25ms
+step:1279/1705 train_time:121820ms step_avg:95.25ms
+step:1280/1705 train_time:121915ms step_avg:95.25ms
+step:1281/1705 train_time:122009ms step_avg:95.25ms
+step:1282/1705 train_time:122103ms step_avg:95.24ms
+step:1283/1705 train_time:122197ms step_avg:95.24ms
+step:1284/1705 train_time:122296ms step_avg:95.25ms
+step:1285/1705 train_time:122396ms step_avg:95.25ms
+step:1286/1705 train_time:122493ms step_avg:95.25ms
+step:1287/1705 train_time:122588ms step_avg:95.25ms
+step:1288/1705 train_time:122682ms step_avg:95.25ms
+step:1289/1705 train_time:122777ms step_avg:95.25ms
+step:1290/1705 train_time:122872ms step_avg:95.25ms
+step:1291/1705 train_time:122966ms step_avg:95.25ms
+step:1292/1705 train_time:123061ms step_avg:95.25ms
+step:1293/1705 train_time:123155ms step_avg:95.25ms
+step:1294/1705 train_time:123252ms step_avg:95.25ms
+step:1295/1705 train_time:123348ms step_avg:95.25ms
+step:1296/1705 train_time:123445ms step_avg:95.25ms
+step:1297/1705 train_time:123541ms step_avg:95.25ms
+step:1298/1705 train_time:123637ms step_avg:95.25ms
+step:1299/1705 train_time:123731ms step_avg:95.25ms
+step:1300/1705 train_time:123825ms step_avg:95.25ms
+step:1301/1705 train_time:123920ms step_avg:95.25ms
+step:1302/1705 train_time:124014ms step_avg:95.25ms
+step:1303/1705 train_time:124108ms step_avg:95.25ms
+step:1304/1705 train_time:124203ms step_avg:95.25ms
+step:1305/1705 train_time:124299ms step_avg:95.25ms
+step:1306/1705 train_time:124396ms step_avg:95.25ms
+step:1307/1705 train_time:124492ms step_avg:95.25ms
+step:1308/1705 train_time:124588ms step_avg:95.25ms
+step:1309/1705 train_time:124683ms step_avg:95.25ms
+step:1310/1705 train_time:124778ms step_avg:95.25ms
+step:1311/1705 train_time:124872ms step_avg:95.25ms
+step:1312/1705 train_time:124966ms step_avg:95.25ms
+step:1313/1705 train_time:125061ms step_avg:95.25ms
+step:1314/1705 train_time:125157ms step_avg:95.25ms
+step:1315/1705 train_time:125252ms step_avg:95.25ms
+step:1316/1705 train_time:125348ms step_avg:95.25ms
+step:1317/1705 train_time:125443ms step_avg:95.25ms
+step:1318/1705 train_time:125542ms step_avg:95.25ms
+step:1319/1705 train_time:125638ms step_avg:95.25ms
+step:1320/1705 train_time:125733ms step_avg:95.25ms
+step:1321/1705 train_time:125828ms step_avg:95.25ms
+step:1322/1705 train_time:125923ms step_avg:95.25ms
+step:1323/1705 train_time:126017ms step_avg:95.25ms
+step:1324/1705 train_time:126113ms step_avg:95.25ms
+step:1325/1705 train_time:126209ms step_avg:95.25ms
+step:1326/1705 train_time:126304ms step_avg:95.25ms
+step:1327/1705 train_time:126399ms step_avg:95.25ms
+step:1328/1705 train_time:126495ms step_avg:95.25ms
+step:1329/1705 train_time:126590ms step_avg:95.25ms
+step:1330/1705 train_time:126685ms step_avg:95.25ms
+step:1331/1705 train_time:126781ms step_avg:95.25ms
+step:1332/1705 train_time:126876ms step_avg:95.25ms
+step:1333/1705 train_time:126970ms step_avg:95.25ms
+step:1334/1705 train_time:127065ms step_avg:95.25ms
+step:1335/1705 train_time:127161ms step_avg:95.25ms
+step:1336/1705 train_time:127257ms step_avg:95.25ms
+step:1337/1705 train_time:127353ms step_avg:95.25ms
+step:1338/1705 train_time:127448ms step_avg:95.25ms
+step:1339/1705 train_time:127543ms step_avg:95.25ms
+step:1340/1705 train_time:127640ms step_avg:95.25ms
+step:1341/1705 train_time:127734ms step_avg:95.25ms
+step:1342/1705 train_time:127829ms step_avg:95.25ms
+step:1343/1705 train_time:127924ms step_avg:95.25ms
+step:1344/1705 train_time:128019ms step_avg:95.25ms
+step:1345/1705 train_time:128114ms step_avg:95.25ms
+step:1346/1705 train_time:128208ms step_avg:95.25ms
+step:1347/1705 train_time:128304ms step_avg:95.25ms
+step:1348/1705 train_time:128399ms step_avg:95.25ms
+step:1349/1705 train_time:128496ms step_avg:95.25ms
+step:1350/1705 train_time:128592ms step_avg:95.25ms
+step:1351/1705 train_time:128686ms step_avg:95.25ms
+step:1352/1705 train_time:128782ms step_avg:95.25ms
+step:1353/1705 train_time:128878ms step_avg:95.25ms
+step:1354/1705 train_time:128974ms step_avg:95.25ms
+step:1355/1705 train_time:129068ms step_avg:95.25ms
+step:1356/1705 train_time:129164ms step_avg:95.25ms
+step:1357/1705 train_time:129260ms step_avg:95.25ms
+step:1358/1705 train_time:129354ms step_avg:95.25ms
+step:1359/1705 train_time:129449ms step_avg:95.25ms
+step:1360/1705 train_time:129544ms step_avg:95.25ms
+step:1361/1705 train_time:129640ms step_avg:95.25ms
+step:1362/1705 train_time:129737ms step_avg:95.25ms
+step:1363/1705 train_time:129831ms step_avg:95.25ms
+step:1364/1705 train_time:129925ms step_avg:95.25ms
+step:1365/1705 train_time:130021ms step_avg:95.25ms
+step:1366/1705 train_time:130117ms step_avg:95.25ms
+step:1367/1705 train_time:130212ms step_avg:95.25ms
+step:1368/1705 train_time:130307ms step_avg:95.25ms
+step:1369/1705 train_time:130403ms step_avg:95.25ms
+step:1370/1705 train_time:130499ms step_avg:95.25ms
+step:1371/1705 train_time:130596ms step_avg:95.26ms
+step:1372/1705 train_time:130691ms step_avg:95.26ms
+step:1373/1705 train_time:130785ms step_avg:95.26ms
+step:1374/1705 train_time:130881ms step_avg:95.26ms
+step:1375/1705 train_time:130976ms step_avg:95.26ms
+step:1375/1705 val_loss:3.3546 train_time:131071ms step_avg:95.32ms
+step:1376/1705 train_time:131093ms step_avg:95.27ms
+step:1377/1705 train_time:131173ms step_avg:95.26ms
+step:1378/1705 train_time:131273ms step_avg:95.26ms
+step:1379/1705 train_time:131368ms step_avg:95.26ms
+step:1380/1705 train_time:131463ms step_avg:95.26ms
+step:1381/1705 train_time:131557ms step_avg:95.26ms
+step:1382/1705 train_time:131651ms step_avg:95.26ms
+step:1383/1705 train_time:131745ms step_avg:95.26ms
+step:1384/1705 train_time:131839ms step_avg:95.26ms
+step:1385/1705 train_time:131934ms step_avg:95.26ms
+step:1386/1705 train_time:132030ms step_avg:95.26ms
+step:1387/1705 train_time:132126ms step_avg:95.26ms
+step:1388/1705 train_time:132224ms step_avg:95.26ms
+step:1389/1705 train_time:132320ms step_avg:95.26ms
+step:1390/1705 train_time:132416ms step_avg:95.26ms
+step:1391/1705 train_time:132511ms step_avg:95.26ms
+step:1392/1705 train_time:132606ms step_avg:95.26ms
+step:1393/1705 train_time:132700ms step_avg:95.26ms
+step:1394/1705 train_time:132794ms step_avg:95.26ms
+step:1395/1705 train_time:132889ms step_avg:95.26ms
+step:1396/1705 train_time:132983ms step_avg:95.26ms
+step:1397/1705 train_time:133079ms step_avg:95.26ms
+step:1398/1705 train_time:133177ms step_avg:95.26ms
+step:1399/1705 train_time:133275ms step_avg:95.26ms
+step:1400/1705 train_time:133373ms step_avg:95.27ms
+step:1401/1705 train_time:133468ms step_avg:95.27ms
+step:1402/1705 train_time:133563ms step_avg:95.27ms
+step:1403/1705 train_time:133658ms step_avg:95.27ms
+step:1404/1705 train_time:133753ms step_avg:95.27ms
+step:1405/1705 train_time:133848ms step_avg:95.27ms
+step:1406/1705 train_time:133943ms step_avg:95.27ms
+step:1407/1705 train_time:134038ms step_avg:95.26ms
+step:1408/1705 train_time:134134ms step_avg:95.27ms
+step:1409/1705 train_time:134229ms step_avg:95.27ms
+step:1410/1705 train_time:134324ms step_avg:95.27ms
+step:1411/1705 train_time:134420ms step_avg:95.27ms
+step:1412/1705 train_time:134517ms step_avg:95.27ms
+step:1413/1705 train_time:134613ms step_avg:95.27ms
+step:1414/1705 train_time:134707ms step_avg:95.27ms
+step:1415/1705 train_time:134802ms step_avg:95.27ms
+step:1416/1705 train_time:134896ms step_avg:95.27ms
+step:1417/1705 train_time:134991ms step_avg:95.27ms
+step:1418/1705 train_time:135087ms step_avg:95.27ms
+step:1419/1705 train_time:135182ms step_avg:95.27ms
+step:1420/1705 train_time:135277ms step_avg:95.27ms
+step:1421/1705 train_time:135374ms step_avg:95.27ms
+step:1422/1705 train_time:135469ms step_avg:95.27ms
+step:1423/1705 train_time:135564ms step_avg:95.27ms
+step:1424/1705 train_time:135659ms step_avg:95.27ms
+step:1425/1705 train_time:135755ms step_avg:95.27ms
+step:1426/1705 train_time:135850ms step_avg:95.27ms
+step:1427/1705 train_time:135944ms step_avg:95.27ms
+step:1428/1705 train_time:136040ms step_avg:95.27ms
+step:1429/1705 train_time:136135ms step_avg:95.27ms
+step:1430/1705 train_time:136231ms step_avg:95.27ms
+step:1431/1705 train_time:136327ms step_avg:95.27ms
+step:1432/1705 train_time:136422ms step_avg:95.27ms
+step:1433/1705 train_time:136518ms step_avg:95.27ms
+step:1434/1705 train_time:136614ms step_avg:95.27ms
+step:1435/1705 train_time:136710ms step_avg:95.27ms
+step:1436/1705 train_time:136804ms step_avg:95.27ms
+step:1437/1705 train_time:136899ms step_avg:95.27ms
+step:1438/1705 train_time:136994ms step_avg:95.27ms
+step:1439/1705 train_time:137090ms step_avg:95.27ms
+step:1440/1705 train_time:137184ms step_avg:95.27ms
+step:1441/1705 train_time:137279ms step_avg:95.27ms
+step:1442/1705 train_time:137375ms step_avg:95.27ms
+step:1443/1705 train_time:137471ms step_avg:95.27ms
+step:1444/1705 train_time:137566ms step_avg:95.27ms
+step:1445/1705 train_time:137661ms step_avg:95.27ms
+step:1446/1705 train_time:137757ms step_avg:95.27ms
+step:1447/1705 train_time:137853ms step_avg:95.27ms
+step:1448/1705 train_time:137948ms step_avg:95.27ms
+step:1449/1705 train_time:138043ms step_avg:95.27ms
+step:1450/1705 train_time:138139ms step_avg:95.27ms
+step:1451/1705 train_time:138235ms step_avg:95.27ms
+step:1452/1705 train_time:138330ms step_avg:95.27ms
+step:1453/1705 train_time:138425ms step_avg:95.27ms
+step:1454/1705 train_time:138520ms step_avg:95.27ms
+step:1455/1705 train_time:138616ms step_avg:95.27ms
+step:1456/1705 train_time:138711ms step_avg:95.27ms
+step:1457/1705 train_time:138806ms step_avg:95.27ms
+step:1458/1705 train_time:138901ms step_avg:95.27ms
+step:1459/1705 train_time:138996ms step_avg:95.27ms
+step:1460/1705 train_time:139093ms step_avg:95.27ms
+step:1461/1705 train_time:139190ms step_avg:95.27ms
+step:1462/1705 train_time:139286ms step_avg:95.27ms
+step:1463/1705 train_time:139380ms step_avg:95.27ms
+step:1464/1705 train_time:139476ms step_avg:95.27ms
+step:1465/1705 train_time:139571ms step_avg:95.27ms
+step:1466/1705 train_time:139666ms step_avg:95.27ms
+step:1467/1705 train_time:139761ms step_avg:95.27ms
+step:1468/1705 train_time:139856ms step_avg:95.27ms
+step:1469/1705 train_time:139951ms step_avg:95.27ms
+step:1470/1705 train_time:140046ms step_avg:95.27ms
+step:1471/1705 train_time:140141ms step_avg:95.27ms
+step:1472/1705 train_time:140238ms step_avg:95.27ms
+step:1473/1705 train_time:140334ms step_avg:95.27ms
+step:1474/1705 train_time:140428ms step_avg:95.27ms
+step:1475/1705 train_time:140523ms step_avg:95.27ms
+step:1476/1705 train_time:140619ms step_avg:95.27ms
+step:1477/1705 train_time:140714ms step_avg:95.27ms
+step:1478/1705 train_time:140809ms step_avg:95.27ms
+step:1479/1705 train_time:140904ms step_avg:95.27ms
+step:1480/1705 train_time:141000ms step_avg:95.27ms
+step:1481/1705 train_time:141095ms step_avg:95.27ms
+step:1482/1705 train_time:141191ms step_avg:95.27ms
+step:1483/1705 train_time:141286ms step_avg:95.27ms
+step:1484/1705 train_time:141381ms step_avg:95.27ms
+step:1485/1705 train_time:141765ms step_avg:95.46ms
+step:1486/1705 train_time:141835ms step_avg:95.45ms
+step:1487/1705 train_time:141927ms step_avg:95.45ms
+step:1488/1705 train_time:142022ms step_avg:95.44ms
+step:1489/1705 train_time:142115ms step_avg:95.44ms
+step:1490/1705 train_time:142210ms step_avg:95.44ms
+step:1491/1705 train_time:142304ms step_avg:95.44ms
+step:1492/1705 train_time:142398ms step_avg:95.44ms
+step:1493/1705 train_time:142492ms step_avg:95.44ms
+step:1494/1705 train_time:142587ms step_avg:95.44ms
+step:1495/1705 train_time:142685ms step_avg:95.44ms
+step:1496/1705 train_time:142785ms step_avg:95.44ms
+step:1497/1705 train_time:142884ms step_avg:95.45ms
+step:1498/1705 train_time:142979ms step_avg:95.45ms
+step:1499/1705 train_time:143073ms step_avg:95.45ms
+step:1500/1705 train_time:143167ms step_avg:95.44ms
+step:1500/1705 val_loss:3.3225 train_time:143261ms step_avg:95.51ms
+step:1501/1705 train_time:143283ms step_avg:95.46ms
+step:1502/1705 train_time:143364ms step_avg:95.45ms
+step:1503/1705 train_time:143460ms step_avg:95.45ms
+step:1504/1705 train_time:143554ms step_avg:95.45ms
+step:1505/1705 train_time:143648ms step_avg:95.45ms
+step:1506/1705 train_time:143743ms step_avg:95.45ms +step:1507/1705 train_time:143836ms step_avg:95.45ms +step:1508/1705 train_time:143932ms step_avg:95.45ms +step:1509/1705 train_time:144027ms step_avg:95.45ms +step:1510/1705 train_time:144121ms step_avg:95.44ms +step:1511/1705 train_time:144217ms step_avg:95.44ms +step:1512/1705 train_time:144314ms step_avg:95.45ms +step:1513/1705 train_time:144412ms step_avg:95.45ms +step:1514/1705 train_time:144509ms step_avg:95.45ms +step:1515/1705 train_time:144604ms step_avg:95.45ms +step:1516/1705 train_time:144698ms step_avg:95.45ms +step:1517/1705 train_time:144793ms step_avg:95.45ms +step:1518/1705 train_time:144888ms step_avg:95.45ms +step:1519/1705 train_time:144982ms step_avg:95.45ms +step:1520/1705 train_time:145076ms step_avg:95.44ms +step:1521/1705 train_time:145171ms step_avg:95.44ms +step:1522/1705 train_time:145269ms step_avg:95.45ms +step:1523/1705 train_time:145366ms step_avg:95.45ms +step:1524/1705 train_time:145464ms step_avg:95.45ms +step:1525/1705 train_time:145558ms step_avg:95.45ms +step:1526/1705 train_time:145653ms step_avg:95.45ms +step:1527/1705 train_time:145749ms step_avg:95.45ms +step:1528/1705 train_time:145844ms step_avg:95.45ms +step:1529/1705 train_time:145938ms step_avg:95.45ms +step:1530/1705 train_time:146032ms step_avg:95.45ms +step:1531/1705 train_time:146127ms step_avg:95.45ms +step:1532/1705 train_time:146223ms step_avg:95.45ms +step:1533/1705 train_time:146319ms step_avg:95.45ms +step:1534/1705 train_time:146415ms step_avg:95.45ms +step:1535/1705 train_time:146512ms step_avg:95.45ms +step:1536/1705 train_time:146608ms step_avg:95.45ms +step:1537/1705 train_time:146703ms step_avg:95.45ms +step:1538/1705 train_time:146798ms step_avg:95.45ms +step:1539/1705 train_time:146892ms step_avg:95.45ms +step:1540/1705 train_time:146987ms step_avg:95.45ms +step:1541/1705 train_time:147082ms step_avg:95.45ms +step:1542/1705 train_time:147177ms step_avg:95.45ms +step:1543/1705 train_time:147273ms step_avg:95.45ms +step:1544/1705 train_time:147369ms step_avg:95.45ms +step:1545/1705 train_time:147466ms step_avg:95.45ms +step:1546/1705 train_time:147562ms step_avg:95.45ms +step:1547/1705 train_time:147657ms step_avg:95.45ms +step:1548/1705 train_time:147751ms step_avg:95.45ms +step:1549/1705 train_time:147847ms step_avg:95.45ms +step:1550/1705 train_time:147941ms step_avg:95.45ms +step:1551/1705 train_time:148035ms step_avg:95.45ms +step:1552/1705 train_time:148131ms step_avg:95.44ms +step:1553/1705 train_time:148226ms step_avg:95.45ms +step:1554/1705 train_time:148322ms step_avg:95.45ms +step:1555/1705 train_time:148417ms step_avg:95.45ms +step:1556/1705 train_time:148513ms step_avg:95.45ms +step:1557/1705 train_time:148609ms step_avg:95.45ms +step:1558/1705 train_time:148705ms step_avg:95.45ms +step:1559/1705 train_time:148800ms step_avg:95.45ms +step:1560/1705 train_time:148895ms step_avg:95.45ms +step:1561/1705 train_time:148990ms step_avg:95.45ms +step:1562/1705 train_time:149086ms step_avg:95.45ms +step:1563/1705 train_time:149181ms step_avg:95.45ms +step:1564/1705 train_time:149276ms step_avg:95.45ms +step:1565/1705 train_time:149372ms step_avg:95.45ms +step:1566/1705 train_time:149468ms step_avg:95.45ms +step:1567/1705 train_time:149563ms step_avg:95.45ms +step:1568/1705 train_time:149658ms step_avg:95.44ms +step:1569/1705 train_time:149752ms step_avg:95.44ms +step:1570/1705 train_time:149848ms step_avg:95.44ms +step:1571/1705 train_time:149944ms step_avg:95.44ms +step:1572/1705 train_time:150038ms step_avg:95.44ms 
+step:1573/1705 train_time:150133ms step_avg:95.44ms +step:1574/1705 train_time:150229ms step_avg:95.44ms +step:1575/1705 train_time:150326ms step_avg:95.45ms +step:1576/1705 train_time:150422ms step_avg:95.45ms +step:1577/1705 train_time:150518ms step_avg:95.45ms +step:1578/1705 train_time:150614ms step_avg:95.45ms +step:1579/1705 train_time:150709ms step_avg:95.45ms +step:1580/1705 train_time:150804ms step_avg:95.45ms +step:1581/1705 train_time:150901ms step_avg:95.45ms +step:1582/1705 train_time:150995ms step_avg:95.45ms +step:1583/1705 train_time:151091ms step_avg:95.45ms +step:1584/1705 train_time:151186ms step_avg:95.45ms +step:1585/1705 train_time:151281ms step_avg:95.45ms +step:1586/1705 train_time:151376ms step_avg:95.45ms +step:1587/1705 train_time:151471ms step_avg:95.44ms +step:1588/1705 train_time:151567ms step_avg:95.45ms +step:1589/1705 train_time:151663ms step_avg:95.45ms +step:1590/1705 train_time:151757ms step_avg:95.44ms +step:1591/1705 train_time:151852ms step_avg:95.44ms +step:1592/1705 train_time:151949ms step_avg:95.45ms +step:1593/1705 train_time:152046ms step_avg:95.45ms +step:1594/1705 train_time:152141ms step_avg:95.45ms +step:1595/1705 train_time:152236ms step_avg:95.45ms +step:1596/1705 train_time:152331ms step_avg:95.45ms +step:1597/1705 train_time:152427ms step_avg:95.45ms +step:1598/1705 train_time:152523ms step_avg:95.45ms +step:1599/1705 train_time:152617ms step_avg:95.45ms +step:1600/1705 train_time:152712ms step_avg:95.44ms +step:1601/1705 train_time:152807ms step_avg:95.45ms +step:1602/1705 train_time:152902ms step_avg:95.44ms +step:1603/1705 train_time:152997ms step_avg:95.44ms +step:1604/1705 train_time:153093ms step_avg:95.44ms +step:1605/1705 train_time:153189ms step_avg:95.44ms +step:1606/1705 train_time:153283ms step_avg:95.44ms +step:1607/1705 train_time:153378ms step_avg:95.44ms +step:1608/1705 train_time:153473ms step_avg:95.44ms +step:1609/1705 train_time:153569ms step_avg:95.44ms +step:1610/1705 train_time:153664ms step_avg:95.44ms +step:1611/1705 train_time:153759ms step_avg:95.44ms +step:1612/1705 train_time:153854ms step_avg:95.44ms +step:1613/1705 train_time:153950ms step_avg:95.44ms +step:1614/1705 train_time:154046ms step_avg:95.44ms +step:1615/1705 train_time:154141ms step_avg:95.44ms +step:1616/1705 train_time:154236ms step_avg:95.44ms +step:1617/1705 train_time:154331ms step_avg:95.44ms +step:1618/1705 train_time:154426ms step_avg:95.44ms +step:1619/1705 train_time:154522ms step_avg:95.44ms +step:1620/1705 train_time:154616ms step_avg:95.44ms +step:1621/1705 train_time:154711ms step_avg:95.44ms +step:1622/1705 train_time:154807ms step_avg:95.44ms +step:1623/1705 train_time:154903ms step_avg:95.44ms +step:1624/1705 train_time:154998ms step_avg:95.44ms +step:1625/1705 train_time:155094ms step_avg:95.44ms +step:1625/1705 val_loss:3.2949 train_time:155190ms step_avg:95.50ms +step:1626/1705 train_time:155211ms step_avg:95.46ms +step:1627/1705 train_time:155290ms step_avg:95.45ms +step:1628/1705 train_time:155388ms step_avg:95.45ms +step:1629/1705 train_time:155485ms step_avg:95.45ms +step:1630/1705 train_time:155582ms step_avg:95.45ms +step:1631/1705 train_time:155676ms step_avg:95.45ms +step:1632/1705 train_time:155770ms step_avg:95.45ms +step:1633/1705 train_time:155865ms step_avg:95.45ms +step:1634/1705 train_time:155960ms step_avg:95.45ms +step:1635/1705 train_time:156054ms step_avg:95.45ms +step:1636/1705 train_time:156149ms step_avg:95.45ms +step:1637/1705 train_time:156248ms step_avg:95.45ms +step:1638/1705 train_time:156347ms 
step_avg:95.45ms +step:1639/1705 train_time:156442ms step_avg:95.45ms +step:1640/1705 train_time:156538ms step_avg:95.45ms +step:1641/1705 train_time:156634ms step_avg:95.45ms +step:1642/1705 train_time:156727ms step_avg:95.45ms +step:1643/1705 train_time:156821ms step_avg:95.45ms +step:1644/1705 train_time:156917ms step_avg:95.45ms +step:1645/1705 train_time:157011ms step_avg:95.45ms +step:1646/1705 train_time:157107ms step_avg:95.45ms +step:1647/1705 train_time:157203ms step_avg:95.45ms +step:1648/1705 train_time:157302ms step_avg:95.45ms +step:1649/1705 train_time:157399ms step_avg:95.45ms +step:1650/1705 train_time:157495ms step_avg:95.45ms +step:1651/1705 train_time:157590ms step_avg:95.45ms +step:1652/1705 train_time:157685ms step_avg:95.45ms +step:1653/1705 train_time:157779ms step_avg:95.45ms +step:1654/1705 train_time:157873ms step_avg:95.45ms +step:1655/1705 train_time:157968ms step_avg:95.45ms +step:1656/1705 train_time:158064ms step_avg:95.45ms +step:1657/1705 train_time:158159ms step_avg:95.45ms +step:1658/1705 train_time:158255ms step_avg:95.45ms +step:1659/1705 train_time:158350ms step_avg:95.45ms +step:1660/1705 train_time:158445ms step_avg:95.45ms +step:1661/1705 train_time:158542ms step_avg:95.45ms +step:1662/1705 train_time:158639ms step_avg:95.45ms +step:1663/1705 train_time:158734ms step_avg:95.45ms +step:1664/1705 train_time:158829ms step_avg:95.45ms +step:1665/1705 train_time:158924ms step_avg:95.45ms +step:1666/1705 train_time:159019ms step_avg:95.45ms +step:1667/1705 train_time:159114ms step_avg:95.45ms +step:1668/1705 train_time:159208ms step_avg:95.45ms +step:1669/1705 train_time:159305ms step_avg:95.45ms +step:1670/1705 train_time:159401ms step_avg:95.45ms +step:1671/1705 train_time:159497ms step_avg:95.45ms +step:1672/1705 train_time:159592ms step_avg:95.45ms +step:1673/1705 train_time:159687ms step_avg:95.45ms +step:1674/1705 train_time:159782ms step_avg:95.45ms +step:1675/1705 train_time:159879ms step_avg:95.45ms +step:1676/1705 train_time:159974ms step_avg:95.45ms +step:1677/1705 train_time:160068ms step_avg:95.45ms +step:1678/1705 train_time:160163ms step_avg:95.45ms +step:1679/1705 train_time:160260ms step_avg:95.45ms +step:1680/1705 train_time:160356ms step_avg:95.45ms +step:1681/1705 train_time:160451ms step_avg:95.45ms +step:1682/1705 train_time:160547ms step_avg:95.45ms +step:1683/1705 train_time:160643ms step_avg:95.45ms +step:1684/1705 train_time:160738ms step_avg:95.45ms +step:1685/1705 train_time:160834ms step_avg:95.45ms +step:1686/1705 train_time:160928ms step_avg:95.45ms +step:1687/1705 train_time:161024ms step_avg:95.45ms +step:1688/1705 train_time:161120ms step_avg:95.45ms +step:1689/1705 train_time:161215ms step_avg:95.45ms +step:1690/1705 train_time:161310ms step_avg:95.45ms +step:1691/1705 train_time:161406ms step_avg:95.45ms +step:1692/1705 train_time:161501ms step_avg:95.45ms +step:1693/1705 train_time:161597ms step_avg:95.45ms +step:1694/1705 train_time:161692ms step_avg:95.45ms +step:1695/1705 train_time:161787ms step_avg:95.45ms +step:1696/1705 train_time:161882ms step_avg:95.45ms +step:1697/1705 train_time:161978ms step_avg:95.45ms +step:1698/1705 train_time:162294ms step_avg:95.58ms +step:1699/1705 train_time:162421ms step_avg:95.60ms +step:1700/1705 train_time:162514ms step_avg:95.60ms +step:1701/1705 train_time:162607ms step_avg:95.59ms +step:1702/1705 train_time:162701ms step_avg:95.59ms +step:1703/1705 train_time:162795ms step_avg:95.59ms +step:1704/1705 train_time:162889ms step_avg:95.59ms +step:1705/1705 train_time:162983ms 
step_avg:95.59ms +step:1705/1705 val_loss:3.2806 train_time:163078ms step_avg:95.65ms +peak memory allocated: 34489 MiB reserved: 48516 MiB diff --git a/records/090525_SkipMLPBlocks/1858912a-2697-4461-9edb-e5ee4246ee3d.txt b/records/090525_SkipMLPBlocks/1858912a-2697-4461-9edb-e5ee4246ee3d.txt new file mode 100644 index 000000000..e269a2db6 --- /dev/null +++ b/records/090525_SkipMLPBlocks/1858912a-2697-4461-9edb-e5ee4246ee3d.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, 
grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + 
tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, 
mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
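+
+    Concretely, newton_schulz_triton above runs five iterations of the odd polynomial map
+    X <- a*X + (b*A + c*A @ A) @ X with A = X @ X.T and (a, b, c) = (3.4445, -4.7750, 2.0315),
+    after scaling X to unit Frobenius norm; an odd polynomial in X preserves the update's
+    singular vectors while pushing its singular values toward 1.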
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because the optimizer sorts params by shape + # (2 matrices per MLP layer; the optimizer pads each shape group, so the count need not divide the world size) + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks by @EmelyanenkoK (with 12 layers indexed 0-11, only layer 0's MLP is actually skipped here) + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to the nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments.
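+ # e.g. next_multiple_of_n(50257, n=128) == 50304, i.e. 47 padded logit slots whose token ids never occur in GPT-2-tokenized data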
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS tokens at index {idx}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with a BOS token; sequences are truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration.
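+ # the unconsumed tail of the old shard is dropped; the fresh BOSFinder resumes from the new shard's first BOS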
+ tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # the last document ran one token long to supply the _targets offset, so trim it here + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "new_num_tokens must be divisible by world_size * grad_accum_steps" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1705 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc.
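+# sanity check of the batch arithmetic above: train_batch_size = 2048*24*8 = 393216 tokens per optimizer step;
+# with world_size=8 each rank consumes 393216/8 = 49152 tokens per micro-step (grad_accum_steps=1), and with
+# world_size=4 each rank still consumes 393216/(2*4) = 49152 per micro-step (grad_accum_steps=2),
+# so the effective batch is invariant to GPU count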
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
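+# note on the warmup above: each window size in ws_schedule compiles a distinct graph, so cycling ws through
+# (3, 7, 11) during warmup keeps compilation time out of the timed run below; get_lr then holds the LR multiplier
+# at 1.0 for the first 55% of steps (1 - cooldown_frac) before decaying linearly to 0.1, and get_ws widens the
+# base attention window from 3*128=384 to 11*128=1408 tokens over equal thirds of training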
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:36:47 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 128W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 42C P0 123W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 131W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 33C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 69624 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 69625 C /usr/bin/python3 610MiB | +| 0 N/A N/A 69626 C /usr/bin/python3 610MiB | +| 0 N/A N/A 69627 C /usr/bin/python3 610MiB | +| 0 N/A N/A 69628 C /usr/bin/python3 610MiB | +| 0 N/A N/A 69629 C /usr/bin/python3 610MiB | +| 0 N/A N/A 69630 C /usr/bin/python3 610MiB | +| 0 N/A N/A 69631 C /usr/bin/python3 610MiB | +| 1 N/A N/A 69625 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 69626 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 69627 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 69628 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 69629 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 69630 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 69631 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1705 train_time:399ms step_avg:398.69ms +step:2/1705 train_time:419ms step_avg:209.65ms +step:3/1705 train_time:488ms step_avg:162.60ms +step:4/1705 train_time:579ms step_avg:144.80ms +step:5/1705 
train_time:671ms step_avg:134.24ms +step:6/1705 train_time:763ms step_avg:127.22ms +step:7/1705 train_time:856ms step_avg:122.29ms +step:8/1705 train_time:948ms step_avg:118.50ms +step:9/1705 train_time:1040ms step_avg:115.56ms +step:10/1705 train_time:1132ms step_avg:113.24ms +step:11/1705 train_time:1224ms step_avg:111.31ms +step:12/1705 train_time:1319ms step_avg:109.88ms +step:13/1705 train_time:1415ms step_avg:108.82ms +step:14/1705 train_time:1509ms step_avg:107.77ms +step:15/1705 train_time:1603ms step_avg:106.86ms +step:16/1705 train_time:1695ms step_avg:105.97ms +step:17/1705 train_time:1788ms step_avg:105.15ms +step:18/1705 train_time:1880ms step_avg:104.43ms +step:19/1705 train_time:1973ms step_avg:103.84ms +step:20/1705 train_time:2066ms step_avg:103.29ms +step:21/1705 train_time:2159ms step_avg:102.80ms +step:22/1705 train_time:2252ms step_avg:102.36ms +step:23/1705 train_time:2346ms step_avg:101.99ms +step:24/1705 train_time:2439ms step_avg:101.64ms +step:25/1705 train_time:2534ms step_avg:101.35ms +step:26/1705 train_time:2626ms step_avg:101.00ms +step:27/1705 train_time:2719ms step_avg:100.69ms +step:28/1705 train_time:2812ms step_avg:100.44ms +step:29/1705 train_time:2906ms step_avg:100.19ms +step:30/1705 train_time:2998ms step_avg:99.95ms +step:31/1705 train_time:3091ms step_avg:99.72ms +step:32/1705 train_time:3184ms step_avg:99.51ms +step:33/1705 train_time:3278ms step_avg:99.34ms +step:34/1705 train_time:3372ms step_avg:99.18ms +step:35/1705 train_time:3466ms step_avg:99.02ms +step:36/1705 train_time:3559ms step_avg:98.86ms +step:37/1705 train_time:3652ms step_avg:98.71ms +step:38/1705 train_time:3745ms step_avg:98.56ms +step:39/1705 train_time:3839ms step_avg:98.42ms +step:40/1705 train_time:3932ms step_avg:98.29ms +step:41/1705 train_time:4025ms step_avg:98.17ms +step:42/1705 train_time:4118ms step_avg:98.04ms +step:43/1705 train_time:4211ms step_avg:97.92ms +step:44/1705 train_time:4304ms step_avg:97.82ms +step:45/1705 train_time:4398ms step_avg:97.72ms +step:46/1705 train_time:4491ms step_avg:97.63ms +step:47/1705 train_time:4584ms step_avg:97.54ms +step:48/1705 train_time:4678ms step_avg:97.46ms +step:49/1705 train_time:4771ms step_avg:97.38ms +step:50/1705 train_time:4864ms step_avg:97.29ms +step:51/1705 train_time:4958ms step_avg:97.22ms +step:52/1705 train_time:5051ms step_avg:97.14ms +step:53/1705 train_time:5143ms step_avg:97.04ms +step:54/1705 train_time:5236ms step_avg:96.97ms +step:55/1705 train_time:5330ms step_avg:96.90ms +step:56/1705 train_time:5423ms step_avg:96.84ms +step:57/1705 train_time:5516ms step_avg:96.77ms +step:58/1705 train_time:5609ms step_avg:96.70ms +step:59/1705 train_time:5702ms step_avg:96.64ms +step:60/1705 train_time:5795ms step_avg:96.58ms +step:61/1705 train_time:5888ms step_avg:96.52ms +step:62/1705 train_time:5981ms step_avg:96.47ms +step:63/1705 train_time:6075ms step_avg:96.43ms +step:64/1705 train_time:6168ms step_avg:96.38ms +step:65/1705 train_time:6262ms step_avg:96.34ms +step:66/1705 train_time:6355ms step_avg:96.29ms +step:67/1705 train_time:6448ms step_avg:96.24ms +step:68/1705 train_time:6541ms step_avg:96.19ms +step:69/1705 train_time:6634ms step_avg:96.15ms +step:70/1705 train_time:6728ms step_avg:96.11ms +step:71/1705 train_time:6821ms step_avg:96.06ms +step:72/1705 train_time:6914ms step_avg:96.03ms +step:73/1705 train_time:7007ms step_avg:95.99ms +step:74/1705 train_time:7101ms step_avg:95.96ms +step:75/1705 train_time:7195ms step_avg:95.93ms +step:76/1705 train_time:7288ms step_avg:95.90ms +step:77/1705 
train_time:7381ms step_avg:95.86ms +step:78/1705 train_time:7475ms step_avg:95.83ms +step:79/1705 train_time:7567ms step_avg:95.78ms +step:80/1705 train_time:7660ms step_avg:95.75ms +step:81/1705 train_time:7753ms step_avg:95.72ms +step:82/1705 train_time:7846ms step_avg:95.69ms +step:83/1705 train_time:7939ms step_avg:95.65ms +step:84/1705 train_time:8032ms step_avg:95.62ms +step:85/1705 train_time:8125ms step_avg:95.59ms +step:86/1705 train_time:8219ms step_avg:95.56ms +step:87/1705 train_time:8312ms step_avg:95.54ms +step:88/1705 train_time:8404ms step_avg:95.50ms +step:89/1705 train_time:8497ms step_avg:95.47ms +step:90/1705 train_time:8590ms step_avg:95.44ms +step:91/1705 train_time:8683ms step_avg:95.41ms +step:92/1705 train_time:8776ms step_avg:95.39ms +step:93/1705 train_time:8870ms step_avg:95.37ms +step:94/1705 train_time:8962ms step_avg:95.34ms +step:95/1705 train_time:9055ms step_avg:95.32ms +step:96/1705 train_time:9150ms step_avg:95.31ms +step:97/1705 train_time:9241ms step_avg:95.27ms +step:98/1705 train_time:9335ms step_avg:95.25ms +step:99/1705 train_time:9427ms step_avg:95.22ms +step:100/1705 train_time:9521ms step_avg:95.21ms +step:101/1705 train_time:9614ms step_avg:95.19ms +step:102/1705 train_time:9707ms step_avg:95.16ms +step:103/1705 train_time:9800ms step_avg:95.15ms +step:104/1705 train_time:9894ms step_avg:95.13ms +step:105/1705 train_time:9986ms step_avg:95.11ms +step:106/1705 train_time:10079ms step_avg:95.09ms +step:107/1705 train_time:10173ms step_avg:95.07ms +step:108/1705 train_time:10266ms step_avg:95.06ms +step:109/1705 train_time:10360ms step_avg:95.04ms +step:110/1705 train_time:10453ms step_avg:95.03ms +step:111/1705 train_time:10545ms step_avg:95.00ms +step:112/1705 train_time:10638ms step_avg:94.99ms +step:113/1705 train_time:10731ms step_avg:94.97ms +step:114/1705 train_time:10824ms step_avg:94.95ms +step:115/1705 train_time:10918ms step_avg:94.94ms +step:116/1705 train_time:11010ms step_avg:94.92ms +step:117/1705 train_time:11104ms step_avg:94.90ms +step:118/1705 train_time:11196ms step_avg:94.88ms +step:119/1705 train_time:11289ms step_avg:94.87ms +step:120/1705 train_time:11383ms step_avg:94.86ms +step:121/1705 train_time:11476ms step_avg:94.84ms +step:122/1705 train_time:11568ms step_avg:94.82ms +step:123/1705 train_time:11661ms step_avg:94.80ms +step:124/1705 train_time:11754ms step_avg:94.79ms +step:125/1705 train_time:11847ms step_avg:94.78ms +step:125/1705 val_loss:4.3131 train_time:11940ms step_avg:95.52ms +step:126/1705 train_time:11964ms step_avg:94.95ms +step:127/1705 train_time:12041ms step_avg:94.81ms +step:128/1705 train_time:12140ms step_avg:94.85ms +step:129/1705 train_time:12236ms step_avg:94.85ms +step:130/1705 train_time:12329ms step_avg:94.84ms +step:131/1705 train_time:12422ms step_avg:94.82ms +step:132/1705 train_time:12514ms step_avg:94.80ms +step:133/1705 train_time:12606ms step_avg:94.78ms +step:134/1705 train_time:12698ms step_avg:94.76ms +step:135/1705 train_time:12790ms step_avg:94.74ms +step:136/1705 train_time:12882ms step_avg:94.72ms +step:137/1705 train_time:12975ms step_avg:94.71ms +step:138/1705 train_time:13070ms step_avg:94.71ms +step:139/1705 train_time:13165ms step_avg:94.71ms +step:140/1705 train_time:13259ms step_avg:94.71ms +step:141/1705 train_time:13352ms step_avg:94.69ms +step:142/1705 train_time:13445ms step_avg:94.68ms +step:143/1705 train_time:13537ms step_avg:94.67ms +step:144/1705 train_time:13629ms step_avg:94.65ms +step:145/1705 train_time:13722ms step_avg:94.64ms +step:146/1705 train_time:13815ms 
step_avg:94.62ms +step:147/1705 train_time:13907ms step_avg:94.61ms +step:148/1705 train_time:14001ms step_avg:94.60ms +step:149/1705 train_time:14094ms step_avg:94.59ms +step:150/1705 train_time:14188ms step_avg:94.58ms +step:151/1705 train_time:14281ms step_avg:94.58ms +step:152/1705 train_time:14375ms step_avg:94.57ms +step:153/1705 train_time:14468ms step_avg:94.56ms +step:154/1705 train_time:14560ms step_avg:94.55ms +step:155/1705 train_time:14653ms step_avg:94.54ms +step:156/1705 train_time:14746ms step_avg:94.52ms +step:157/1705 train_time:14838ms step_avg:94.51ms +step:158/1705 train_time:14931ms step_avg:94.50ms +step:159/1705 train_time:15025ms step_avg:94.50ms +step:160/1705 train_time:15119ms step_avg:94.49ms +step:161/1705 train_time:15211ms step_avg:94.48ms +step:162/1705 train_time:15305ms step_avg:94.48ms +step:163/1705 train_time:15398ms step_avg:94.47ms +step:164/1705 train_time:15491ms step_avg:94.45ms +step:165/1705 train_time:15583ms step_avg:94.45ms +step:166/1705 train_time:15676ms step_avg:94.43ms +step:167/1705 train_time:15769ms step_avg:94.42ms +step:168/1705 train_time:15861ms step_avg:94.41ms +step:169/1705 train_time:15954ms step_avg:94.40ms +step:170/1705 train_time:16047ms step_avg:94.39ms +step:171/1705 train_time:16141ms step_avg:94.39ms +step:172/1705 train_time:16234ms step_avg:94.38ms +step:173/1705 train_time:16328ms step_avg:94.38ms +step:174/1705 train_time:16422ms step_avg:94.38ms +step:175/1705 train_time:16515ms step_avg:94.37ms +step:176/1705 train_time:16608ms step_avg:94.36ms +step:177/1705 train_time:16701ms step_avg:94.35ms +step:178/1705 train_time:16793ms step_avg:94.34ms +step:179/1705 train_time:16886ms step_avg:94.33ms +step:180/1705 train_time:16978ms step_avg:94.32ms +step:181/1705 train_time:17071ms step_avg:94.32ms +step:182/1705 train_time:17164ms step_avg:94.31ms +step:183/1705 train_time:17257ms step_avg:94.30ms +step:184/1705 train_time:17350ms step_avg:94.29ms +step:185/1705 train_time:17443ms step_avg:94.29ms +step:186/1705 train_time:17536ms step_avg:94.28ms +step:187/1705 train_time:17628ms step_avg:94.27ms +step:188/1705 train_time:17722ms step_avg:94.26ms +step:189/1705 train_time:17814ms step_avg:94.26ms +step:190/1705 train_time:17907ms step_avg:94.25ms +step:191/1705 train_time:18001ms step_avg:94.25ms +step:192/1705 train_time:18094ms step_avg:94.24ms +step:193/1705 train_time:18186ms step_avg:94.23ms +step:194/1705 train_time:18279ms step_avg:94.22ms +step:195/1705 train_time:18372ms step_avg:94.22ms +step:196/1705 train_time:18466ms step_avg:94.21ms +step:197/1705 train_time:18559ms step_avg:94.21ms +step:198/1705 train_time:18652ms step_avg:94.20ms +step:199/1705 train_time:18745ms step_avg:94.20ms +step:200/1705 train_time:18837ms step_avg:94.19ms +step:201/1705 train_time:18930ms step_avg:94.18ms +step:202/1705 train_time:19024ms step_avg:94.18ms +step:203/1705 train_time:19117ms step_avg:94.17ms +step:204/1705 train_time:19210ms step_avg:94.17ms +step:205/1705 train_time:19303ms step_avg:94.16ms +step:206/1705 train_time:19396ms step_avg:94.16ms +step:207/1705 train_time:19489ms step_avg:94.15ms +step:208/1705 train_time:19583ms step_avg:94.15ms +step:209/1705 train_time:19675ms step_avg:94.14ms +step:210/1705 train_time:19768ms step_avg:94.13ms +step:211/1705 train_time:19861ms step_avg:94.13ms +step:212/1705 train_time:19953ms step_avg:94.12ms +step:213/1705 train_time:20278ms step_avg:95.20ms +step:214/1705 train_time:20382ms step_avg:95.24ms +step:215/1705 train_time:20473ms step_avg:95.22ms +step:216/1705 
train_time:20565ms step_avg:95.21ms +step:217/1705 train_time:20657ms step_avg:95.19ms +step:218/1705 train_time:20749ms step_avg:95.18ms +step:219/1705 train_time:20841ms step_avg:95.17ms +step:220/1705 train_time:20933ms step_avg:95.15ms +step:221/1705 train_time:21024ms step_avg:95.13ms +step:222/1705 train_time:21116ms step_avg:95.12ms +step:223/1705 train_time:21211ms step_avg:95.12ms +step:224/1705 train_time:21307ms step_avg:95.12ms +step:225/1705 train_time:21402ms step_avg:95.12ms +step:226/1705 train_time:21496ms step_avg:95.11ms +step:227/1705 train_time:21588ms step_avg:95.10ms +step:228/1705 train_time:21681ms step_avg:95.09ms +step:229/1705 train_time:21773ms step_avg:95.08ms +step:230/1705 train_time:21866ms step_avg:95.07ms +step:231/1705 train_time:21957ms step_avg:95.05ms +step:232/1705 train_time:22049ms step_avg:95.04ms +step:233/1705 train_time:22141ms step_avg:95.03ms +step:234/1705 train_time:22234ms step_avg:95.02ms +step:235/1705 train_time:22328ms step_avg:95.01ms +step:236/1705 train_time:22422ms step_avg:95.01ms +step:237/1705 train_time:22516ms step_avg:95.00ms +step:238/1705 train_time:22608ms step_avg:94.99ms +step:239/1705 train_time:22701ms step_avg:94.98ms +step:240/1705 train_time:22793ms step_avg:94.97ms +step:241/1705 train_time:22885ms step_avg:94.96ms +step:242/1705 train_time:22977ms step_avg:94.95ms +step:243/1705 train_time:23070ms step_avg:94.94ms +step:244/1705 train_time:23162ms step_avg:94.93ms +step:245/1705 train_time:23255ms step_avg:94.92ms +step:246/1705 train_time:23349ms step_avg:94.91ms +step:247/1705 train_time:23443ms step_avg:94.91ms +step:248/1705 train_time:23536ms step_avg:94.90ms +step:249/1705 train_time:23629ms step_avg:94.90ms +step:250/1705 train_time:23722ms step_avg:94.89ms +step:250/1705 val_loss:3.9764 train_time:23815ms step_avg:95.26ms +step:251/1705 train_time:23838ms step_avg:94.97ms +step:252/1705 train_time:23910ms step_avg:94.88ms +step:253/1705 train_time:24002ms step_avg:94.87ms +step:254/1705 train_time:24098ms step_avg:94.87ms +step:255/1705 train_time:24194ms step_avg:94.88ms +step:256/1705 train_time:24286ms step_avg:94.87ms +step:257/1705 train_time:24378ms step_avg:94.86ms +step:258/1705 train_time:24470ms step_avg:94.85ms +step:259/1705 train_time:24562ms step_avg:94.83ms +step:260/1705 train_time:24653ms step_avg:94.82ms +step:261/1705 train_time:24749ms step_avg:94.82ms +step:262/1705 train_time:24843ms step_avg:94.82ms +step:263/1705 train_time:24936ms step_avg:94.82ms +step:264/1705 train_time:25029ms step_avg:94.81ms +step:265/1705 train_time:25123ms step_avg:94.80ms +step:266/1705 train_time:25216ms step_avg:94.80ms +step:267/1705 train_time:25308ms step_avg:94.79ms +step:268/1705 train_time:25400ms step_avg:94.78ms +step:269/1705 train_time:25492ms step_avg:94.76ms +step:270/1705 train_time:25584ms step_avg:94.75ms +step:271/1705 train_time:25677ms step_avg:94.75ms +step:272/1705 train_time:25771ms step_avg:94.75ms +step:273/1705 train_time:25865ms step_avg:94.74ms +step:274/1705 train_time:25958ms step_avg:94.74ms +step:275/1705 train_time:26052ms step_avg:94.73ms +step:276/1705 train_time:26146ms step_avg:94.73ms +step:277/1705 train_time:26238ms step_avg:94.72ms +step:278/1705 train_time:26331ms step_avg:94.72ms +step:279/1705 train_time:26423ms step_avg:94.71ms +step:280/1705 train_time:26515ms step_avg:94.70ms +step:281/1705 train_time:26608ms step_avg:94.69ms +step:282/1705 train_time:26700ms step_avg:94.68ms +step:283/1705 train_time:26793ms step_avg:94.67ms +step:284/1705 train_time:26886ms 
step_avg:94.67ms +step:285/1705 train_time:26979ms step_avg:94.66ms +step:286/1705 train_time:27072ms step_avg:94.66ms +step:287/1705 train_time:27166ms step_avg:94.66ms +step:288/1705 train_time:27259ms step_avg:94.65ms +step:289/1705 train_time:27352ms step_avg:94.64ms +step:290/1705 train_time:27445ms step_avg:94.64ms +step:291/1705 train_time:27538ms step_avg:94.63ms +step:292/1705 train_time:27631ms step_avg:94.63ms +step:293/1705 train_time:27725ms step_avg:94.62ms +step:294/1705 train_time:27817ms step_avg:94.62ms +step:295/1705 train_time:27910ms step_avg:94.61ms +step:296/1705 train_time:28002ms step_avg:94.60ms +step:297/1705 train_time:28095ms step_avg:94.60ms +step:298/1705 train_time:28188ms step_avg:94.59ms +step:299/1705 train_time:28280ms step_avg:94.58ms +step:300/1705 train_time:28373ms step_avg:94.58ms +step:301/1705 train_time:28467ms step_avg:94.57ms +step:302/1705 train_time:28559ms step_avg:94.57ms +step:303/1705 train_time:28652ms step_avg:94.56ms +step:304/1705 train_time:28745ms step_avg:94.55ms +step:305/1705 train_time:28837ms step_avg:94.55ms +step:306/1705 train_time:28930ms step_avg:94.54ms +step:307/1705 train_time:29023ms step_avg:94.54ms +step:308/1705 train_time:29115ms step_avg:94.53ms +step:309/1705 train_time:29209ms step_avg:94.53ms +step:310/1705 train_time:29302ms step_avg:94.52ms +step:311/1705 train_time:29394ms step_avg:94.51ms +step:312/1705 train_time:29488ms step_avg:94.51ms +step:313/1705 train_time:29581ms step_avg:94.51ms +step:314/1705 train_time:29675ms step_avg:94.51ms +step:315/1705 train_time:29768ms step_avg:94.50ms +step:316/1705 train_time:29860ms step_avg:94.49ms +step:317/1705 train_time:29953ms step_avg:94.49ms +step:318/1705 train_time:30046ms step_avg:94.48ms +step:319/1705 train_time:30138ms step_avg:94.48ms +step:320/1705 train_time:30232ms step_avg:94.47ms +step:321/1705 train_time:30324ms step_avg:94.47ms +step:322/1705 train_time:30417ms step_avg:94.46ms +step:323/1705 train_time:30511ms step_avg:94.46ms +step:324/1705 train_time:30604ms step_avg:94.46ms +step:325/1705 train_time:30696ms step_avg:94.45ms +step:326/1705 train_time:30789ms step_avg:94.45ms +step:327/1705 train_time:30881ms step_avg:94.44ms +step:328/1705 train_time:30975ms step_avg:94.43ms +step:329/1705 train_time:31067ms step_avg:94.43ms +step:330/1705 train_time:31159ms step_avg:94.42ms +step:331/1705 train_time:31252ms step_avg:94.42ms +step:332/1705 train_time:31345ms step_avg:94.41ms +step:333/1705 train_time:31438ms step_avg:94.41ms +step:334/1705 train_time:31531ms step_avg:94.41ms +step:335/1705 train_time:31624ms step_avg:94.40ms +step:336/1705 train_time:31717ms step_avg:94.40ms +step:337/1705 train_time:31811ms step_avg:94.39ms +step:338/1705 train_time:31903ms step_avg:94.39ms +step:339/1705 train_time:31995ms step_avg:94.38ms +step:340/1705 train_time:32088ms step_avg:94.38ms +step:341/1705 train_time:32181ms step_avg:94.37ms +step:342/1705 train_time:32274ms step_avg:94.37ms +step:343/1705 train_time:32366ms step_avg:94.36ms +step:344/1705 train_time:32459ms step_avg:94.36ms +step:345/1705 train_time:32553ms step_avg:94.36ms +step:346/1705 train_time:32647ms step_avg:94.35ms +step:347/1705 train_time:32739ms step_avg:94.35ms +step:348/1705 train_time:32831ms step_avg:94.34ms +step:349/1705 train_time:32924ms step_avg:94.34ms +step:350/1705 train_time:33016ms step_avg:94.33ms +step:351/1705 train_time:33109ms step_avg:94.33ms +step:352/1705 train_time:33202ms step_avg:94.32ms +step:353/1705 train_time:33294ms step_avg:94.32ms +step:354/1705 
train_time:33388ms step_avg:94.32ms +step:355/1705 train_time:33481ms step_avg:94.31ms +step:356/1705 train_time:33574ms step_avg:94.31ms +step:357/1705 train_time:33667ms step_avg:94.31ms +step:358/1705 train_time:33761ms step_avg:94.30ms +step:359/1705 train_time:33854ms step_avg:94.30ms +step:360/1705 train_time:33947ms step_avg:94.30ms +step:361/1705 train_time:34039ms step_avg:94.29ms +step:362/1705 train_time:34132ms step_avg:94.29ms +step:363/1705 train_time:34225ms step_avg:94.28ms +step:364/1705 train_time:34318ms step_avg:94.28ms +step:365/1705 train_time:34412ms step_avg:94.28ms +step:366/1705 train_time:34505ms step_avg:94.28ms +step:367/1705 train_time:34598ms step_avg:94.27ms +step:368/1705 train_time:34691ms step_avg:94.27ms +step:369/1705 train_time:34783ms step_avg:94.26ms +step:370/1705 train_time:34876ms step_avg:94.26ms +step:371/1705 train_time:34968ms step_avg:94.25ms +step:372/1705 train_time:35061ms step_avg:94.25ms +step:373/1705 train_time:35153ms step_avg:94.25ms +step:374/1705 train_time:35247ms step_avg:94.24ms +step:375/1705 train_time:35339ms step_avg:94.24ms +step:375/1705 val_loss:3.8205 train_time:35433ms step_avg:94.49ms +step:376/1705 train_time:35458ms step_avg:94.30ms +step:377/1705 train_time:35530ms step_avg:94.24ms +step:378/1705 train_time:35630ms step_avg:94.26ms +step:379/1705 train_time:35724ms step_avg:94.26ms +step:380/1705 train_time:35816ms step_avg:94.25ms +step:381/1705 train_time:35909ms step_avg:94.25ms +step:382/1705 train_time:36001ms step_avg:94.24ms +step:383/1705 train_time:36093ms step_avg:94.24ms +step:384/1705 train_time:36185ms step_avg:94.23ms +step:385/1705 train_time:36277ms step_avg:94.23ms +step:386/1705 train_time:36369ms step_avg:94.22ms +step:387/1705 train_time:36463ms step_avg:94.22ms +step:388/1705 train_time:36557ms step_avg:94.22ms +step:389/1705 train_time:36652ms step_avg:94.22ms +step:390/1705 train_time:36746ms step_avg:94.22ms +step:391/1705 train_time:36839ms step_avg:94.22ms +step:392/1705 train_time:36932ms step_avg:94.21ms +step:393/1705 train_time:37024ms step_avg:94.21ms +step:394/1705 train_time:37117ms step_avg:94.20ms +step:395/1705 train_time:37208ms step_avg:94.20ms +step:396/1705 train_time:37300ms step_avg:94.19ms +step:397/1705 train_time:37393ms step_avg:94.19ms +step:398/1705 train_time:37486ms step_avg:94.19ms +step:399/1705 train_time:37580ms step_avg:94.19ms +step:400/1705 train_time:37673ms step_avg:94.18ms +step:401/1705 train_time:37768ms step_avg:94.18ms +step:402/1705 train_time:37862ms step_avg:94.18ms +step:403/1705 train_time:37954ms step_avg:94.18ms +step:404/1705 train_time:38047ms step_avg:94.18ms +step:405/1705 train_time:38139ms step_avg:94.17ms +step:406/1705 train_time:38231ms step_avg:94.17ms +step:407/1705 train_time:38324ms step_avg:94.16ms +step:408/1705 train_time:38416ms step_avg:94.16ms +step:409/1705 train_time:38509ms step_avg:94.15ms +step:410/1705 train_time:38602ms step_avg:94.15ms +step:411/1705 train_time:38695ms step_avg:94.15ms +step:412/1705 train_time:38790ms step_avg:94.15ms +step:413/1705 train_time:38884ms step_avg:94.15ms +step:414/1705 train_time:38976ms step_avg:94.14ms +step:415/1705 train_time:39069ms step_avg:94.14ms +step:416/1705 train_time:39161ms step_avg:94.14ms +step:417/1705 train_time:39254ms step_avg:94.13ms +step:418/1705 train_time:39346ms step_avg:94.13ms +step:419/1705 train_time:39439ms step_avg:94.13ms +step:420/1705 train_time:39532ms step_avg:94.12ms +step:421/1705 train_time:39625ms step_avg:94.12ms +step:422/1705 train_time:39719ms 
step_avg:94.12ms +step:423/1705 train_time:39812ms step_avg:94.12ms +step:424/1705 train_time:39906ms step_avg:94.12ms +step:425/1705 train_time:40189ms step_avg:94.56ms +step:426/1705 train_time:40360ms step_avg:94.74ms +step:427/1705 train_time:40451ms step_avg:94.73ms +step:428/1705 train_time:40543ms step_avg:94.73ms +step:429/1705 train_time:40635ms step_avg:94.72ms +step:430/1705 train_time:40727ms step_avg:94.71ms +step:431/1705 train_time:40818ms step_avg:94.71ms +step:432/1705 train_time:40910ms step_avg:94.70ms +step:433/1705 train_time:41002ms step_avg:94.69ms +step:434/1705 train_time:41093ms step_avg:94.69ms +step:435/1705 train_time:41188ms step_avg:94.68ms +step:436/1705 train_time:41285ms step_avg:94.69ms +step:437/1705 train_time:41382ms step_avg:94.69ms +step:438/1705 train_time:41474ms step_avg:94.69ms +step:439/1705 train_time:41568ms step_avg:94.69ms +step:440/1705 train_time:41660ms step_avg:94.68ms +step:441/1705 train_time:41753ms step_avg:94.68ms +step:442/1705 train_time:41845ms step_avg:94.67ms +step:443/1705 train_time:41937ms step_avg:94.66ms +step:444/1705 train_time:42029ms step_avg:94.66ms +step:445/1705 train_time:42121ms step_avg:94.65ms +step:446/1705 train_time:42214ms step_avg:94.65ms +step:447/1705 train_time:42309ms step_avg:94.65ms +step:448/1705 train_time:42403ms step_avg:94.65ms +step:449/1705 train_time:42496ms step_avg:94.65ms +step:450/1705 train_time:42589ms step_avg:94.64ms +step:451/1705 train_time:42683ms step_avg:94.64ms +step:452/1705 train_time:42776ms step_avg:94.64ms +step:453/1705 train_time:42868ms step_avg:94.63ms +step:454/1705 train_time:42961ms step_avg:94.63ms +step:455/1705 train_time:43052ms step_avg:94.62ms +step:456/1705 train_time:43145ms step_avg:94.62ms +step:457/1705 train_time:43238ms step_avg:94.61ms +step:458/1705 train_time:43332ms step_avg:94.61ms +step:459/1705 train_time:43426ms step_avg:94.61ms +step:460/1705 train_time:43519ms step_avg:94.61ms +step:461/1705 train_time:43613ms step_avg:94.60ms +step:462/1705 train_time:43705ms step_avg:94.60ms +step:463/1705 train_time:43798ms step_avg:94.60ms +step:464/1705 train_time:43891ms step_avg:94.59ms +step:465/1705 train_time:43983ms step_avg:94.59ms +step:466/1705 train_time:44076ms step_avg:94.58ms +step:467/1705 train_time:44169ms step_avg:94.58ms +step:468/1705 train_time:44263ms step_avg:94.58ms +step:469/1705 train_time:44356ms step_avg:94.58ms +step:470/1705 train_time:44449ms step_avg:94.57ms +step:471/1705 train_time:44542ms step_avg:94.57ms +step:472/1705 train_time:44635ms step_avg:94.57ms +step:473/1705 train_time:44728ms step_avg:94.56ms +step:474/1705 train_time:44821ms step_avg:94.56ms +step:475/1705 train_time:44913ms step_avg:94.55ms +step:476/1705 train_time:45006ms step_avg:94.55ms +step:477/1705 train_time:45099ms step_avg:94.55ms +step:478/1705 train_time:45192ms step_avg:94.54ms +step:479/1705 train_time:45286ms step_avg:94.54ms +step:480/1705 train_time:45379ms step_avg:94.54ms +step:481/1705 train_time:45473ms step_avg:94.54ms +step:482/1705 train_time:45567ms step_avg:94.54ms +step:483/1705 train_time:45659ms step_avg:94.53ms +step:484/1705 train_time:45751ms step_avg:94.53ms +step:485/1705 train_time:45845ms step_avg:94.52ms +step:486/1705 train_time:45937ms step_avg:94.52ms +step:487/1705 train_time:46031ms step_avg:94.52ms +step:488/1705 train_time:46124ms step_avg:94.52ms +step:489/1705 train_time:46217ms step_avg:94.51ms +step:490/1705 train_time:46311ms step_avg:94.51ms +step:491/1705 train_time:46405ms step_avg:94.51ms +step:492/1705 
train_time:46498ms step_avg:94.51ms +step:493/1705 train_time:46591ms step_avg:94.51ms +step:494/1705 train_time:46684ms step_avg:94.50ms +step:495/1705 train_time:46777ms step_avg:94.50ms +step:496/1705 train_time:46870ms step_avg:94.50ms +step:497/1705 train_time:46962ms step_avg:94.49ms +step:498/1705 train_time:47055ms step_avg:94.49ms +step:499/1705 train_time:47148ms step_avg:94.49ms +step:500/1705 train_time:47241ms step_avg:94.48ms +step:500/1705 val_loss:3.7190 train_time:47334ms step_avg:94.67ms +step:501/1705 train_time:47357ms step_avg:94.53ms +step:502/1705 train_time:47432ms step_avg:94.49ms +step:503/1705 train_time:47530ms step_avg:94.49ms +step:504/1705 train_time:47623ms step_avg:94.49ms +step:505/1705 train_time:47716ms step_avg:94.49ms +step:506/1705 train_time:47807ms step_avg:94.48ms +step:507/1705 train_time:47900ms step_avg:94.48ms +step:508/1705 train_time:47992ms step_avg:94.47ms +step:509/1705 train_time:48084ms step_avg:94.47ms +step:510/1705 train_time:48176ms step_avg:94.46ms +step:511/1705 train_time:48269ms step_avg:94.46ms +step:512/1705 train_time:48363ms step_avg:94.46ms +step:513/1705 train_time:48459ms step_avg:94.46ms +step:514/1705 train_time:48553ms step_avg:94.46ms +step:515/1705 train_time:48646ms step_avg:94.46ms +step:516/1705 train_time:48739ms step_avg:94.45ms +step:517/1705 train_time:48831ms step_avg:94.45ms +step:518/1705 train_time:48924ms step_avg:94.45ms +step:519/1705 train_time:49016ms step_avg:94.44ms +step:520/1705 train_time:49107ms step_avg:94.44ms +step:521/1705 train_time:49201ms step_avg:94.44ms +step:522/1705 train_time:49295ms step_avg:94.43ms +step:523/1705 train_time:49388ms step_avg:94.43ms +step:524/1705 train_time:49482ms step_avg:94.43ms +step:525/1705 train_time:49576ms step_avg:94.43ms +step:526/1705 train_time:49670ms step_avg:94.43ms +step:527/1705 train_time:49763ms step_avg:94.43ms +step:528/1705 train_time:49855ms step_avg:94.42ms +step:529/1705 train_time:49947ms step_avg:94.42ms +step:530/1705 train_time:50040ms step_avg:94.41ms +step:531/1705 train_time:50132ms step_avg:94.41ms +step:532/1705 train_time:50225ms step_avg:94.41ms +step:533/1705 train_time:50318ms step_avg:94.40ms +step:534/1705 train_time:50411ms step_avg:94.40ms +step:535/1705 train_time:50504ms step_avg:94.40ms +step:536/1705 train_time:50598ms step_avg:94.40ms +step:537/1705 train_time:50691ms step_avg:94.40ms +step:538/1705 train_time:50784ms step_avg:94.39ms +step:539/1705 train_time:50877ms step_avg:94.39ms +step:540/1705 train_time:50970ms step_avg:94.39ms +step:541/1705 train_time:51062ms step_avg:94.38ms +step:542/1705 train_time:51155ms step_avg:94.38ms +step:543/1705 train_time:51248ms step_avg:94.38ms +step:544/1705 train_time:51341ms step_avg:94.38ms +step:545/1705 train_time:51434ms step_avg:94.37ms +step:546/1705 train_time:51527ms step_avg:94.37ms +step:547/1705 train_time:51621ms step_avg:94.37ms +step:548/1705 train_time:51715ms step_avg:94.37ms +step:549/1705 train_time:51808ms step_avg:94.37ms +step:550/1705 train_time:51901ms step_avg:94.37ms +step:551/1705 train_time:51993ms step_avg:94.36ms +step:552/1705 train_time:52086ms step_avg:94.36ms +step:553/1705 train_time:52179ms step_avg:94.36ms +step:554/1705 train_time:52272ms step_avg:94.35ms +step:555/1705 train_time:52365ms step_avg:94.35ms +step:556/1705 train_time:52458ms step_avg:94.35ms +step:557/1705 train_time:52551ms step_avg:94.35ms +step:558/1705 train_time:52644ms step_avg:94.34ms +step:559/1705 train_time:52737ms step_avg:94.34ms +step:560/1705 train_time:52830ms 
step_avg:94.34ms +step:561/1705 train_time:52923ms step_avg:94.34ms +step:562/1705 train_time:53016ms step_avg:94.33ms +step:563/1705 train_time:53108ms step_avg:94.33ms +step:564/1705 train_time:53202ms step_avg:94.33ms +step:565/1705 train_time:53295ms step_avg:94.33ms +step:566/1705 train_time:53387ms step_avg:94.32ms +step:567/1705 train_time:53480ms step_avg:94.32ms +step:568/1705 train_time:53573ms step_avg:94.32ms +step:569/1705 train_time:53666ms step_avg:94.32ms +step:570/1705 train_time:53759ms step_avg:94.31ms +step:571/1705 train_time:53854ms step_avg:94.32ms +step:572/1705 train_time:53949ms step_avg:94.32ms +step:573/1705 train_time:54043ms step_avg:94.32ms +step:574/1705 train_time:54137ms step_avg:94.32ms +step:575/1705 train_time:54231ms step_avg:94.32ms +step:576/1705 train_time:54326ms step_avg:94.32ms +step:577/1705 train_time:54421ms step_avg:94.32ms +step:578/1705 train_time:54514ms step_avg:94.31ms +step:579/1705 train_time:54607ms step_avg:94.31ms +step:580/1705 train_time:54702ms step_avg:94.31ms +step:581/1705 train_time:54796ms step_avg:94.31ms +step:582/1705 train_time:54889ms step_avg:94.31ms +step:583/1705 train_time:54983ms step_avg:94.31ms +step:584/1705 train_time:55078ms step_avg:94.31ms +step:585/1705 train_time:55172ms step_avg:94.31ms +step:586/1705 train_time:55266ms step_avg:94.31ms +step:587/1705 train_time:55361ms step_avg:94.31ms +step:588/1705 train_time:55455ms step_avg:94.31ms +step:589/1705 train_time:55549ms step_avg:94.31ms +step:590/1705 train_time:55643ms step_avg:94.31ms +step:591/1705 train_time:55738ms step_avg:94.31ms +step:592/1705 train_time:55832ms step_avg:94.31ms +step:593/1705 train_time:55926ms step_avg:94.31ms +step:594/1705 train_time:56020ms step_avg:94.31ms +step:595/1705 train_time:56115ms step_avg:94.31ms +step:596/1705 train_time:56209ms step_avg:94.31ms +step:597/1705 train_time:56303ms step_avg:94.31ms +step:598/1705 train_time:56398ms step_avg:94.31ms +step:599/1705 train_time:56491ms step_avg:94.31ms +step:600/1705 train_time:56586ms step_avg:94.31ms +step:601/1705 train_time:56680ms step_avg:94.31ms +step:602/1705 train_time:56774ms step_avg:94.31ms +step:603/1705 train_time:56869ms step_avg:94.31ms +step:604/1705 train_time:56964ms step_avg:94.31ms +step:605/1705 train_time:57058ms step_avg:94.31ms +step:606/1705 train_time:57152ms step_avg:94.31ms +step:607/1705 train_time:57246ms step_avg:94.31ms +step:608/1705 train_time:57341ms step_avg:94.31ms +step:609/1705 train_time:57435ms step_avg:94.31ms +step:610/1705 train_time:57529ms step_avg:94.31ms +step:611/1705 train_time:57624ms step_avg:94.31ms +step:612/1705 train_time:57718ms step_avg:94.31ms +step:613/1705 train_time:57812ms step_avg:94.31ms +step:614/1705 train_time:57906ms step_avg:94.31ms +step:615/1705 train_time:58001ms step_avg:94.31ms +step:616/1705 train_time:58095ms step_avg:94.31ms +step:617/1705 train_time:58189ms step_avg:94.31ms +step:618/1705 train_time:58283ms step_avg:94.31ms +step:619/1705 train_time:58378ms step_avg:94.31ms +step:620/1705 train_time:58472ms step_avg:94.31ms +step:621/1705 train_time:58566ms step_avg:94.31ms +step:622/1705 train_time:58660ms step_avg:94.31ms +step:623/1705 train_time:58755ms step_avg:94.31ms +step:624/1705 train_time:58849ms step_avg:94.31ms +step:625/1705 train_time:58944ms step_avg:94.31ms +step:625/1705 val_loss:3.6190 train_time:59040ms step_avg:94.46ms +step:626/1705 train_time:59063ms step_avg:94.35ms +step:627/1705 train_time:59137ms step_avg:94.32ms +step:628/1705 train_time:59235ms step_avg:94.32ms 
+step:629/1705 train_time:59341ms step_avg:94.34ms +step:630/1705 train_time:59438ms step_avg:94.35ms +step:631/1705 train_time:59531ms step_avg:94.34ms +step:632/1705 train_time:59624ms step_avg:94.34ms +step:633/1705 train_time:59717ms step_avg:94.34ms +step:634/1705 train_time:59811ms step_avg:94.34ms +step:635/1705 train_time:59903ms step_avg:94.34ms +step:636/1705 train_time:59998ms step_avg:94.34ms +step:637/1705 train_time:60093ms step_avg:94.34ms +step:638/1705 train_time:60188ms step_avg:94.34ms +step:639/1705 train_time:60561ms step_avg:94.77ms +step:640/1705 train_time:60636ms step_avg:94.74ms +step:641/1705 train_time:60729ms step_avg:94.74ms +step:642/1705 train_time:60821ms step_avg:94.74ms +step:643/1705 train_time:60915ms step_avg:94.74ms +step:644/1705 train_time:61008ms step_avg:94.73ms +step:645/1705 train_time:61101ms step_avg:94.73ms +step:646/1705 train_time:61194ms step_avg:94.73ms +step:647/1705 train_time:61287ms step_avg:94.72ms +step:648/1705 train_time:61381ms step_avg:94.72ms +step:649/1705 train_time:61478ms step_avg:94.73ms +step:650/1705 train_time:61576ms step_avg:94.73ms +step:651/1705 train_time:61672ms step_avg:94.73ms +step:652/1705 train_time:61766ms step_avg:94.73ms +step:653/1705 train_time:61860ms step_avg:94.73ms +step:654/1705 train_time:61955ms step_avg:94.73ms +step:655/1705 train_time:62048ms step_avg:94.73ms +step:656/1705 train_time:62142ms step_avg:94.73ms +step:657/1705 train_time:62235ms step_avg:94.73ms +step:658/1705 train_time:62329ms step_avg:94.73ms +step:659/1705 train_time:62425ms step_avg:94.73ms +step:660/1705 train_time:62519ms step_avg:94.73ms +step:661/1705 train_time:62615ms step_avg:94.73ms +step:662/1705 train_time:62712ms step_avg:94.73ms +step:663/1705 train_time:62806ms step_avg:94.73ms +step:664/1705 train_time:62900ms step_avg:94.73ms +step:665/1705 train_time:62994ms step_avg:94.73ms +step:666/1705 train_time:63088ms step_avg:94.73ms +step:667/1705 train_time:63182ms step_avg:94.73ms +step:668/1705 train_time:63276ms step_avg:94.72ms +step:669/1705 train_time:63370ms step_avg:94.72ms +step:670/1705 train_time:63463ms step_avg:94.72ms +step:671/1705 train_time:63559ms step_avg:94.72ms +step:672/1705 train_time:63654ms step_avg:94.72ms +step:673/1705 train_time:63750ms step_avg:94.72ms +step:674/1705 train_time:63843ms step_avg:94.72ms +step:675/1705 train_time:63938ms step_avg:94.72ms +step:676/1705 train_time:64032ms step_avg:94.72ms +step:677/1705 train_time:64125ms step_avg:94.72ms +step:678/1705 train_time:64219ms step_avg:94.72ms +step:679/1705 train_time:64313ms step_avg:94.72ms +step:680/1705 train_time:64407ms step_avg:94.72ms +step:681/1705 train_time:64501ms step_avg:94.71ms +step:682/1705 train_time:64595ms step_avg:94.71ms +step:683/1705 train_time:64691ms step_avg:94.72ms +step:684/1705 train_time:64786ms step_avg:94.72ms +step:685/1705 train_time:64880ms step_avg:94.72ms +step:686/1705 train_time:64976ms step_avg:94.72ms +step:687/1705 train_time:65069ms step_avg:94.72ms +step:688/1705 train_time:65163ms step_avg:94.71ms +step:689/1705 train_time:65257ms step_avg:94.71ms +step:690/1705 train_time:65352ms step_avg:94.71ms +step:691/1705 train_time:65446ms step_avg:94.71ms +step:692/1705 train_time:65540ms step_avg:94.71ms +step:693/1705 train_time:65635ms step_avg:94.71ms +step:694/1705 train_time:65729ms step_avg:94.71ms +step:695/1705 train_time:65824ms step_avg:94.71ms +step:696/1705 train_time:65918ms step_avg:94.71ms +step:697/1705 train_time:66013ms step_avg:94.71ms +step:698/1705 train_time:66107ms 
step_avg:94.71ms +step:699/1705 train_time:66201ms step_avg:94.71ms +step:700/1705 train_time:66296ms step_avg:94.71ms +step:701/1705 train_time:66390ms step_avg:94.71ms +step:702/1705 train_time:66484ms step_avg:94.71ms +step:703/1705 train_time:66578ms step_avg:94.71ms +step:704/1705 train_time:66673ms step_avg:94.71ms +step:705/1705 train_time:66767ms step_avg:94.71ms +step:706/1705 train_time:66861ms step_avg:94.70ms +step:707/1705 train_time:66956ms step_avg:94.70ms +step:708/1705 train_time:67051ms step_avg:94.70ms +step:709/1705 train_time:67145ms step_avg:94.70ms +step:710/1705 train_time:67238ms step_avg:94.70ms +step:711/1705 train_time:67334ms step_avg:94.70ms +step:712/1705 train_time:67429ms step_avg:94.70ms +step:713/1705 train_time:67523ms step_avg:94.70ms +step:714/1705 train_time:67617ms step_avg:94.70ms +step:715/1705 train_time:67712ms step_avg:94.70ms +step:716/1705 train_time:67806ms step_avg:94.70ms +step:717/1705 train_time:67900ms step_avg:94.70ms +step:718/1705 train_time:67994ms step_avg:94.70ms +step:719/1705 train_time:68088ms step_avg:94.70ms +step:720/1705 train_time:68182ms step_avg:94.70ms +step:721/1705 train_time:68276ms step_avg:94.70ms +step:722/1705 train_time:68372ms step_avg:94.70ms +step:723/1705 train_time:68466ms step_avg:94.70ms +step:724/1705 train_time:68560ms step_avg:94.70ms +step:725/1705 train_time:68655ms step_avg:94.70ms +step:726/1705 train_time:68751ms step_avg:94.70ms +step:727/1705 train_time:68845ms step_avg:94.70ms +step:728/1705 train_time:68939ms step_avg:94.70ms +step:729/1705 train_time:69034ms step_avg:94.70ms +step:730/1705 train_time:69128ms step_avg:94.70ms +step:731/1705 train_time:69221ms step_avg:94.69ms +step:732/1705 train_time:69316ms step_avg:94.69ms +step:733/1705 train_time:69410ms step_avg:94.69ms +step:734/1705 train_time:69505ms step_avg:94.69ms +step:735/1705 train_time:69599ms step_avg:94.69ms +step:736/1705 train_time:69694ms step_avg:94.69ms +step:737/1705 train_time:69789ms step_avg:94.69ms +step:738/1705 train_time:69884ms step_avg:94.69ms +step:739/1705 train_time:69978ms step_avg:94.69ms +step:740/1705 train_time:70073ms step_avg:94.69ms +step:741/1705 train_time:70167ms step_avg:94.69ms +step:742/1705 train_time:70261ms step_avg:94.69ms +step:743/1705 train_time:70356ms step_avg:94.69ms +step:744/1705 train_time:70451ms step_avg:94.69ms +step:745/1705 train_time:70546ms step_avg:94.69ms +step:746/1705 train_time:70640ms step_avg:94.69ms +step:747/1705 train_time:70736ms step_avg:94.69ms +step:748/1705 train_time:70830ms step_avg:94.69ms +step:749/1705 train_time:70924ms step_avg:94.69ms +step:750/1705 train_time:71018ms step_avg:94.69ms +step:750/1705 val_loss:3.5638 train_time:71114ms step_avg:94.82ms +step:751/1705 train_time:71137ms step_avg:94.72ms +step:752/1705 train_time:71213ms step_avg:94.70ms +step:753/1705 train_time:71310ms step_avg:94.70ms +step:754/1705 train_time:71404ms step_avg:94.70ms +step:755/1705 train_time:71498ms step_avg:94.70ms +step:756/1705 train_time:71591ms step_avg:94.70ms +step:757/1705 train_time:71684ms step_avg:94.70ms +step:758/1705 train_time:71778ms step_avg:94.69ms +step:759/1705 train_time:71871ms step_avg:94.69ms +step:760/1705 train_time:71964ms step_avg:94.69ms +step:761/1705 train_time:72059ms step_avg:94.69ms +step:762/1705 train_time:72154ms step_avg:94.69ms +step:763/1705 train_time:72249ms step_avg:94.69ms +step:764/1705 train_time:72344ms step_avg:94.69ms +step:765/1705 train_time:72438ms step_avg:94.69ms +step:766/1705 train_time:72533ms step_avg:94.69ms 
+step:767/1705 train_time:72626ms step_avg:94.69ms +step:768/1705 train_time:72720ms step_avg:94.69ms +step:769/1705 train_time:72815ms step_avg:94.69ms +step:770/1705 train_time:72908ms step_avg:94.69ms +step:771/1705 train_time:73001ms step_avg:94.68ms +step:772/1705 train_time:73097ms step_avg:94.69ms +step:773/1705 train_time:73192ms step_avg:94.69ms +step:774/1705 train_time:73286ms step_avg:94.68ms +step:775/1705 train_time:73381ms step_avg:94.69ms +step:776/1705 train_time:73475ms step_avg:94.68ms +step:777/1705 train_time:73570ms step_avg:94.68ms +step:778/1705 train_time:73663ms step_avg:94.68ms +step:779/1705 train_time:73758ms step_avg:94.68ms +step:780/1705 train_time:73851ms step_avg:94.68ms +step:781/1705 train_time:73944ms step_avg:94.68ms +step:782/1705 train_time:74038ms step_avg:94.68ms +step:783/1705 train_time:74133ms step_avg:94.68ms +step:784/1705 train_time:74227ms step_avg:94.68ms +step:785/1705 train_time:74322ms step_avg:94.68ms +step:786/1705 train_time:74416ms step_avg:94.68ms +step:787/1705 train_time:74511ms step_avg:94.68ms +step:788/1705 train_time:74604ms step_avg:94.68ms +step:789/1705 train_time:74699ms step_avg:94.68ms +step:790/1705 train_time:74793ms step_avg:94.68ms +step:791/1705 train_time:74887ms step_avg:94.67ms +step:792/1705 train_time:74980ms step_avg:94.67ms +step:793/1705 train_time:75076ms step_avg:94.67ms +step:794/1705 train_time:75169ms step_avg:94.67ms +step:795/1705 train_time:75264ms step_avg:94.67ms +step:796/1705 train_time:75359ms step_avg:94.67ms +step:797/1705 train_time:75453ms step_avg:94.67ms +step:798/1705 train_time:75548ms step_avg:94.67ms +step:799/1705 train_time:75642ms step_avg:94.67ms +step:800/1705 train_time:75736ms step_avg:94.67ms +step:801/1705 train_time:75830ms step_avg:94.67ms +step:802/1705 train_time:75924ms step_avg:94.67ms +step:803/1705 train_time:76019ms step_avg:94.67ms +step:804/1705 train_time:76113ms step_avg:94.67ms +step:805/1705 train_time:76206ms step_avg:94.67ms +step:806/1705 train_time:76300ms step_avg:94.67ms +step:807/1705 train_time:76395ms step_avg:94.67ms +step:808/1705 train_time:76489ms step_avg:94.67ms +step:809/1705 train_time:76584ms step_avg:94.67ms +step:810/1705 train_time:76678ms step_avg:94.66ms +step:811/1705 train_time:76772ms step_avg:94.66ms +step:812/1705 train_time:76867ms step_avg:94.66ms +step:813/1705 train_time:76961ms step_avg:94.66ms +step:814/1705 train_time:77056ms step_avg:94.66ms +step:815/1705 train_time:77150ms step_avg:94.66ms +step:816/1705 train_time:77244ms step_avg:94.66ms +step:817/1705 train_time:77339ms step_avg:94.66ms +step:818/1705 train_time:77434ms step_avg:94.66ms +step:819/1705 train_time:77528ms step_avg:94.66ms +step:820/1705 train_time:77622ms step_avg:94.66ms +step:821/1705 train_time:77717ms step_avg:94.66ms +step:822/1705 train_time:77811ms step_avg:94.66ms +step:823/1705 train_time:77905ms step_avg:94.66ms +step:824/1705 train_time:77999ms step_avg:94.66ms +step:825/1705 train_time:78094ms step_avg:94.66ms +step:826/1705 train_time:78187ms step_avg:94.66ms +step:827/1705 train_time:78281ms step_avg:94.66ms +step:828/1705 train_time:78376ms step_avg:94.66ms +step:829/1705 train_time:78471ms step_avg:94.66ms +step:830/1705 train_time:78564ms step_avg:94.66ms +step:831/1705 train_time:78658ms step_avg:94.66ms +step:832/1705 train_time:78753ms step_avg:94.65ms +step:833/1705 train_time:78847ms step_avg:94.65ms +step:834/1705 train_time:78941ms step_avg:94.65ms +step:835/1705 train_time:79035ms step_avg:94.65ms +step:836/1705 train_time:79130ms 
step_avg:94.65ms +step:837/1705 train_time:79225ms step_avg:94.65ms +step:838/1705 train_time:79319ms step_avg:94.65ms +step:839/1705 train_time:79414ms step_avg:94.65ms +step:840/1705 train_time:79508ms step_avg:94.65ms +step:841/1705 train_time:79602ms step_avg:94.65ms +step:842/1705 train_time:79697ms step_avg:94.65ms +step:843/1705 train_time:79791ms step_avg:94.65ms +step:844/1705 train_time:79885ms step_avg:94.65ms +step:845/1705 train_time:79979ms step_avg:94.65ms +step:846/1705 train_time:80075ms step_avg:94.65ms +step:847/1705 train_time:80169ms step_avg:94.65ms +step:848/1705 train_time:80263ms step_avg:94.65ms +step:849/1705 train_time:80357ms step_avg:94.65ms +step:850/1705 train_time:80451ms step_avg:94.65ms +step:851/1705 train_time:80701ms step_avg:94.83ms +step:852/1705 train_time:80860ms step_avg:94.91ms +step:853/1705 train_time:80952ms step_avg:94.90ms +step:854/1705 train_time:81045ms step_avg:94.90ms +step:855/1705 train_time:81138ms step_avg:94.90ms +step:856/1705 train_time:81231ms step_avg:94.90ms +step:857/1705 train_time:81325ms step_avg:94.89ms +step:858/1705 train_time:81418ms step_avg:94.89ms +step:859/1705 train_time:81511ms step_avg:94.89ms +step:860/1705 train_time:81604ms step_avg:94.89ms +step:861/1705 train_time:81699ms step_avg:94.89ms +step:862/1705 train_time:81798ms step_avg:94.89ms +step:863/1705 train_time:81896ms step_avg:94.90ms +step:864/1705 train_time:81992ms step_avg:94.90ms +step:865/1705 train_time:82085ms step_avg:94.90ms +step:866/1705 train_time:82179ms step_avg:94.89ms +step:867/1705 train_time:82272ms step_avg:94.89ms +step:868/1705 train_time:82365ms step_avg:94.89ms +step:869/1705 train_time:82459ms step_avg:94.89ms +step:870/1705 train_time:82553ms step_avg:94.89ms +step:871/1705 train_time:82646ms step_avg:94.89ms +step:872/1705 train_time:82743ms step_avg:94.89ms +step:873/1705 train_time:82839ms step_avg:94.89ms +step:874/1705 train_time:82934ms step_avg:94.89ms +step:875/1705 train_time:83029ms step_avg:94.89ms +step:875/1705 val_loss:3.5229 train_time:83123ms step_avg:95.00ms +step:876/1705 train_time:83145ms step_avg:94.91ms +step:877/1705 train_time:83224ms step_avg:94.90ms +step:878/1705 train_time:83324ms step_avg:94.90ms +step:879/1705 train_time:83419ms step_avg:94.90ms +step:880/1705 train_time:83512ms step_avg:94.90ms +step:881/1705 train_time:83605ms step_avg:94.90ms +step:882/1705 train_time:83698ms step_avg:94.90ms +step:883/1705 train_time:83791ms step_avg:94.89ms +step:884/1705 train_time:83885ms step_avg:94.89ms +step:885/1705 train_time:83978ms step_avg:94.89ms +step:886/1705 train_time:84072ms step_avg:94.89ms +step:887/1705 train_time:84168ms step_avg:94.89ms +step:888/1705 train_time:84265ms step_avg:94.89ms +step:889/1705 train_time:84363ms step_avg:94.90ms +step:890/1705 train_time:84458ms step_avg:94.90ms +step:891/1705 train_time:84552ms step_avg:94.90ms +step:892/1705 train_time:84647ms step_avg:94.90ms +step:893/1705 train_time:84740ms step_avg:94.89ms +step:894/1705 train_time:84833ms step_avg:94.89ms +step:895/1705 train_time:84927ms step_avg:94.89ms +step:896/1705 train_time:85021ms step_avg:94.89ms +step:897/1705 train_time:85115ms step_avg:94.89ms +step:898/1705 train_time:85211ms step_avg:94.89ms +step:899/1705 train_time:85307ms step_avg:94.89ms +step:900/1705 train_time:85403ms step_avg:94.89ms +step:901/1705 train_time:85497ms step_avg:94.89ms +step:902/1705 train_time:85592ms step_avg:94.89ms +step:903/1705 train_time:85686ms step_avg:94.89ms +step:904/1705 train_time:85779ms step_avg:94.89ms 
+step:905/1705 train_time:85872ms step_avg:94.89ms +step:906/1705 train_time:85967ms step_avg:94.89ms +step:907/1705 train_time:86061ms step_avg:94.89ms +step:908/1705 train_time:86155ms step_avg:94.88ms +step:909/1705 train_time:86250ms step_avg:94.88ms +step:910/1705 train_time:86345ms step_avg:94.88ms +step:911/1705 train_time:86441ms step_avg:94.89ms +step:912/1705 train_time:86536ms step_avg:94.89ms +step:913/1705 train_time:86631ms step_avg:94.89ms +step:914/1705 train_time:86725ms step_avg:94.89ms +step:915/1705 train_time:86819ms step_avg:94.88ms +step:916/1705 train_time:86913ms step_avg:94.88ms +step:917/1705 train_time:87007ms step_avg:94.88ms +step:918/1705 train_time:87102ms step_avg:94.88ms +step:919/1705 train_time:87197ms step_avg:94.88ms +step:920/1705 train_time:87291ms step_avg:94.88ms +step:921/1705 train_time:87386ms step_avg:94.88ms +step:922/1705 train_time:87482ms step_avg:94.88ms +step:923/1705 train_time:87576ms step_avg:94.88ms +step:924/1705 train_time:87670ms step_avg:94.88ms +step:925/1705 train_time:87765ms step_avg:94.88ms +step:926/1705 train_time:87859ms step_avg:94.88ms +step:927/1705 train_time:87952ms step_avg:94.88ms +step:928/1705 train_time:88047ms step_avg:94.88ms +step:929/1705 train_time:88142ms step_avg:94.88ms +step:930/1705 train_time:88236ms step_avg:94.88ms +step:931/1705 train_time:88331ms step_avg:94.88ms +step:932/1705 train_time:88426ms step_avg:94.88ms +step:933/1705 train_time:88522ms step_avg:94.88ms +step:934/1705 train_time:88616ms step_avg:94.88ms +step:935/1705 train_time:88710ms step_avg:94.88ms +step:936/1705 train_time:88805ms step_avg:94.88ms +step:937/1705 train_time:88899ms step_avg:94.88ms +step:938/1705 train_time:88993ms step_avg:94.87ms +step:939/1705 train_time:89088ms step_avg:94.88ms +step:940/1705 train_time:89182ms step_avg:94.87ms +step:941/1705 train_time:89277ms step_avg:94.87ms +step:942/1705 train_time:89372ms step_avg:94.87ms +step:943/1705 train_time:89467ms step_avg:94.87ms +step:944/1705 train_time:89561ms step_avg:94.87ms +step:945/1705 train_time:89655ms step_avg:94.87ms +step:946/1705 train_time:89750ms step_avg:94.87ms +step:947/1705 train_time:89845ms step_avg:94.87ms +step:948/1705 train_time:89940ms step_avg:94.87ms +step:949/1705 train_time:90034ms step_avg:94.87ms +step:950/1705 train_time:90128ms step_avg:94.87ms +step:951/1705 train_time:90223ms step_avg:94.87ms +step:952/1705 train_time:90318ms step_avg:94.87ms +step:953/1705 train_time:90412ms step_avg:94.87ms +step:954/1705 train_time:90506ms step_avg:94.87ms +step:955/1705 train_time:90601ms step_avg:94.87ms +step:956/1705 train_time:90696ms step_avg:94.87ms +step:957/1705 train_time:90790ms step_avg:94.87ms +step:958/1705 train_time:90885ms step_avg:94.87ms +step:959/1705 train_time:90979ms step_avg:94.87ms +step:960/1705 train_time:91073ms step_avg:94.87ms +step:961/1705 train_time:91168ms step_avg:94.87ms +step:962/1705 train_time:91263ms step_avg:94.87ms +step:963/1705 train_time:91358ms step_avg:94.87ms +step:964/1705 train_time:91453ms step_avg:94.87ms +step:965/1705 train_time:91548ms step_avg:94.87ms +step:966/1705 train_time:91643ms step_avg:94.87ms +step:967/1705 train_time:91737ms step_avg:94.87ms +step:968/1705 train_time:91831ms step_avg:94.87ms +step:969/1705 train_time:91926ms step_avg:94.87ms +step:970/1705 train_time:92020ms step_avg:94.87ms +step:971/1705 train_time:92114ms step_avg:94.87ms +step:972/1705 train_time:92209ms step_avg:94.86ms +step:973/1705 train_time:92303ms step_avg:94.86ms +step:974/1705 train_time:92399ms 
step_avg:94.87ms +step:975/1705 train_time:92493ms step_avg:94.86ms +step:976/1705 train_time:92587ms step_avg:94.86ms +step:977/1705 train_time:92682ms step_avg:94.86ms +step:978/1705 train_time:92776ms step_avg:94.86ms +step:979/1705 train_time:92871ms step_avg:94.86ms +step:980/1705 train_time:92965ms step_avg:94.86ms +step:981/1705 train_time:93060ms step_avg:94.86ms +step:982/1705 train_time:93154ms step_avg:94.86ms +step:983/1705 train_time:93248ms step_avg:94.86ms +step:984/1705 train_time:93343ms step_avg:94.86ms +step:985/1705 train_time:93437ms step_avg:94.86ms +step:986/1705 train_time:93532ms step_avg:94.86ms +step:987/1705 train_time:93626ms step_avg:94.86ms +step:988/1705 train_time:93720ms step_avg:94.86ms +step:989/1705 train_time:93814ms step_avg:94.86ms +step:990/1705 train_time:93909ms step_avg:94.86ms +step:991/1705 train_time:94003ms step_avg:94.86ms +step:992/1705 train_time:94098ms step_avg:94.86ms +step:993/1705 train_time:94192ms step_avg:94.86ms +step:994/1705 train_time:94287ms step_avg:94.86ms +step:995/1705 train_time:94381ms step_avg:94.86ms +step:996/1705 train_time:94475ms step_avg:94.85ms +step:997/1705 train_time:94570ms step_avg:94.85ms +step:998/1705 train_time:94665ms step_avg:94.85ms +step:999/1705 train_time:94759ms step_avg:94.85ms +step:1000/1705 train_time:94853ms step_avg:94.85ms +step:1000/1705 val_loss:3.4830 train_time:94948ms step_avg:94.95ms +step:1001/1705 train_time:94970ms step_avg:94.87ms +step:1002/1705 train_time:95049ms step_avg:94.86ms +step:1003/1705 train_time:95148ms step_avg:94.86ms +step:1004/1705 train_time:95244ms step_avg:94.86ms +step:1005/1705 train_time:95337ms step_avg:94.86ms +step:1006/1705 train_time:95431ms step_avg:94.86ms +step:1007/1705 train_time:95524ms step_avg:94.86ms +step:1008/1705 train_time:95617ms step_avg:94.86ms +step:1009/1705 train_time:95711ms step_avg:94.86ms +step:1010/1705 train_time:95804ms step_avg:94.85ms +step:1011/1705 train_time:95898ms step_avg:94.85ms +step:1012/1705 train_time:95993ms step_avg:94.86ms +step:1013/1705 train_time:96091ms step_avg:94.86ms +step:1014/1705 train_time:96187ms step_avg:94.86ms +step:1015/1705 train_time:96282ms step_avg:94.86ms +step:1016/1705 train_time:96376ms step_avg:94.86ms +step:1017/1705 train_time:96469ms step_avg:94.86ms +step:1018/1705 train_time:96563ms step_avg:94.86ms +step:1019/1705 train_time:96657ms step_avg:94.85ms +step:1020/1705 train_time:96750ms step_avg:94.85ms +step:1021/1705 train_time:96844ms step_avg:94.85ms +step:1022/1705 train_time:96938ms step_avg:94.85ms +step:1023/1705 train_time:97034ms step_avg:94.85ms +step:1024/1705 train_time:97130ms step_avg:94.85ms +step:1025/1705 train_time:97225ms step_avg:94.85ms +step:1026/1705 train_time:97320ms step_avg:94.85ms +step:1027/1705 train_time:97415ms step_avg:94.85ms +step:1028/1705 train_time:97508ms step_avg:94.85ms +step:1029/1705 train_time:97603ms step_avg:94.85ms +step:1030/1705 train_time:97696ms step_avg:94.85ms +step:1031/1705 train_time:97790ms step_avg:94.85ms +step:1032/1705 train_time:97885ms step_avg:94.85ms +step:1033/1705 train_time:97980ms step_avg:94.85ms +step:1034/1705 train_time:98075ms step_avg:94.85ms +step:1035/1705 train_time:98170ms step_avg:94.85ms +step:1036/1705 train_time:98265ms step_avg:94.85ms +step:1037/1705 train_time:98361ms step_avg:94.85ms +step:1038/1705 train_time:98454ms step_avg:94.85ms +step:1039/1705 train_time:98548ms step_avg:94.85ms +step:1040/1705 train_time:98643ms step_avg:94.85ms +step:1041/1705 train_time:98738ms step_avg:94.85ms 
+step:1042/1705 train_time:98831ms step_avg:94.85ms +step:1043/1705 train_time:98926ms step_avg:94.85ms +step:1044/1705 train_time:99022ms step_avg:94.85ms +step:1045/1705 train_time:99116ms step_avg:94.85ms +step:1046/1705 train_time:99212ms step_avg:94.85ms +step:1047/1705 train_time:99307ms step_avg:94.85ms +step:1048/1705 train_time:99402ms step_avg:94.85ms +step:1049/1705 train_time:99496ms step_avg:94.85ms +step:1050/1705 train_time:99590ms step_avg:94.85ms +step:1051/1705 train_time:99685ms step_avg:94.85ms +step:1052/1705 train_time:99780ms step_avg:94.85ms +step:1053/1705 train_time:99875ms step_avg:94.85ms +step:1054/1705 train_time:99969ms step_avg:94.85ms +step:1055/1705 train_time:100064ms step_avg:94.85ms +step:1056/1705 train_time:100159ms step_avg:94.85ms +step:1057/1705 train_time:100253ms step_avg:94.85ms +step:1058/1705 train_time:100349ms step_avg:94.85ms +step:1059/1705 train_time:100444ms step_avg:94.85ms +step:1060/1705 train_time:100538ms step_avg:94.85ms +step:1061/1705 train_time:100632ms step_avg:94.85ms +step:1062/1705 train_time:100975ms step_avg:95.08ms +step:1063/1705 train_time:101067ms step_avg:95.08ms +step:1064/1705 train_time:101161ms step_avg:95.08ms +step:1065/1705 train_time:101254ms step_avg:95.07ms +step:1066/1705 train_time:101348ms step_avg:95.07ms +step:1067/1705 train_time:101441ms step_avg:95.07ms +step:1068/1705 train_time:101534ms step_avg:95.07ms +step:1069/1705 train_time:101628ms step_avg:95.07ms +step:1070/1705 train_time:101721ms step_avg:95.07ms +step:1071/1705 train_time:101814ms step_avg:95.06ms +step:1072/1705 train_time:101911ms step_avg:95.07ms +step:1073/1705 train_time:102010ms step_avg:95.07ms +step:1074/1705 train_time:102109ms step_avg:95.07ms +step:1075/1705 train_time:102205ms step_avg:95.07ms +step:1076/1705 train_time:102299ms step_avg:95.07ms +step:1077/1705 train_time:102392ms step_avg:95.07ms +step:1078/1705 train_time:102486ms step_avg:95.07ms +step:1079/1705 train_time:102579ms step_avg:95.07ms +step:1080/1705 train_time:102673ms step_avg:95.07ms +step:1081/1705 train_time:102766ms step_avg:95.07ms +step:1082/1705 train_time:102862ms step_avg:95.07ms +step:1083/1705 train_time:102957ms step_avg:95.07ms +step:1084/1705 train_time:103053ms step_avg:95.07ms +step:1085/1705 train_time:103149ms step_avg:95.07ms +step:1086/1705 train_time:103245ms step_avg:95.07ms +step:1087/1705 train_time:103339ms step_avg:95.07ms +step:1088/1705 train_time:103432ms step_avg:95.07ms +step:1089/1705 train_time:103526ms step_avg:95.07ms +step:1090/1705 train_time:103620ms step_avg:95.06ms +step:1091/1705 train_time:103713ms step_avg:95.06ms +step:1092/1705 train_time:103808ms step_avg:95.06ms +step:1093/1705 train_time:103903ms step_avg:95.06ms +step:1094/1705 train_time:103998ms step_avg:95.06ms +step:1095/1705 train_time:104093ms step_avg:95.06ms +step:1096/1705 train_time:104187ms step_avg:95.06ms +step:1097/1705 train_time:104283ms step_avg:95.06ms +step:1098/1705 train_time:104378ms step_avg:95.06ms +step:1099/1705 train_time:104472ms step_avg:95.06ms +step:1100/1705 train_time:104565ms step_avg:95.06ms +step:1101/1705 train_time:104659ms step_avg:95.06ms +step:1102/1705 train_time:104753ms step_avg:95.06ms +step:1103/1705 train_time:104848ms step_avg:95.06ms +step:1104/1705 train_time:104942ms step_avg:95.06ms +step:1105/1705 train_time:105037ms step_avg:95.06ms +step:1106/1705 train_time:105132ms step_avg:95.06ms +step:1107/1705 train_time:105227ms step_avg:95.06ms +step:1108/1705 train_time:105322ms step_avg:95.06ms +step:1109/1705 
train_time:105416ms step_avg:95.06ms +step:1110/1705 train_time:105511ms step_avg:95.05ms +step:1111/1705 train_time:105605ms step_avg:95.05ms +step:1112/1705 train_time:105698ms step_avg:95.05ms +step:1113/1705 train_time:105792ms step_avg:95.05ms +step:1114/1705 train_time:105887ms step_avg:95.05ms +step:1115/1705 train_time:105982ms step_avg:95.05ms +step:1116/1705 train_time:106076ms step_avg:95.05ms +step:1117/1705 train_time:106171ms step_avg:95.05ms +step:1118/1705 train_time:106267ms step_avg:95.05ms +step:1119/1705 train_time:106361ms step_avg:95.05ms +step:1120/1705 train_time:106455ms step_avg:95.05ms +step:1121/1705 train_time:106548ms step_avg:95.05ms +step:1122/1705 train_time:106643ms step_avg:95.05ms +step:1123/1705 train_time:106737ms step_avg:95.05ms +step:1124/1705 train_time:106831ms step_avg:95.05ms +step:1125/1705 train_time:106925ms step_avg:95.04ms +step:1125/1705 val_loss:3.4374 train_time:107020ms step_avg:95.13ms +step:1126/1705 train_time:107043ms step_avg:95.06ms +step:1127/1705 train_time:107121ms step_avg:95.05ms +step:1128/1705 train_time:107217ms step_avg:95.05ms +step:1129/1705 train_time:107312ms step_avg:95.05ms +step:1130/1705 train_time:107405ms step_avg:95.05ms +step:1131/1705 train_time:107498ms step_avg:95.05ms +step:1132/1705 train_time:107592ms step_avg:95.05ms +step:1133/1705 train_time:107686ms step_avg:95.05ms +step:1134/1705 train_time:107780ms step_avg:95.04ms +step:1135/1705 train_time:107873ms step_avg:95.04ms +step:1136/1705 train_time:107968ms step_avg:95.04ms +step:1137/1705 train_time:108065ms step_avg:95.04ms +step:1138/1705 train_time:108161ms step_avg:95.05ms +step:1139/1705 train_time:108257ms step_avg:95.05ms +step:1140/1705 train_time:108352ms step_avg:95.05ms +step:1141/1705 train_time:108447ms step_avg:95.05ms +step:1142/1705 train_time:108541ms step_avg:95.04ms +step:1143/1705 train_time:108634ms step_avg:95.04ms +step:1144/1705 train_time:108730ms step_avg:95.04ms +step:1145/1705 train_time:108824ms step_avg:95.04ms +step:1146/1705 train_time:108919ms step_avg:95.04ms +step:1147/1705 train_time:109015ms step_avg:95.04ms +step:1148/1705 train_time:109111ms step_avg:95.04ms +step:1149/1705 train_time:109209ms step_avg:95.05ms +step:1150/1705 train_time:109305ms step_avg:95.05ms +step:1151/1705 train_time:109399ms step_avg:95.05ms +step:1152/1705 train_time:109494ms step_avg:95.05ms +step:1153/1705 train_time:109589ms step_avg:95.05ms +step:1154/1705 train_time:109683ms step_avg:95.05ms +step:1155/1705 train_time:109778ms step_avg:95.05ms +step:1156/1705 train_time:109872ms step_avg:95.04ms +step:1157/1705 train_time:109967ms step_avg:95.05ms +step:1158/1705 train_time:110064ms step_avg:95.05ms +step:1159/1705 train_time:110160ms step_avg:95.05ms +step:1160/1705 train_time:110255ms step_avg:95.05ms +step:1161/1705 train_time:110351ms step_avg:95.05ms +step:1162/1705 train_time:110446ms step_avg:95.05ms +step:1163/1705 train_time:110541ms step_avg:95.05ms +step:1164/1705 train_time:110635ms step_avg:95.05ms +step:1165/1705 train_time:110730ms step_avg:95.05ms +step:1166/1705 train_time:110826ms step_avg:95.05ms +step:1167/1705 train_time:110921ms step_avg:95.05ms +step:1168/1705 train_time:111016ms step_avg:95.05ms +step:1169/1705 train_time:111112ms step_avg:95.05ms +step:1170/1705 train_time:111208ms step_avg:95.05ms +step:1171/1705 train_time:111305ms step_avg:95.05ms +step:1172/1705 train_time:111400ms step_avg:95.05ms +step:1173/1705 train_time:111495ms step_avg:95.05ms +step:1174/1705 train_time:111589ms step_avg:95.05ms 
+step:1175/1705 train_time:111685ms step_avg:95.05ms +step:1176/1705 train_time:111781ms step_avg:95.05ms +step:1177/1705 train_time:111875ms step_avg:95.05ms +step:1178/1705 train_time:111971ms step_avg:95.05ms +step:1179/1705 train_time:112065ms step_avg:95.05ms +step:1180/1705 train_time:112162ms step_avg:95.05ms +step:1181/1705 train_time:112258ms step_avg:95.05ms +step:1182/1705 train_time:112353ms step_avg:95.05ms +step:1183/1705 train_time:112449ms step_avg:95.05ms +step:1184/1705 train_time:112544ms step_avg:95.05ms +step:1185/1705 train_time:112638ms step_avg:95.05ms +step:1186/1705 train_time:112733ms step_avg:95.05ms +step:1187/1705 train_time:112828ms step_avg:95.05ms +step:1188/1705 train_time:112923ms step_avg:95.05ms +step:1189/1705 train_time:113018ms step_avg:95.05ms +step:1190/1705 train_time:113114ms step_avg:95.05ms +step:1191/1705 train_time:113210ms step_avg:95.05ms +step:1192/1705 train_time:113306ms step_avg:95.06ms +step:1193/1705 train_time:113401ms step_avg:95.06ms +step:1194/1705 train_time:113495ms step_avg:95.05ms +step:1195/1705 train_time:113591ms step_avg:95.05ms +step:1196/1705 train_time:113686ms step_avg:95.06ms +step:1197/1705 train_time:113781ms step_avg:95.05ms +step:1198/1705 train_time:113876ms step_avg:95.05ms +step:1199/1705 train_time:113970ms step_avg:95.05ms +step:1200/1705 train_time:114065ms step_avg:95.05ms +step:1201/1705 train_time:114161ms step_avg:95.06ms +step:1202/1705 train_time:114256ms step_avg:95.06ms +step:1203/1705 train_time:114352ms step_avg:95.06ms +step:1204/1705 train_time:114448ms step_avg:95.06ms +step:1205/1705 train_time:114544ms step_avg:95.06ms +step:1206/1705 train_time:114639ms step_avg:95.06ms +step:1207/1705 train_time:114734ms step_avg:95.06ms +step:1208/1705 train_time:114830ms step_avg:95.06ms +step:1209/1705 train_time:114926ms step_avg:95.06ms +step:1210/1705 train_time:115021ms step_avg:95.06ms +step:1211/1705 train_time:115116ms step_avg:95.06ms +step:1212/1705 train_time:115212ms step_avg:95.06ms +step:1213/1705 train_time:115307ms step_avg:95.06ms +step:1214/1705 train_time:115402ms step_avg:95.06ms +step:1215/1705 train_time:115499ms step_avg:95.06ms +step:1216/1705 train_time:115594ms step_avg:95.06ms +step:1217/1705 train_time:115689ms step_avg:95.06ms +step:1218/1705 train_time:115785ms step_avg:95.06ms +step:1219/1705 train_time:115880ms step_avg:95.06ms +step:1220/1705 train_time:115975ms step_avg:95.06ms +step:1221/1705 train_time:116070ms step_avg:95.06ms +step:1222/1705 train_time:116166ms step_avg:95.06ms +step:1223/1705 train_time:116260ms step_avg:95.06ms +step:1224/1705 train_time:116354ms step_avg:95.06ms +step:1225/1705 train_time:116451ms step_avg:95.06ms +step:1226/1705 train_time:116547ms step_avg:95.06ms +step:1227/1705 train_time:116643ms step_avg:95.06ms +step:1228/1705 train_time:116737ms step_avg:95.06ms +step:1229/1705 train_time:116833ms step_avg:95.06ms +step:1230/1705 train_time:116929ms step_avg:95.06ms +step:1231/1705 train_time:117024ms step_avg:95.06ms +step:1232/1705 train_time:117120ms step_avg:95.07ms +step:1233/1705 train_time:117215ms step_avg:95.06ms +step:1234/1705 train_time:117310ms step_avg:95.06ms +step:1235/1705 train_time:117405ms step_avg:95.06ms +step:1236/1705 train_time:117500ms step_avg:95.07ms +step:1237/1705 train_time:117595ms step_avg:95.06ms +step:1238/1705 train_time:117691ms step_avg:95.07ms +step:1239/1705 train_time:117787ms step_avg:95.07ms +step:1240/1705 train_time:117884ms step_avg:95.07ms +step:1241/1705 train_time:117978ms step_avg:95.07ms 
step_avg:95.07ms +step:1242/1705 train_time:118074ms step_avg:95.07ms +step:1243/1705 train_time:118169ms step_avg:95.07ms +step:1244/1705 train_time:118263ms step_avg:95.07ms +step:1245/1705 train_time:118358ms step_avg:95.07ms +step:1246/1705 train_time:118454ms step_avg:95.07ms +step:1247/1705 train_time:118549ms step_avg:95.07ms +step:1248/1705 train_time:118645ms step_avg:95.07ms +step:1249/1705 train_time:118741ms step_avg:95.07ms +step:1250/1705 train_time:118837ms step_avg:95.07ms +step:1250/1705 val_loss:3.3884 train_time:118932ms step_avg:95.15ms +step:1251/1705 train_time:118955ms step_avg:95.09ms +step:1252/1705 train_time:119040ms step_avg:95.08ms +step:1253/1705 train_time:119138ms step_avg:95.08ms +step:1254/1705 train_time:119233ms step_avg:95.08ms +step:1255/1705 train_time:119327ms step_avg:95.08ms +step:1256/1705 train_time:119421ms step_avg:95.08ms +step:1257/1705 train_time:119514ms step_avg:95.08ms +step:1258/1705 train_time:119609ms step_avg:95.08ms +step:1259/1705 train_time:119703ms step_avg:95.08ms +step:1260/1705 train_time:119796ms step_avg:95.08ms +step:1261/1705 train_time:119895ms step_avg:95.08ms +step:1262/1705 train_time:119994ms step_avg:95.08ms +step:1263/1705 train_time:120092ms step_avg:95.08ms +step:1264/1705 train_time:120189ms step_avg:95.09ms +step:1265/1705 train_time:120284ms step_avg:95.09ms +step:1266/1705 train_time:120378ms step_avg:95.08ms +step:1267/1705 train_time:120472ms step_avg:95.08ms +step:1268/1705 train_time:120567ms step_avg:95.08ms +step:1269/1705 train_time:120660ms step_avg:95.08ms +step:1270/1705 train_time:120754ms step_avg:95.08ms +step:1271/1705 train_time:120850ms step_avg:95.08ms +step:1272/1705 train_time:120949ms step_avg:95.09ms +step:1273/1705 train_time:121045ms step_avg:95.09ms +step:1274/1705 train_time:121439ms step_avg:95.32ms +step:1275/1705 train_time:121521ms step_avg:95.31ms +step:1276/1705 train_time:121614ms step_avg:95.31ms +step:1277/1705 train_time:121708ms step_avg:95.31ms +step:1278/1705 train_time:121802ms step_avg:95.31ms +step:1279/1705 train_time:121895ms step_avg:95.31ms +step:1280/1705 train_time:121990ms step_avg:95.30ms +step:1281/1705 train_time:122083ms step_avg:95.30ms +step:1282/1705 train_time:122177ms step_avg:95.30ms +step:1283/1705 train_time:122271ms step_avg:95.30ms +step:1284/1705 train_time:122369ms step_avg:95.30ms +step:1285/1705 train_time:122469ms step_avg:95.31ms +step:1286/1705 train_time:122565ms step_avg:95.31ms +step:1287/1705 train_time:122660ms step_avg:95.31ms +step:1288/1705 train_time:122755ms step_avg:95.31ms +step:1289/1705 train_time:122849ms step_avg:95.31ms +step:1290/1705 train_time:122944ms step_avg:95.31ms +step:1291/1705 train_time:123038ms step_avg:95.30ms +step:1292/1705 train_time:123132ms step_avg:95.30ms +step:1293/1705 train_time:123227ms step_avg:95.30ms +step:1294/1705 train_time:123323ms step_avg:95.30ms +step:1295/1705 train_time:123420ms step_avg:95.30ms +step:1296/1705 train_time:123516ms step_avg:95.31ms +step:1297/1705 train_time:123614ms step_avg:95.31ms +step:1298/1705 train_time:123710ms step_avg:95.31ms +step:1299/1705 train_time:123805ms step_avg:95.31ms +step:1300/1705 train_time:123899ms step_avg:95.31ms +step:1301/1705 train_time:123995ms step_avg:95.31ms +step:1302/1705 train_time:124089ms step_avg:95.31ms +step:1303/1705 train_time:124184ms step_avg:95.31ms +step:1304/1705 train_time:124279ms step_avg:95.31ms +step:1305/1705 train_time:124375ms step_avg:95.31ms +step:1306/1705 train_time:124471ms step_avg:95.31ms +step:1307/1705 train_time:124569ms
step_avg:95.31ms +step:1308/1705 train_time:124665ms step_avg:95.31ms +step:1309/1705 train_time:124759ms step_avg:95.31ms +step:1310/1705 train_time:124854ms step_avg:95.31ms +step:1311/1705 train_time:124950ms step_avg:95.31ms +step:1312/1705 train_time:125044ms step_avg:95.31ms +step:1313/1705 train_time:125138ms step_avg:95.31ms +step:1314/1705 train_time:125233ms step_avg:95.31ms +step:1315/1705 train_time:125328ms step_avg:95.31ms +step:1316/1705 train_time:125423ms step_avg:95.31ms +step:1317/1705 train_time:125518ms step_avg:95.31ms +step:1318/1705 train_time:125616ms step_avg:95.31ms +step:1319/1705 train_time:125711ms step_avg:95.31ms +step:1320/1705 train_time:125806ms step_avg:95.31ms +step:1321/1705 train_time:125901ms step_avg:95.31ms +step:1322/1705 train_time:125996ms step_avg:95.31ms +step:1323/1705 train_time:126091ms step_avg:95.31ms +step:1324/1705 train_time:126186ms step_avg:95.31ms +step:1325/1705 train_time:126281ms step_avg:95.31ms +step:1326/1705 train_time:126376ms step_avg:95.31ms +step:1327/1705 train_time:126471ms step_avg:95.31ms +step:1328/1705 train_time:126568ms step_avg:95.31ms +step:1329/1705 train_time:126664ms step_avg:95.31ms +step:1330/1705 train_time:126758ms step_avg:95.31ms +step:1331/1705 train_time:126854ms step_avg:95.31ms +step:1332/1705 train_time:126949ms step_avg:95.31ms +step:1333/1705 train_time:127044ms step_avg:95.31ms +step:1334/1705 train_time:127138ms step_avg:95.31ms +step:1335/1705 train_time:127233ms step_avg:95.31ms +step:1336/1705 train_time:127328ms step_avg:95.31ms +step:1337/1705 train_time:127423ms step_avg:95.31ms +step:1338/1705 train_time:127518ms step_avg:95.31ms +step:1339/1705 train_time:127614ms step_avg:95.31ms +step:1340/1705 train_time:127709ms step_avg:95.31ms +step:1341/1705 train_time:127805ms step_avg:95.31ms +step:1342/1705 train_time:127899ms step_avg:95.30ms +step:1343/1705 train_time:127994ms step_avg:95.30ms +step:1344/1705 train_time:128089ms step_avg:95.30ms +step:1345/1705 train_time:128184ms step_avg:95.30ms +step:1346/1705 train_time:128279ms step_avg:95.30ms +step:1347/1705 train_time:128375ms step_avg:95.30ms +step:1348/1705 train_time:128470ms step_avg:95.30ms +step:1349/1705 train_time:128565ms step_avg:95.30ms +step:1350/1705 train_time:128661ms step_avg:95.30ms +step:1351/1705 train_time:128756ms step_avg:95.30ms +step:1352/1705 train_time:128852ms step_avg:95.30ms +step:1353/1705 train_time:128948ms step_avg:95.31ms +step:1354/1705 train_time:129044ms step_avg:95.31ms +step:1355/1705 train_time:129138ms step_avg:95.30ms +step:1356/1705 train_time:129233ms step_avg:95.30ms +step:1357/1705 train_time:129329ms step_avg:95.30ms +step:1358/1705 train_time:129424ms step_avg:95.31ms +step:1359/1705 train_time:129519ms step_avg:95.30ms +step:1360/1705 train_time:129614ms step_avg:95.30ms +step:1361/1705 train_time:129711ms step_avg:95.31ms +step:1362/1705 train_time:129805ms step_avg:95.30ms +step:1363/1705 train_time:129900ms step_avg:95.30ms +step:1364/1705 train_time:129995ms step_avg:95.30ms +step:1365/1705 train_time:130091ms step_avg:95.30ms +step:1366/1705 train_time:130186ms step_avg:95.30ms +step:1367/1705 train_time:130280ms step_avg:95.30ms +step:1368/1705 train_time:130376ms step_avg:95.30ms +step:1369/1705 train_time:130471ms step_avg:95.30ms +step:1370/1705 train_time:130566ms step_avg:95.30ms +step:1371/1705 train_time:130662ms step_avg:95.30ms +step:1372/1705 train_time:130756ms step_avg:95.30ms +step:1373/1705 train_time:130852ms step_avg:95.30ms +step:1374/1705 train_time:130947ms 
step_avg:95.30ms +step:1375/1705 train_time:131042ms step_avg:95.30ms +step:1375/1705 val_loss:3.3506 train_time:131137ms step_avg:95.37ms +step:1376/1705 train_time:131160ms step_avg:95.32ms +step:1377/1705 train_time:131236ms step_avg:95.31ms +step:1378/1705 train_time:131335ms step_avg:95.31ms +step:1379/1705 train_time:131429ms step_avg:95.31ms +step:1380/1705 train_time:131525ms step_avg:95.31ms +step:1381/1705 train_time:131620ms step_avg:95.31ms +step:1382/1705 train_time:131714ms step_avg:95.31ms +step:1383/1705 train_time:131808ms step_avg:95.31ms +step:1384/1705 train_time:131903ms step_avg:95.31ms +step:1385/1705 train_time:131998ms step_avg:95.31ms +step:1386/1705 train_time:132092ms step_avg:95.30ms +step:1387/1705 train_time:132190ms step_avg:95.31ms +step:1388/1705 train_time:132287ms step_avg:95.31ms +step:1389/1705 train_time:132384ms step_avg:95.31ms +step:1390/1705 train_time:132480ms step_avg:95.31ms +step:1391/1705 train_time:132574ms step_avg:95.31ms +step:1392/1705 train_time:132668ms step_avg:95.31ms +step:1393/1705 train_time:132764ms step_avg:95.31ms +step:1394/1705 train_time:132858ms step_avg:95.31ms +step:1395/1705 train_time:132952ms step_avg:95.31ms +step:1396/1705 train_time:133047ms step_avg:95.31ms +step:1397/1705 train_time:133143ms step_avg:95.31ms +step:1398/1705 train_time:133239ms step_avg:95.31ms +step:1399/1705 train_time:133334ms step_avg:95.31ms +step:1400/1705 train_time:133429ms step_avg:95.31ms +step:1401/1705 train_time:133524ms step_avg:95.31ms +step:1402/1705 train_time:133620ms step_avg:95.31ms +step:1403/1705 train_time:133715ms step_avg:95.31ms +step:1404/1705 train_time:133809ms step_avg:95.31ms +step:1405/1705 train_time:133904ms step_avg:95.31ms +step:1406/1705 train_time:133998ms step_avg:95.30ms +step:1407/1705 train_time:134094ms step_avg:95.30ms +step:1408/1705 train_time:134190ms step_avg:95.31ms +step:1409/1705 train_time:134286ms step_avg:95.31ms +step:1410/1705 train_time:134383ms step_avg:95.31ms +step:1411/1705 train_time:134477ms step_avg:95.31ms +step:1412/1705 train_time:134572ms step_avg:95.31ms +step:1413/1705 train_time:134667ms step_avg:95.31ms +step:1414/1705 train_time:134763ms step_avg:95.31ms +step:1415/1705 train_time:134858ms step_avg:95.31ms +step:1416/1705 train_time:134953ms step_avg:95.31ms +step:1417/1705 train_time:135047ms step_avg:95.31ms +step:1418/1705 train_time:135143ms step_avg:95.31ms +step:1419/1705 train_time:135238ms step_avg:95.31ms +step:1420/1705 train_time:135335ms step_avg:95.31ms +step:1421/1705 train_time:135430ms step_avg:95.31ms +step:1422/1705 train_time:135526ms step_avg:95.31ms +step:1423/1705 train_time:135621ms step_avg:95.31ms +step:1424/1705 train_time:135715ms step_avg:95.31ms +step:1425/1705 train_time:135810ms step_avg:95.31ms +step:1426/1705 train_time:135905ms step_avg:95.31ms +step:1427/1705 train_time:136000ms step_avg:95.30ms +step:1428/1705 train_time:136095ms step_avg:95.30ms +step:1429/1705 train_time:136190ms step_avg:95.30ms +step:1430/1705 train_time:136285ms step_avg:95.30ms +step:1431/1705 train_time:136381ms step_avg:95.30ms +step:1432/1705 train_time:136476ms step_avg:95.30ms +step:1433/1705 train_time:136571ms step_avg:95.30ms +step:1434/1705 train_time:136666ms step_avg:95.30ms +step:1435/1705 train_time:136762ms step_avg:95.30ms +step:1436/1705 train_time:136856ms step_avg:95.30ms +step:1437/1705 train_time:136951ms step_avg:95.30ms +step:1438/1705 train_time:137046ms step_avg:95.30ms +step:1439/1705 train_time:137141ms step_avg:95.30ms +step:1440/1705 
train_time:137237ms step_avg:95.30ms +step:1441/1705 train_time:137333ms step_avg:95.30ms +step:1442/1705 train_time:137428ms step_avg:95.30ms +step:1443/1705 train_time:137523ms step_avg:95.30ms +step:1444/1705 train_time:137619ms step_avg:95.30ms +step:1445/1705 train_time:137714ms step_avg:95.30ms +step:1446/1705 train_time:137809ms step_avg:95.30ms +step:1447/1705 train_time:137905ms step_avg:95.30ms +step:1448/1705 train_time:138001ms step_avg:95.30ms +step:1449/1705 train_time:138097ms step_avg:95.31ms +step:1450/1705 train_time:138192ms step_avg:95.30ms +step:1451/1705 train_time:138287ms step_avg:95.30ms +step:1452/1705 train_time:138382ms step_avg:95.30ms +step:1453/1705 train_time:138477ms step_avg:95.30ms +step:1454/1705 train_time:138572ms step_avg:95.30ms +step:1455/1705 train_time:138667ms step_avg:95.30ms +step:1456/1705 train_time:138762ms step_avg:95.30ms +step:1457/1705 train_time:138857ms step_avg:95.30ms +step:1458/1705 train_time:138951ms step_avg:95.30ms +step:1459/1705 train_time:139047ms step_avg:95.30ms +step:1460/1705 train_time:139143ms step_avg:95.30ms +step:1461/1705 train_time:139239ms step_avg:95.30ms +step:1462/1705 train_time:139333ms step_avg:95.30ms +step:1463/1705 train_time:139428ms step_avg:95.30ms +step:1464/1705 train_time:139523ms step_avg:95.30ms +step:1465/1705 train_time:139618ms step_avg:95.30ms +step:1466/1705 train_time:139715ms step_avg:95.30ms +step:1467/1705 train_time:139810ms step_avg:95.30ms +step:1468/1705 train_time:139905ms step_avg:95.30ms +step:1469/1705 train_time:140001ms step_avg:95.30ms +step:1470/1705 train_time:140095ms step_avg:95.30ms +step:1471/1705 train_time:140191ms step_avg:95.30ms +step:1472/1705 train_time:140285ms step_avg:95.30ms +step:1473/1705 train_time:140382ms step_avg:95.30ms +step:1474/1705 train_time:140477ms step_avg:95.30ms +step:1475/1705 train_time:140572ms step_avg:95.30ms +step:1476/1705 train_time:140667ms step_avg:95.30ms +step:1477/1705 train_time:140763ms step_avg:95.30ms +step:1478/1705 train_time:140858ms step_avg:95.30ms +step:1479/1705 train_time:140952ms step_avg:95.30ms +step:1480/1705 train_time:141048ms step_avg:95.30ms +step:1481/1705 train_time:141144ms step_avg:95.30ms +step:1482/1705 train_time:141241ms step_avg:95.30ms +step:1483/1705 train_time:141336ms step_avg:95.30ms +step:1484/1705 train_time:141430ms step_avg:95.30ms +step:1485/1705 train_time:141679ms step_avg:95.41ms +step:1486/1705 train_time:141890ms step_avg:95.48ms +step:1487/1705 train_time:141983ms step_avg:95.48ms +step:1488/1705 train_time:142077ms step_avg:95.48ms +step:1489/1705 train_time:142171ms step_avg:95.48ms +step:1490/1705 train_time:142265ms step_avg:95.48ms +step:1491/1705 train_time:142359ms step_avg:95.48ms +step:1492/1705 train_time:142453ms step_avg:95.48ms +step:1493/1705 train_time:142547ms step_avg:95.48ms +step:1494/1705 train_time:142642ms step_avg:95.48ms +step:1495/1705 train_time:142742ms step_avg:95.48ms +step:1496/1705 train_time:142842ms step_avg:95.48ms +step:1497/1705 train_time:142939ms step_avg:95.48ms +step:1498/1705 train_time:143034ms step_avg:95.48ms +step:1499/1705 train_time:143128ms step_avg:95.48ms +step:1500/1705 train_time:143222ms step_avg:95.48ms +step:1500/1705 val_loss:3.3182 train_time:143317ms step_avg:95.54ms +step:1501/1705 train_time:143339ms step_avg:95.50ms +step:1502/1705 train_time:143420ms step_avg:95.49ms +step:1503/1705 train_time:143520ms step_avg:95.49ms +step:1504/1705 train_time:143616ms step_avg:95.49ms +step:1505/1705 train_time:143709ms step_avg:95.49ms 
+step:1506/1705 train_time:143803ms step_avg:95.49ms +step:1507/1705 train_time:143896ms step_avg:95.49ms +step:1508/1705 train_time:143990ms step_avg:95.48ms +step:1509/1705 train_time:144084ms step_avg:95.48ms +step:1510/1705 train_time:144178ms step_avg:95.48ms +step:1511/1705 train_time:144274ms step_avg:95.48ms +step:1512/1705 train_time:144371ms step_avg:95.48ms +step:1513/1705 train_time:144469ms step_avg:95.48ms +step:1514/1705 train_time:144566ms step_avg:95.49ms +step:1515/1705 train_time:144662ms step_avg:95.49ms +step:1516/1705 train_time:144756ms step_avg:95.49ms +step:1517/1705 train_time:144850ms step_avg:95.48ms +step:1518/1705 train_time:144944ms step_avg:95.48ms +step:1519/1705 train_time:145039ms step_avg:95.48ms +step:1520/1705 train_time:145133ms step_avg:95.48ms +step:1521/1705 train_time:145228ms step_avg:95.48ms +step:1522/1705 train_time:145324ms step_avg:95.48ms +step:1523/1705 train_time:145421ms step_avg:95.48ms +step:1524/1705 train_time:145518ms step_avg:95.48ms +step:1525/1705 train_time:145614ms step_avg:95.48ms +step:1526/1705 train_time:145708ms step_avg:95.48ms +step:1527/1705 train_time:145803ms step_avg:95.48ms +step:1528/1705 train_time:145898ms step_avg:95.48ms +step:1529/1705 train_time:145992ms step_avg:95.48ms +step:1530/1705 train_time:146086ms step_avg:95.48ms +step:1531/1705 train_time:146181ms step_avg:95.48ms +step:1532/1705 train_time:146277ms step_avg:95.48ms +step:1533/1705 train_time:146372ms step_avg:95.48ms +step:1534/1705 train_time:146468ms step_avg:95.48ms +step:1535/1705 train_time:146564ms step_avg:95.48ms +step:1536/1705 train_time:146660ms step_avg:95.48ms +step:1537/1705 train_time:146757ms step_avg:95.48ms +step:1538/1705 train_time:146851ms step_avg:95.48ms +step:1539/1705 train_time:146945ms step_avg:95.48ms +step:1540/1705 train_time:147040ms step_avg:95.48ms +step:1541/1705 train_time:147135ms step_avg:95.48ms +step:1542/1705 train_time:147230ms step_avg:95.48ms +step:1543/1705 train_time:147325ms step_avg:95.48ms +step:1544/1705 train_time:147422ms step_avg:95.48ms +step:1545/1705 train_time:147518ms step_avg:95.48ms +step:1546/1705 train_time:147615ms step_avg:95.48ms +step:1547/1705 train_time:147711ms step_avg:95.48ms +step:1548/1705 train_time:147805ms step_avg:95.48ms +step:1549/1705 train_time:147901ms step_avg:95.48ms +step:1550/1705 train_time:147995ms step_avg:95.48ms +step:1551/1705 train_time:148090ms step_avg:95.48ms +step:1552/1705 train_time:148185ms step_avg:95.48ms +step:1553/1705 train_time:148281ms step_avg:95.48ms +step:1554/1705 train_time:148376ms step_avg:95.48ms +step:1555/1705 train_time:148471ms step_avg:95.48ms +step:1556/1705 train_time:148567ms step_avg:95.48ms +step:1557/1705 train_time:148664ms step_avg:95.48ms +step:1558/1705 train_time:148759ms step_avg:95.48ms +step:1559/1705 train_time:148855ms step_avg:95.48ms +step:1560/1705 train_time:148949ms step_avg:95.48ms +step:1561/1705 train_time:149045ms step_avg:95.48ms +step:1562/1705 train_time:149140ms step_avg:95.48ms +step:1563/1705 train_time:149235ms step_avg:95.48ms +step:1564/1705 train_time:149331ms step_avg:95.48ms +step:1565/1705 train_time:149425ms step_avg:95.48ms +step:1566/1705 train_time:149522ms step_avg:95.48ms +step:1567/1705 train_time:149618ms step_avg:95.48ms +step:1568/1705 train_time:149714ms step_avg:95.48ms +step:1569/1705 train_time:149809ms step_avg:95.48ms +step:1570/1705 train_time:149904ms step_avg:95.48ms +step:1571/1705 train_time:149998ms step_avg:95.48ms +step:1572/1705 train_time:150094ms step_avg:95.48ms 
+step:1573/1705 train_time:150188ms step_avg:95.48ms +step:1574/1705 train_time:150283ms step_avg:95.48ms +step:1575/1705 train_time:150378ms step_avg:95.48ms +step:1576/1705 train_time:150473ms step_avg:95.48ms +step:1577/1705 train_time:150568ms step_avg:95.48ms +step:1578/1705 train_time:150664ms step_avg:95.48ms +step:1579/1705 train_time:150761ms step_avg:95.48ms +step:1580/1705 train_time:150858ms step_avg:95.48ms +step:1581/1705 train_time:150954ms step_avg:95.48ms +step:1582/1705 train_time:151050ms step_avg:95.48ms +step:1583/1705 train_time:151144ms step_avg:95.48ms +step:1584/1705 train_time:151239ms step_avg:95.48ms +step:1585/1705 train_time:151334ms step_avg:95.48ms +step:1586/1705 train_time:151429ms step_avg:95.48ms +step:1587/1705 train_time:151524ms step_avg:95.48ms +step:1588/1705 train_time:151619ms step_avg:95.48ms +step:1589/1705 train_time:151714ms step_avg:95.48ms +step:1590/1705 train_time:151809ms step_avg:95.48ms +step:1591/1705 train_time:151905ms step_avg:95.48ms +step:1592/1705 train_time:152001ms step_avg:95.48ms +step:1593/1705 train_time:152097ms step_avg:95.48ms +step:1594/1705 train_time:152192ms step_avg:95.48ms +step:1595/1705 train_time:152286ms step_avg:95.48ms +step:1596/1705 train_time:152381ms step_avg:95.48ms +step:1597/1705 train_time:152477ms step_avg:95.48ms +step:1598/1705 train_time:152571ms step_avg:95.48ms +step:1599/1705 train_time:152667ms step_avg:95.48ms +step:1600/1705 train_time:152763ms step_avg:95.48ms +step:1601/1705 train_time:152859ms step_avg:95.48ms +step:1602/1705 train_time:152954ms step_avg:95.48ms +step:1603/1705 train_time:153048ms step_avg:95.48ms +step:1604/1705 train_time:153145ms step_avg:95.48ms +step:1605/1705 train_time:153241ms step_avg:95.48ms +step:1606/1705 train_time:153337ms step_avg:95.48ms +step:1607/1705 train_time:153434ms step_avg:95.48ms +step:1608/1705 train_time:153531ms step_avg:95.48ms +step:1609/1705 train_time:153626ms step_avg:95.48ms +step:1610/1705 train_time:153721ms step_avg:95.48ms +step:1611/1705 train_time:153817ms step_avg:95.48ms +step:1612/1705 train_time:153912ms step_avg:95.48ms +step:1613/1705 train_time:154008ms step_avg:95.48ms +step:1614/1705 train_time:154103ms step_avg:95.48ms +step:1615/1705 train_time:154197ms step_avg:95.48ms +step:1616/1705 train_time:154292ms step_avg:95.48ms +step:1617/1705 train_time:154387ms step_avg:95.48ms +step:1618/1705 train_time:154484ms step_avg:95.48ms +step:1619/1705 train_time:154580ms step_avg:95.48ms +step:1620/1705 train_time:154676ms step_avg:95.48ms +step:1621/1705 train_time:154771ms step_avg:95.48ms +step:1622/1705 train_time:154865ms step_avg:95.48ms +step:1623/1705 train_time:154961ms step_avg:95.48ms +step:1624/1705 train_time:155056ms step_avg:95.48ms +step:1625/1705 train_time:155151ms step_avg:95.48ms +step:1625/1705 val_loss:3.2906 train_time:155246ms step_avg:95.54ms +step:1626/1705 train_time:155269ms step_avg:95.49ms +step:1627/1705 train_time:155350ms step_avg:95.48ms +step:1628/1705 train_time:155447ms step_avg:95.48ms +step:1629/1705 train_time:155543ms step_avg:95.48ms +step:1630/1705 train_time:155637ms step_avg:95.48ms +step:1631/1705 train_time:155732ms step_avg:95.48ms +step:1632/1705 train_time:155827ms step_avg:95.48ms +step:1633/1705 train_time:155922ms step_avg:95.48ms +step:1634/1705 train_time:156016ms step_avg:95.48ms +step:1635/1705 train_time:156110ms step_avg:95.48ms +step:1636/1705 train_time:156205ms step_avg:95.48ms +step:1637/1705 train_time:156302ms step_avg:95.48ms +step:1638/1705 train_time:156399ms 
step_avg:95.48ms +step:1639/1705 train_time:156495ms step_avg:95.48ms +step:1640/1705 train_time:156591ms step_avg:95.48ms +step:1641/1705 train_time:156686ms step_avg:95.48ms +step:1642/1705 train_time:156782ms step_avg:95.48ms +step:1643/1705 train_time:156876ms step_avg:95.48ms +step:1644/1705 train_time:156970ms step_avg:95.48ms +step:1645/1705 train_time:157065ms step_avg:95.48ms +step:1646/1705 train_time:157160ms step_avg:95.48ms +step:1647/1705 train_time:157255ms step_avg:95.48ms +step:1648/1705 train_time:157352ms step_avg:95.48ms +step:1649/1705 train_time:157450ms step_avg:95.48ms +step:1650/1705 train_time:157546ms step_avg:95.48ms +step:1651/1705 train_time:157640ms step_avg:95.48ms +step:1652/1705 train_time:157735ms step_avg:95.48ms +step:1653/1705 train_time:157830ms step_avg:95.48ms +step:1654/1705 train_time:157925ms step_avg:95.48ms +step:1655/1705 train_time:158019ms step_avg:95.48ms +step:1656/1705 train_time:158114ms step_avg:95.48ms +step:1657/1705 train_time:158209ms step_avg:95.48ms +step:1658/1705 train_time:158306ms step_avg:95.48ms +step:1659/1705 train_time:158402ms step_avg:95.48ms +step:1660/1705 train_time:158497ms step_avg:95.48ms +step:1661/1705 train_time:158594ms step_avg:95.48ms +step:1662/1705 train_time:158690ms step_avg:95.48ms +step:1663/1705 train_time:158784ms step_avg:95.48ms +step:1664/1705 train_time:158879ms step_avg:95.48ms +step:1665/1705 train_time:158974ms step_avg:95.48ms +step:1666/1705 train_time:159069ms step_avg:95.48ms +step:1667/1705 train_time:159163ms step_avg:95.48ms +step:1668/1705 train_time:159259ms step_avg:95.48ms +step:1669/1705 train_time:159354ms step_avg:95.48ms +step:1670/1705 train_time:159450ms step_avg:95.48ms +step:1671/1705 train_time:159545ms step_avg:95.48ms +step:1672/1705 train_time:159642ms step_avg:95.48ms +step:1673/1705 train_time:159737ms step_avg:95.48ms +step:1674/1705 train_time:159832ms step_avg:95.48ms +step:1675/1705 train_time:159927ms step_avg:95.48ms +step:1676/1705 train_time:160022ms step_avg:95.48ms +step:1677/1705 train_time:160117ms step_avg:95.48ms +step:1678/1705 train_time:160212ms step_avg:95.48ms +step:1679/1705 train_time:160308ms step_avg:95.48ms +step:1680/1705 train_time:160403ms step_avg:95.48ms +step:1681/1705 train_time:160499ms step_avg:95.48ms +step:1682/1705 train_time:160595ms step_avg:95.48ms +step:1683/1705 train_time:160691ms step_avg:95.48ms +step:1684/1705 train_time:160786ms step_avg:95.48ms +step:1685/1705 train_time:160881ms step_avg:95.48ms +step:1686/1705 train_time:160976ms step_avg:95.48ms +step:1687/1705 train_time:161072ms step_avg:95.48ms +step:1688/1705 train_time:161167ms step_avg:95.48ms +step:1689/1705 train_time:161262ms step_avg:95.48ms +step:1690/1705 train_time:161357ms step_avg:95.48ms +step:1691/1705 train_time:161452ms step_avg:95.48ms +step:1692/1705 train_time:161548ms step_avg:95.48ms +step:1693/1705 train_time:161645ms step_avg:95.48ms +step:1694/1705 train_time:161739ms step_avg:95.48ms +step:1695/1705 train_time:161834ms step_avg:95.48ms +step:1696/1705 train_time:161930ms step_avg:95.48ms +step:1697/1705 train_time:162025ms step_avg:95.48ms +step:1698/1705 train_time:162305ms step_avg:95.59ms +step:1699/1705 train_time:162471ms step_avg:95.63ms +step:1700/1705 train_time:162565ms step_avg:95.63ms +step:1701/1705 train_time:162658ms step_avg:95.63ms +step:1702/1705 train_time:162753ms step_avg:95.62ms +step:1703/1705 train_time:162847ms step_avg:95.62ms +step:1704/1705 train_time:162941ms step_avg:95.62ms +step:1705/1705 train_time:163035ms 
step_avg:95.62ms +step:1705/1705 val_loss:3.2764 train_time:163129ms step_avg:95.68ms +peak memory allocated: 33992 MiB reserved: 49096 MiB diff --git a/records/090525_SkipMLPBlocks/3a3f4c61-475d-4fcb-a606-65aa3784d7af.txt b/records/090525_SkipMLPBlocks/3a3f4c61-475d-4fcb-a606-65aa3784d7af.txt new file mode 100644 index 000000000..fc4774250 --- /dev/null +++ b/records/090525_SkipMLPBlocks/3a3f4c61-475d-4fcb-a606-65aa3784d7af.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, 
grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + 
tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, 
mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
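+
+ Usage sketch (illustrative only; the actual optimizer wiring appears later in this
+ script): give Muon the hidden-layer matrices and keep the embeddings, the lm_head,
+ and the scalar bundle on an Adam-type optimizer, e.g.
+
+ hidden_matrix_params = [p for p in model.blocks.parameters() if p.ndim >= 2]
+ adam_params = [*model.embed.parameters(), *model.value_embeds.parameters(),
+ model.lm_head.weight, model.scalars]
+ optimizers = [Muon(hidden_matrix_params, lr=0.02, momentum=0.95),
+ DistAdam(adam_params, lr=1e-3)]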
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
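+ # Illustrative aside (not from the original run): next_multiple_of_n(50257, n=128)
+ # returns 50304 = 393 * 128, the smallest multiple of 128 >= 50257. The FP8 scales
+ # on the lm_head below are presumably tied to the float8 dynamic range, since 448
+ # is the largest magnitude representable in torch.float8_e4m3fn.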
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens after index {idx} of {n}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with the BOS token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted; load the next one on the next loop iteration.
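+                # The partially built batch is discarded: BOSFinder only advances its
+                # cursor when next_batch() returns, so we simply rebuild the batch from
+                # the start of the freshly loaded shard.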
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # trim the last end index: buf holds num_tokens_local + 1 tokens to provide the _targets shift
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # reloading should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # allows the generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * new_grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens // new_grad_accum_steps
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1705 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
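+
+# Illustrative sketch (hypothetical helper, never called in this run):
+# distributed_data_generator is a coroutine, so a batch-size schedule could push new
+# (num_tokens, max_seq_len, grad_accum_steps) settings into a live loader via .send();
+# the .send() call also returns the next batch, built under the new settings.
+def _example_resize_loader():
+    loader = distributed_data_generator(args.train_files, args.train_batch_size,
+                                        args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+    inputs, targets, cum_seqlens = next(loader) # batch under the initial settings
+    # double the global token budget, keeping max_seq_len and accumulation unchanged:
+    inputs, targets, cum_seqlens = loader.send(
+        (2 * args.train_batch_size, args.train_max_seq_len, grad_accum_steps))
+    return inputs, targets, cum_seqlens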
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
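+
+# A worked example of the two schedules defined above (illustrative only, never called):
+# with num_iterations=1705 and cooldown_frac=0.45, the multiplier from get_lr holds at
+# 1.0 through step 937 and then decays linearly toward 0.1, while get_ws steps the
+# attention window through (3, 7, 11) at roughly the 1/3 and 2/3 marks of training.
+def _schedule_example():
+    assert get_lr(0) == 1.0 and get_lr(937) == 1.0 # stable phase
+    assert abs(get_lr(1704) - 0.1) < 1e-2 # near-final multiplier
+    assert get_ws(0) == 3 and get_ws(853) == 7 and get_ws(1700) == 11
+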
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:10:06 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 130W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 126W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 42C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 82357 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 82358 C /usr/bin/python3 610MiB | +| 0 N/A N/A 82359 C /usr/bin/python3 610MiB | +| 0 N/A N/A 82360 C /usr/bin/python3 610MiB | +| 0 N/A N/A 82361 C /usr/bin/python3 610MiB | +| 0 N/A N/A 82362 C /usr/bin/python3 610MiB | +| 0 N/A N/A 82363 C /usr/bin/python3 610MiB | +| 0 N/A N/A 82364 C /usr/bin/python3 610MiB | +| 1 N/A N/A 82358 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 82359 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 82360 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 82361 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 82362 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 82363 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 82364 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1705 train_time:399ms step_avg:399.21ms +step:2/1705 train_time:419ms step_avg:209.29ms +step:3/1705 train_time:488ms step_avg:162.77ms +step:4/1705 train_time:579ms step_avg:144.74ms +step:5/1705 
train_time:671ms step_avg:134.13ms +step:6/1705 train_time:762ms step_avg:127.05ms +step:7/1705 train_time:854ms step_avg:122.05ms +step:8/1705 train_time:947ms step_avg:118.40ms +step:9/1705 train_time:1040ms step_avg:115.51ms +step:10/1705 train_time:1132ms step_avg:113.18ms +step:11/1705 train_time:1225ms step_avg:111.33ms +step:12/1705 train_time:1319ms step_avg:109.94ms +step:13/1705 train_time:1414ms step_avg:108.78ms +step:14/1705 train_time:1510ms step_avg:107.83ms +step:15/1705 train_time:1604ms step_avg:106.90ms +step:16/1705 train_time:1697ms step_avg:106.04ms +step:17/1705 train_time:1789ms step_avg:105.26ms +step:18/1705 train_time:1882ms step_avg:104.57ms +step:19/1705 train_time:1974ms step_avg:103.91ms +step:20/1705 train_time:2067ms step_avg:103.35ms +step:21/1705 train_time:2160ms step_avg:102.85ms +step:22/1705 train_time:2252ms step_avg:102.38ms +step:23/1705 train_time:2346ms step_avg:102.01ms +step:24/1705 train_time:2440ms step_avg:101.66ms +step:25/1705 train_time:2533ms step_avg:101.32ms +step:26/1705 train_time:2628ms step_avg:101.06ms +step:27/1705 train_time:2721ms step_avg:100.79ms +step:28/1705 train_time:2815ms step_avg:100.52ms +step:29/1705 train_time:2908ms step_avg:100.27ms +step:30/1705 train_time:3001ms step_avg:100.04ms +step:31/1705 train_time:3094ms step_avg:99.82ms +step:32/1705 train_time:3188ms step_avg:99.62ms +step:33/1705 train_time:3280ms step_avg:99.40ms +step:34/1705 train_time:3373ms step_avg:99.20ms +step:35/1705 train_time:3467ms step_avg:99.05ms +step:36/1705 train_time:3560ms step_avg:98.89ms +step:37/1705 train_time:3653ms step_avg:98.73ms +step:38/1705 train_time:3748ms step_avg:98.62ms +step:39/1705 train_time:3841ms step_avg:98.48ms +step:40/1705 train_time:3933ms step_avg:98.33ms +step:41/1705 train_time:4028ms step_avg:98.24ms +step:42/1705 train_time:4121ms step_avg:98.11ms +step:43/1705 train_time:4213ms step_avg:97.98ms +step:44/1705 train_time:4307ms step_avg:97.88ms +step:45/1705 train_time:4401ms step_avg:97.79ms +step:46/1705 train_time:4493ms step_avg:97.67ms +step:47/1705 train_time:4587ms step_avg:97.59ms +step:48/1705 train_time:4680ms step_avg:97.51ms +step:49/1705 train_time:4773ms step_avg:97.40ms +step:50/1705 train_time:4866ms step_avg:97.33ms +step:51/1705 train_time:4960ms step_avg:97.25ms +step:52/1705 train_time:5053ms step_avg:97.16ms +step:53/1705 train_time:5147ms step_avg:97.10ms +step:54/1705 train_time:5239ms step_avg:97.02ms +step:55/1705 train_time:5332ms step_avg:96.95ms +step:56/1705 train_time:5426ms step_avg:96.90ms +step:57/1705 train_time:5520ms step_avg:96.84ms +step:58/1705 train_time:5612ms step_avg:96.77ms +step:59/1705 train_time:5706ms step_avg:96.72ms +step:60/1705 train_time:5799ms step_avg:96.66ms +step:61/1705 train_time:5893ms step_avg:96.60ms +step:62/1705 train_time:5986ms step_avg:96.55ms +step:63/1705 train_time:6079ms step_avg:96.48ms +step:64/1705 train_time:6171ms step_avg:96.42ms +step:65/1705 train_time:6265ms step_avg:96.38ms +step:66/1705 train_time:6358ms step_avg:96.33ms +step:67/1705 train_time:6451ms step_avg:96.29ms +step:68/1705 train_time:6545ms step_avg:96.25ms +step:69/1705 train_time:6638ms step_avg:96.20ms +step:70/1705 train_time:6731ms step_avg:96.16ms +step:71/1705 train_time:6825ms step_avg:96.13ms +step:72/1705 train_time:6919ms step_avg:96.09ms +step:73/1705 train_time:7012ms step_avg:96.05ms +step:74/1705 train_time:7106ms step_avg:96.02ms +step:75/1705 train_time:7198ms step_avg:95.98ms +step:76/1705 train_time:7291ms step_avg:95.94ms +step:77/1705 
train_time:7385ms step_avg:95.91ms +step:78/1705 train_time:7477ms step_avg:95.87ms +step:79/1705 train_time:7570ms step_avg:95.83ms +step:80/1705 train_time:7664ms step_avg:95.80ms +step:81/1705 train_time:7757ms step_avg:95.76ms +step:82/1705 train_time:7851ms step_avg:95.74ms +step:83/1705 train_time:7945ms step_avg:95.72ms +step:84/1705 train_time:8038ms step_avg:95.69ms +step:85/1705 train_time:8131ms step_avg:95.66ms +step:86/1705 train_time:8225ms step_avg:95.64ms +step:87/1705 train_time:8318ms step_avg:95.61ms +step:88/1705 train_time:8411ms step_avg:95.58ms +step:89/1705 train_time:8504ms step_avg:95.56ms +step:90/1705 train_time:8598ms step_avg:95.53ms +step:91/1705 train_time:8691ms step_avg:95.50ms +step:92/1705 train_time:8785ms step_avg:95.48ms +step:93/1705 train_time:8877ms step_avg:95.45ms +step:94/1705 train_time:8970ms step_avg:95.43ms +step:95/1705 train_time:9063ms step_avg:95.40ms +step:96/1705 train_time:9155ms step_avg:95.37ms +step:97/1705 train_time:9249ms step_avg:95.35ms +step:98/1705 train_time:9343ms step_avg:95.33ms +step:99/1705 train_time:9435ms step_avg:95.30ms +step:100/1705 train_time:9529ms step_avg:95.29ms +step:101/1705 train_time:9622ms step_avg:95.27ms +step:102/1705 train_time:9715ms step_avg:95.24ms +step:103/1705 train_time:9808ms step_avg:95.22ms +step:104/1705 train_time:9901ms step_avg:95.20ms +step:105/1705 train_time:9993ms step_avg:95.18ms +step:106/1705 train_time:10087ms step_avg:95.16ms +step:107/1705 train_time:10179ms step_avg:95.13ms +step:108/1705 train_time:10271ms step_avg:95.10ms +step:109/1705 train_time:10365ms step_avg:95.09ms +step:110/1705 train_time:10459ms step_avg:95.08ms +step:111/1705 train_time:10551ms step_avg:95.06ms +step:112/1705 train_time:10645ms step_avg:95.05ms +step:113/1705 train_time:10737ms step_avg:95.02ms +step:114/1705 train_time:10830ms step_avg:95.00ms +step:115/1705 train_time:10923ms step_avg:94.98ms +step:116/1705 train_time:11017ms step_avg:94.97ms +step:117/1705 train_time:11109ms step_avg:94.95ms +step:118/1705 train_time:11201ms step_avg:94.93ms +step:119/1705 train_time:11294ms step_avg:94.91ms +step:120/1705 train_time:11387ms step_avg:94.90ms +step:121/1705 train_time:11480ms step_avg:94.88ms +step:122/1705 train_time:11573ms step_avg:94.86ms +step:123/1705 train_time:11666ms step_avg:94.84ms +step:124/1705 train_time:11759ms step_avg:94.83ms +step:125/1705 train_time:11852ms step_avg:94.81ms +step:125/1705 val_loss:4.3053 train_time:11946ms step_avg:95.57ms +step:126/1705 train_time:11967ms step_avg:94.98ms +step:127/1705 train_time:12044ms step_avg:94.84ms +step:128/1705 train_time:12145ms step_avg:94.88ms +step:129/1705 train_time:12239ms step_avg:94.88ms +step:130/1705 train_time:12332ms step_avg:94.86ms +step:131/1705 train_time:12424ms step_avg:94.84ms +step:132/1705 train_time:12515ms step_avg:94.81ms +step:133/1705 train_time:12607ms step_avg:94.79ms +step:134/1705 train_time:12699ms step_avg:94.77ms +step:135/1705 train_time:12792ms step_avg:94.76ms +step:136/1705 train_time:12883ms step_avg:94.73ms +step:137/1705 train_time:12977ms step_avg:94.72ms +step:138/1705 train_time:13073ms step_avg:94.73ms +step:139/1705 train_time:13166ms step_avg:94.72ms +step:140/1705 train_time:13260ms step_avg:94.72ms +step:141/1705 train_time:13354ms step_avg:94.71ms +step:142/1705 train_time:13447ms step_avg:94.70ms +step:143/1705 train_time:13539ms step_avg:94.68ms +step:144/1705 train_time:13631ms step_avg:94.66ms +step:145/1705 train_time:13723ms step_avg:94.64ms +step:146/1705 train_time:13815ms 
step_avg:94.62ms +step:147/1705 train_time:13907ms step_avg:94.61ms +step:148/1705 train_time:14000ms step_avg:94.60ms +step:149/1705 train_time:14095ms step_avg:94.60ms +step:150/1705 train_time:14189ms step_avg:94.59ms +step:151/1705 train_time:14282ms step_avg:94.58ms +step:152/1705 train_time:14376ms step_avg:94.58ms +step:153/1705 train_time:14469ms step_avg:94.57ms +step:154/1705 train_time:14562ms step_avg:94.56ms +step:155/1705 train_time:14655ms step_avg:94.55ms +step:156/1705 train_time:14748ms step_avg:94.54ms +step:157/1705 train_time:14839ms step_avg:94.52ms +step:158/1705 train_time:14932ms step_avg:94.51ms +step:159/1705 train_time:15024ms step_avg:94.49ms +step:160/1705 train_time:15118ms step_avg:94.49ms +step:161/1705 train_time:15212ms step_avg:94.48ms +step:162/1705 train_time:15305ms step_avg:94.47ms +step:163/1705 train_time:15398ms step_avg:94.47ms +step:164/1705 train_time:15491ms step_avg:94.46ms +step:165/1705 train_time:15583ms step_avg:94.44ms +step:166/1705 train_time:15675ms step_avg:94.43ms +step:167/1705 train_time:15768ms step_avg:94.42ms +step:168/1705 train_time:15860ms step_avg:94.41ms +step:169/1705 train_time:15953ms step_avg:94.40ms +step:170/1705 train_time:16045ms step_avg:94.38ms +step:171/1705 train_time:16138ms step_avg:94.37ms +step:172/1705 train_time:16231ms step_avg:94.37ms +step:173/1705 train_time:16323ms step_avg:94.35ms +step:174/1705 train_time:16416ms step_avg:94.34ms +step:175/1705 train_time:16509ms step_avg:94.34ms +step:176/1705 train_time:16601ms step_avg:94.33ms +step:177/1705 train_time:16695ms step_avg:94.32ms +step:178/1705 train_time:16788ms step_avg:94.31ms +step:179/1705 train_time:16880ms step_avg:94.30ms +step:180/1705 train_time:16973ms step_avg:94.29ms +step:181/1705 train_time:17065ms step_avg:94.28ms +step:182/1705 train_time:17159ms step_avg:94.28ms +step:183/1705 train_time:17252ms step_avg:94.27ms +step:184/1705 train_time:17345ms step_avg:94.27ms +step:185/1705 train_time:17438ms step_avg:94.26ms +step:186/1705 train_time:17532ms step_avg:94.26ms +step:187/1705 train_time:17624ms step_avg:94.25ms +step:188/1705 train_time:17717ms step_avg:94.24ms +step:189/1705 train_time:17810ms step_avg:94.23ms +step:190/1705 train_time:17902ms step_avg:94.22ms +step:191/1705 train_time:17995ms step_avg:94.22ms +step:192/1705 train_time:18087ms step_avg:94.21ms +step:193/1705 train_time:18180ms step_avg:94.20ms +step:194/1705 train_time:18274ms step_avg:94.20ms +step:195/1705 train_time:18367ms step_avg:94.19ms +step:196/1705 train_time:18459ms step_avg:94.18ms +step:197/1705 train_time:18552ms step_avg:94.17ms +step:198/1705 train_time:18644ms step_avg:94.16ms +step:199/1705 train_time:18738ms step_avg:94.16ms +step:200/1705 train_time:18830ms step_avg:94.15ms +step:201/1705 train_time:18922ms step_avg:94.14ms +step:202/1705 train_time:19015ms step_avg:94.14ms +step:203/1705 train_time:19108ms step_avg:94.13ms +step:204/1705 train_time:19200ms step_avg:94.12ms +step:205/1705 train_time:19294ms step_avg:94.12ms +step:206/1705 train_time:19387ms step_avg:94.11ms +step:207/1705 train_time:19479ms step_avg:94.10ms +step:208/1705 train_time:19573ms step_avg:94.10ms +step:209/1705 train_time:19665ms step_avg:94.09ms +step:210/1705 train_time:19759ms step_avg:94.09ms +step:211/1705 train_time:19852ms step_avg:94.09ms +step:212/1705 train_time:19945ms step_avg:94.08ms +step:213/1705 train_time:20259ms step_avg:95.11ms +step:214/1705 train_time:20329ms step_avg:95.00ms +step:215/1705 train_time:20420ms step_avg:94.98ms +step:216/1705 
train_time:20513ms step_avg:94.97ms +step:217/1705 train_time:20605ms step_avg:94.95ms +step:218/1705 train_time:20697ms step_avg:94.94ms +step:219/1705 train_time:20788ms step_avg:94.92ms +step:220/1705 train_time:20880ms step_avg:94.91ms +step:221/1705 train_time:20972ms step_avg:94.90ms +step:222/1705 train_time:21063ms step_avg:94.88ms +step:223/1705 train_time:21158ms step_avg:94.88ms +step:224/1705 train_time:21254ms step_avg:94.88ms +step:225/1705 train_time:21350ms step_avg:94.89ms +step:226/1705 train_time:21443ms step_avg:94.88ms +step:227/1705 train_time:21536ms step_avg:94.87ms +step:228/1705 train_time:21627ms step_avg:94.86ms +step:229/1705 train_time:21720ms step_avg:94.85ms +step:230/1705 train_time:21813ms step_avg:94.84ms +step:231/1705 train_time:21904ms step_avg:94.82ms +step:232/1705 train_time:21997ms step_avg:94.81ms +step:233/1705 train_time:22088ms step_avg:94.80ms +step:234/1705 train_time:22181ms step_avg:94.79ms +step:235/1705 train_time:22276ms step_avg:94.79ms +step:236/1705 train_time:22370ms step_avg:94.79ms +step:237/1705 train_time:22463ms step_avg:94.78ms +step:238/1705 train_time:22556ms step_avg:94.77ms +step:239/1705 train_time:22649ms step_avg:94.77ms +step:240/1705 train_time:22741ms step_avg:94.75ms +step:241/1705 train_time:22833ms step_avg:94.74ms +step:242/1705 train_time:22926ms step_avg:94.73ms +step:243/1705 train_time:23018ms step_avg:94.72ms +step:244/1705 train_time:23110ms step_avg:94.71ms +step:245/1705 train_time:23203ms step_avg:94.71ms +step:246/1705 train_time:23297ms step_avg:94.70ms +step:247/1705 train_time:23391ms step_avg:94.70ms +step:248/1705 train_time:23484ms step_avg:94.69ms +step:249/1705 train_time:23578ms step_avg:94.69ms +step:250/1705 train_time:23671ms step_avg:94.68ms +step:250/1705 val_loss:3.9711 train_time:23763ms step_avg:95.05ms +step:251/1705 train_time:23785ms step_avg:94.76ms +step:252/1705 train_time:23856ms step_avg:94.67ms +step:253/1705 train_time:23948ms step_avg:94.66ms +step:254/1705 train_time:24050ms step_avg:94.68ms +step:255/1705 train_time:24148ms step_avg:94.70ms +step:256/1705 train_time:24240ms step_avg:94.69ms +step:257/1705 train_time:24331ms step_avg:94.68ms +step:258/1705 train_time:24423ms step_avg:94.66ms +step:259/1705 train_time:24515ms step_avg:94.65ms +step:260/1705 train_time:24607ms step_avg:94.64ms +step:261/1705 train_time:24700ms step_avg:94.64ms +step:262/1705 train_time:24793ms step_avg:94.63ms +step:263/1705 train_time:24886ms step_avg:94.62ms +step:264/1705 train_time:24978ms step_avg:94.62ms +step:265/1705 train_time:25072ms step_avg:94.61ms +step:266/1705 train_time:25165ms step_avg:94.61ms +step:267/1705 train_time:25257ms step_avg:94.60ms +step:268/1705 train_time:25350ms step_avg:94.59ms +step:269/1705 train_time:25442ms step_avg:94.58ms +step:270/1705 train_time:25534ms step_avg:94.57ms +step:271/1705 train_time:25627ms step_avg:94.57ms +step:272/1705 train_time:25719ms step_avg:94.56ms +step:273/1705 train_time:25812ms step_avg:94.55ms +step:274/1705 train_time:25905ms step_avg:94.54ms +step:275/1705 train_time:25998ms step_avg:94.54ms +step:276/1705 train_time:26092ms step_avg:94.54ms +step:277/1705 train_time:26186ms step_avg:94.54ms +step:278/1705 train_time:26279ms step_avg:94.53ms +step:279/1705 train_time:26372ms step_avg:94.52ms +step:280/1705 train_time:26464ms step_avg:94.52ms +step:281/1705 train_time:26556ms step_avg:94.51ms +step:282/1705 train_time:26649ms step_avg:94.50ms +step:283/1705 train_time:26743ms step_avg:94.50ms +step:284/1705 train_time:26834ms 
step_avg:94.49ms +step:285/1705 train_time:26927ms step_avg:94.48ms +step:286/1705 train_time:27020ms step_avg:94.47ms +step:287/1705 train_time:27113ms step_avg:94.47ms +step:288/1705 train_time:27207ms step_avg:94.47ms +step:289/1705 train_time:27300ms step_avg:94.46ms +step:290/1705 train_time:27393ms step_avg:94.46ms +step:291/1705 train_time:27485ms step_avg:94.45ms +step:292/1705 train_time:27578ms step_avg:94.44ms +step:293/1705 train_time:27671ms step_avg:94.44ms +step:294/1705 train_time:27763ms step_avg:94.43ms +step:295/1705 train_time:27855ms step_avg:94.42ms +step:296/1705 train_time:27948ms step_avg:94.42ms +step:297/1705 train_time:28041ms step_avg:94.41ms +step:298/1705 train_time:28135ms step_avg:94.41ms +step:299/1705 train_time:28229ms step_avg:94.41ms +step:300/1705 train_time:28321ms step_avg:94.40ms +step:301/1705 train_time:28414ms step_avg:94.40ms +step:302/1705 train_time:28507ms step_avg:94.39ms +step:303/1705 train_time:28599ms step_avg:94.39ms +step:304/1705 train_time:28692ms step_avg:94.38ms +step:305/1705 train_time:28785ms step_avg:94.38ms +step:306/1705 train_time:28878ms step_avg:94.37ms +step:307/1705 train_time:28971ms step_avg:94.37ms +step:308/1705 train_time:29063ms step_avg:94.36ms +step:309/1705 train_time:29156ms step_avg:94.36ms +step:310/1705 train_time:29249ms step_avg:94.35ms +step:311/1705 train_time:29342ms step_avg:94.35ms +step:312/1705 train_time:29435ms step_avg:94.34ms +step:313/1705 train_time:29527ms step_avg:94.34ms +step:314/1705 train_time:29620ms step_avg:94.33ms +step:315/1705 train_time:29713ms step_avg:94.33ms +step:316/1705 train_time:29806ms step_avg:94.32ms +step:317/1705 train_time:29898ms step_avg:94.31ms +step:318/1705 train_time:29991ms step_avg:94.31ms +step:319/1705 train_time:30084ms step_avg:94.31ms +step:320/1705 train_time:30176ms step_avg:94.30ms +step:321/1705 train_time:30270ms step_avg:94.30ms +step:322/1705 train_time:30363ms step_avg:94.30ms +step:323/1705 train_time:30455ms step_avg:94.29ms +step:324/1705 train_time:30549ms step_avg:94.29ms +step:325/1705 train_time:30643ms step_avg:94.29ms +step:326/1705 train_time:30735ms step_avg:94.28ms +step:327/1705 train_time:30828ms step_avg:94.28ms +step:328/1705 train_time:30921ms step_avg:94.27ms +step:329/1705 train_time:31013ms step_avg:94.27ms +step:330/1705 train_time:31107ms step_avg:94.26ms +step:331/1705 train_time:31199ms step_avg:94.26ms +step:332/1705 train_time:31293ms step_avg:94.26ms +step:333/1705 train_time:31386ms step_avg:94.25ms +step:334/1705 train_time:31478ms step_avg:94.25ms +step:335/1705 train_time:31571ms step_avg:94.24ms +step:336/1705 train_time:31664ms step_avg:94.24ms +step:337/1705 train_time:31756ms step_avg:94.23ms +step:338/1705 train_time:31849ms step_avg:94.23ms +step:339/1705 train_time:31942ms step_avg:94.22ms +step:340/1705 train_time:32034ms step_avg:94.22ms +step:341/1705 train_time:32127ms step_avg:94.22ms +step:342/1705 train_time:32220ms step_avg:94.21ms +step:343/1705 train_time:32313ms step_avg:94.21ms +step:344/1705 train_time:32406ms step_avg:94.20ms +step:345/1705 train_time:32499ms step_avg:94.20ms +step:346/1705 train_time:32592ms step_avg:94.20ms +step:347/1705 train_time:32685ms step_avg:94.19ms +step:348/1705 train_time:32778ms step_avg:94.19ms +step:349/1705 train_time:32870ms step_avg:94.18ms +step:350/1705 train_time:32963ms step_avg:94.18ms +step:351/1705 train_time:33056ms step_avg:94.18ms +step:352/1705 train_time:33149ms step_avg:94.17ms +step:353/1705 train_time:33242ms step_avg:94.17ms +step:354/1705 
train_time:33334ms step_avg:94.17ms +step:355/1705 train_time:33429ms step_avg:94.17ms +step:356/1705 train_time:33521ms step_avg:94.16ms +step:357/1705 train_time:33614ms step_avg:94.16ms +step:358/1705 train_time:33707ms step_avg:94.15ms +step:359/1705 train_time:33799ms step_avg:94.15ms +step:360/1705 train_time:33892ms step_avg:94.14ms +step:361/1705 train_time:33985ms step_avg:94.14ms +step:362/1705 train_time:34077ms step_avg:94.14ms +step:363/1705 train_time:34171ms step_avg:94.13ms +step:364/1705 train_time:34263ms step_avg:94.13ms +step:365/1705 train_time:34355ms step_avg:94.12ms +step:366/1705 train_time:34448ms step_avg:94.12ms +step:367/1705 train_time:34541ms step_avg:94.12ms +step:368/1705 train_time:34635ms step_avg:94.12ms +step:369/1705 train_time:34728ms step_avg:94.11ms +step:370/1705 train_time:34821ms step_avg:94.11ms +step:371/1705 train_time:34914ms step_avg:94.11ms +step:372/1705 train_time:35007ms step_avg:94.11ms +step:373/1705 train_time:35100ms step_avg:94.10ms +step:374/1705 train_time:35193ms step_avg:94.10ms +step:375/1705 train_time:35287ms step_avg:94.10ms +step:375/1705 val_loss:3.8190 train_time:35380ms step_avg:94.35ms +step:376/1705 train_time:35401ms step_avg:94.15ms +step:377/1705 train_time:35475ms step_avg:94.10ms +step:378/1705 train_time:35572ms step_avg:94.11ms +step:379/1705 train_time:35665ms step_avg:94.10ms +step:380/1705 train_time:35758ms step_avg:94.10ms +step:381/1705 train_time:35850ms step_avg:94.09ms +step:382/1705 train_time:35943ms step_avg:94.09ms +step:383/1705 train_time:36035ms step_avg:94.09ms +step:384/1705 train_time:36126ms step_avg:94.08ms +step:385/1705 train_time:36219ms step_avg:94.08ms +step:386/1705 train_time:36311ms step_avg:94.07ms +step:387/1705 train_time:36405ms step_avg:94.07ms +step:388/1705 train_time:36499ms step_avg:94.07ms +step:389/1705 train_time:36594ms step_avg:94.07ms +step:390/1705 train_time:36687ms step_avg:94.07ms +step:391/1705 train_time:36780ms step_avg:94.07ms +step:392/1705 train_time:36873ms step_avg:94.06ms +step:393/1705 train_time:36965ms step_avg:94.06ms +step:394/1705 train_time:37059ms step_avg:94.06ms +step:395/1705 train_time:37150ms step_avg:94.05ms +step:396/1705 train_time:37243ms step_avg:94.05ms +step:397/1705 train_time:37336ms step_avg:94.05ms +step:398/1705 train_time:37429ms step_avg:94.04ms +step:399/1705 train_time:37523ms step_avg:94.04ms +step:400/1705 train_time:37616ms step_avg:94.04ms +step:401/1705 train_time:37709ms step_avg:94.04ms +step:402/1705 train_time:37803ms step_avg:94.04ms +step:403/1705 train_time:37895ms step_avg:94.03ms +step:404/1705 train_time:37987ms step_avg:94.03ms +step:405/1705 train_time:38080ms step_avg:94.02ms +step:406/1705 train_time:38173ms step_avg:94.02ms +step:407/1705 train_time:38265ms step_avg:94.02ms +step:408/1705 train_time:38359ms step_avg:94.02ms +step:409/1705 train_time:38453ms step_avg:94.02ms +step:410/1705 train_time:38546ms step_avg:94.01ms +step:411/1705 train_time:38639ms step_avg:94.01ms +step:412/1705 train_time:38732ms step_avg:94.01ms +step:413/1705 train_time:38825ms step_avg:94.01ms +step:414/1705 train_time:38917ms step_avg:94.00ms +step:415/1705 train_time:39009ms step_avg:94.00ms +step:416/1705 train_time:39102ms step_avg:93.99ms +step:417/1705 train_time:39194ms step_avg:93.99ms +step:418/1705 train_time:39286ms step_avg:93.99ms +step:419/1705 train_time:39379ms step_avg:93.98ms +step:420/1705 train_time:39472ms step_avg:93.98ms +step:421/1705 train_time:39565ms step_avg:93.98ms +step:422/1705 train_time:39660ms 
step_avg:93.98ms +step:423/1705 train_time:39753ms step_avg:93.98ms +step:424/1705 train_time:39846ms step_avg:93.98ms +step:425/1705 train_time:40124ms step_avg:94.41ms +step:426/1705 train_time:40241ms step_avg:94.46ms +step:427/1705 train_time:40332ms step_avg:94.45ms +step:428/1705 train_time:40424ms step_avg:94.45ms +step:429/1705 train_time:40516ms step_avg:94.44ms +step:430/1705 train_time:40608ms step_avg:94.44ms +step:431/1705 train_time:40700ms step_avg:94.43ms +step:432/1705 train_time:40792ms step_avg:94.43ms +step:433/1705 train_time:40884ms step_avg:94.42ms +step:434/1705 train_time:40976ms step_avg:94.41ms +step:435/1705 train_time:41068ms step_avg:94.41ms +step:436/1705 train_time:41165ms step_avg:94.42ms +step:437/1705 train_time:41262ms step_avg:94.42ms +step:438/1705 train_time:41356ms step_avg:94.42ms +step:439/1705 train_time:41448ms step_avg:94.42ms +step:440/1705 train_time:41542ms step_avg:94.41ms +step:441/1705 train_time:41635ms step_avg:94.41ms +step:442/1705 train_time:41726ms step_avg:94.40ms +step:443/1705 train_time:41819ms step_avg:94.40ms +step:444/1705 train_time:41910ms step_avg:94.39ms +step:445/1705 train_time:42003ms step_avg:94.39ms +step:446/1705 train_time:42096ms step_avg:94.39ms +step:447/1705 train_time:42190ms step_avg:94.38ms +step:448/1705 train_time:42284ms step_avg:94.38ms +step:449/1705 train_time:42379ms step_avg:94.38ms +step:450/1705 train_time:42472ms step_avg:94.38ms +step:451/1705 train_time:42565ms step_avg:94.38ms +step:452/1705 train_time:42658ms step_avg:94.38ms +step:453/1705 train_time:42751ms step_avg:94.37ms +step:454/1705 train_time:42844ms step_avg:94.37ms +step:455/1705 train_time:42937ms step_avg:94.37ms +step:456/1705 train_time:43030ms step_avg:94.36ms +step:457/1705 train_time:43123ms step_avg:94.36ms +step:458/1705 train_time:43217ms step_avg:94.36ms +step:459/1705 train_time:43310ms step_avg:94.36ms +step:460/1705 train_time:43404ms step_avg:94.36ms +step:461/1705 train_time:43497ms step_avg:94.35ms +step:462/1705 train_time:43590ms step_avg:94.35ms +step:463/1705 train_time:43684ms step_avg:94.35ms +step:464/1705 train_time:43776ms step_avg:94.34ms +step:465/1705 train_time:43868ms step_avg:94.34ms +step:466/1705 train_time:43961ms step_avg:94.34ms +step:467/1705 train_time:44054ms step_avg:94.33ms +step:468/1705 train_time:44148ms step_avg:94.33ms +step:469/1705 train_time:44242ms step_avg:94.33ms +step:470/1705 train_time:44334ms step_avg:94.33ms +step:471/1705 train_time:44428ms step_avg:94.33ms +step:472/1705 train_time:44522ms step_avg:94.33ms +step:473/1705 train_time:44615ms step_avg:94.32ms +step:474/1705 train_time:44707ms step_avg:94.32ms +step:475/1705 train_time:44800ms step_avg:94.32ms +step:476/1705 train_time:44894ms step_avg:94.31ms +step:477/1705 train_time:44986ms step_avg:94.31ms +step:478/1705 train_time:45079ms step_avg:94.31ms +step:479/1705 train_time:45172ms step_avg:94.30ms +step:480/1705 train_time:45265ms step_avg:94.30ms +step:481/1705 train_time:45358ms step_avg:94.30ms +step:482/1705 train_time:45450ms step_avg:94.30ms +step:483/1705 train_time:45544ms step_avg:94.29ms +step:484/1705 train_time:45638ms step_avg:94.29ms +step:485/1705 train_time:45730ms step_avg:94.29ms +step:486/1705 train_time:45823ms step_avg:94.29ms +step:487/1705 train_time:45916ms step_avg:94.28ms +step:488/1705 train_time:46008ms step_avg:94.28ms +step:489/1705 train_time:46101ms step_avg:94.28ms +step:490/1705 train_time:46194ms step_avg:94.27ms +step:491/1705 train_time:46286ms step_avg:94.27ms +step:492/1705 
train_time:46380ms step_avg:94.27ms +step:493/1705 train_time:46472ms step_avg:94.26ms +step:494/1705 train_time:46565ms step_avg:94.26ms +step:495/1705 train_time:46658ms step_avg:94.26ms +step:496/1705 train_time:46751ms step_avg:94.26ms +step:497/1705 train_time:46844ms step_avg:94.25ms +step:498/1705 train_time:46937ms step_avg:94.25ms +step:499/1705 train_time:47030ms step_avg:94.25ms +step:500/1705 train_time:47123ms step_avg:94.25ms +step:500/1705 val_loss:3.7215 train_time:47217ms step_avg:94.43ms +step:501/1705 train_time:47239ms step_avg:94.29ms +step:502/1705 train_time:47314ms step_avg:94.25ms +step:503/1705 train_time:47412ms step_avg:94.26ms +step:504/1705 train_time:47506ms step_avg:94.26ms +step:505/1705 train_time:47598ms step_avg:94.25ms +step:506/1705 train_time:47689ms step_avg:94.25ms +step:507/1705 train_time:47781ms step_avg:94.24ms +step:508/1705 train_time:47874ms step_avg:94.24ms +step:509/1705 train_time:47966ms step_avg:94.24ms +step:510/1705 train_time:48058ms step_avg:94.23ms +step:511/1705 train_time:48151ms step_avg:94.23ms +step:512/1705 train_time:48245ms step_avg:94.23ms +step:513/1705 train_time:48339ms step_avg:94.23ms +step:514/1705 train_time:48434ms step_avg:94.23ms +step:515/1705 train_time:48528ms step_avg:94.23ms +step:516/1705 train_time:48621ms step_avg:94.23ms +step:517/1705 train_time:48713ms step_avg:94.22ms +step:518/1705 train_time:48807ms step_avg:94.22ms +step:519/1705 train_time:48899ms step_avg:94.22ms +step:520/1705 train_time:48991ms step_avg:94.21ms +step:521/1705 train_time:49084ms step_avg:94.21ms +step:522/1705 train_time:49177ms step_avg:94.21ms +step:523/1705 train_time:49271ms step_avg:94.21ms +step:524/1705 train_time:49364ms step_avg:94.21ms +step:525/1705 train_time:49457ms step_avg:94.20ms +step:526/1705 train_time:49551ms step_avg:94.20ms +step:527/1705 train_time:49644ms step_avg:94.20ms +step:528/1705 train_time:49736ms step_avg:94.20ms +step:529/1705 train_time:49829ms step_avg:94.20ms +step:530/1705 train_time:49922ms step_avg:94.19ms +step:531/1705 train_time:50014ms step_avg:94.19ms +step:532/1705 train_time:50108ms step_avg:94.19ms +step:533/1705 train_time:50201ms step_avg:94.19ms +step:534/1705 train_time:50294ms step_avg:94.18ms +step:535/1705 train_time:50387ms step_avg:94.18ms +step:536/1705 train_time:50480ms step_avg:94.18ms +step:537/1705 train_time:50573ms step_avg:94.18ms +step:538/1705 train_time:50667ms step_avg:94.18ms +step:539/1705 train_time:50760ms step_avg:94.17ms +step:540/1705 train_time:50852ms step_avg:94.17ms +step:541/1705 train_time:50945ms step_avg:94.17ms +step:542/1705 train_time:51038ms step_avg:94.17ms +step:543/1705 train_time:51131ms step_avg:94.16ms +step:544/1705 train_time:51224ms step_avg:94.16ms +step:545/1705 train_time:51317ms step_avg:94.16ms +step:546/1705 train_time:51410ms step_avg:94.16ms +step:547/1705 train_time:51503ms step_avg:94.16ms +step:548/1705 train_time:51596ms step_avg:94.15ms +step:549/1705 train_time:51690ms step_avg:94.15ms +step:550/1705 train_time:51783ms step_avg:94.15ms +step:551/1705 train_time:51875ms step_avg:94.15ms +step:552/1705 train_time:51969ms step_avg:94.15ms +step:553/1705 train_time:52062ms step_avg:94.14ms +step:554/1705 train_time:52154ms step_avg:94.14ms +step:555/1705 train_time:52248ms step_avg:94.14ms +step:556/1705 train_time:52341ms step_avg:94.14ms +step:557/1705 train_time:52435ms step_avg:94.14ms +step:558/1705 train_time:52529ms step_avg:94.14ms +step:559/1705 train_time:52622ms step_avg:94.14ms +step:560/1705 train_time:52715ms 
step_avg:94.13ms +step:561/1705 train_time:52809ms step_avg:94.13ms +step:562/1705 train_time:52902ms step_avg:94.13ms +step:563/1705 train_time:52994ms step_avg:94.13ms +step:564/1705 train_time:53087ms step_avg:94.13ms +step:565/1705 train_time:53180ms step_avg:94.12ms +step:566/1705 train_time:53273ms step_avg:94.12ms +step:567/1705 train_time:53367ms step_avg:94.12ms +step:568/1705 train_time:53460ms step_avg:94.12ms +step:569/1705 train_time:53553ms step_avg:94.12ms +step:570/1705 train_time:53646ms step_avg:94.12ms +step:571/1705 train_time:53741ms step_avg:94.12ms +step:572/1705 train_time:53835ms step_avg:94.12ms +step:573/1705 train_time:53930ms step_avg:94.12ms +step:574/1705 train_time:54023ms step_avg:94.12ms +step:575/1705 train_time:54117ms step_avg:94.12ms +step:576/1705 train_time:54212ms step_avg:94.12ms +step:577/1705 train_time:54306ms step_avg:94.12ms +step:578/1705 train_time:54401ms step_avg:94.12ms +step:579/1705 train_time:54495ms step_avg:94.12ms +step:580/1705 train_time:54589ms step_avg:94.12ms +step:581/1705 train_time:54684ms step_avg:94.12ms +step:582/1705 train_time:54777ms step_avg:94.12ms +step:583/1705 train_time:54872ms step_avg:94.12ms +step:584/1705 train_time:54966ms step_avg:94.12ms +step:585/1705 train_time:55060ms step_avg:94.12ms +step:586/1705 train_time:55154ms step_avg:94.12ms +step:587/1705 train_time:55249ms step_avg:94.12ms +step:588/1705 train_time:55343ms step_avg:94.12ms +step:589/1705 train_time:55438ms step_avg:94.12ms +step:590/1705 train_time:55532ms step_avg:94.12ms +step:591/1705 train_time:55627ms step_avg:94.12ms +step:592/1705 train_time:55721ms step_avg:94.12ms +step:593/1705 train_time:55815ms step_avg:94.12ms +step:594/1705 train_time:55910ms step_avg:94.13ms +step:595/1705 train_time:56005ms step_avg:94.13ms +step:596/1705 train_time:56098ms step_avg:94.12ms +step:597/1705 train_time:56193ms step_avg:94.13ms +step:598/1705 train_time:56287ms step_avg:94.13ms +step:599/1705 train_time:56382ms step_avg:94.13ms +step:600/1705 train_time:56476ms step_avg:94.13ms +step:601/1705 train_time:56571ms step_avg:94.13ms +step:602/1705 train_time:56666ms step_avg:94.13ms +step:603/1705 train_time:56760ms step_avg:94.13ms +step:604/1705 train_time:56854ms step_avg:94.13ms +step:605/1705 train_time:56949ms step_avg:94.13ms +step:606/1705 train_time:57044ms step_avg:94.13ms +step:607/1705 train_time:57138ms step_avg:94.13ms +step:608/1705 train_time:57232ms step_avg:94.13ms +step:609/1705 train_time:57327ms step_avg:94.13ms +step:610/1705 train_time:57421ms step_avg:94.13ms +step:611/1705 train_time:57516ms step_avg:94.13ms +step:612/1705 train_time:57611ms step_avg:94.13ms +step:613/1705 train_time:57706ms step_avg:94.14ms +step:614/1705 train_time:57800ms step_avg:94.14ms +step:615/1705 train_time:57894ms step_avg:94.14ms +step:616/1705 train_time:57988ms step_avg:94.14ms +step:617/1705 train_time:58083ms step_avg:94.14ms +step:618/1705 train_time:58177ms step_avg:94.14ms +step:619/1705 train_time:58272ms step_avg:94.14ms +step:620/1705 train_time:58366ms step_avg:94.14ms +step:621/1705 train_time:58461ms step_avg:94.14ms +step:622/1705 train_time:58554ms step_avg:94.14ms +step:623/1705 train_time:58648ms step_avg:94.14ms +step:624/1705 train_time:58742ms step_avg:94.14ms +step:625/1705 train_time:58836ms step_avg:94.14ms +step:625/1705 val_loss:3.6203 train_time:58932ms step_avg:94.29ms +step:626/1705 train_time:58955ms step_avg:94.18ms +step:627/1705 train_time:59038ms step_avg:94.16ms +step:628/1705 train_time:59137ms step_avg:94.17ms 
+step:629/1705 train_time:59231ms step_avg:94.17ms +step:630/1705 train_time:59325ms step_avg:94.17ms +step:631/1705 train_time:59418ms step_avg:94.16ms +step:632/1705 train_time:59511ms step_avg:94.16ms +step:633/1705 train_time:59604ms step_avg:94.16ms +step:634/1705 train_time:59697ms step_avg:94.16ms +step:635/1705 train_time:59790ms step_avg:94.16ms +step:636/1705 train_time:59885ms step_avg:94.16ms +step:637/1705 train_time:59980ms step_avg:94.16ms +step:638/1705 train_time:60078ms step_avg:94.17ms +step:639/1705 train_time:60438ms step_avg:94.58ms +step:640/1705 train_time:60529ms step_avg:94.58ms +step:641/1705 train_time:60621ms step_avg:94.57ms +step:642/1705 train_time:60715ms step_avg:94.57ms +step:643/1705 train_time:60809ms step_avg:94.57ms +step:644/1705 train_time:60902ms step_avg:94.57ms +step:645/1705 train_time:60995ms step_avg:94.57ms +step:646/1705 train_time:61088ms step_avg:94.56ms +step:647/1705 train_time:61181ms step_avg:94.56ms +step:648/1705 train_time:61274ms step_avg:94.56ms +step:649/1705 train_time:61372ms step_avg:94.56ms +step:650/1705 train_time:61469ms step_avg:94.57ms +step:651/1705 train_time:61565ms step_avg:94.57ms +step:652/1705 train_time:61660ms step_avg:94.57ms +step:653/1705 train_time:61754ms step_avg:94.57ms +step:654/1705 train_time:61848ms step_avg:94.57ms +step:655/1705 train_time:61941ms step_avg:94.57ms +step:656/1705 train_time:62035ms step_avg:94.57ms +step:657/1705 train_time:62129ms step_avg:94.56ms +step:658/1705 train_time:62222ms step_avg:94.56ms +step:659/1705 train_time:62316ms step_avg:94.56ms +step:660/1705 train_time:62413ms step_avg:94.56ms +step:661/1705 train_time:62508ms step_avg:94.57ms +step:662/1705 train_time:62602ms step_avg:94.57ms +step:663/1705 train_time:62697ms step_avg:94.57ms +step:664/1705 train_time:62792ms step_avg:94.57ms +step:665/1705 train_time:62886ms step_avg:94.57ms +step:666/1705 train_time:62979ms step_avg:94.56ms +step:667/1705 train_time:63073ms step_avg:94.56ms +step:668/1705 train_time:63166ms step_avg:94.56ms +step:669/1705 train_time:63260ms step_avg:94.56ms +step:670/1705 train_time:63355ms step_avg:94.56ms +step:671/1705 train_time:63450ms step_avg:94.56ms +step:672/1705 train_time:63545ms step_avg:94.56ms +step:673/1705 train_time:63639ms step_avg:94.56ms +step:674/1705 train_time:63733ms step_avg:94.56ms +step:675/1705 train_time:63828ms step_avg:94.56ms +step:676/1705 train_time:63922ms step_avg:94.56ms +step:677/1705 train_time:64016ms step_avg:94.56ms +step:678/1705 train_time:64110ms step_avg:94.56ms +step:679/1705 train_time:64204ms step_avg:94.56ms +step:680/1705 train_time:64298ms step_avg:94.56ms +step:681/1705 train_time:64392ms step_avg:94.56ms +step:682/1705 train_time:64488ms step_avg:94.56ms +step:683/1705 train_time:64585ms step_avg:94.56ms +step:684/1705 train_time:64677ms step_avg:94.56ms +step:685/1705 train_time:64772ms step_avg:94.56ms +step:686/1705 train_time:64868ms step_avg:94.56ms +step:687/1705 train_time:64962ms step_avg:94.56ms +step:688/1705 train_time:65055ms step_avg:94.56ms +step:689/1705 train_time:65150ms step_avg:94.56ms +step:690/1705 train_time:65245ms step_avg:94.56ms +step:691/1705 train_time:65339ms step_avg:94.56ms +step:692/1705 train_time:65434ms step_avg:94.56ms +step:693/1705 train_time:65529ms step_avg:94.56ms +step:694/1705 train_time:65623ms step_avg:94.56ms +step:695/1705 train_time:65717ms step_avg:94.56ms +step:696/1705 train_time:65813ms step_avg:94.56ms +step:697/1705 train_time:65907ms step_avg:94.56ms +step:698/1705 train_time:66000ms 
step_avg:94.56ms +step:699/1705 train_time:66094ms step_avg:94.56ms +step:700/1705 train_time:66190ms step_avg:94.56ms +step:701/1705 train_time:66283ms step_avg:94.56ms +step:702/1705 train_time:66378ms step_avg:94.56ms +step:703/1705 train_time:66473ms step_avg:94.56ms +step:704/1705 train_time:66567ms step_avg:94.56ms +step:705/1705 train_time:66661ms step_avg:94.55ms +step:706/1705 train_time:66755ms step_avg:94.55ms +step:707/1705 train_time:66850ms step_avg:94.55ms +step:708/1705 train_time:66944ms step_avg:94.55ms +step:709/1705 train_time:67037ms step_avg:94.55ms +step:710/1705 train_time:67132ms step_avg:94.55ms +step:711/1705 train_time:67226ms step_avg:94.55ms +step:712/1705 train_time:67320ms step_avg:94.55ms +step:713/1705 train_time:67414ms step_avg:94.55ms +step:714/1705 train_time:67510ms step_avg:94.55ms +step:715/1705 train_time:67604ms step_avg:94.55ms +step:716/1705 train_time:67698ms step_avg:94.55ms +step:717/1705 train_time:67793ms step_avg:94.55ms +step:718/1705 train_time:67889ms step_avg:94.55ms +step:719/1705 train_time:67983ms step_avg:94.55ms +step:720/1705 train_time:68077ms step_avg:94.55ms +step:721/1705 train_time:68172ms step_avg:94.55ms +step:722/1705 train_time:68266ms step_avg:94.55ms +step:723/1705 train_time:68359ms step_avg:94.55ms +step:724/1705 train_time:68454ms step_avg:94.55ms +step:725/1705 train_time:68549ms step_avg:94.55ms +step:726/1705 train_time:68643ms step_avg:94.55ms +step:727/1705 train_time:68738ms step_avg:94.55ms +step:728/1705 train_time:68833ms step_avg:94.55ms +step:729/1705 train_time:68928ms step_avg:94.55ms +step:730/1705 train_time:69023ms step_avg:94.55ms +step:731/1705 train_time:69117ms step_avg:94.55ms +step:732/1705 train_time:69212ms step_avg:94.55ms +step:733/1705 train_time:69306ms step_avg:94.55ms +step:734/1705 train_time:69400ms step_avg:94.55ms +step:735/1705 train_time:69495ms step_avg:94.55ms +step:736/1705 train_time:69591ms step_avg:94.55ms +step:737/1705 train_time:69685ms step_avg:94.55ms +step:738/1705 train_time:69779ms step_avg:94.55ms +step:739/1705 train_time:69874ms step_avg:94.55ms +step:740/1705 train_time:69969ms step_avg:94.55ms +step:741/1705 train_time:70063ms step_avg:94.55ms +step:742/1705 train_time:70157ms step_avg:94.55ms +step:743/1705 train_time:70252ms step_avg:94.55ms +step:744/1705 train_time:70346ms step_avg:94.55ms +step:745/1705 train_time:70440ms step_avg:94.55ms +step:746/1705 train_time:70535ms step_avg:94.55ms +step:747/1705 train_time:70630ms step_avg:94.55ms +step:748/1705 train_time:70724ms step_avg:94.55ms +step:749/1705 train_time:70817ms step_avg:94.55ms +step:750/1705 train_time:70912ms step_avg:94.55ms +step:750/1705 val_loss:3.5658 train_time:71007ms step_avg:94.68ms +step:751/1705 train_time:71029ms step_avg:94.58ms +step:752/1705 train_time:71106ms step_avg:94.56ms +step:753/1705 train_time:71203ms step_avg:94.56ms +step:754/1705 train_time:71299ms step_avg:94.56ms +step:755/1705 train_time:71393ms step_avg:94.56ms +step:756/1705 train_time:71487ms step_avg:94.56ms +step:757/1705 train_time:71580ms step_avg:94.56ms +step:758/1705 train_time:71674ms step_avg:94.56ms +step:759/1705 train_time:71767ms step_avg:94.55ms +step:760/1705 train_time:71860ms step_avg:94.55ms +step:761/1705 train_time:71955ms step_avg:94.55ms +step:762/1705 train_time:72051ms step_avg:94.56ms +step:763/1705 train_time:72148ms step_avg:94.56ms +step:764/1705 train_time:72242ms step_avg:94.56ms +step:765/1705 train_time:72337ms step_avg:94.56ms +step:766/1705 train_time:72432ms step_avg:94.56ms 
+step:767/1705 train_time:72526ms step_avg:94.56ms +step:768/1705 train_time:72620ms step_avg:94.56ms +step:769/1705 train_time:72715ms step_avg:94.56ms +step:770/1705 train_time:72809ms step_avg:94.56ms +step:771/1705 train_time:72902ms step_avg:94.55ms +step:772/1705 train_time:72997ms step_avg:94.56ms +step:773/1705 train_time:73093ms step_avg:94.56ms +step:774/1705 train_time:73188ms step_avg:94.56ms +step:775/1705 train_time:73282ms step_avg:94.56ms +step:776/1705 train_time:73376ms step_avg:94.56ms +step:777/1705 train_time:73472ms step_avg:94.56ms +step:778/1705 train_time:73565ms step_avg:94.56ms +step:779/1705 train_time:73659ms step_avg:94.56ms +step:780/1705 train_time:73754ms step_avg:94.56ms +step:781/1705 train_time:73848ms step_avg:94.56ms +step:782/1705 train_time:73942ms step_avg:94.55ms +step:783/1705 train_time:74038ms step_avg:94.56ms +step:784/1705 train_time:74133ms step_avg:94.56ms +step:785/1705 train_time:74228ms step_avg:94.56ms +step:786/1705 train_time:74322ms step_avg:94.56ms +step:787/1705 train_time:74418ms step_avg:94.56ms +step:788/1705 train_time:74512ms step_avg:94.56ms +step:789/1705 train_time:74606ms step_avg:94.56ms +step:790/1705 train_time:74699ms step_avg:94.56ms +step:791/1705 train_time:74795ms step_avg:94.56ms +step:792/1705 train_time:74889ms step_avg:94.56ms +step:793/1705 train_time:74983ms step_avg:94.56ms +step:794/1705 train_time:75077ms step_avg:94.56ms +step:795/1705 train_time:75172ms step_avg:94.56ms +step:796/1705 train_time:75267ms step_avg:94.56ms +step:797/1705 train_time:75361ms step_avg:94.56ms +step:798/1705 train_time:75456ms step_avg:94.56ms +step:799/1705 train_time:75550ms step_avg:94.56ms +step:800/1705 train_time:75644ms step_avg:94.56ms +step:801/1705 train_time:75739ms step_avg:94.56ms +step:802/1705 train_time:75833ms step_avg:94.55ms +step:803/1705 train_time:75928ms step_avg:94.55ms +step:804/1705 train_time:76022ms step_avg:94.55ms +step:805/1705 train_time:76116ms step_avg:94.55ms +step:806/1705 train_time:76211ms step_avg:94.55ms +step:807/1705 train_time:76305ms step_avg:94.55ms +step:808/1705 train_time:76400ms step_avg:94.55ms +step:809/1705 train_time:76496ms step_avg:94.56ms +step:810/1705 train_time:76590ms step_avg:94.56ms +step:811/1705 train_time:76684ms step_avg:94.55ms +step:812/1705 train_time:76778ms step_avg:94.55ms +step:813/1705 train_time:76873ms step_avg:94.55ms +step:814/1705 train_time:76967ms step_avg:94.55ms +step:815/1705 train_time:77061ms step_avg:94.55ms +step:816/1705 train_time:77156ms step_avg:94.55ms +step:817/1705 train_time:77250ms step_avg:94.55ms +step:818/1705 train_time:77344ms step_avg:94.55ms +step:819/1705 train_time:77438ms step_avg:94.55ms +step:820/1705 train_time:77533ms step_avg:94.55ms +step:821/1705 train_time:77628ms step_avg:94.55ms +step:822/1705 train_time:77722ms step_avg:94.55ms +step:823/1705 train_time:77816ms step_avg:94.55ms +step:824/1705 train_time:77911ms step_avg:94.55ms +step:825/1705 train_time:78004ms step_avg:94.55ms +step:826/1705 train_time:78098ms step_avg:94.55ms +step:827/1705 train_time:78193ms step_avg:94.55ms +step:828/1705 train_time:78288ms step_avg:94.55ms +step:829/1705 train_time:78382ms step_avg:94.55ms +step:830/1705 train_time:78477ms step_avg:94.55ms +step:831/1705 train_time:78573ms step_avg:94.55ms +step:832/1705 train_time:78667ms step_avg:94.55ms +step:833/1705 train_time:78761ms step_avg:94.55ms +step:834/1705 train_time:78856ms step_avg:94.55ms +step:835/1705 train_time:78951ms step_avg:94.55ms +step:836/1705 train_time:79045ms 
step_avg:94.55ms +step:837/1705 train_time:79140ms step_avg:94.55ms +step:838/1705 train_time:79234ms step_avg:94.55ms +step:839/1705 train_time:79329ms step_avg:94.55ms +step:840/1705 train_time:79423ms step_avg:94.55ms +step:841/1705 train_time:79518ms step_avg:94.55ms +step:842/1705 train_time:79613ms step_avg:94.55ms +step:843/1705 train_time:79707ms step_avg:94.55ms +step:844/1705 train_time:79801ms step_avg:94.55ms +step:845/1705 train_time:79896ms step_avg:94.55ms +step:846/1705 train_time:79990ms step_avg:94.55ms +step:847/1705 train_time:80083ms step_avg:94.55ms +step:848/1705 train_time:80178ms step_avg:94.55ms +step:849/1705 train_time:80273ms step_avg:94.55ms +step:850/1705 train_time:80368ms step_avg:94.55ms +step:851/1705 train_time:80626ms step_avg:94.74ms +step:852/1705 train_time:80734ms step_avg:94.76ms +step:853/1705 train_time:80826ms step_avg:94.75ms +step:854/1705 train_time:80919ms step_avg:94.75ms +step:855/1705 train_time:81012ms step_avg:94.75ms +step:856/1705 train_time:81105ms step_avg:94.75ms +step:857/1705 train_time:81199ms step_avg:94.75ms +step:858/1705 train_time:81292ms step_avg:94.75ms +step:859/1705 train_time:81386ms step_avg:94.75ms +step:860/1705 train_time:81480ms step_avg:94.74ms +step:861/1705 train_time:81576ms step_avg:94.75ms +step:862/1705 train_time:81675ms step_avg:94.75ms +step:863/1705 train_time:81774ms step_avg:94.76ms +step:864/1705 train_time:81868ms step_avg:94.75ms +step:865/1705 train_time:81962ms step_avg:94.75ms +step:866/1705 train_time:82056ms step_avg:94.75ms +step:867/1705 train_time:82149ms step_avg:94.75ms +step:868/1705 train_time:82242ms step_avg:94.75ms +step:869/1705 train_time:82336ms step_avg:94.75ms +step:870/1705 train_time:82429ms step_avg:94.75ms +step:871/1705 train_time:82523ms step_avg:94.74ms +step:872/1705 train_time:82620ms step_avg:94.75ms +step:873/1705 train_time:82718ms step_avg:94.75ms +step:874/1705 train_time:82815ms step_avg:94.75ms +step:875/1705 train_time:82910ms step_avg:94.75ms +step:875/1705 val_loss:3.5261 train_time:83004ms step_avg:94.86ms +step:876/1705 train_time:83025ms step_avg:94.78ms +step:877/1705 train_time:83103ms step_avg:94.76ms +step:878/1705 train_time:83204ms step_avg:94.76ms +step:879/1705 train_time:83298ms step_avg:94.76ms +step:880/1705 train_time:83392ms step_avg:94.76ms +step:881/1705 train_time:83484ms step_avg:94.76ms +step:882/1705 train_time:83578ms step_avg:94.76ms +step:883/1705 train_time:83672ms step_avg:94.76ms +step:884/1705 train_time:83764ms step_avg:94.76ms +step:885/1705 train_time:83858ms step_avg:94.75ms +step:886/1705 train_time:83952ms step_avg:94.75ms +step:887/1705 train_time:84048ms step_avg:94.75ms +step:888/1705 train_time:84145ms step_avg:94.76ms +step:889/1705 train_time:84242ms step_avg:94.76ms +step:890/1705 train_time:84337ms step_avg:94.76ms +step:891/1705 train_time:84431ms step_avg:94.76ms +step:892/1705 train_time:84525ms step_avg:94.76ms +step:893/1705 train_time:84620ms step_avg:94.76ms +step:894/1705 train_time:84713ms step_avg:94.76ms +step:895/1705 train_time:84806ms step_avg:94.76ms +step:896/1705 train_time:84900ms step_avg:94.75ms +step:897/1705 train_time:84995ms step_avg:94.76ms +step:898/1705 train_time:85091ms step_avg:94.76ms +step:899/1705 train_time:85185ms step_avg:94.76ms +step:900/1705 train_time:85280ms step_avg:94.76ms +step:901/1705 train_time:85375ms step_avg:94.76ms +step:902/1705 train_time:85470ms step_avg:94.76ms +step:903/1705 train_time:85564ms step_avg:94.75ms +step:904/1705 train_time:85658ms step_avg:94.75ms 
+step:905/1705 train_time:85751ms step_avg:94.75ms +step:906/1705 train_time:85844ms step_avg:94.75ms +step:907/1705 train_time:85938ms step_avg:94.75ms +step:908/1705 train_time:86034ms step_avg:94.75ms +step:909/1705 train_time:86128ms step_avg:94.75ms +step:910/1705 train_time:86223ms step_avg:94.75ms +step:911/1705 train_time:86318ms step_avg:94.75ms +step:912/1705 train_time:86414ms step_avg:94.75ms +step:913/1705 train_time:86508ms step_avg:94.75ms +step:914/1705 train_time:86603ms step_avg:94.75ms +step:915/1705 train_time:86698ms step_avg:94.75ms +step:916/1705 train_time:86792ms step_avg:94.75ms +step:917/1705 train_time:86886ms step_avg:94.75ms +step:918/1705 train_time:86980ms step_avg:94.75ms +step:919/1705 train_time:87076ms step_avg:94.75ms +step:920/1705 train_time:87171ms step_avg:94.75ms +step:921/1705 train_time:87265ms step_avg:94.75ms +step:922/1705 train_time:87360ms step_avg:94.75ms +step:923/1705 train_time:87455ms step_avg:94.75ms +step:924/1705 train_time:87550ms step_avg:94.75ms +step:925/1705 train_time:87644ms step_avg:94.75ms +step:926/1705 train_time:87739ms step_avg:94.75ms +step:927/1705 train_time:87833ms step_avg:94.75ms +step:928/1705 train_time:87927ms step_avg:94.75ms +step:929/1705 train_time:88021ms step_avg:94.75ms +step:930/1705 train_time:88117ms step_avg:94.75ms +step:931/1705 train_time:88212ms step_avg:94.75ms +step:932/1705 train_time:88306ms step_avg:94.75ms +step:933/1705 train_time:88400ms step_avg:94.75ms +step:934/1705 train_time:88496ms step_avg:94.75ms +step:935/1705 train_time:88591ms step_avg:94.75ms +step:936/1705 train_time:88685ms step_avg:94.75ms +step:937/1705 train_time:88779ms step_avg:94.75ms +step:938/1705 train_time:88874ms step_avg:94.75ms +step:939/1705 train_time:88968ms step_avg:94.75ms +step:940/1705 train_time:89063ms step_avg:94.75ms +step:941/1705 train_time:89158ms step_avg:94.75ms +step:942/1705 train_time:89252ms step_avg:94.75ms +step:943/1705 train_time:89345ms step_avg:94.75ms +step:944/1705 train_time:89440ms step_avg:94.75ms +step:945/1705 train_time:89536ms step_avg:94.75ms +step:946/1705 train_time:89632ms step_avg:94.75ms +step:947/1705 train_time:89724ms step_avg:94.75ms +step:948/1705 train_time:89819ms step_avg:94.75ms +step:949/1705 train_time:89914ms step_avg:94.75ms +step:950/1705 train_time:90008ms step_avg:94.75ms +step:951/1705 train_time:90102ms step_avg:94.74ms +step:952/1705 train_time:90198ms step_avg:94.75ms +step:953/1705 train_time:90293ms step_avg:94.75ms +step:954/1705 train_time:90386ms step_avg:94.74ms +step:955/1705 train_time:90481ms step_avg:94.74ms +step:956/1705 train_time:90576ms step_avg:94.75ms +step:957/1705 train_time:90671ms step_avg:94.74ms +step:958/1705 train_time:90764ms step_avg:94.74ms +step:959/1705 train_time:90859ms step_avg:94.74ms +step:960/1705 train_time:90954ms step_avg:94.74ms +step:961/1705 train_time:91047ms step_avg:94.74ms +step:962/1705 train_time:91142ms step_avg:94.74ms +step:963/1705 train_time:91237ms step_avg:94.74ms +step:964/1705 train_time:91331ms step_avg:94.74ms +step:965/1705 train_time:91425ms step_avg:94.74ms +step:966/1705 train_time:91520ms step_avg:94.74ms +step:967/1705 train_time:91615ms step_avg:94.74ms +step:968/1705 train_time:91709ms step_avg:94.74ms +step:969/1705 train_time:91803ms step_avg:94.74ms +step:970/1705 train_time:91897ms step_avg:94.74ms +step:971/1705 train_time:91992ms step_avg:94.74ms +step:972/1705 train_time:92087ms step_avg:94.74ms +step:973/1705 train_time:92181ms step_avg:94.74ms +step:974/1705 train_time:92276ms 
step_avg:94.74ms +step:975/1705 train_time:92370ms step_avg:94.74ms +step:976/1705 train_time:92465ms step_avg:94.74ms +step:977/1705 train_time:92559ms step_avg:94.74ms +step:978/1705 train_time:92654ms step_avg:94.74ms +step:979/1705 train_time:92748ms step_avg:94.74ms +step:980/1705 train_time:92842ms step_avg:94.74ms +step:981/1705 train_time:92938ms step_avg:94.74ms +step:982/1705 train_time:93032ms step_avg:94.74ms +step:983/1705 train_time:93126ms step_avg:94.74ms +step:984/1705 train_time:93221ms step_avg:94.74ms +step:985/1705 train_time:93316ms step_avg:94.74ms +step:986/1705 train_time:93411ms step_avg:94.74ms +step:987/1705 train_time:93505ms step_avg:94.74ms +step:988/1705 train_time:93601ms step_avg:94.74ms +step:989/1705 train_time:93696ms step_avg:94.74ms +step:990/1705 train_time:93790ms step_avg:94.74ms +step:991/1705 train_time:93884ms step_avg:94.74ms +step:992/1705 train_time:93979ms step_avg:94.74ms +step:993/1705 train_time:94073ms step_avg:94.74ms +step:994/1705 train_time:94168ms step_avg:94.74ms +step:995/1705 train_time:94262ms step_avg:94.74ms +step:996/1705 train_time:94358ms step_avg:94.74ms +step:997/1705 train_time:94454ms step_avg:94.74ms +step:998/1705 train_time:94548ms step_avg:94.74ms +step:999/1705 train_time:94642ms step_avg:94.74ms +step:1000/1705 train_time:94737ms step_avg:94.74ms +step:1000/1705 val_loss:3.4867 train_time:94832ms step_avg:94.83ms +step:1001/1705 train_time:94853ms step_avg:94.76ms +step:1002/1705 train_time:94931ms step_avg:94.74ms +step:1003/1705 train_time:95027ms step_avg:94.74ms +step:1004/1705 train_time:95122ms step_avg:94.74ms +step:1005/1705 train_time:95216ms step_avg:94.74ms +step:1006/1705 train_time:95309ms step_avg:94.74ms +step:1007/1705 train_time:95403ms step_avg:94.74ms +step:1008/1705 train_time:95497ms step_avg:94.74ms +step:1009/1705 train_time:95590ms step_avg:94.74ms +step:1010/1705 train_time:95684ms step_avg:94.74ms +step:1011/1705 train_time:95779ms step_avg:94.74ms +step:1012/1705 train_time:95877ms step_avg:94.74ms +step:1013/1705 train_time:95975ms step_avg:94.74ms +step:1014/1705 train_time:96069ms step_avg:94.74ms +step:1015/1705 train_time:96163ms step_avg:94.74ms +step:1016/1705 train_time:96257ms step_avg:94.74ms +step:1017/1705 train_time:96351ms step_avg:94.74ms +step:1018/1705 train_time:96444ms step_avg:94.74ms +step:1019/1705 train_time:96538ms step_avg:94.74ms +step:1020/1705 train_time:96632ms step_avg:94.74ms +step:1021/1705 train_time:96725ms step_avg:94.74ms +step:1022/1705 train_time:96821ms step_avg:94.74ms +step:1023/1705 train_time:96917ms step_avg:94.74ms +step:1024/1705 train_time:97013ms step_avg:94.74ms +step:1025/1705 train_time:97107ms step_avg:94.74ms +step:1026/1705 train_time:97201ms step_avg:94.74ms +step:1027/1705 train_time:97297ms step_avg:94.74ms +step:1028/1705 train_time:97391ms step_avg:94.74ms +step:1029/1705 train_time:97485ms step_avg:94.74ms +step:1030/1705 train_time:97579ms step_avg:94.74ms +step:1031/1705 train_time:97673ms step_avg:94.74ms +step:1032/1705 train_time:97767ms step_avg:94.74ms +step:1033/1705 train_time:97863ms step_avg:94.74ms +step:1034/1705 train_time:97959ms step_avg:94.74ms +step:1035/1705 train_time:98055ms step_avg:94.74ms +step:1036/1705 train_time:98149ms step_avg:94.74ms +step:1037/1705 train_time:98243ms step_avg:94.74ms +step:1038/1705 train_time:98339ms step_avg:94.74ms +step:1039/1705 train_time:98432ms step_avg:94.74ms +step:1040/1705 train_time:98526ms step_avg:94.74ms +step:1041/1705 train_time:98620ms step_avg:94.74ms 
+step:1042/1705 train_time:98715ms step_avg:94.74ms +step:1043/1705 train_time:98809ms step_avg:94.74ms +step:1044/1705 train_time:98904ms step_avg:94.74ms +step:1045/1705 train_time:99000ms step_avg:94.74ms +step:1046/1705 train_time:99096ms step_avg:94.74ms +step:1047/1705 train_time:99190ms step_avg:94.74ms +step:1048/1705 train_time:99284ms step_avg:94.74ms +step:1049/1705 train_time:99379ms step_avg:94.74ms +step:1050/1705 train_time:99473ms step_avg:94.74ms +step:1051/1705 train_time:99566ms step_avg:94.73ms +step:1052/1705 train_time:99661ms step_avg:94.73ms +step:1053/1705 train_time:99755ms step_avg:94.73ms +step:1054/1705 train_time:99851ms step_avg:94.73ms +step:1055/1705 train_time:99945ms step_avg:94.73ms +step:1056/1705 train_time:100040ms step_avg:94.73ms +step:1057/1705 train_time:100136ms step_avg:94.74ms +step:1058/1705 train_time:100230ms step_avg:94.74ms +step:1059/1705 train_time:100324ms step_avg:94.73ms +step:1060/1705 train_time:100420ms step_avg:94.74ms +step:1061/1705 train_time:100515ms step_avg:94.74ms +step:1062/1705 train_time:100779ms step_avg:94.90ms +step:1063/1705 train_time:100964ms step_avg:94.98ms +step:1064/1705 train_time:101056ms step_avg:94.98ms +step:1065/1705 train_time:101150ms step_avg:94.98ms +step:1066/1705 train_time:101243ms step_avg:94.97ms +step:1067/1705 train_time:101337ms step_avg:94.97ms +step:1068/1705 train_time:101430ms step_avg:94.97ms +step:1069/1705 train_time:101524ms step_avg:94.97ms +step:1070/1705 train_time:101617ms step_avg:94.97ms +step:1071/1705 train_time:101711ms step_avg:94.97ms +step:1072/1705 train_time:101805ms step_avg:94.97ms +step:1073/1705 train_time:101905ms step_avg:94.97ms +step:1074/1705 train_time:102003ms step_avg:94.97ms +step:1075/1705 train_time:102099ms step_avg:94.98ms +step:1076/1705 train_time:102194ms step_avg:94.98ms +step:1077/1705 train_time:102288ms step_avg:94.98ms +step:1078/1705 train_time:102381ms step_avg:94.97ms +step:1079/1705 train_time:102477ms step_avg:94.97ms +step:1080/1705 train_time:102570ms step_avg:94.97ms +step:1081/1705 train_time:102663ms step_avg:94.97ms +step:1082/1705 train_time:102757ms step_avg:94.97ms +step:1083/1705 train_time:102853ms step_avg:94.97ms +step:1084/1705 train_time:102948ms step_avg:94.97ms +step:1085/1705 train_time:103043ms step_avg:94.97ms +step:1086/1705 train_time:103138ms step_avg:94.97ms +step:1087/1705 train_time:103233ms step_avg:94.97ms +step:1088/1705 train_time:103326ms step_avg:94.97ms +step:1089/1705 train_time:103420ms step_avg:94.97ms +step:1090/1705 train_time:103515ms step_avg:94.97ms +step:1091/1705 train_time:103609ms step_avg:94.97ms +step:1092/1705 train_time:103703ms step_avg:94.97ms +step:1093/1705 train_time:103798ms step_avg:94.97ms +step:1094/1705 train_time:103893ms step_avg:94.97ms +step:1095/1705 train_time:103988ms step_avg:94.97ms +step:1096/1705 train_time:104082ms step_avg:94.97ms +step:1097/1705 train_time:104177ms step_avg:94.97ms +step:1098/1705 train_time:104271ms step_avg:94.96ms +step:1099/1705 train_time:104365ms step_avg:94.96ms +step:1100/1705 train_time:104459ms step_avg:94.96ms +step:1101/1705 train_time:104554ms step_avg:94.96ms +step:1102/1705 train_time:104648ms step_avg:94.96ms +step:1103/1705 train_time:104741ms step_avg:94.96ms +step:1104/1705 train_time:104836ms step_avg:94.96ms +step:1105/1705 train_time:104931ms step_avg:94.96ms +step:1106/1705 train_time:105026ms step_avg:94.96ms +step:1107/1705 train_time:105121ms step_avg:94.96ms +step:1108/1705 train_time:105216ms step_avg:94.96ms +step:1109/1705 
train_time:105310ms step_avg:94.96ms +step:1110/1705 train_time:105404ms step_avg:94.96ms +step:1111/1705 train_time:105499ms step_avg:94.96ms +step:1112/1705 train_time:105594ms step_avg:94.96ms +step:1113/1705 train_time:105687ms step_avg:94.96ms +step:1114/1705 train_time:105781ms step_avg:94.96ms +step:1115/1705 train_time:105877ms step_avg:94.96ms +step:1116/1705 train_time:105972ms step_avg:94.96ms +step:1117/1705 train_time:106066ms step_avg:94.96ms +step:1118/1705 train_time:106161ms step_avg:94.96ms +step:1119/1705 train_time:106256ms step_avg:94.96ms +step:1120/1705 train_time:106351ms step_avg:94.96ms +step:1121/1705 train_time:106444ms step_avg:94.95ms +step:1122/1705 train_time:106539ms step_avg:94.95ms +step:1123/1705 train_time:106633ms step_avg:94.95ms +step:1124/1705 train_time:106728ms step_avg:94.95ms +step:1125/1705 train_time:106822ms step_avg:94.95ms +step:1125/1705 val_loss:3.4396 train_time:106917ms step_avg:95.04ms +step:1126/1705 train_time:106938ms step_avg:94.97ms +step:1127/1705 train_time:107018ms step_avg:94.96ms +step:1128/1705 train_time:107116ms step_avg:94.96ms +step:1129/1705 train_time:107211ms step_avg:94.96ms +step:1130/1705 train_time:107304ms step_avg:94.96ms +step:1131/1705 train_time:107398ms step_avg:94.96ms +step:1132/1705 train_time:107492ms step_avg:94.96ms +step:1133/1705 train_time:107585ms step_avg:94.96ms +step:1134/1705 train_time:107679ms step_avg:94.95ms +step:1135/1705 train_time:107773ms step_avg:94.95ms +step:1136/1705 train_time:107867ms step_avg:94.95ms +step:1137/1705 train_time:107963ms step_avg:94.95ms +step:1138/1705 train_time:108060ms step_avg:94.96ms +step:1139/1705 train_time:108157ms step_avg:94.96ms +step:1140/1705 train_time:108252ms step_avg:94.96ms +step:1141/1705 train_time:108348ms step_avg:94.96ms +step:1142/1705 train_time:108441ms step_avg:94.96ms +step:1143/1705 train_time:108537ms step_avg:94.96ms +step:1144/1705 train_time:108631ms step_avg:94.96ms +step:1145/1705 train_time:108726ms step_avg:94.96ms +step:1146/1705 train_time:108821ms step_avg:94.96ms +step:1147/1705 train_time:108917ms step_avg:94.96ms +step:1148/1705 train_time:109014ms step_avg:94.96ms +step:1149/1705 train_time:109110ms step_avg:94.96ms +step:1150/1705 train_time:109205ms step_avg:94.96ms +step:1151/1705 train_time:109301ms step_avg:94.96ms +step:1152/1705 train_time:109396ms step_avg:94.96ms +step:1153/1705 train_time:109492ms step_avg:94.96ms +step:1154/1705 train_time:109585ms step_avg:94.96ms +step:1155/1705 train_time:109681ms step_avg:94.96ms +step:1156/1705 train_time:109776ms step_avg:94.96ms +step:1157/1705 train_time:109871ms step_avg:94.96ms +step:1158/1705 train_time:109966ms step_avg:94.96ms +step:1159/1705 train_time:110063ms step_avg:94.96ms +step:1160/1705 train_time:110159ms step_avg:94.96ms +step:1161/1705 train_time:110256ms step_avg:94.97ms +step:1162/1705 train_time:110351ms step_avg:94.97ms +step:1163/1705 train_time:110447ms step_avg:94.97ms +step:1164/1705 train_time:110541ms step_avg:94.97ms +step:1165/1705 train_time:110636ms step_avg:94.97ms +step:1166/1705 train_time:110732ms step_avg:94.97ms +step:1167/1705 train_time:110827ms step_avg:94.97ms +step:1168/1705 train_time:110922ms step_avg:94.97ms +step:1169/1705 train_time:111017ms step_avg:94.97ms +step:1170/1705 train_time:111112ms step_avg:94.97ms +step:1171/1705 train_time:111208ms step_avg:94.97ms +step:1172/1705 train_time:111303ms step_avg:94.97ms +step:1173/1705 train_time:111399ms step_avg:94.97ms +step:1174/1705 train_time:111496ms step_avg:94.97ms 
+step:1175/1705 train_time:111591ms step_avg:94.97ms +step:1176/1705 train_time:111686ms step_avg:94.97ms +step:1177/1705 train_time:111781ms step_avg:94.97ms +step:1178/1705 train_time:111876ms step_avg:94.97ms +step:1179/1705 train_time:111971ms step_avg:94.97ms +step:1180/1705 train_time:112066ms step_avg:94.97ms +step:1181/1705 train_time:112162ms step_avg:94.97ms +step:1182/1705 train_time:112258ms step_avg:94.97ms +step:1183/1705 train_time:112354ms step_avg:94.97ms +step:1184/1705 train_time:112449ms step_avg:94.97ms +step:1185/1705 train_time:112544ms step_avg:94.97ms +step:1186/1705 train_time:112639ms step_avg:94.97ms +step:1187/1705 train_time:112735ms step_avg:94.97ms +step:1188/1705 train_time:112831ms step_avg:94.98ms +step:1189/1705 train_time:112925ms step_avg:94.98ms +step:1190/1705 train_time:113021ms step_avg:94.98ms +step:1191/1705 train_time:113118ms step_avg:94.98ms +step:1192/1705 train_time:113214ms step_avg:94.98ms +step:1193/1705 train_time:113309ms step_avg:94.98ms +step:1194/1705 train_time:113404ms step_avg:94.98ms +step:1195/1705 train_time:113500ms step_avg:94.98ms +step:1196/1705 train_time:113595ms step_avg:94.98ms +step:1197/1705 train_time:113691ms step_avg:94.98ms +step:1198/1705 train_time:113785ms step_avg:94.98ms +step:1199/1705 train_time:113880ms step_avg:94.98ms +step:1200/1705 train_time:113976ms step_avg:94.98ms +step:1201/1705 train_time:114070ms step_avg:94.98ms +step:1202/1705 train_time:114165ms step_avg:94.98ms +step:1203/1705 train_time:114260ms step_avg:94.98ms +step:1204/1705 train_time:114356ms step_avg:94.98ms +step:1205/1705 train_time:114453ms step_avg:94.98ms +step:1206/1705 train_time:114548ms step_avg:94.98ms +step:1207/1705 train_time:114643ms step_avg:94.98ms +step:1208/1705 train_time:114738ms step_avg:94.98ms +step:1209/1705 train_time:114834ms step_avg:94.98ms +step:1210/1705 train_time:114929ms step_avg:94.98ms +step:1211/1705 train_time:115023ms step_avg:94.98ms +step:1212/1705 train_time:115119ms step_avg:94.98ms +step:1213/1705 train_time:115214ms step_avg:94.98ms +step:1214/1705 train_time:115310ms step_avg:94.98ms +step:1215/1705 train_time:115405ms step_avg:94.98ms +step:1216/1705 train_time:115500ms step_avg:94.98ms +step:1217/1705 train_time:115596ms step_avg:94.98ms +step:1218/1705 train_time:115692ms step_avg:94.99ms +step:1219/1705 train_time:115789ms step_avg:94.99ms +step:1220/1705 train_time:115883ms step_avg:94.99ms +step:1221/1705 train_time:115979ms step_avg:94.99ms +step:1222/1705 train_time:116074ms step_avg:94.99ms +step:1223/1705 train_time:116169ms step_avg:94.99ms +step:1224/1705 train_time:116265ms step_avg:94.99ms +step:1225/1705 train_time:116360ms step_avg:94.99ms +step:1226/1705 train_time:116456ms step_avg:94.99ms +step:1227/1705 train_time:116552ms step_avg:94.99ms +step:1228/1705 train_time:116647ms step_avg:94.99ms +step:1229/1705 train_time:116741ms step_avg:94.99ms +step:1230/1705 train_time:116837ms step_avg:94.99ms +step:1231/1705 train_time:116933ms step_avg:94.99ms +step:1232/1705 train_time:117028ms step_avg:94.99ms +step:1233/1705 train_time:117123ms step_avg:94.99ms +step:1234/1705 train_time:117219ms step_avg:94.99ms +step:1235/1705 train_time:117314ms step_avg:94.99ms +step:1236/1705 train_time:117409ms step_avg:94.99ms +step:1237/1705 train_time:117504ms step_avg:94.99ms +step:1238/1705 train_time:117600ms step_avg:94.99ms +step:1239/1705 train_time:117696ms step_avg:94.99ms +step:1240/1705 train_time:117792ms step_avg:94.99ms +step:1241/1705 train_time:117887ms step_avg:94.99ms 
+step:1242/1705 train_time:117982ms step_avg:94.99ms +step:1243/1705 train_time:118078ms step_avg:94.99ms +step:1244/1705 train_time:118174ms step_avg:94.99ms +step:1245/1705 train_time:118269ms step_avg:94.99ms +step:1246/1705 train_time:118363ms step_avg:94.99ms +step:1247/1705 train_time:118458ms step_avg:94.99ms +step:1248/1705 train_time:118554ms step_avg:95.00ms +step:1249/1705 train_time:118650ms step_avg:95.00ms +step:1250/1705 train_time:118745ms step_avg:95.00ms +step:1250/1705 val_loss:3.3903 train_time:118841ms step_avg:95.07ms +step:1251/1705 train_time:118864ms step_avg:95.02ms +step:1252/1705 train_time:118946ms step_avg:95.00ms +step:1253/1705 train_time:119041ms step_avg:95.00ms +step:1254/1705 train_time:119136ms step_avg:95.01ms +step:1255/1705 train_time:119231ms step_avg:95.00ms +step:1256/1705 train_time:119325ms step_avg:95.00ms +step:1257/1705 train_time:119419ms step_avg:95.00ms +step:1258/1705 train_time:119514ms step_avg:95.00ms +step:1259/1705 train_time:119607ms step_avg:95.00ms +step:1260/1705 train_time:119701ms step_avg:95.00ms +step:1261/1705 train_time:119797ms step_avg:95.00ms +step:1262/1705 train_time:119896ms step_avg:95.00ms +step:1263/1705 train_time:119995ms step_avg:95.01ms +step:1264/1705 train_time:120090ms step_avg:95.01ms +step:1265/1705 train_time:120185ms step_avg:95.01ms +step:1266/1705 train_time:120279ms step_avg:95.01ms +step:1267/1705 train_time:120373ms step_avg:95.01ms +step:1268/1705 train_time:120468ms step_avg:95.01ms +step:1269/1705 train_time:120562ms step_avg:95.01ms +step:1270/1705 train_time:120656ms step_avg:95.01ms +step:1271/1705 train_time:120751ms step_avg:95.00ms +step:1272/1705 train_time:120847ms step_avg:95.01ms +step:1273/1705 train_time:120943ms step_avg:95.01ms +step:1274/1705 train_time:121206ms step_avg:95.14ms +step:1275/1705 train_time:121398ms step_avg:95.21ms +step:1276/1705 train_time:121492ms step_avg:95.21ms +step:1277/1705 train_time:121586ms step_avg:95.21ms +step:1278/1705 train_time:121680ms step_avg:95.21ms +step:1279/1705 train_time:121775ms step_avg:95.21ms +step:1280/1705 train_time:121869ms step_avg:95.21ms +step:1281/1705 train_time:121962ms step_avg:95.21ms +step:1282/1705 train_time:122057ms step_avg:95.21ms +step:1283/1705 train_time:122151ms step_avg:95.21ms +step:1284/1705 train_time:122252ms step_avg:95.21ms +step:1285/1705 train_time:122352ms step_avg:95.22ms +step:1286/1705 train_time:122447ms step_avg:95.22ms +step:1287/1705 train_time:122542ms step_avg:95.21ms +step:1288/1705 train_time:122637ms step_avg:95.22ms +step:1289/1705 train_time:122731ms step_avg:95.21ms +step:1290/1705 train_time:122826ms step_avg:95.21ms +step:1291/1705 train_time:122921ms step_avg:95.21ms +step:1292/1705 train_time:123015ms step_avg:95.21ms +step:1293/1705 train_time:123110ms step_avg:95.21ms +step:1294/1705 train_time:123205ms step_avg:95.21ms +step:1295/1705 train_time:123302ms step_avg:95.21ms +step:1296/1705 train_time:123398ms step_avg:95.21ms +step:1297/1705 train_time:123495ms step_avg:95.22ms +step:1298/1705 train_time:123590ms step_avg:95.22ms +step:1299/1705 train_time:123684ms step_avg:95.22ms +step:1300/1705 train_time:123779ms step_avg:95.21ms +step:1301/1705 train_time:123875ms step_avg:95.22ms +step:1302/1705 train_time:123969ms step_avg:95.21ms +step:1303/1705 train_time:124064ms step_avg:95.21ms +step:1304/1705 train_time:124159ms step_avg:95.21ms +step:1305/1705 train_time:124256ms step_avg:95.22ms +step:1306/1705 train_time:124353ms step_avg:95.22ms +step:1307/1705 train_time:124450ms 
step_avg:95.22ms +step:1308/1705 train_time:124544ms step_avg:95.22ms +step:1309/1705 train_time:124639ms step_avg:95.22ms +step:1310/1705 train_time:124734ms step_avg:95.22ms +step:1311/1705 train_time:124829ms step_avg:95.22ms +step:1312/1705 train_time:124923ms step_avg:95.22ms +step:1313/1705 train_time:125019ms step_avg:95.22ms +step:1314/1705 train_time:125114ms step_avg:95.22ms +step:1315/1705 train_time:125209ms step_avg:95.22ms +step:1316/1705 train_time:125304ms step_avg:95.22ms +step:1317/1705 train_time:125400ms step_avg:95.22ms +step:1318/1705 train_time:125496ms step_avg:95.22ms +step:1319/1705 train_time:125593ms step_avg:95.22ms +step:1320/1705 train_time:125688ms step_avg:95.22ms +step:1321/1705 train_time:125782ms step_avg:95.22ms +step:1322/1705 train_time:125878ms step_avg:95.22ms +step:1323/1705 train_time:125973ms step_avg:95.22ms +step:1324/1705 train_time:126068ms step_avg:95.22ms +step:1325/1705 train_time:126162ms step_avg:95.22ms +step:1326/1705 train_time:126257ms step_avg:95.22ms +step:1327/1705 train_time:126352ms step_avg:95.22ms +step:1328/1705 train_time:126447ms step_avg:95.22ms +step:1329/1705 train_time:126542ms step_avg:95.22ms +step:1330/1705 train_time:126638ms step_avg:95.22ms +step:1331/1705 train_time:126734ms step_avg:95.22ms +step:1332/1705 train_time:126829ms step_avg:95.22ms +step:1333/1705 train_time:126923ms step_avg:95.22ms +step:1334/1705 train_time:127018ms step_avg:95.22ms +step:1335/1705 train_time:127114ms step_avg:95.22ms +step:1336/1705 train_time:127210ms step_avg:95.22ms +step:1337/1705 train_time:127306ms step_avg:95.22ms +step:1338/1705 train_time:127400ms step_avg:95.22ms +step:1339/1705 train_time:127496ms step_avg:95.22ms +step:1340/1705 train_time:127592ms step_avg:95.22ms +step:1341/1705 train_time:127687ms step_avg:95.22ms +step:1342/1705 train_time:127781ms step_avg:95.22ms +step:1343/1705 train_time:127877ms step_avg:95.22ms +step:1344/1705 train_time:127971ms step_avg:95.22ms +step:1345/1705 train_time:128065ms step_avg:95.22ms +step:1346/1705 train_time:128160ms step_avg:95.22ms +step:1347/1705 train_time:128256ms step_avg:95.22ms +step:1348/1705 train_time:128351ms step_avg:95.22ms +step:1349/1705 train_time:128446ms step_avg:95.22ms +step:1350/1705 train_time:128541ms step_avg:95.22ms +step:1351/1705 train_time:128637ms step_avg:95.22ms +step:1352/1705 train_time:128734ms step_avg:95.22ms +step:1353/1705 train_time:128829ms step_avg:95.22ms +step:1354/1705 train_time:128923ms step_avg:95.22ms +step:1355/1705 train_time:129018ms step_avg:95.22ms +step:1356/1705 train_time:129114ms step_avg:95.22ms +step:1357/1705 train_time:129209ms step_avg:95.22ms +step:1358/1705 train_time:129303ms step_avg:95.22ms +step:1359/1705 train_time:129399ms step_avg:95.22ms +step:1360/1705 train_time:129495ms step_avg:95.22ms +step:1361/1705 train_time:129590ms step_avg:95.22ms +step:1362/1705 train_time:129686ms step_avg:95.22ms +step:1363/1705 train_time:129781ms step_avg:95.22ms +step:1364/1705 train_time:129876ms step_avg:95.22ms +step:1365/1705 train_time:129971ms step_avg:95.22ms +step:1366/1705 train_time:130067ms step_avg:95.22ms +step:1367/1705 train_time:130161ms step_avg:95.22ms +step:1368/1705 train_time:130257ms step_avg:95.22ms +step:1369/1705 train_time:130352ms step_avg:95.22ms +step:1370/1705 train_time:130448ms step_avg:95.22ms +step:1371/1705 train_time:130543ms step_avg:95.22ms +step:1372/1705 train_time:130638ms step_avg:95.22ms +step:1373/1705 train_time:130734ms step_avg:95.22ms +step:1374/1705 train_time:130829ms 
step_avg:95.22ms +step:1375/1705 train_time:130923ms step_avg:95.22ms +step:1375/1705 val_loss:3.3528 train_time:131019ms step_avg:95.29ms +step:1376/1705 train_time:131040ms step_avg:95.23ms +step:1377/1705 train_time:131119ms step_avg:95.22ms +step:1378/1705 train_time:131219ms step_avg:95.22ms +step:1379/1705 train_time:131313ms step_avg:95.22ms +step:1380/1705 train_time:131407ms step_avg:95.22ms +step:1381/1705 train_time:131501ms step_avg:95.22ms +step:1382/1705 train_time:131595ms step_avg:95.22ms +step:1383/1705 train_time:131689ms step_avg:95.22ms +step:1384/1705 train_time:131784ms step_avg:95.22ms +step:1385/1705 train_time:131879ms step_avg:95.22ms +step:1386/1705 train_time:131974ms step_avg:95.22ms +step:1387/1705 train_time:132072ms step_avg:95.22ms +step:1388/1705 train_time:132170ms step_avg:95.22ms +step:1389/1705 train_time:132266ms step_avg:95.22ms +step:1390/1705 train_time:132363ms step_avg:95.23ms +step:1391/1705 train_time:132459ms step_avg:95.23ms +step:1392/1705 train_time:132553ms step_avg:95.22ms +step:1393/1705 train_time:132647ms step_avg:95.22ms +step:1394/1705 train_time:132742ms step_avg:95.22ms +step:1395/1705 train_time:132836ms step_avg:95.22ms +step:1396/1705 train_time:132931ms step_avg:95.22ms +step:1397/1705 train_time:133028ms step_avg:95.22ms +step:1398/1705 train_time:133125ms step_avg:95.23ms +step:1399/1705 train_time:133221ms step_avg:95.23ms +step:1400/1705 train_time:133318ms step_avg:95.23ms +step:1401/1705 train_time:133412ms step_avg:95.23ms +step:1402/1705 train_time:133507ms step_avg:95.23ms +step:1403/1705 train_time:133602ms step_avg:95.23ms +step:1404/1705 train_time:133696ms step_avg:95.22ms +step:1405/1705 train_time:133790ms step_avg:95.22ms +step:1406/1705 train_time:133886ms step_avg:95.22ms +step:1407/1705 train_time:133983ms step_avg:95.23ms +step:1408/1705 train_time:134079ms step_avg:95.23ms +step:1409/1705 train_time:134175ms step_avg:95.23ms +step:1410/1705 train_time:134270ms step_avg:95.23ms +step:1411/1705 train_time:134366ms step_avg:95.23ms +step:1412/1705 train_time:134462ms step_avg:95.23ms +step:1413/1705 train_time:134557ms step_avg:95.23ms +step:1414/1705 train_time:134651ms step_avg:95.23ms +step:1415/1705 train_time:134746ms step_avg:95.23ms +step:1416/1705 train_time:134842ms step_avg:95.23ms +step:1417/1705 train_time:134937ms step_avg:95.23ms +step:1418/1705 train_time:135031ms step_avg:95.23ms +step:1419/1705 train_time:135127ms step_avg:95.23ms +step:1420/1705 train_time:135224ms step_avg:95.23ms +step:1421/1705 train_time:135320ms step_avg:95.23ms +step:1422/1705 train_time:135415ms step_avg:95.23ms +step:1423/1705 train_time:135510ms step_avg:95.23ms +step:1424/1705 train_time:135605ms step_avg:95.23ms +step:1425/1705 train_time:135701ms step_avg:95.23ms +step:1426/1705 train_time:135796ms step_avg:95.23ms +step:1427/1705 train_time:135890ms step_avg:95.23ms +step:1428/1705 train_time:135985ms step_avg:95.23ms +step:1429/1705 train_time:136081ms step_avg:95.23ms +step:1430/1705 train_time:136177ms step_avg:95.23ms +step:1431/1705 train_time:136272ms step_avg:95.23ms +step:1432/1705 train_time:136369ms step_avg:95.23ms +step:1433/1705 train_time:136465ms step_avg:95.23ms +step:1434/1705 train_time:136561ms step_avg:95.23ms +step:1435/1705 train_time:136656ms step_avg:95.23ms +step:1436/1705 train_time:136751ms step_avg:95.23ms +step:1437/1705 train_time:136846ms step_avg:95.23ms +step:1438/1705 train_time:136941ms step_avg:95.23ms +step:1439/1705 train_time:137036ms step_avg:95.23ms +step:1440/1705 
train_time:137131ms step_avg:95.23ms +step:1441/1705 train_time:137226ms step_avg:95.23ms +step:1442/1705 train_time:137324ms step_avg:95.23ms +step:1443/1705 train_time:137419ms step_avg:95.23ms +step:1444/1705 train_time:137514ms step_avg:95.23ms +step:1445/1705 train_time:137609ms step_avg:95.23ms +step:1446/1705 train_time:137704ms step_avg:95.23ms +step:1447/1705 train_time:137800ms step_avg:95.23ms +step:1448/1705 train_time:137896ms step_avg:95.23ms +step:1449/1705 train_time:137992ms step_avg:95.23ms +step:1450/1705 train_time:138088ms step_avg:95.23ms +step:1451/1705 train_time:138184ms step_avg:95.23ms +step:1452/1705 train_time:138278ms step_avg:95.23ms +step:1453/1705 train_time:138373ms step_avg:95.23ms +step:1454/1705 train_time:138469ms step_avg:95.23ms +step:1455/1705 train_time:138564ms step_avg:95.23ms +step:1456/1705 train_time:138659ms step_avg:95.23ms +step:1457/1705 train_time:138754ms step_avg:95.23ms +step:1458/1705 train_time:138849ms step_avg:95.23ms +step:1459/1705 train_time:138945ms step_avg:95.23ms +step:1460/1705 train_time:139041ms step_avg:95.23ms +step:1461/1705 train_time:139135ms step_avg:95.23ms +step:1462/1705 train_time:139230ms step_avg:95.23ms +step:1463/1705 train_time:139326ms step_avg:95.23ms +step:1464/1705 train_time:139423ms step_avg:95.23ms +step:1465/1705 train_time:139519ms step_avg:95.23ms +step:1466/1705 train_time:139615ms step_avg:95.24ms +step:1467/1705 train_time:139709ms step_avg:95.23ms +step:1468/1705 train_time:139804ms step_avg:95.23ms +step:1469/1705 train_time:139900ms step_avg:95.23ms +step:1470/1705 train_time:139996ms step_avg:95.24ms +step:1471/1705 train_time:140091ms step_avg:95.23ms +step:1472/1705 train_time:140186ms step_avg:95.23ms +step:1473/1705 train_time:140281ms step_avg:95.23ms +step:1474/1705 train_time:140375ms step_avg:95.23ms +step:1475/1705 train_time:140471ms step_avg:95.23ms +step:1476/1705 train_time:140566ms step_avg:95.23ms +step:1477/1705 train_time:140663ms step_avg:95.24ms +step:1478/1705 train_time:140757ms step_avg:95.23ms +step:1479/1705 train_time:140852ms step_avg:95.23ms +step:1480/1705 train_time:140949ms step_avg:95.24ms +step:1481/1705 train_time:141045ms step_avg:95.24ms +step:1482/1705 train_time:141141ms step_avg:95.24ms +step:1483/1705 train_time:141236ms step_avg:95.24ms +step:1484/1705 train_time:141330ms step_avg:95.24ms +step:1485/1705 train_time:141592ms step_avg:95.35ms +step:1486/1705 train_time:141783ms step_avg:95.41ms +step:1487/1705 train_time:141877ms step_avg:95.41ms +step:1488/1705 train_time:141971ms step_avg:95.41ms +step:1489/1705 train_time:142065ms step_avg:95.41ms +step:1490/1705 train_time:142160ms step_avg:95.41ms +step:1491/1705 train_time:142254ms step_avg:95.41ms +step:1492/1705 train_time:142348ms step_avg:95.41ms +step:1493/1705 train_time:142443ms step_avg:95.41ms +step:1494/1705 train_time:142537ms step_avg:95.41ms +step:1495/1705 train_time:142636ms step_avg:95.41ms +step:1496/1705 train_time:142735ms step_avg:95.41ms +step:1497/1705 train_time:142833ms step_avg:95.41ms +step:1498/1705 train_time:142929ms step_avg:95.41ms +step:1499/1705 train_time:143023ms step_avg:95.41ms +step:1500/1705 train_time:143118ms step_avg:95.41ms +step:1500/1705 val_loss:3.3205 train_time:143213ms step_avg:95.48ms +step:1501/1705 train_time:143234ms step_avg:95.43ms +step:1502/1705 train_time:143312ms step_avg:95.41ms +step:1503/1705 train_time:143410ms step_avg:95.42ms +step:1504/1705 train_time:143506ms step_avg:95.42ms +step:1505/1705 train_time:143600ms step_avg:95.42ms 
+step:1506/1705 train_time:143694ms step_avg:95.41ms +step:1507/1705 train_time:143789ms step_avg:95.41ms +step:1508/1705 train_time:143883ms step_avg:95.41ms +step:1509/1705 train_time:143978ms step_avg:95.41ms +step:1510/1705 train_time:144072ms step_avg:95.41ms +step:1511/1705 train_time:144168ms step_avg:95.41ms +step:1512/1705 train_time:144264ms step_avg:95.41ms +step:1513/1705 train_time:144360ms step_avg:95.41ms +step:1514/1705 train_time:144458ms step_avg:95.41ms +step:1515/1705 train_time:144554ms step_avg:95.42ms +step:1516/1705 train_time:144648ms step_avg:95.41ms +step:1517/1705 train_time:144743ms step_avg:95.41ms +step:1518/1705 train_time:144838ms step_avg:95.41ms +step:1519/1705 train_time:144933ms step_avg:95.41ms +step:1520/1705 train_time:145027ms step_avg:95.41ms +step:1521/1705 train_time:145123ms step_avg:95.41ms +step:1522/1705 train_time:145218ms step_avg:95.41ms +step:1523/1705 train_time:145315ms step_avg:95.41ms +step:1524/1705 train_time:145412ms step_avg:95.41ms +step:1525/1705 train_time:145506ms step_avg:95.41ms +step:1526/1705 train_time:145602ms step_avg:95.41ms +step:1527/1705 train_time:145697ms step_avg:95.41ms +step:1528/1705 train_time:145793ms step_avg:95.41ms +step:1529/1705 train_time:145887ms step_avg:95.41ms +step:1530/1705 train_time:145982ms step_avg:95.41ms +step:1531/1705 train_time:146076ms step_avg:95.41ms +step:1532/1705 train_time:146171ms step_avg:95.41ms +step:1533/1705 train_time:146267ms step_avg:95.41ms +step:1534/1705 train_time:146362ms step_avg:95.41ms +step:1535/1705 train_time:146458ms step_avg:95.41ms +step:1536/1705 train_time:146554ms step_avg:95.41ms +step:1537/1705 train_time:146649ms step_avg:95.41ms +step:1538/1705 train_time:146744ms step_avg:95.41ms +step:1539/1705 train_time:146840ms step_avg:95.41ms +step:1540/1705 train_time:146935ms step_avg:95.41ms +step:1541/1705 train_time:147030ms step_avg:95.41ms +step:1542/1705 train_time:147125ms step_avg:95.41ms +step:1543/1705 train_time:147220ms step_avg:95.41ms +step:1544/1705 train_time:147316ms step_avg:95.41ms +step:1545/1705 train_time:147412ms step_avg:95.41ms +step:1546/1705 train_time:147507ms step_avg:95.41ms +step:1547/1705 train_time:147601ms step_avg:95.41ms +step:1548/1705 train_time:147698ms step_avg:95.41ms +step:1549/1705 train_time:147793ms step_avg:95.41ms +step:1550/1705 train_time:147888ms step_avg:95.41ms +step:1551/1705 train_time:147982ms step_avg:95.41ms +step:1552/1705 train_time:148078ms step_avg:95.41ms +step:1553/1705 train_time:148173ms step_avg:95.41ms +step:1554/1705 train_time:148269ms step_avg:95.41ms +step:1555/1705 train_time:148364ms step_avg:95.41ms +step:1556/1705 train_time:148459ms step_avg:95.41ms +step:1557/1705 train_time:148555ms step_avg:95.41ms +step:1558/1705 train_time:148651ms step_avg:95.41ms +step:1559/1705 train_time:148746ms step_avg:95.41ms +step:1560/1705 train_time:148842ms step_avg:95.41ms +step:1561/1705 train_time:148937ms step_avg:95.41ms +step:1562/1705 train_time:149032ms step_avg:95.41ms +step:1563/1705 train_time:149127ms step_avg:95.41ms +step:1564/1705 train_time:149222ms step_avg:95.41ms +step:1565/1705 train_time:149318ms step_avg:95.41ms +step:1566/1705 train_time:149413ms step_avg:95.41ms +step:1567/1705 train_time:149508ms step_avg:95.41ms +step:1568/1705 train_time:149603ms step_avg:95.41ms +step:1569/1705 train_time:149698ms step_avg:95.41ms +step:1570/1705 train_time:149795ms step_avg:95.41ms +step:1571/1705 train_time:149890ms step_avg:95.41ms +step:1572/1705 train_time:149985ms step_avg:95.41ms 
+step:1573/1705 train_time:150080ms step_avg:95.41ms +step:1574/1705 train_time:150176ms step_avg:95.41ms +step:1575/1705 train_time:150272ms step_avg:95.41ms +step:1576/1705 train_time:150367ms step_avg:95.41ms +step:1577/1705 train_time:150462ms step_avg:95.41ms +step:1578/1705 train_time:150557ms step_avg:95.41ms +step:1579/1705 train_time:150653ms step_avg:95.41ms +step:1580/1705 train_time:150748ms step_avg:95.41ms +step:1581/1705 train_time:150843ms step_avg:95.41ms +step:1582/1705 train_time:150938ms step_avg:95.41ms +step:1583/1705 train_time:151034ms step_avg:95.41ms +step:1584/1705 train_time:151129ms step_avg:95.41ms +step:1585/1705 train_time:151224ms step_avg:95.41ms +step:1586/1705 train_time:151319ms step_avg:95.41ms +step:1587/1705 train_time:151416ms step_avg:95.41ms +step:1588/1705 train_time:151511ms step_avg:95.41ms +step:1589/1705 train_time:151606ms step_avg:95.41ms +step:1590/1705 train_time:151701ms step_avg:95.41ms +step:1591/1705 train_time:151797ms step_avg:95.41ms +step:1592/1705 train_time:151893ms step_avg:95.41ms +step:1593/1705 train_time:151989ms step_avg:95.41ms +step:1594/1705 train_time:152083ms step_avg:95.41ms +step:1595/1705 train_time:152179ms step_avg:95.41ms +step:1596/1705 train_time:152275ms step_avg:95.41ms +step:1597/1705 train_time:152371ms step_avg:95.41ms +step:1598/1705 train_time:152467ms step_avg:95.41ms +step:1599/1705 train_time:152562ms step_avg:95.41ms +step:1600/1705 train_time:152657ms step_avg:95.41ms +step:1601/1705 train_time:152752ms step_avg:95.41ms +step:1602/1705 train_time:152847ms step_avg:95.41ms +step:1603/1705 train_time:152943ms step_avg:95.41ms +step:1604/1705 train_time:153038ms step_avg:95.41ms +step:1605/1705 train_time:153134ms step_avg:95.41ms +step:1606/1705 train_time:153229ms step_avg:95.41ms +step:1607/1705 train_time:153323ms step_avg:95.41ms +step:1608/1705 train_time:153419ms step_avg:95.41ms +step:1609/1705 train_time:153515ms step_avg:95.41ms +step:1610/1705 train_time:153610ms step_avg:95.41ms +step:1611/1705 train_time:153705ms step_avg:95.41ms +step:1612/1705 train_time:153801ms step_avg:95.41ms +step:1613/1705 train_time:153897ms step_avg:95.41ms +step:1614/1705 train_time:153992ms step_avg:95.41ms +step:1615/1705 train_time:154087ms step_avg:95.41ms +step:1616/1705 train_time:154182ms step_avg:95.41ms +step:1617/1705 train_time:154278ms step_avg:95.41ms +step:1618/1705 train_time:154373ms step_avg:95.41ms +step:1619/1705 train_time:154469ms step_avg:95.41ms +step:1620/1705 train_time:154564ms step_avg:95.41ms +step:1621/1705 train_time:154660ms step_avg:95.41ms +step:1622/1705 train_time:154755ms step_avg:95.41ms +step:1623/1705 train_time:154850ms step_avg:95.41ms +step:1624/1705 train_time:154946ms step_avg:95.41ms +step:1625/1705 train_time:155042ms step_avg:95.41ms +step:1625/1705 val_loss:3.2927 train_time:155138ms step_avg:95.47ms +step:1626/1705 train_time:155159ms step_avg:95.42ms +step:1627/1705 train_time:155239ms step_avg:95.41ms +step:1628/1705 train_time:155336ms step_avg:95.42ms +step:1629/1705 train_time:155431ms step_avg:95.41ms +step:1630/1705 train_time:155525ms step_avg:95.41ms +step:1631/1705 train_time:155620ms step_avg:95.41ms +step:1632/1705 train_time:155714ms step_avg:95.41ms +step:1633/1705 train_time:155808ms step_avg:95.41ms +step:1634/1705 train_time:155903ms step_avg:95.41ms +step:1635/1705 train_time:155996ms step_avg:95.41ms +step:1636/1705 train_time:156093ms step_avg:95.41ms +step:1637/1705 train_time:156193ms step_avg:95.41ms +step:1638/1705 train_time:156291ms 
step_avg:95.42ms +step:1639/1705 train_time:156388ms step_avg:95.42ms +step:1640/1705 train_time:156483ms step_avg:95.42ms +step:1641/1705 train_time:156577ms step_avg:95.42ms +step:1642/1705 train_time:156673ms step_avg:95.42ms +step:1643/1705 train_time:156767ms step_avg:95.42ms +step:1644/1705 train_time:156862ms step_avg:95.41ms +step:1645/1705 train_time:156956ms step_avg:95.41ms +step:1646/1705 train_time:157052ms step_avg:95.41ms +step:1647/1705 train_time:157149ms step_avg:95.42ms +step:1648/1705 train_time:157247ms step_avg:95.42ms +step:1649/1705 train_time:157342ms step_avg:95.42ms +step:1650/1705 train_time:157437ms step_avg:95.42ms +step:1651/1705 train_time:157532ms step_avg:95.42ms +step:1652/1705 train_time:157627ms step_avg:95.42ms +step:1653/1705 train_time:157722ms step_avg:95.42ms +step:1654/1705 train_time:157816ms step_avg:95.41ms +step:1655/1705 train_time:157911ms step_avg:95.41ms +step:1656/1705 train_time:158007ms step_avg:95.41ms +step:1657/1705 train_time:158103ms step_avg:95.42ms +step:1658/1705 train_time:158198ms step_avg:95.42ms +step:1659/1705 train_time:158295ms step_avg:95.42ms +step:1660/1705 train_time:158391ms step_avg:95.42ms +step:1661/1705 train_time:158487ms step_avg:95.42ms +step:1662/1705 train_time:158582ms step_avg:95.42ms +step:1663/1705 train_time:158678ms step_avg:95.42ms +step:1664/1705 train_time:158772ms step_avg:95.42ms +step:1665/1705 train_time:158867ms step_avg:95.42ms +step:1666/1705 train_time:158963ms step_avg:95.42ms +step:1667/1705 train_time:159057ms step_avg:95.42ms +step:1668/1705 train_time:159152ms step_avg:95.42ms +step:1669/1705 train_time:159248ms step_avg:95.42ms +step:1670/1705 train_time:159344ms step_avg:95.42ms +step:1671/1705 train_time:159439ms step_avg:95.42ms +step:1672/1705 train_time:159534ms step_avg:95.42ms +step:1673/1705 train_time:159631ms step_avg:95.42ms +step:1674/1705 train_time:159727ms step_avg:95.42ms +step:1675/1705 train_time:159822ms step_avg:95.42ms +step:1676/1705 train_time:159916ms step_avg:95.42ms +step:1677/1705 train_time:160012ms step_avg:95.42ms +step:1678/1705 train_time:160108ms step_avg:95.42ms +step:1679/1705 train_time:160202ms step_avg:95.42ms +step:1680/1705 train_time:160297ms step_avg:95.41ms +step:1681/1705 train_time:160394ms step_avg:95.42ms +step:1682/1705 train_time:160491ms step_avg:95.42ms +step:1683/1705 train_time:160587ms step_avg:95.42ms +step:1684/1705 train_time:160683ms step_avg:95.42ms +step:1685/1705 train_time:160776ms step_avg:95.42ms +step:1686/1705 train_time:160872ms step_avg:95.42ms +step:1687/1705 train_time:160967ms step_avg:95.42ms +step:1688/1705 train_time:161062ms step_avg:95.42ms +step:1689/1705 train_time:161158ms step_avg:95.42ms +step:1690/1705 train_time:161252ms step_avg:95.42ms +step:1691/1705 train_time:161348ms step_avg:95.42ms +step:1692/1705 train_time:161444ms step_avg:95.42ms +step:1693/1705 train_time:161540ms step_avg:95.42ms +step:1694/1705 train_time:161635ms step_avg:95.42ms +step:1695/1705 train_time:161730ms step_avg:95.42ms +step:1696/1705 train_time:161826ms step_avg:95.42ms +step:1697/1705 train_time:161921ms step_avg:95.42ms +step:1698/1705 train_time:162181ms step_avg:95.51ms +step:1699/1705 train_time:162371ms step_avg:95.57ms +step:1700/1705 train_time:162464ms step_avg:95.57ms +step:1701/1705 train_time:162557ms step_avg:95.57ms +step:1702/1705 train_time:162652ms step_avg:95.56ms +step:1703/1705 train_time:162746ms step_avg:95.56ms +step:1704/1705 train_time:162840ms step_avg:95.56ms +step:1705/1705 train_time:162935ms 
step_avg:95.56ms
+step:1705/1705 val_loss:3.2787 train_time:163029ms step_avg:95.62ms
+peak memory allocated: 33992 MiB reserved: 48836 MiB
diff --git a/records/090525_SkipMLPBlocks/56955462-7201-4627-91d9-b2426a1424e2.txt b/records/090525_SkipMLPBlocks/56955462-7201-4627-91d9-b2426a1424e2.txt
new file mode 100644
index 000000000..953806534
--- /dev/null
+++ b/records/090525_SkipMLPBlocks/56955462-7201-4627-91d9-b2426a1424e2.txt
@@ -0,0 +1,2853 @@
+import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+from dataclasses import dataclass
+from functools import lru_cache
+from pathlib import Path
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min
+import numpy as np
+import triton
+import triton.language as tl
+from flash_attn_interface import flash_attn_varlen_func
+import torch._dynamo as dynamo
+dynamo.config.recompile_limit = 64
+
+# -----------------------------------------------------------------------------
+# Custom operators: FP8 matmul by @YouJiacheng
+
+@torch.library.custom_op("nanogpt::mm", mutates_args=())
+def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]:
+    @torch.compile
+    def impl(x: Tensor, w: Tensor):
+        assert x.is_contiguous() and w.is_contiguous()
+        x_f8 = x.div(x_s).to(torch.float8_e4m3fn)
+        w_f8 = w.div(w_s).to(torch.float8_e4m3fn)
+        out = torch._scaled_mm(
+            x_f8,
+            w_f8.T,
+            out_dtype=torch.bfloat16,
+            scale_a=x.new_tensor(x_s, dtype=torch.float32),
+            scale_b=x.new_tensor(w_s, dtype=torch.float32),
+            use_fast_accum=True,
+        )
+        return out, x_f8, w_f8
+
+    return impl(x, w)
+
+@mm_op.register_fake
+def _(x: Tensor, w: Tensor, *_):
+    assert x.ndim == w.ndim == 2
+    assert x.shape[1] == w.shape[1]
+    assert x.device == w.device
+    assert x.is_contiguous() and w.is_contiguous()
+    return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn)
+
+@torch.library.custom_op("nanogpt::mm_backward", mutates_args=())
+def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]:
+    @torch.compile
+    def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor):
+        assert grad.is_contiguous()
+        x_inv_s = grad.new_tensor(x_s, dtype=torch.float32)
+        w_inv_s = grad.new_tensor(w_s, dtype=torch.float32)
+        grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32)
+        grad_f8 = grad.div(grad_s).to(torch.float8_e5m2)
+        grad_x = torch._scaled_mm(
+            grad_f8,
+            w_f8.T.contiguous().T,
+            out_dtype=torch.bfloat16,
+            scale_a=grad_inv_s,
+            scale_b=w_inv_s,
+            use_fast_accum=False,
+        )
+        # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768)
+        grad_w = torch._scaled_mm(
+            x_f8.T.contiguous(),
+            grad_f8.T.contiguous().T,
+            out_dtype=torch.float32,
+            scale_a=x_inv_s,
+            scale_b=grad_inv_s,
+            use_fast_accum=False,
+        ).T
+        return grad_x, grad_w
+
+    return impl(g, x_f8, w_f8)
+
+@mm_backward_op.register_fake
+def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_):
+    return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32)
+
+def backward(ctx, grad_out: Tensor, *_):
+    x_f8, w_f8 = ctx.saved_tensors
+    x_s, w_s, grad_s = ctx.scales
+    grad_x, grad_w = torch.ops.nanogpt.mm_backward(
+        grad_out, x_f8, w_f8, x_s, w_s, grad_s
+    )
+    return grad_x, grad_w, None, None, None
+
+def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output):
+    *_, x_s, w_s, grad_s = inputs
+    _, x_f8, w_f8 = output
+    ctx.save_for_backward(x_f8, w_f8)
+    ctx.scales = x_s, w_s, grad_s
+    ctx.set_materialize_grads(False)
+
+mm_op.register_autograd(backward, setup_context=setup_context)
+
+# -----------------------------------------------------------------------------
+# Triton kernel for symmetric matrix multiplication by @byronxu99
+
+def _get_autotune_configs():
+    return [
+        triton.Config(
+            {
+                "BLOCK_SIZE_M": bm,
+                "BLOCK_SIZE_N": bn,
+                "BLOCK_SIZE_K": bk,
+                "GROUP_SIZE_M": 8,
+                "LOWER_UPPER": 1,
+            },
+            num_stages=stages,
+            num_warps=warps,
+        )
+        for bm in [64, 128]
+        for bn in [64, 128, 256]
+        for bk in [64, 128]
+        for stages, warps in [(3, 4), (3, 8), (4, 4)]
+        if bm // bn <= 2 and bn // bm <= 2
+    ]
+
+@triton.jit
+def _pid_to_block(
+    pid,
+    M,
+    BLOCK_SIZE_M: tl.constexpr,
+    BLOCK_SIZE_N: tl.constexpr,
+    GROUP_SIZE_M: tl.constexpr,
+):
+    # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N)
+    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
+    num_pid_n = tl.cdiv(M, BLOCK_SIZE_N)
+
+    # Map PID to a single matrix in batch
+    batch_idx = pid // (num_pid_m * num_pid_n)
+    pid = pid % (num_pid_m * num_pid_n)
+
+    # Map PID to 2D grid of blocks
+    pid_m = pid // num_pid_n
+    pid_n = pid % num_pid_n
+    pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M)
+
+    m_idx = pid_m * BLOCK_SIZE_M
+    n_idx = pid_n * BLOCK_SIZE_N
+    return batch_idx, m_idx, n_idx
+
+@triton.autotune(
+    configs=_get_autotune_configs(),
+    key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"],
+)
+@triton.jit
+def ns_line_1_kernel(
+    A_ptr, C_ptr,
+    M, K,
+    a_stride_b, a_stride_r, a_stride_c,
+    c_stride_b, c_stride_r, c_stride_c,
+    BLOCK_SIZE_M: tl.constexpr,
+    BLOCK_SIZE_N: tl.constexpr,
+    BLOCK_SIZE_K: tl.constexpr,
+    GROUP_SIZE_M: tl.constexpr,
+    LOWER_UPPER: tl.constexpr,
+):
+    pid = tl.program_id(axis=0)
+    batch_idx, m_idx, n_idx = _pid_to_block(
+        pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M
+    )
+
+    # Skip blocks that don't need to be computed
+    skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx)
+    skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx)
+    if skip_block_below_diag or skip_block_above_diag:
+        return
+
+    # Index into one matrix of batch
+    A_ptr += batch_idx * a_stride_b
+    C_ptr += batch_idx * c_stride_b
+
+    # Create pointer arrays for A and A.T
+    offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M
+    offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M
+    offs_k = tl.arange(0, BLOCK_SIZE_K)
+    a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c)
+    at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r)
+
+    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
+
+    # Accumulate over blocks of K
+    for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)):
+        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
+        at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
+        accumulator = tl.dot(a, at, accumulator)
+        a_ptrs += BLOCK_SIZE_K * a_stride_c
+        at_ptrs += BLOCK_SIZE_K * a_stride_c
+
+    out_dtype = C_ptr.dtype.element_ty
+    output = accumulator.to(out_dtype)
+
+    # Store block of C
+    offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M)
+    offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N)
+    c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c)
+    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M)
+    tl.store(c_ptrs, output, mask=c_mask)
+
+    # Store block of C mirrored across the diagonal
+    c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c)
+    c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M)
+    tl.store(c_ptrs_t, output.T, mask=c_mask_t)
+
+def ns_line_1(A: torch.Tensor, out: torch.Tensor):
+    """
+    Launch Triton kernel to compute C = A @ A.T
+    """
+    assert A.ndim == 2 or A.ndim == 3
+    M, K = A.shape[-2:]
+    assert out.size(-2) == M, "Output matrix has incorrect shape"
+    assert out.size(-1) == M, "Output matrix has incorrect shape"
+
+    batch_size = A.size(0) if A.ndim == 3 else 1
+    input_batch_stride = A.stride(0) if A.ndim == 3 else 0
+    output_batch_stride = out.stride(0) if out.ndim == 3 else 0
+
+    grid = lambda meta: (
+        batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]),
+    )
+    ns_line_1_kernel[grid](
+        A_ptr=A,
+        C_ptr=out,
+        M=M,
+        K=K,
+        a_stride_b=input_batch_stride,
+        a_stride_r=A.stride(-2),
+        a_stride_c=A.stride(-1),
+        c_stride_b=output_batch_stride,
+        c_stride_r=out.stride(-2),
+        c_stride_c=out.stride(-1),
+    )
+    return out
+
+@triton.autotune(
+    configs=_get_autotune_configs(),
+    key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"],
+)
+@triton.jit
+def ns_line_2_kernel(
+    A_ptr, C_ptr,
+    M,
+    a_stride_b, a_stride_r, a_stride_c,
+    c_stride_b, c_stride_r, c_stride_c,
+    alpha, beta,
+    BLOCK_SIZE_M: tl.constexpr,
+    BLOCK_SIZE_N: tl.constexpr,
+    BLOCK_SIZE_K: tl.constexpr,
+    GROUP_SIZE_M: tl.constexpr,
+    LOWER_UPPER: tl.constexpr,
+):
+    # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A
+    # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels
+    pid = tl.program_id(axis=0)
+    batch_idx, m_idx, n_idx = _pid_to_block(
+        pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M
+    )
+
+    # Skip blocks that don't need to be computed
+    skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx)
+    skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx)
+    if skip_block_below_diag or skip_block_above_diag:
+        return
+
+    # Index into one matrix of batch
+    A_ptr += batch_idx * a_stride_b
+    C_ptr += batch_idx * c_stride_b
+
+    # Create pointer arrays for A and A.T
+    offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M
+    offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M
+    offs_k = tl.arange(0, BLOCK_SIZE_K)
+    a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c)
+    at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r)
+
+    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
+
+    # Accumulate over blocks of K
+    for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)):
+        a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0)
+        at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0)
+        accumulator = tl.dot(a, at, accumulator)
+        a_ptrs += BLOCK_SIZE_K * a_stride_c
+        at_ptrs += BLOCK_SIZE_K * a_stride_c
+
+    # Load block of A to add (corresponds to the current block of C)
+    offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M)
+    offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N)
+    a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c)
+    a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M)
+    a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32)
+
+    # Apply alpha and beta
+    accumulator *= alpha
+    accumulator += a_add * beta
+
+    out_dtype = C_ptr.dtype.element_ty
+    output = accumulator.to(out_dtype)
+
+    # Store block of C
+    offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M)
+    offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N)
+    c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c)
+    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M)
+    tl.store(c_ptrs, output, mask=c_mask)
+
+    # Store block of C mirrored across the diagonal
+    c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c)
+    c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M)
+    tl.store(c_ptrs_t, output.T, mask=c_mask_t)
+
+def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor):
+    """
+    Launch Triton kernel to compute C = alpha * A @ A.T + beta * A
+    """
+    assert A.ndim == 2 or A.ndim == 3
+    M, K = A.shape[-2:]
+    assert M == K, "Input matrix must be square"
+    assert out.size(-2) == M
+    assert out.size(-1) == M
+
+    batch_size = A.size(0) if A.ndim == 3 else 1
+    input_batch_stride = A.stride(0) if A.ndim == 3 else 0
+    output_batch_stride = out.stride(0) if out.ndim == 3 else 0
+
+    grid = lambda meta: (
+        batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]),
+    )
+    ns_line_2_kernel[grid](
+        A_ptr=A,
+        C_ptr=out,
+        M=M,
+        a_stride_b=input_batch_stride,
+        a_stride_r=A.stride(-2),
+        a_stride_c=A.stride(-1),
+        c_stride_b=output_batch_stride,
+        c_stride_r=out.stride(-2),
+        c_stride_c=out.stride(-1),
+        alpha=alpha,
+        beta=beta,
+    )
+    return out
+
+@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower
+def newton_schulz_triton(G: torch.Tensor):
+    a, b, c = (3.4445, -4.7750, 2.0315)
+    X = G.bfloat16()
+    if G.size(-2) > G.size(-1):
+        X = X.mT
+
+    # Ensure spectral norm is at most 1
+    X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)
+
+    # Allocate buffers
+    X = X.contiguous()
+    A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype)
+    B = torch.empty_like(A)
+    C = torch.empty_like(X)
+
+    ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm
+
+    # Perform the NS iterations
+    for _ in range(5):
+        ns_line_1(X, out=A) # A = X @ X.mT
+        ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A
+        ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X
+        X, C = C, X # Swap references to avoid unnecessary copies
+
+    if G.size(-2) > G.size(-1):
+        X = X.mT
+    return X
+
+# -----------------------------------------------------------------------------
+# Muon optimizer
+
+class Muon(torch.optim.Optimizer):
+    """
+    Muon - MomentUm Orthogonalized by Newton-schulz
+
+    https://kellerjordan.github.io/posts/muon/
+
+    Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
+    processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
+    matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
+    the advantage that it can be stably run in bfloat16 on the GPU.
+
+    Warning: This optimizer should not be used for the embedding layer, the final fully connected layer,
+    or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW).
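+
+    Schematically, the update applied below to each 2D parameter p with gradient g
+    is (a sketch that ignores the cross-rank sharding in step() and omits the
+    optional per-tensor lr_mul/wd_mul multipliers; mu is the momentum coefficient):
+
+        buf <- mu * buf + (1 - mu) * g                  # momentum_buffer.lerp_
+        g   <- (1 - mu) * g + mu * buf                  # Nesterov-style blend
+        p   <- (1 - lr * weight_decay) * p              # decoupled weight decay
+        p   <- p - lr * max(1, rows / cols) ** 0.5 * newton_schulz_triton(g)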
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
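+        # Editor's note (not part of the original record): 448 is the largest finite
+        # value of float8_e4m3fn, so the x_s and w_s scales below are sized to keep
+        # activations and weights inside the e4m3 range, while grad_s rescales
+        # gradients for the float8_e5m2 format used in the backward pass
+        # (see mm_op / mm_backward_op above).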
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning-of-Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
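+                # (On StopIteration, finder.i was not advanced, so the partially
+                #  assembled batch from the old shard is simply discarded.)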
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # last document was too long to account for _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1705 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
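+
+# Editor's sketch (not part of the original record): distributed_data_generator is a
+# plain Python generator, so the training loop can re-configure the batch size, max
+# sequence length, and grad-accum steps mid-run via .send(). Hypothetical helper,
+# defined for exposition only and never called:
+def _sketch_loader_resize(loader, num_tokens: int, max_seq_len: int, accum_steps: int):
+    # .send() delivers the new settings to the generator's paused yield and returns
+    # the next (inputs, targets, cum_seqlens) batch, produced under those settings.
+    return loader.send((num_tokens, max_seq_len, accum_steps))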
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
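+
+# Editor's note (not part of the original record): get_lr holds the multiplier at 1.0
+# for the first 55% of training (1 - cooldown_frac), then decays linearly toward 0.1;
+# get_ws steps the attention window through (3, 7, 11) blocks in thirds of training.
+# Hypothetical helper, defined for exposition only and never called:
+def _sketch_schedules():
+    lrs = [round(get_lr(s), 3) for s in (0, 1000, 1400, 1704)]  # 1.0 until cooldown, then decaying
+    wss = [get_ws(s) for s in (0, 800, 1400)]                   # 3, 7, 11
+    return lrs, wss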
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:35:03 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 128W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 131W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 93208 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 93209 C /usr/bin/python3 610MiB | +| 0 N/A N/A 93210 C /usr/bin/python3 610MiB | +| 0 N/A N/A 93211 C /usr/bin/python3 610MiB | +| 0 N/A N/A 93212 C /usr/bin/python3 610MiB | +| 0 N/A N/A 93213 C /usr/bin/python3 610MiB | +| 0 N/A N/A 93214 C /usr/bin/python3 610MiB | +| 0 N/A N/A 93215 C /usr/bin/python3 610MiB | +| 1 N/A N/A 93209 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 93210 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 93211 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 93212 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 93213 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 93214 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 93215 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1705 train_time:392ms step_avg:391.78ms +step:2/1705 train_time:412ms step_avg:205.99ms +step:3/1705 train_time:481ms step_avg:160.40ms +step:4/1705 train_time:572ms step_avg:143.00ms +step:5/1705 
train_time:664ms step_avg:132.81ms +step:6/1705 train_time:756ms step_avg:125.98ms +step:7/1705 train_time:848ms step_avg:121.20ms +step:8/1705 train_time:940ms step_avg:117.56ms +step:9/1705 train_time:1033ms step_avg:114.76ms +step:10/1705 train_time:1126ms step_avg:112.55ms +step:11/1705 train_time:1218ms step_avg:110.70ms +step:12/1705 train_time:1311ms step_avg:109.29ms +step:13/1705 train_time:1409ms step_avg:108.35ms +step:14/1705 train_time:1503ms step_avg:107.34ms +step:15/1705 train_time:1595ms step_avg:106.34ms +step:16/1705 train_time:1688ms step_avg:105.51ms +step:17/1705 train_time:1781ms step_avg:104.77ms +step:18/1705 train_time:1874ms step_avg:104.11ms +step:19/1705 train_time:1967ms step_avg:103.51ms +step:20/1705 train_time:2059ms step_avg:102.96ms +step:21/1705 train_time:2152ms step_avg:102.46ms +step:22/1705 train_time:2245ms step_avg:102.03ms +step:23/1705 train_time:2339ms step_avg:101.69ms +step:24/1705 train_time:2432ms step_avg:101.34ms +step:25/1705 train_time:2527ms step_avg:101.09ms +step:26/1705 train_time:2621ms step_avg:100.81ms +step:27/1705 train_time:2713ms step_avg:100.49ms +step:28/1705 train_time:2807ms step_avg:100.24ms +step:29/1705 train_time:2900ms step_avg:99.99ms +step:30/1705 train_time:2992ms step_avg:99.73ms +step:31/1705 train_time:3084ms step_avg:99.49ms +step:32/1705 train_time:3177ms step_avg:99.28ms +step:33/1705 train_time:3270ms step_avg:99.10ms +step:34/1705 train_time:3365ms step_avg:98.96ms +step:35/1705 train_time:3458ms step_avg:98.81ms +step:36/1705 train_time:3551ms step_avg:98.65ms +step:37/1705 train_time:3645ms step_avg:98.53ms +step:38/1705 train_time:3739ms step_avg:98.39ms +step:39/1705 train_time:3832ms step_avg:98.26ms +step:40/1705 train_time:3925ms step_avg:98.13ms +step:41/1705 train_time:4017ms step_avg:97.99ms +step:42/1705 train_time:4110ms step_avg:97.86ms +step:43/1705 train_time:4203ms step_avg:97.74ms +step:44/1705 train_time:4296ms step_avg:97.63ms +step:45/1705 train_time:4389ms step_avg:97.54ms +step:46/1705 train_time:4483ms step_avg:97.46ms +step:47/1705 train_time:4575ms step_avg:97.34ms +step:48/1705 train_time:4669ms step_avg:97.28ms +step:49/1705 train_time:4763ms step_avg:97.20ms +step:50/1705 train_time:4856ms step_avg:97.13ms +step:51/1705 train_time:4950ms step_avg:97.06ms +step:52/1705 train_time:5043ms step_avg:96.97ms +step:53/1705 train_time:5135ms step_avg:96.88ms +step:54/1705 train_time:5227ms step_avg:96.80ms +step:55/1705 train_time:5321ms step_avg:96.75ms +step:56/1705 train_time:5414ms step_avg:96.68ms +step:57/1705 train_time:5507ms step_avg:96.61ms +step:58/1705 train_time:5599ms step_avg:96.54ms +step:59/1705 train_time:5693ms step_avg:96.49ms +step:60/1705 train_time:5787ms step_avg:96.45ms +step:61/1705 train_time:5880ms step_avg:96.40ms +step:62/1705 train_time:5973ms step_avg:96.33ms +step:63/1705 train_time:6066ms step_avg:96.29ms +step:64/1705 train_time:6160ms step_avg:96.25ms +step:65/1705 train_time:6253ms step_avg:96.19ms +step:66/1705 train_time:6345ms step_avg:96.14ms +step:67/1705 train_time:6438ms step_avg:96.09ms +step:68/1705 train_time:6531ms step_avg:96.05ms +step:69/1705 train_time:6625ms step_avg:96.02ms +step:70/1705 train_time:6719ms step_avg:95.98ms +step:71/1705 train_time:6812ms step_avg:95.94ms +step:72/1705 train_time:6906ms step_avg:95.91ms +step:73/1705 train_time:6999ms step_avg:95.88ms +step:74/1705 train_time:7092ms step_avg:95.84ms +step:75/1705 train_time:7186ms step_avg:95.81ms +step:76/1705 train_time:7279ms step_avg:95.78ms +step:77/1705 
train_time:7372ms step_avg:95.73ms +step:78/1705 train_time:7464ms step_avg:95.69ms +step:79/1705 train_time:7556ms step_avg:95.65ms +step:80/1705 train_time:7649ms step_avg:95.62ms +step:81/1705 train_time:7743ms step_avg:95.60ms +step:82/1705 train_time:7836ms step_avg:95.56ms +step:83/1705 train_time:7929ms step_avg:95.53ms +step:84/1705 train_time:8023ms step_avg:95.51ms +step:85/1705 train_time:8116ms step_avg:95.48ms +step:86/1705 train_time:8209ms step_avg:95.45ms +step:87/1705 train_time:8302ms step_avg:95.43ms +step:88/1705 train_time:8395ms step_avg:95.39ms +step:89/1705 train_time:8487ms step_avg:95.36ms +step:90/1705 train_time:8580ms step_avg:95.33ms +step:91/1705 train_time:8673ms step_avg:95.30ms +step:92/1705 train_time:8766ms step_avg:95.28ms +step:93/1705 train_time:8860ms step_avg:95.27ms +step:94/1705 train_time:8952ms step_avg:95.24ms +step:95/1705 train_time:9045ms step_avg:95.21ms +step:96/1705 train_time:9138ms step_avg:95.19ms +step:97/1705 train_time:9231ms step_avg:95.17ms +step:98/1705 train_time:9325ms step_avg:95.15ms +step:99/1705 train_time:9418ms step_avg:95.13ms +step:100/1705 train_time:9511ms step_avg:95.11ms +step:101/1705 train_time:9604ms step_avg:95.09ms +step:102/1705 train_time:9697ms step_avg:95.07ms +step:103/1705 train_time:9791ms step_avg:95.05ms +step:104/1705 train_time:9884ms step_avg:95.04ms +step:105/1705 train_time:9976ms step_avg:95.01ms +step:106/1705 train_time:10070ms step_avg:95.00ms +step:107/1705 train_time:10164ms step_avg:94.99ms +step:108/1705 train_time:10257ms step_avg:94.97ms +step:109/1705 train_time:10351ms step_avg:94.96ms +step:110/1705 train_time:10444ms step_avg:94.95ms +step:111/1705 train_time:10537ms step_avg:94.93ms +step:112/1705 train_time:10630ms step_avg:94.91ms +step:113/1705 train_time:10723ms step_avg:94.90ms +step:114/1705 train_time:10815ms step_avg:94.87ms +step:115/1705 train_time:10908ms step_avg:94.86ms +step:116/1705 train_time:11001ms step_avg:94.84ms +step:117/1705 train_time:11094ms step_avg:94.82ms +step:118/1705 train_time:11187ms step_avg:94.81ms +step:119/1705 train_time:11281ms step_avg:94.80ms +step:120/1705 train_time:11374ms step_avg:94.78ms +step:121/1705 train_time:11467ms step_avg:94.77ms +step:122/1705 train_time:11559ms step_avg:94.75ms +step:123/1705 train_time:11652ms step_avg:94.73ms +step:124/1705 train_time:11745ms step_avg:94.72ms +step:125/1705 train_time:11838ms step_avg:94.71ms +step:125/1705 val_loss:4.3025 train_time:11931ms step_avg:95.45ms +step:126/1705 train_time:11953ms step_avg:94.87ms +step:127/1705 train_time:12031ms step_avg:94.73ms +step:128/1705 train_time:12133ms step_avg:94.79ms +step:129/1705 train_time:12229ms step_avg:94.80ms +step:130/1705 train_time:12322ms step_avg:94.78ms +step:131/1705 train_time:12413ms step_avg:94.76ms +step:132/1705 train_time:12505ms step_avg:94.74ms +step:133/1705 train_time:12597ms step_avg:94.72ms +step:134/1705 train_time:12689ms step_avg:94.70ms +step:135/1705 train_time:12781ms step_avg:94.68ms +step:136/1705 train_time:12873ms step_avg:94.66ms +step:137/1705 train_time:12966ms step_avg:94.64ms +step:138/1705 train_time:13061ms step_avg:94.65ms +step:139/1705 train_time:13157ms step_avg:94.65ms +step:140/1705 train_time:13250ms step_avg:94.64ms +step:141/1705 train_time:13343ms step_avg:94.63ms +step:142/1705 train_time:13436ms step_avg:94.62ms +step:143/1705 train_time:13528ms step_avg:94.60ms +step:144/1705 train_time:13621ms step_avg:94.59ms +step:145/1705 train_time:13712ms step_avg:94.57ms +step:146/1705 train_time:13805ms 
step_avg:94.55ms +step:147/1705 train_time:13897ms step_avg:94.53ms +step:148/1705 train_time:13989ms step_avg:94.52ms +step:149/1705 train_time:14083ms step_avg:94.52ms +step:150/1705 train_time:14177ms step_avg:94.52ms +step:151/1705 train_time:14270ms step_avg:94.50ms +step:152/1705 train_time:14364ms step_avg:94.50ms +step:153/1705 train_time:14457ms step_avg:94.49ms +step:154/1705 train_time:14549ms step_avg:94.47ms +step:155/1705 train_time:14641ms step_avg:94.46ms +step:156/1705 train_time:14734ms step_avg:94.45ms +step:157/1705 train_time:14826ms step_avg:94.43ms +step:158/1705 train_time:14919ms step_avg:94.43ms +step:159/1705 train_time:15012ms step_avg:94.41ms +step:160/1705 train_time:15106ms step_avg:94.41ms +step:161/1705 train_time:15201ms step_avg:94.41ms +step:162/1705 train_time:15294ms step_avg:94.41ms +step:163/1705 train_time:15387ms step_avg:94.40ms +step:164/1705 train_time:15480ms step_avg:94.39ms +step:165/1705 train_time:15573ms step_avg:94.38ms +step:166/1705 train_time:15666ms step_avg:94.37ms +step:167/1705 train_time:15758ms step_avg:94.36ms +step:168/1705 train_time:15850ms step_avg:94.35ms +step:169/1705 train_time:15943ms step_avg:94.34ms +step:170/1705 train_time:16036ms step_avg:94.33ms +step:171/1705 train_time:16129ms step_avg:94.32ms +step:172/1705 train_time:16222ms step_avg:94.32ms +step:173/1705 train_time:16315ms step_avg:94.31ms +step:174/1705 train_time:16409ms step_avg:94.31ms +step:175/1705 train_time:16502ms step_avg:94.30ms +step:176/1705 train_time:16595ms step_avg:94.29ms +step:177/1705 train_time:16687ms step_avg:94.28ms +step:178/1705 train_time:16780ms step_avg:94.27ms +step:179/1705 train_time:16872ms step_avg:94.26ms +step:180/1705 train_time:16965ms step_avg:94.25ms +step:181/1705 train_time:17058ms step_avg:94.25ms +step:182/1705 train_time:17151ms step_avg:94.24ms +step:183/1705 train_time:17245ms step_avg:94.24ms +step:184/1705 train_time:17338ms step_avg:94.23ms +step:185/1705 train_time:17430ms step_avg:94.22ms +step:186/1705 train_time:17523ms step_avg:94.21ms +step:187/1705 train_time:17617ms step_avg:94.21ms +step:188/1705 train_time:17710ms step_avg:94.20ms +step:189/1705 train_time:17803ms step_avg:94.19ms +step:190/1705 train_time:17895ms step_avg:94.19ms +step:191/1705 train_time:17988ms step_avg:94.18ms +step:192/1705 train_time:18081ms step_avg:94.17ms +step:193/1705 train_time:18173ms step_avg:94.16ms +step:194/1705 train_time:18267ms step_avg:94.16ms +step:195/1705 train_time:18360ms step_avg:94.16ms +step:196/1705 train_time:18453ms step_avg:94.15ms +step:197/1705 train_time:18546ms step_avg:94.14ms +step:198/1705 train_time:18639ms step_avg:94.14ms +step:199/1705 train_time:18731ms step_avg:94.13ms +step:200/1705 train_time:18824ms step_avg:94.12ms +step:201/1705 train_time:18917ms step_avg:94.11ms +step:202/1705 train_time:19009ms step_avg:94.11ms +step:203/1705 train_time:19102ms step_avg:94.10ms +step:204/1705 train_time:19195ms step_avg:94.09ms +step:205/1705 train_time:19288ms step_avg:94.09ms +step:206/1705 train_time:19380ms step_avg:94.08ms +step:207/1705 train_time:19473ms step_avg:94.07ms +step:208/1705 train_time:19566ms step_avg:94.07ms +step:209/1705 train_time:19658ms step_avg:94.06ms +step:210/1705 train_time:19750ms step_avg:94.05ms +step:211/1705 train_time:19843ms step_avg:94.04ms +step:212/1705 train_time:19936ms step_avg:94.04ms +step:213/1705 train_time:20234ms step_avg:94.99ms +step:214/1705 train_time:20366ms step_avg:95.17ms +step:215/1705 train_time:20457ms step_avg:95.15ms +step:216/1705 
train_time:20549ms step_avg:95.13ms +step:217/1705 train_time:20640ms step_avg:95.12ms +step:218/1705 train_time:20732ms step_avg:95.10ms +step:219/1705 train_time:20824ms step_avg:95.09ms +step:220/1705 train_time:20916ms step_avg:95.07ms +step:221/1705 train_time:21008ms step_avg:95.06ms +step:222/1705 train_time:21100ms step_avg:95.04ms +step:223/1705 train_time:21192ms step_avg:95.03ms +step:224/1705 train_time:21289ms step_avg:95.04ms +step:225/1705 train_time:21385ms step_avg:95.04ms +step:226/1705 train_time:21478ms step_avg:95.04ms +step:227/1705 train_time:21571ms step_avg:95.03ms +step:228/1705 train_time:21664ms step_avg:95.02ms +step:229/1705 train_time:21756ms step_avg:95.00ms +step:230/1705 train_time:21848ms step_avg:94.99ms +step:231/1705 train_time:21940ms step_avg:94.98ms +step:232/1705 train_time:22032ms step_avg:94.97ms +step:233/1705 train_time:22124ms step_avg:94.95ms +step:234/1705 train_time:22218ms step_avg:94.95ms +step:235/1705 train_time:22311ms step_avg:94.94ms +step:236/1705 train_time:22405ms step_avg:94.94ms +step:237/1705 train_time:22498ms step_avg:94.93ms +step:238/1705 train_time:22592ms step_avg:94.92ms +step:239/1705 train_time:22684ms step_avg:94.91ms +step:240/1705 train_time:22776ms step_avg:94.90ms +step:241/1705 train_time:22868ms step_avg:94.89ms +step:242/1705 train_time:22960ms step_avg:94.88ms +step:243/1705 train_time:23052ms step_avg:94.86ms +step:244/1705 train_time:23144ms step_avg:94.85ms +step:245/1705 train_time:23237ms step_avg:94.85ms +step:246/1705 train_time:23330ms step_avg:94.84ms +step:247/1705 train_time:23425ms step_avg:94.84ms +step:248/1705 train_time:23518ms step_avg:94.83ms +step:249/1705 train_time:23610ms step_avg:94.82ms +step:250/1705 train_time:23703ms step_avg:94.81ms +step:250/1705 val_loss:3.9743 train_time:23796ms step_avg:95.19ms +step:251/1705 train_time:23818ms step_avg:94.89ms +step:252/1705 train_time:23893ms step_avg:94.81ms +step:253/1705 train_time:23990ms step_avg:94.82ms +step:254/1705 train_time:24085ms step_avg:94.82ms +step:255/1705 train_time:24177ms step_avg:94.81ms +step:256/1705 train_time:24269ms step_avg:94.80ms +step:257/1705 train_time:24362ms step_avg:94.79ms +step:258/1705 train_time:24453ms step_avg:94.78ms +step:259/1705 train_time:24544ms step_avg:94.77ms +step:260/1705 train_time:24636ms step_avg:94.75ms +step:261/1705 train_time:24729ms step_avg:94.75ms +step:262/1705 train_time:24823ms step_avg:94.74ms +step:263/1705 train_time:24917ms step_avg:94.74ms +step:264/1705 train_time:25011ms step_avg:94.74ms +step:265/1705 train_time:25105ms step_avg:94.74ms +step:266/1705 train_time:25198ms step_avg:94.73ms +step:267/1705 train_time:25290ms step_avg:94.72ms +step:268/1705 train_time:25383ms step_avg:94.71ms +step:269/1705 train_time:25476ms step_avg:94.70ms +step:270/1705 train_time:25568ms step_avg:94.70ms +step:271/1705 train_time:25661ms step_avg:94.69ms +step:272/1705 train_time:25753ms step_avg:94.68ms +step:273/1705 train_time:25847ms step_avg:94.68ms +step:274/1705 train_time:25940ms step_avg:94.67ms +step:275/1705 train_time:26033ms step_avg:94.67ms +step:276/1705 train_time:26126ms step_avg:94.66ms +step:277/1705 train_time:26219ms step_avg:94.65ms +step:278/1705 train_time:26311ms step_avg:94.65ms +step:279/1705 train_time:26405ms step_avg:94.64ms +step:280/1705 train_time:26497ms step_avg:94.63ms +step:281/1705 train_time:26589ms step_avg:94.62ms +step:282/1705 train_time:26682ms step_avg:94.62ms +step:283/1705 train_time:26776ms step_avg:94.61ms +step:284/1705 train_time:26868ms 
step_avg:94.61ms +step:285/1705 train_time:26962ms step_avg:94.60ms +step:286/1705 train_time:27056ms step_avg:94.60ms +step:287/1705 train_time:27148ms step_avg:94.59ms +step:288/1705 train_time:27242ms step_avg:94.59ms +step:289/1705 train_time:27334ms step_avg:94.58ms +step:290/1705 train_time:27427ms step_avg:94.57ms +step:291/1705 train_time:27519ms step_avg:94.57ms +step:292/1705 train_time:27611ms step_avg:94.56ms +step:293/1705 train_time:27704ms step_avg:94.55ms +step:294/1705 train_time:27797ms step_avg:94.55ms +step:295/1705 train_time:27889ms step_avg:94.54ms +step:296/1705 train_time:27983ms step_avg:94.54ms +step:297/1705 train_time:28076ms step_avg:94.53ms +step:298/1705 train_time:28168ms step_avg:94.53ms +step:299/1705 train_time:28261ms step_avg:94.52ms +step:300/1705 train_time:28355ms step_avg:94.52ms +step:301/1705 train_time:28447ms step_avg:94.51ms +step:302/1705 train_time:28540ms step_avg:94.50ms +step:303/1705 train_time:28632ms step_avg:94.50ms +step:304/1705 train_time:28725ms step_avg:94.49ms +step:305/1705 train_time:28818ms step_avg:94.49ms +step:306/1705 train_time:28910ms step_avg:94.48ms +step:307/1705 train_time:29004ms step_avg:94.47ms +step:308/1705 train_time:29097ms step_avg:94.47ms +step:309/1705 train_time:29189ms step_avg:94.46ms +step:310/1705 train_time:29282ms step_avg:94.46ms +step:311/1705 train_time:29375ms step_avg:94.45ms +step:312/1705 train_time:29468ms step_avg:94.45ms +step:313/1705 train_time:29561ms step_avg:94.44ms +step:314/1705 train_time:29653ms step_avg:94.44ms +step:315/1705 train_time:29746ms step_avg:94.43ms +step:316/1705 train_time:29839ms step_avg:94.43ms +step:317/1705 train_time:29931ms step_avg:94.42ms +step:318/1705 train_time:30025ms step_avg:94.42ms +step:319/1705 train_time:30118ms step_avg:94.42ms +step:320/1705 train_time:30211ms step_avg:94.41ms +step:321/1705 train_time:30304ms step_avg:94.41ms +step:322/1705 train_time:30397ms step_avg:94.40ms +step:323/1705 train_time:30489ms step_avg:94.39ms +step:324/1705 train_time:30582ms step_avg:94.39ms +step:325/1705 train_time:30675ms step_avg:94.38ms +step:326/1705 train_time:30767ms step_avg:94.38ms +step:327/1705 train_time:30860ms step_avg:94.37ms +step:328/1705 train_time:30952ms step_avg:94.37ms +step:329/1705 train_time:31045ms step_avg:94.36ms +step:330/1705 train_time:31138ms step_avg:94.36ms +step:331/1705 train_time:31231ms step_avg:94.35ms +step:332/1705 train_time:31324ms step_avg:94.35ms +step:333/1705 train_time:31417ms step_avg:94.35ms +step:334/1705 train_time:31510ms step_avg:94.34ms +step:335/1705 train_time:31602ms step_avg:94.34ms +step:336/1705 train_time:31695ms step_avg:94.33ms +step:337/1705 train_time:31788ms step_avg:94.33ms +step:338/1705 train_time:31881ms step_avg:94.32ms +step:339/1705 train_time:31974ms step_avg:94.32ms +step:340/1705 train_time:32067ms step_avg:94.31ms +step:341/1705 train_time:32160ms step_avg:94.31ms +step:342/1705 train_time:32253ms step_avg:94.31ms +step:343/1705 train_time:32345ms step_avg:94.30ms +step:344/1705 train_time:32439ms step_avg:94.30ms +step:345/1705 train_time:32531ms step_avg:94.29ms +step:346/1705 train_time:32625ms step_avg:94.29ms +step:347/1705 train_time:32718ms step_avg:94.29ms +step:348/1705 train_time:32810ms step_avg:94.28ms +step:349/1705 train_time:32903ms step_avg:94.28ms +step:350/1705 train_time:32996ms step_avg:94.27ms +step:351/1705 train_time:33088ms step_avg:94.27ms +step:352/1705 train_time:33182ms step_avg:94.27ms +step:353/1705 train_time:33275ms step_avg:94.26ms +step:354/1705 
train_time:33368ms step_avg:94.26ms +step:355/1705 train_time:33462ms step_avg:94.26ms +step:356/1705 train_time:33554ms step_avg:94.25ms +step:357/1705 train_time:33647ms step_avg:94.25ms +step:358/1705 train_time:33739ms step_avg:94.24ms +step:359/1705 train_time:33831ms step_avg:94.24ms +step:360/1705 train_time:33924ms step_avg:94.23ms +step:361/1705 train_time:34017ms step_avg:94.23ms +step:362/1705 train_time:34110ms step_avg:94.23ms +step:363/1705 train_time:34203ms step_avg:94.22ms +step:364/1705 train_time:34296ms step_avg:94.22ms +step:365/1705 train_time:34389ms step_avg:94.22ms +step:366/1705 train_time:34483ms step_avg:94.22ms +step:367/1705 train_time:34576ms step_avg:94.21ms +step:368/1705 train_time:34668ms step_avg:94.21ms +step:369/1705 train_time:34761ms step_avg:94.20ms +step:370/1705 train_time:34853ms step_avg:94.20ms +step:371/1705 train_time:34946ms step_avg:94.19ms +step:372/1705 train_time:35039ms step_avg:94.19ms +step:373/1705 train_time:35132ms step_avg:94.19ms +step:374/1705 train_time:35225ms step_avg:94.18ms +step:375/1705 train_time:35318ms step_avg:94.18ms +step:375/1705 val_loss:3.8206 train_time:35411ms step_avg:94.43ms +step:376/1705 train_time:35433ms step_avg:94.24ms +step:377/1705 train_time:35512ms step_avg:94.20ms +step:378/1705 train_time:35608ms step_avg:94.20ms +step:379/1705 train_time:35702ms step_avg:94.20ms +step:380/1705 train_time:35795ms step_avg:94.20ms +step:381/1705 train_time:35887ms step_avg:94.19ms +step:382/1705 train_time:35978ms step_avg:94.18ms +step:383/1705 train_time:36070ms step_avg:94.18ms +step:384/1705 train_time:36162ms step_avg:94.17ms +step:385/1705 train_time:36254ms step_avg:94.17ms +step:386/1705 train_time:36346ms step_avg:94.16ms +step:387/1705 train_time:36442ms step_avg:94.16ms +step:388/1705 train_time:36536ms step_avg:94.17ms +step:389/1705 train_time:36630ms step_avg:94.17ms +step:390/1705 train_time:36725ms step_avg:94.17ms +step:391/1705 train_time:36818ms step_avg:94.16ms +step:392/1705 train_time:36910ms step_avg:94.16ms +step:393/1705 train_time:37002ms step_avg:94.15ms +step:394/1705 train_time:37094ms step_avg:94.15ms +step:395/1705 train_time:37186ms step_avg:94.14ms +step:396/1705 train_time:37279ms step_avg:94.14ms +step:397/1705 train_time:37373ms step_avg:94.14ms +step:398/1705 train_time:37466ms step_avg:94.14ms +step:399/1705 train_time:37561ms step_avg:94.14ms +step:400/1705 train_time:37654ms step_avg:94.14ms +step:401/1705 train_time:37747ms step_avg:94.13ms +step:402/1705 train_time:37842ms step_avg:94.13ms +step:403/1705 train_time:37935ms step_avg:94.13ms +step:404/1705 train_time:38027ms step_avg:94.13ms +step:405/1705 train_time:38119ms step_avg:94.12ms +step:406/1705 train_time:38211ms step_avg:94.12ms +step:407/1705 train_time:38304ms step_avg:94.11ms +step:408/1705 train_time:38397ms step_avg:94.11ms +step:409/1705 train_time:38490ms step_avg:94.11ms +step:410/1705 train_time:38583ms step_avg:94.10ms +step:411/1705 train_time:38677ms step_avg:94.10ms +step:412/1705 train_time:38770ms step_avg:94.10ms +step:413/1705 train_time:38863ms step_avg:94.10ms +step:414/1705 train_time:38956ms step_avg:94.10ms +step:415/1705 train_time:39049ms step_avg:94.09ms +step:416/1705 train_time:39142ms step_avg:94.09ms +step:417/1705 train_time:39234ms step_avg:94.09ms +step:418/1705 train_time:39326ms step_avg:94.08ms +step:419/1705 train_time:39419ms step_avg:94.08ms +step:420/1705 train_time:39512ms step_avg:94.08ms +step:421/1705 train_time:39605ms step_avg:94.07ms +step:422/1705 train_time:39698ms 
step_avg:94.07ms +step:423/1705 train_time:39791ms step_avg:94.07ms +step:424/1705 train_time:39884ms step_avg:94.07ms +step:425/1705 train_time:40174ms step_avg:94.53ms +step:426/1705 train_time:40314ms step_avg:94.63ms +step:427/1705 train_time:40405ms step_avg:94.63ms +step:428/1705 train_time:40497ms step_avg:94.62ms +step:429/1705 train_time:40589ms step_avg:94.61ms +step:430/1705 train_time:40681ms step_avg:94.61ms +step:431/1705 train_time:40773ms step_avg:94.60ms +step:432/1705 train_time:40865ms step_avg:94.60ms +step:433/1705 train_time:40957ms step_avg:94.59ms +step:434/1705 train_time:41049ms step_avg:94.58ms +step:435/1705 train_time:41144ms step_avg:94.58ms +step:436/1705 train_time:41239ms step_avg:94.59ms +step:437/1705 train_time:41334ms step_avg:94.59ms +step:438/1705 train_time:41427ms step_avg:94.58ms +step:439/1705 train_time:41520ms step_avg:94.58ms +step:440/1705 train_time:41613ms step_avg:94.57ms +step:441/1705 train_time:41705ms step_avg:94.57ms +step:442/1705 train_time:41797ms step_avg:94.56ms +step:443/1705 train_time:41889ms step_avg:94.56ms +step:444/1705 train_time:41981ms step_avg:94.55ms +step:445/1705 train_time:42074ms step_avg:94.55ms +step:446/1705 train_time:42168ms step_avg:94.55ms +step:447/1705 train_time:42262ms step_avg:94.55ms +step:448/1705 train_time:42356ms step_avg:94.54ms +step:449/1705 train_time:42449ms step_avg:94.54ms +step:450/1705 train_time:42543ms step_avg:94.54ms +step:451/1705 train_time:42637ms step_avg:94.54ms +step:452/1705 train_time:42729ms step_avg:94.53ms +step:453/1705 train_time:42822ms step_avg:94.53ms +step:454/1705 train_time:42914ms step_avg:94.52ms +step:455/1705 train_time:43007ms step_avg:94.52ms +step:456/1705 train_time:43100ms step_avg:94.52ms +step:457/1705 train_time:43192ms step_avg:94.51ms +step:458/1705 train_time:43286ms step_avg:94.51ms +step:459/1705 train_time:43379ms step_avg:94.51ms +step:460/1705 train_time:43472ms step_avg:94.50ms +step:461/1705 train_time:43566ms step_avg:94.50ms +step:462/1705 train_time:43659ms step_avg:94.50ms +step:463/1705 train_time:43751ms step_avg:94.49ms +step:464/1705 train_time:43844ms step_avg:94.49ms +step:465/1705 train_time:43937ms step_avg:94.49ms +step:466/1705 train_time:44029ms step_avg:94.48ms +step:467/1705 train_time:44123ms step_avg:94.48ms +step:468/1705 train_time:44216ms step_avg:94.48ms +step:469/1705 train_time:44308ms step_avg:94.47ms +step:470/1705 train_time:44401ms step_avg:94.47ms +step:471/1705 train_time:44494ms step_avg:94.47ms +step:472/1705 train_time:44587ms step_avg:94.47ms +step:473/1705 train_time:44680ms step_avg:94.46ms +step:474/1705 train_time:44772ms step_avg:94.46ms +step:475/1705 train_time:44865ms step_avg:94.45ms +step:476/1705 train_time:44959ms step_avg:94.45ms +step:477/1705 train_time:45051ms step_avg:94.45ms +step:478/1705 train_time:45144ms step_avg:94.44ms +step:479/1705 train_time:45237ms step_avg:94.44ms +step:480/1705 train_time:45329ms step_avg:94.44ms +step:481/1705 train_time:45422ms step_avg:94.43ms +step:482/1705 train_time:45515ms step_avg:94.43ms +step:483/1705 train_time:45608ms step_avg:94.43ms +step:484/1705 train_time:45702ms step_avg:94.42ms +step:485/1705 train_time:45794ms step_avg:94.42ms +step:486/1705 train_time:45887ms step_avg:94.42ms +step:487/1705 train_time:45980ms step_avg:94.41ms +step:488/1705 train_time:46072ms step_avg:94.41ms +step:489/1705 train_time:46165ms step_avg:94.41ms +step:490/1705 train_time:46258ms step_avg:94.40ms +step:491/1705 train_time:46350ms step_avg:94.40ms +step:492/1705 
train_time:46445ms step_avg:94.40ms
+step:493/1705 train_time:46538ms step_avg:94.40ms
+step:494/1705 train_time:46631ms step_avg:94.39ms
+step:495/1705 train_time:46725ms step_avg:94.39ms
+step:496/1705 train_time:46817ms step_avg:94.39ms
+step:497/1705 train_time:46910ms step_avg:94.39ms
+step:498/1705 train_time:47003ms step_avg:94.38ms
+step:499/1705 train_time:47096ms step_avg:94.38ms
+step:500/1705 train_time:47188ms step_avg:94.38ms
+step:500/1705 val_loss:3.7177 train_time:47281ms step_avg:94.56ms
+step:501/1705 train_time:47303ms step_avg:94.42ms
+step:502/1705 train_time:47380ms step_avg:94.38ms
+step:503/1705 train_time:47479ms step_avg:94.39ms
+step:504/1705 train_time:47573ms step_avg:94.39ms
+step:505/1705 train_time:47665ms step_avg:94.39ms
+step:506/1705 train_time:47757ms step_avg:94.38ms
+step:507/1705 train_time:47849ms step_avg:94.38ms
+step:508/1705 train_time:47941ms step_avg:94.37ms
+step:509/1705 train_time:48033ms step_avg:94.37ms
+step:510/1705 train_time:48125ms step_avg:94.36ms
+step:511/1705 train_time:48218ms step_avg:94.36ms
+step:512/1705 train_time:48314ms step_avg:94.36ms
+step:513/1705 train_time:48409ms step_avg:94.36ms
+step:514/1705 train_time:48503ms step_avg:94.36ms
+step:515/1705 train_time:48597ms step_avg:94.36ms
+step:516/1705 train_time:48690ms step_avg:94.36ms
+step:517/1705 train_time:48782ms step_avg:94.36ms
+step:518/1705 train_time:48875ms step_avg:94.35ms
+step:519/1705 train_time:48967ms step_avg:94.35ms
+step:520/1705 train_time:49060ms step_avg:94.35ms
+step:521/1705 train_time:49152ms step_avg:94.34ms
+step:522/1705 train_time:49244ms step_avg:94.34ms
+step:523/1705 train_time:49338ms step_avg:94.34ms
+step:524/1705 train_time:49433ms step_avg:94.34ms
+step:525/1705 train_time:49526ms step_avg:94.33ms
+step:526/1705 train_time:49620ms step_avg:94.33ms
+step:527/1705 train_time:49713ms step_avg:94.33ms
+step:528/1705 train_time:49806ms step_avg:94.33ms
+step:529/1705 train_time:49898ms step_avg:94.32ms
+step:530/1705 train_time:49991ms step_avg:94.32ms
+step:531/1705 train_time:50083ms step_avg:94.32ms
+step:532/1705 train_time:50175ms step_avg:94.31ms
+step:533/1705 train_time:50268ms step_avg:94.31ms
+step:534/1705 train_time:50361ms step_avg:94.31ms
+step:535/1705 train_time:50455ms step_avg:94.31ms
+step:536/1705 train_time:50549ms step_avg:94.31ms
+step:537/1705 train_time:50642ms step_avg:94.30ms
+step:538/1705 train_time:50735ms step_avg:94.30ms
+step:539/1705 train_time:50828ms step_avg:94.30ms
+step:540/1705 train_time:50921ms step_avg:94.30ms
+step:541/1705 train_time:51015ms step_avg:94.30ms
+step:542/1705 train_time:51106ms step_avg:94.29ms
+step:543/1705 train_time:51199ms step_avg:94.29ms
+step:544/1705 train_time:51293ms step_avg:94.29ms
+step:545/1705 train_time:51386ms step_avg:94.29ms
+step:546/1705 train_time:51480ms step_avg:94.29ms
+step:547/1705 train_time:51573ms step_avg:94.28ms
+step:548/1705 train_time:51666ms step_avg:94.28ms
+step:549/1705 train_time:51759ms step_avg:94.28ms
+step:550/1705 train_time:51853ms step_avg:94.28ms
+step:551/1705 train_time:51945ms step_avg:94.27ms
+step:552/1705 train_time:52037ms step_avg:94.27ms
+step:553/1705 train_time:52130ms step_avg:94.27ms
+step:554/1705 train_time:52222ms step_avg:94.26ms
+step:555/1705 train_time:52315ms step_avg:94.26ms
+step:556/1705 train_time:52408ms step_avg:94.26ms
+step:557/1705 train_time:52501ms step_avg:94.26ms
+step:558/1705 train_time:52595ms step_avg:94.26ms
+step:559/1705 train_time:52688ms step_avg:94.25ms
+step:560/1705 train_time:52781ms step_avg:94.25ms
+step:561/1705 train_time:52875ms step_avg:94.25ms
+step:562/1705 train_time:52968ms step_avg:94.25ms
+step:563/1705 train_time:53060ms step_avg:94.25ms
+step:564/1705 train_time:53153ms step_avg:94.24ms
+step:565/1705 train_time:53245ms step_avg:94.24ms
+step:566/1705 train_time:53338ms step_avg:94.24ms
+step:567/1705 train_time:53432ms step_avg:94.24ms
+step:568/1705 train_time:53525ms step_avg:94.23ms
+step:569/1705 train_time:53618ms step_avg:94.23ms
+step:570/1705 train_time:53712ms step_avg:94.23ms
+step:571/1705 train_time:53806ms step_avg:94.23ms
+step:572/1705 train_time:53900ms step_avg:94.23ms
+step:573/1705 train_time:53995ms step_avg:94.23ms
+step:574/1705 train_time:54089ms step_avg:94.23ms
+step:575/1705 train_time:54182ms step_avg:94.23ms
+step:576/1705 train_time:54277ms step_avg:94.23ms
+step:577/1705 train_time:54371ms step_avg:94.23ms
+step:578/1705 train_time:54465ms step_avg:94.23ms
+step:579/1705 train_time:54560ms step_avg:94.23ms
+step:580/1705 train_time:54655ms step_avg:94.23ms
+step:581/1705 train_time:54749ms step_avg:94.23ms
+step:582/1705 train_time:54843ms step_avg:94.23ms
+step:583/1705 train_time:54938ms step_avg:94.23ms
+step:584/1705 train_time:55032ms step_avg:94.23ms
+step:585/1705 train_time:55125ms step_avg:94.23ms
+step:586/1705 train_time:55219ms step_avg:94.23ms
+step:587/1705 train_time:55313ms step_avg:94.23ms
+step:588/1705 train_time:55408ms step_avg:94.23ms
+step:589/1705 train_time:55502ms step_avg:94.23ms
+step:590/1705 train_time:55597ms step_avg:94.23ms
+step:591/1705 train_time:55693ms step_avg:94.23ms
+step:592/1705 train_time:55786ms step_avg:94.23ms
+step:593/1705 train_time:55881ms step_avg:94.23ms
+step:594/1705 train_time:55976ms step_avg:94.24ms
+step:595/1705 train_time:56070ms step_avg:94.24ms
+step:596/1705 train_time:56164ms step_avg:94.23ms
+step:597/1705 train_time:56258ms step_avg:94.24ms
+step:598/1705 train_time:56353ms step_avg:94.24ms
+step:599/1705 train_time:56447ms step_avg:94.24ms
+step:600/1705 train_time:56541ms step_avg:94.24ms
+step:601/1705 train_time:56637ms step_avg:94.24ms
+step:602/1705 train_time:56732ms step_avg:94.24ms
+step:603/1705 train_time:56827ms step_avg:94.24ms
+step:604/1705 train_time:56921ms step_avg:94.24ms
+step:605/1705 train_time:57016ms step_avg:94.24ms
+step:606/1705 train_time:57110ms step_avg:94.24ms
+step:607/1705 train_time:57203ms step_avg:94.24ms
+step:608/1705 train_time:57298ms step_avg:94.24ms
+step:609/1705 train_time:57393ms step_avg:94.24ms
+step:610/1705 train_time:57488ms step_avg:94.24ms
+step:611/1705 train_time:57582ms step_avg:94.24ms
+step:612/1705 train_time:57676ms step_avg:94.24ms
+step:613/1705 train_time:57770ms step_avg:94.24ms
+step:614/1705 train_time:57864ms step_avg:94.24ms
+step:615/1705 train_time:57958ms step_avg:94.24ms
+step:616/1705 train_time:58052ms step_avg:94.24ms
+step:617/1705 train_time:58146ms step_avg:94.24ms
+step:618/1705 train_time:58240ms step_avg:94.24ms
+step:619/1705 train_time:58335ms step_avg:94.24ms
+step:620/1705 train_time:58429ms step_avg:94.24ms
+step:621/1705 train_time:58523ms step_avg:94.24ms
+step:622/1705 train_time:58617ms step_avg:94.24ms
+step:623/1705 train_time:58712ms step_avg:94.24ms
+step:624/1705 train_time:58806ms step_avg:94.24ms
+step:625/1705 train_time:58900ms step_avg:94.24ms
+step:625/1705 val_loss:3.6185 train_time:58995ms step_avg:94.39ms
+step:626/1705 train_time:59017ms step_avg:94.28ms
+step:627/1705 train_time:59103ms step_avg:94.26ms
+step:628/1705 train_time:59201ms step_avg:94.27ms
+step:629/1705 train_time:59297ms step_avg:94.27ms
+step:630/1705 train_time:59390ms step_avg:94.27ms
+step:631/1705 train_time:59483ms step_avg:94.27ms
+step:632/1705 train_time:59576ms step_avg:94.27ms
+step:633/1705 train_time:59669ms step_avg:94.26ms
+step:634/1705 train_time:59762ms step_avg:94.26ms
+step:635/1705 train_time:59855ms step_avg:94.26ms
+step:636/1705 train_time:59950ms step_avg:94.26ms
+step:637/1705 train_time:60046ms step_avg:94.26ms
+step:638/1705 train_time:60142ms step_avg:94.27ms
+step:639/1705 train_time:60412ms step_avg:94.54ms
+step:640/1705 train_time:60592ms step_avg:94.67ms
+step:641/1705 train_time:60684ms step_avg:94.67ms
+step:642/1705 train_time:60778ms step_avg:94.67ms
+step:643/1705 train_time:60871ms step_avg:94.67ms
+step:644/1705 train_time:60963ms step_avg:94.66ms
+step:645/1705 train_time:61057ms step_avg:94.66ms
+step:646/1705 train_time:61150ms step_avg:94.66ms
+step:647/1705 train_time:61243ms step_avg:94.66ms
+step:648/1705 train_time:61336ms step_avg:94.65ms
+step:649/1705 train_time:61437ms step_avg:94.66ms
+step:650/1705 train_time:61536ms step_avg:94.67ms
+step:651/1705 train_time:61632ms step_avg:94.67ms
+step:652/1705 train_time:61726ms step_avg:94.67ms
+step:653/1705 train_time:61819ms step_avg:94.67ms
+step:654/1705 train_time:61913ms step_avg:94.67ms
+step:655/1705 train_time:62006ms step_avg:94.67ms
+step:656/1705 train_time:62099ms step_avg:94.66ms
+step:657/1705 train_time:62193ms step_avg:94.66ms
+step:658/1705 train_time:62286ms step_avg:94.66ms
+step:659/1705 train_time:62382ms step_avg:94.66ms
+step:660/1705 train_time:62478ms step_avg:94.66ms
+step:661/1705 train_time:62574ms step_avg:94.67ms
+step:662/1705 train_time:62670ms step_avg:94.67ms
+step:663/1705 train_time:62763ms step_avg:94.67ms
+step:664/1705 train_time:62858ms step_avg:94.67ms
+step:665/1705 train_time:62952ms step_avg:94.66ms
+step:666/1705 train_time:63045ms step_avg:94.66ms
+step:667/1705 train_time:63139ms step_avg:94.66ms
+step:668/1705 train_time:63232ms step_avg:94.66ms
+step:669/1705 train_time:63327ms step_avg:94.66ms
+step:670/1705 train_time:63421ms step_avg:94.66ms
+step:671/1705 train_time:63517ms step_avg:94.66ms
+step:672/1705 train_time:63612ms step_avg:94.66ms
+step:673/1705 train_time:63707ms step_avg:94.66ms
+step:674/1705 train_time:63800ms step_avg:94.66ms
+step:675/1705 train_time:63895ms step_avg:94.66ms
+step:676/1705 train_time:63990ms step_avg:94.66ms
+step:677/1705 train_time:64083ms step_avg:94.66ms
+step:678/1705 train_time:64177ms step_avg:94.66ms
+step:679/1705 train_time:64271ms step_avg:94.65ms
+step:680/1705 train_time:64364ms step_avg:94.65ms
+step:681/1705 train_time:64459ms step_avg:94.65ms
+step:682/1705 train_time:64555ms step_avg:94.66ms
+step:683/1705 train_time:64650ms step_avg:94.66ms
+step:684/1705 train_time:64744ms step_avg:94.66ms
+step:685/1705 train_time:64839ms step_avg:94.66ms
+step:686/1705 train_time:64934ms step_avg:94.66ms
+step:687/1705 train_time:65027ms step_avg:94.65ms
+step:688/1705 train_time:65120ms step_avg:94.65ms
+step:689/1705 train_time:65215ms step_avg:94.65ms
+step:690/1705 train_time:65309ms step_avg:94.65ms
+step:691/1705 train_time:65403ms step_avg:94.65ms
+step:692/1705 train_time:65498ms step_avg:94.65ms
+step:693/1705 train_time:65593ms step_avg:94.65ms
+step:694/1705 train_time:65688ms step_avg:94.65ms
+step:695/1705 train_time:65782ms step_avg:94.65ms
+step:696/1705 train_time:65876ms step_avg:94.65ms
+step:697/1705 train_time:65970ms step_avg:94.65ms
+step:698/1705 train_time:66063ms step_avg:94.65ms
+step:699/1705 train_time:66158ms step_avg:94.65ms
+step:700/1705 train_time:66253ms step_avg:94.65ms
+step:701/1705 train_time:66346ms step_avg:94.64ms
+step:702/1705 train_time:66440ms step_avg:94.64ms
+step:703/1705 train_time:66535ms step_avg:94.64ms
+step:704/1705 train_time:66630ms step_avg:94.64ms
+step:705/1705 train_time:66724ms step_avg:94.64ms
+step:706/1705 train_time:66819ms step_avg:94.64ms
+step:707/1705 train_time:66914ms step_avg:94.64ms
+step:708/1705 train_time:67008ms step_avg:94.64ms
+step:709/1705 train_time:67102ms step_avg:94.64ms
+step:710/1705 train_time:67196ms step_avg:94.64ms
+step:711/1705 train_time:67291ms step_avg:94.64ms
+step:712/1705 train_time:67385ms step_avg:94.64ms
+step:713/1705 train_time:67479ms step_avg:94.64ms
+step:714/1705 train_time:67574ms step_avg:94.64ms
+step:715/1705 train_time:67669ms step_avg:94.64ms
+step:716/1705 train_time:67763ms step_avg:94.64ms
+step:717/1705 train_time:67858ms step_avg:94.64ms
+step:718/1705 train_time:67952ms step_avg:94.64ms
+step:719/1705 train_time:68047ms step_avg:94.64ms
+step:720/1705 train_time:68140ms step_avg:94.64ms
+step:721/1705 train_time:68235ms step_avg:94.64ms
+step:722/1705 train_time:68329ms step_avg:94.64ms
+step:723/1705 train_time:68422ms step_avg:94.64ms
+step:724/1705 train_time:68517ms step_avg:94.64ms
+step:725/1705 train_time:68611ms step_avg:94.64ms
+step:726/1705 train_time:68705ms step_avg:94.63ms
+step:727/1705 train_time:68799ms step_avg:94.63ms
+step:728/1705 train_time:68894ms step_avg:94.63ms
+step:729/1705 train_time:68989ms step_avg:94.63ms
+step:730/1705 train_time:69082ms step_avg:94.63ms
+step:731/1705 train_time:69177ms step_avg:94.63ms
+step:732/1705 train_time:69272ms step_avg:94.63ms
+step:733/1705 train_time:69367ms step_avg:94.63ms
+step:734/1705 train_time:69461ms step_avg:94.63ms
+step:735/1705 train_time:69555ms step_avg:94.63ms
+step:736/1705 train_time:69651ms step_avg:94.64ms
+step:737/1705 train_time:69745ms step_avg:94.63ms
+step:738/1705 train_time:69839ms step_avg:94.63ms
+step:739/1705 train_time:69934ms step_avg:94.63ms
+step:740/1705 train_time:70029ms step_avg:94.63ms
+step:741/1705 train_time:70122ms step_avg:94.63ms
+step:742/1705 train_time:70217ms step_avg:94.63ms
+step:743/1705 train_time:70311ms step_avg:94.63ms
+step:744/1705 train_time:70405ms step_avg:94.63ms
+step:745/1705 train_time:70500ms step_avg:94.63ms
+step:746/1705 train_time:70596ms step_avg:94.63ms
+step:747/1705 train_time:70690ms step_avg:94.63ms
+step:748/1705 train_time:70783ms step_avg:94.63ms
+step:749/1705 train_time:70878ms step_avg:94.63ms
+step:750/1705 train_time:70972ms step_avg:94.63ms
+step:750/1705 val_loss:3.5654 train_time:71067ms step_avg:94.76ms
+step:751/1705 train_time:71089ms step_avg:94.66ms
+step:752/1705 train_time:71168ms step_avg:94.64ms
+step:753/1705 train_time:71267ms step_avg:94.64ms
+step:754/1705 train_time:71362ms step_avg:94.64ms
+step:755/1705 train_time:71455ms step_avg:94.64ms
+step:756/1705 train_time:71548ms step_avg:94.64ms
+step:757/1705 train_time:71642ms step_avg:94.64ms
+step:758/1705 train_time:71735ms step_avg:94.64ms
+step:759/1705 train_time:71828ms step_avg:94.64ms
+step:760/1705 train_time:71922ms step_avg:94.63ms
+step:761/1705 train_time:72017ms step_avg:94.64ms
+step:762/1705 train_time:72113ms step_avg:94.64ms
+step:763/1705 train_time:72210ms step_avg:94.64ms
+step:764/1705 train_time:72306ms step_avg:94.64ms
+step:765/1705 train_time:72402ms step_avg:94.64ms
+step:766/1705 train_time:72496ms step_avg:94.64ms
+step:767/1705 train_time:72590ms step_avg:94.64ms
+step:768/1705 train_time:72684ms step_avg:94.64ms
+step:769/1705 train_time:72777ms step_avg:94.64ms
+step:770/1705 train_time:72871ms step_avg:94.64ms
+step:771/1705 train_time:72965ms step_avg:94.64ms
+step:772/1705 train_time:73060ms step_avg:94.64ms
+step:773/1705 train_time:73156ms step_avg:94.64ms
+step:774/1705 train_time:73251ms step_avg:94.64ms
+step:775/1705 train_time:73347ms step_avg:94.64ms
+step:776/1705 train_time:73441ms step_avg:94.64ms
+step:777/1705 train_time:73536ms step_avg:94.64ms
+step:778/1705 train_time:73629ms step_avg:94.64ms
+step:779/1705 train_time:73723ms step_avg:94.64ms
+step:780/1705 train_time:73817ms step_avg:94.64ms
+step:781/1705 train_time:73910ms step_avg:94.63ms
+step:782/1705 train_time:74004ms step_avg:94.63ms
+step:783/1705 train_time:74100ms step_avg:94.64ms
+step:784/1705 train_time:74196ms step_avg:94.64ms
+step:785/1705 train_time:74290ms step_avg:94.64ms
+step:786/1705 train_time:74385ms step_avg:94.64ms
+step:787/1705 train_time:74480ms step_avg:94.64ms
+step:788/1705 train_time:74575ms step_avg:94.64ms
+step:789/1705 train_time:74668ms step_avg:94.64ms
+step:790/1705 train_time:74762ms step_avg:94.64ms
+step:791/1705 train_time:74857ms step_avg:94.64ms
+step:792/1705 train_time:74950ms step_avg:94.63ms
+step:793/1705 train_time:75045ms step_avg:94.63ms
+step:794/1705 train_time:75140ms step_avg:94.63ms
+step:795/1705 train_time:75235ms step_avg:94.63ms
+step:796/1705 train_time:75329ms step_avg:94.63ms
+step:797/1705 train_time:75424ms step_avg:94.63ms
+step:798/1705 train_time:75519ms step_avg:94.64ms
+step:799/1705 train_time:75613ms step_avg:94.63ms
+step:800/1705 train_time:75707ms step_avg:94.63ms
+step:801/1705 train_time:75801ms step_avg:94.63ms
+step:802/1705 train_time:75895ms step_avg:94.63ms
+step:803/1705 train_time:75989ms step_avg:94.63ms
+step:804/1705 train_time:76084ms step_avg:94.63ms
+step:805/1705 train_time:76180ms step_avg:94.63ms
+step:806/1705 train_time:76276ms step_avg:94.63ms
+step:807/1705 train_time:76370ms step_avg:94.63ms
+step:808/1705 train_time:76465ms step_avg:94.63ms
+step:809/1705 train_time:76560ms step_avg:94.64ms
+step:810/1705 train_time:76654ms step_avg:94.63ms
+step:811/1705 train_time:76748ms step_avg:94.63ms
+step:812/1705 train_time:76842ms step_avg:94.63ms
+step:813/1705 train_time:76936ms step_avg:94.63ms
+step:814/1705 train_time:77030ms step_avg:94.63ms
+step:815/1705 train_time:77124ms step_avg:94.63ms
+step:816/1705 train_time:77220ms step_avg:94.63ms
+step:817/1705 train_time:77315ms step_avg:94.63ms
+step:818/1705 train_time:77409ms step_avg:94.63ms
+step:819/1705 train_time:77504ms step_avg:94.63ms
+step:820/1705 train_time:77599ms step_avg:94.63ms
+step:821/1705 train_time:77693ms step_avg:94.63ms
+step:822/1705 train_time:77788ms step_avg:94.63ms
+step:823/1705 train_time:77882ms step_avg:94.63ms
+step:824/1705 train_time:77977ms step_avg:94.63ms
+step:825/1705 train_time:78070ms step_avg:94.63ms
+step:826/1705 train_time:78165ms step_avg:94.63ms
+step:827/1705 train_time:78260ms step_avg:94.63ms
+step:828/1705 train_time:78354ms step_avg:94.63ms
+step:829/1705 train_time:78448ms step_avg:94.63ms
+step:830/1705 train_time:78543ms step_avg:94.63ms
+step:831/1705 train_time:78638ms step_avg:94.63ms
+step:832/1705 train_time:78731ms step_avg:94.63ms
+step:833/1705 train_time:78825ms step_avg:94.63ms
+step:834/1705 train_time:78921ms step_avg:94.63ms
+step:835/1705 train_time:79015ms step_avg:94.63ms
+step:836/1705 train_time:79109ms step_avg:94.63ms
+step:837/1705 train_time:79204ms step_avg:94.63ms
+step:838/1705 train_time:79298ms step_avg:94.63ms
+step:839/1705 train_time:79393ms step_avg:94.63ms
+step:840/1705 train_time:79487ms step_avg:94.63ms
+step:841/1705 train_time:79581ms step_avg:94.63ms
+step:842/1705 train_time:79675ms step_avg:94.63ms
+step:843/1705 train_time:79769ms step_avg:94.63ms
+step:844/1705 train_time:79864ms step_avg:94.63ms
+step:845/1705 train_time:79959ms step_avg:94.63ms
+step:846/1705 train_time:80053ms step_avg:94.63ms
+step:847/1705 train_time:80148ms step_avg:94.63ms
+step:848/1705 train_time:80242ms step_avg:94.63ms
+step:849/1705 train_time:80336ms step_avg:94.62ms
+step:850/1705 train_time:80431ms step_avg:94.62ms
+step:851/1705 train_time:80698ms step_avg:94.83ms
+step:852/1705 train_time:80879ms step_avg:94.93ms
+step:853/1705 train_time:80971ms step_avg:94.92ms
+step:854/1705 train_time:81065ms step_avg:94.92ms
+step:855/1705 train_time:81159ms step_avg:94.92ms
+step:856/1705 train_time:81252ms step_avg:94.92ms
+step:857/1705 train_time:81346ms step_avg:94.92ms
+step:858/1705 train_time:81439ms step_avg:94.92ms
+step:859/1705 train_time:81532ms step_avg:94.91ms
+step:860/1705 train_time:81625ms step_avg:94.91ms
+step:861/1705 train_time:81722ms step_avg:94.92ms
+step:862/1705 train_time:81819ms step_avg:94.92ms
+step:863/1705 train_time:81915ms step_avg:94.92ms
+step:864/1705 train_time:82009ms step_avg:94.92ms
+step:865/1705 train_time:82103ms step_avg:94.92ms
+step:866/1705 train_time:82198ms step_avg:94.92ms
+step:867/1705 train_time:82291ms step_avg:94.91ms
+step:868/1705 train_time:82384ms step_avg:94.91ms
+step:869/1705 train_time:82479ms step_avg:94.91ms
+step:870/1705 train_time:82573ms step_avg:94.91ms
+step:871/1705 train_time:82666ms step_avg:94.91ms
+step:872/1705 train_time:82762ms step_avg:94.91ms
+step:873/1705 train_time:82859ms step_avg:94.91ms
+step:874/1705 train_time:82953ms step_avg:94.91ms
+step:875/1705 train_time:83047ms step_avg:94.91ms
+step:875/1705 val_loss:3.5228 train_time:83143ms step_avg:95.02ms
+step:876/1705 train_time:83164ms step_avg:94.94ms
+step:877/1705 train_time:83242ms step_avg:94.92ms
+step:878/1705 train_time:83341ms step_avg:94.92ms
+step:879/1705 train_time:83437ms step_avg:94.92ms
+step:880/1705 train_time:83529ms step_avg:94.92ms
+step:881/1705 train_time:83623ms step_avg:94.92ms
+step:882/1705 train_time:83716ms step_avg:94.92ms
+step:883/1705 train_time:83809ms step_avg:94.91ms
+step:884/1705 train_time:83902ms step_avg:94.91ms
+step:885/1705 train_time:83996ms step_avg:94.91ms
+step:886/1705 train_time:84090ms step_avg:94.91ms
+step:887/1705 train_time:84187ms step_avg:94.91ms
+step:888/1705 train_time:84284ms step_avg:94.91ms
+step:889/1705 train_time:84380ms step_avg:94.92ms
+step:890/1705 train_time:84475ms step_avg:94.92ms
+step:891/1705 train_time:84569ms step_avg:94.91ms
+step:892/1705 train_time:84664ms step_avg:94.91ms
+step:893/1705 train_time:84758ms step_avg:94.91ms
+step:894/1705 train_time:84850ms step_avg:94.91ms
+step:895/1705 train_time:84944ms step_avg:94.91ms
+step:896/1705 train_time:85040ms step_avg:94.91ms
+step:897/1705 train_time:85136ms step_avg:94.91ms
+step:898/1705 train_time:85230ms step_avg:94.91ms
+step:899/1705 train_time:85325ms step_avg:94.91ms
+step:900/1705 train_time:85421ms step_avg:94.91ms
+step:901/1705 train_time:85517ms step_avg:94.91ms
+step:902/1705 train_time:85610ms step_avg:94.91ms
+step:903/1705 train_time:85704ms step_avg:94.91ms
+step:904/1705 train_time:85798ms step_avg:94.91ms
+step:905/1705 train_time:85891ms step_avg:94.91ms
+step:906/1705 train_time:85985ms step_avg:94.91ms
+step:907/1705 train_time:86080ms step_avg:94.91ms
+step:908/1705 train_time:86175ms step_avg:94.91ms
+step:909/1705 train_time:86269ms step_avg:94.91ms
+step:910/1705 train_time:86366ms step_avg:94.91ms
+step:911/1705 train_time:86462ms step_avg:94.91ms
+step:912/1705 train_time:86557ms step_avg:94.91ms
+step:913/1705 train_time:86650ms step_avg:94.91ms
+step:914/1705 train_time:86745ms step_avg:94.91ms
+step:915/1705 train_time:86840ms step_avg:94.91ms
+step:916/1705 train_time:86933ms step_avg:94.91ms
+step:917/1705 train_time:87027ms step_avg:94.90ms
+step:918/1705 train_time:87123ms step_avg:94.90ms
+step:919/1705 train_time:87218ms step_avg:94.91ms
+step:920/1705 train_time:87313ms step_avg:94.91ms
+step:921/1705 train_time:87407ms step_avg:94.90ms
+step:922/1705 train_time:87503ms step_avg:94.91ms
+step:923/1705 train_time:87597ms step_avg:94.90ms
+step:924/1705 train_time:87691ms step_avg:94.90ms
+step:925/1705 train_time:87785ms step_avg:94.90ms
+step:926/1705 train_time:87880ms step_avg:94.90ms
+step:927/1705 train_time:87974ms step_avg:94.90ms
+step:928/1705 train_time:88069ms step_avg:94.90ms
+step:929/1705 train_time:88163ms step_avg:94.90ms
+step:930/1705 train_time:88258ms step_avg:94.90ms
+step:931/1705 train_time:88352ms step_avg:94.90ms
+step:932/1705 train_time:88446ms step_avg:94.90ms
+step:933/1705 train_time:88542ms step_avg:94.90ms
+step:934/1705 train_time:88637ms step_avg:94.90ms
+step:935/1705 train_time:88731ms step_avg:94.90ms
+step:936/1705 train_time:88826ms step_avg:94.90ms
+step:937/1705 train_time:88920ms step_avg:94.90ms
+step:938/1705 train_time:89015ms step_avg:94.90ms
+step:939/1705 train_time:89109ms step_avg:94.90ms
+step:940/1705 train_time:89204ms step_avg:94.90ms
+step:941/1705 train_time:89299ms step_avg:94.90ms
+step:942/1705 train_time:89392ms step_avg:94.90ms
+step:943/1705 train_time:89486ms step_avg:94.90ms
+step:944/1705 train_time:89581ms step_avg:94.90ms
+step:945/1705 train_time:89676ms step_avg:94.90ms
+step:946/1705 train_time:89770ms step_avg:94.89ms
+step:947/1705 train_time:89864ms step_avg:94.89ms
+step:948/1705 train_time:89960ms step_avg:94.89ms
+step:949/1705 train_time:90054ms step_avg:94.89ms
+step:950/1705 train_time:90148ms step_avg:94.89ms
+step:951/1705 train_time:90243ms step_avg:94.89ms
+step:952/1705 train_time:90338ms step_avg:94.89ms
+step:953/1705 train_time:90433ms step_avg:94.89ms
+step:954/1705 train_time:90527ms step_avg:94.89ms
+step:955/1705 train_time:90621ms step_avg:94.89ms
+step:956/1705 train_time:90716ms step_avg:94.89ms
+step:957/1705 train_time:90810ms step_avg:94.89ms
+step:958/1705 train_time:90904ms step_avg:94.89ms
+step:959/1705 train_time:90998ms step_avg:94.89ms
+step:960/1705 train_time:91092ms step_avg:94.89ms
+step:961/1705 train_time:91187ms step_avg:94.89ms
+step:962/1705 train_time:91282ms step_avg:94.89ms
+step:963/1705 train_time:91376ms step_avg:94.89ms
+step:964/1705 train_time:91471ms step_avg:94.89ms
+step:965/1705 train_time:91565ms step_avg:94.89ms
+step:966/1705 train_time:91661ms step_avg:94.89ms
+step:967/1705 train_time:91755ms step_avg:94.89ms
+step:968/1705 train_time:91848ms step_avg:94.88ms
+step:969/1705 train_time:91943ms step_avg:94.88ms
+step:970/1705 train_time:92038ms step_avg:94.88ms
+step:971/1705 train_time:92133ms step_avg:94.88ms
+step:972/1705 train_time:92227ms step_avg:94.88ms
+step:973/1705 train_time:92322ms step_avg:94.88ms
+step:974/1705 train_time:92417ms step_avg:94.88ms
+step:975/1705 train_time:92511ms step_avg:94.88ms
+step:976/1705 train_time:92605ms step_avg:94.88ms
+step:977/1705 train_time:92700ms step_avg:94.88ms
+step:978/1705 train_time:92794ms step_avg:94.88ms
+step:979/1705 train_time:92888ms step_avg:94.88ms
+step:980/1705 train_time:92983ms step_avg:94.88ms
+step:981/1705 train_time:93077ms step_avg:94.88ms
+step:982/1705 train_time:93171ms step_avg:94.88ms
+step:983/1705 train_time:93266ms step_avg:94.88ms
+step:984/1705 train_time:93362ms step_avg:94.88ms
+step:985/1705 train_time:93457ms step_avg:94.88ms
+step:986/1705 train_time:93551ms step_avg:94.88ms
+step:987/1705 train_time:93646ms step_avg:94.88ms
+step:988/1705 train_time:93740ms step_avg:94.88ms
+step:989/1705 train_time:93834ms step_avg:94.88ms
+step:990/1705 train_time:93928ms step_avg:94.88ms
+step:991/1705 train_time:94024ms step_avg:94.88ms
+step:992/1705 train_time:94118ms step_avg:94.88ms
+step:993/1705 train_time:94212ms step_avg:94.88ms
+step:994/1705 train_time:94307ms step_avg:94.88ms
+step:995/1705 train_time:94402ms step_avg:94.88ms
+step:996/1705 train_time:94498ms step_avg:94.88ms
+step:997/1705 train_time:94591ms step_avg:94.88ms
+step:998/1705 train_time:94685ms step_avg:94.87ms
+step:999/1705 train_time:94779ms step_avg:94.87ms
+step:1000/1705 train_time:94874ms step_avg:94.87ms
+step:1000/1705 val_loss:3.4836 train_time:94968ms step_avg:94.97ms
+step:1001/1705 train_time:94989ms step_avg:94.89ms
+step:1002/1705 train_time:95069ms step_avg:94.88ms
+step:1003/1705 train_time:95166ms step_avg:94.88ms
+step:1004/1705 train_time:95263ms step_avg:94.88ms
+step:1005/1705 train_time:95356ms step_avg:94.88ms
+step:1006/1705 train_time:95450ms step_avg:94.88ms
+step:1007/1705 train_time:95543ms step_avg:94.88ms
+step:1008/1705 train_time:95637ms step_avg:94.88ms
+step:1009/1705 train_time:95730ms step_avg:94.88ms
+step:1010/1705 train_time:95824ms step_avg:94.88ms
+step:1011/1705 train_time:95920ms step_avg:94.88ms
+step:1012/1705 train_time:96017ms step_avg:94.88ms
+step:1013/1705 train_time:96114ms step_avg:94.88ms
+step:1014/1705 train_time:96210ms step_avg:94.88ms
+step:1015/1705 train_time:96305ms step_avg:94.88ms
+step:1016/1705 train_time:96399ms step_avg:94.88ms
+step:1017/1705 train_time:96494ms step_avg:94.88ms
+step:1018/1705 train_time:96587ms step_avg:94.88ms
+step:1019/1705 train_time:96681ms step_avg:94.88ms
+step:1020/1705 train_time:96776ms step_avg:94.88ms
+step:1021/1705 train_time:96871ms step_avg:94.88ms
+step:1022/1705 train_time:96965ms step_avg:94.88ms
+step:1023/1705 train_time:97061ms step_avg:94.88ms
+step:1024/1705 train_time:97157ms step_avg:94.88ms
+step:1025/1705 train_time:97253ms step_avg:94.88ms
+step:1026/1705 train_time:97348ms step_avg:94.88ms
+step:1027/1705 train_time:97442ms step_avg:94.88ms
+step:1028/1705 train_time:97536ms step_avg:94.88ms
+step:1029/1705 train_time:97630ms step_avg:94.88ms
+step:1030/1705 train_time:97724ms step_avg:94.88ms
+step:1031/1705 train_time:97820ms step_avg:94.88ms
+step:1032/1705 train_time:97914ms step_avg:94.88ms
+step:1033/1705 train_time:98008ms step_avg:94.88ms
+step:1034/1705 train_time:98103ms step_avg:94.88ms
+step:1035/1705 train_time:98199ms step_avg:94.88ms
+step:1036/1705 train_time:98294ms step_avg:94.88ms
+step:1037/1705 train_time:98389ms step_avg:94.88ms
+step:1038/1705 train_time:98483ms step_avg:94.88ms
+step:1039/1705 train_time:98578ms step_avg:94.88ms
+step:1040/1705 train_time:98673ms step_avg:94.88ms
+step:1041/1705 train_time:98766ms step_avg:94.88ms
+step:1042/1705 train_time:98861ms step_avg:94.88ms
+step:1043/1705 train_time:98956ms step_avg:94.88ms
+step:1044/1705 train_time:99052ms step_avg:94.88ms
+step:1045/1705 train_time:99147ms step_avg:94.88ms
+step:1046/1705 train_time:99242ms step_avg:94.88ms
+step:1047/1705 train_time:99336ms step_avg:94.88ms
+step:1048/1705 train_time:99431ms step_avg:94.88ms
+step:1049/1705 train_time:99525ms step_avg:94.88ms
+step:1050/1705 train_time:99619ms step_avg:94.87ms
+step:1051/1705 train_time:99714ms step_avg:94.87ms
+step:1052/1705 train_time:99807ms step_avg:94.87ms
+step:1053/1705 train_time:99902ms step_avg:94.87ms
+step:1054/1705 train_time:99997ms step_avg:94.87ms
+step:1055/1705 train_time:100092ms step_avg:94.87ms
+step:1056/1705 train_time:100185ms step_avg:94.87ms
+step:1057/1705 train_time:100281ms step_avg:94.87ms
+step:1058/1705 train_time:100377ms step_avg:94.87ms
+step:1059/1705 train_time:100471ms step_avg:94.87ms
+step:1060/1705 train_time:100565ms step_avg:94.87ms
+step:1061/1705 train_time:100660ms step_avg:94.87ms
+step:1062/1705 train_time:100930ms step_avg:95.04ms
+step:1063/1705 train_time:101033ms step_avg:95.04ms
+step:1064/1705 train_time:101125ms step_avg:95.04ms
+step:1065/1705 train_time:101218ms step_avg:95.04ms
+step:1066/1705 train_time:101311ms step_avg:95.04ms
+step:1067/1705 train_time:101404ms step_avg:95.04ms
+step:1068/1705 train_time:101498ms step_avg:95.04ms
+step:1069/1705 train_time:101592ms step_avg:95.03ms
+step:1070/1705 train_time:101685ms step_avg:95.03ms
+step:1071/1705 train_time:101778ms step_avg:95.03ms
+step:1072/1705 train_time:101877ms step_avg:95.03ms
+step:1073/1705 train_time:101975ms step_avg:95.04ms
+step:1074/1705 train_time:102073ms step_avg:95.04ms
+step:1075/1705 train_time:102166ms step_avg:95.04ms
+step:1076/1705 train_time:102261ms step_avg:95.04ms
+step:1077/1705 train_time:102356ms step_avg:95.04ms
+step:1078/1705 train_time:102450ms step_avg:95.04ms
+step:1079/1705 train_time:102543ms step_avg:95.04ms
+step:1080/1705 train_time:102637ms step_avg:95.03ms
+step:1081/1705 train_time:102730ms step_avg:95.03ms
+step:1082/1705 train_time:102824ms step_avg:95.03ms
+step:1083/1705 train_time:102921ms step_avg:95.03ms
+step:1084/1705 train_time:103017ms step_avg:95.03ms
+step:1085/1705 train_time:103112ms step_avg:95.03ms
+step:1086/1705 train_time:103207ms step_avg:95.03ms
+step:1087/1705 train_time:103302ms step_avg:95.03ms
+step:1088/1705 train_time:103397ms step_avg:95.03ms
+step:1089/1705 train_time:103491ms step_avg:95.03ms
+step:1090/1705 train_time:103585ms step_avg:95.03ms
+step:1091/1705 train_time:103679ms step_avg:95.03ms
+step:1092/1705 train_time:103775ms step_avg:95.03ms
+step:1093/1705 train_time:103869ms step_avg:95.03ms
+step:1094/1705 train_time:103964ms step_avg:95.03ms
+step:1095/1705 train_time:104059ms step_avg:95.03ms
+step:1096/1705 train_time:104155ms step_avg:95.03ms
+step:1097/1705 train_time:104250ms step_avg:95.03ms
+step:1098/1705 train_time:104344ms step_avg:95.03ms
+step:1099/1705 train_time:104439ms step_avg:95.03ms
+step:1100/1705 train_time:104533ms step_avg:95.03ms
+step:1101/1705 train_time:104626ms step_avg:95.03ms
+step:1102/1705 train_time:104721ms step_avg:95.03ms
+step:1103/1705 train_time:104816ms step_avg:95.03ms
+step:1104/1705 train_time:104910ms step_avg:95.03ms
+step:1105/1705 train_time:105004ms step_avg:95.03ms
+step:1106/1705 train_time:105100ms step_avg:95.03ms
+step:1107/1705 train_time:105195ms step_avg:95.03ms
+step:1108/1705 train_time:105289ms step_avg:95.03ms
+step:1109/1705 train_time:105383ms step_avg:95.03ms
+step:1110/1705 train_time:105478ms step_avg:95.02ms
+step:1111/1705 train_time:105572ms step_avg:95.02ms
+step:1112/1705 train_time:105666ms step_avg:95.02ms
+step:1113/1705 train_time:105760ms step_avg:95.02ms
+step:1114/1705 train_time:105855ms step_avg:95.02ms
+step:1115/1705 train_time:105950ms step_avg:95.02ms
+step:1116/1705 train_time:106045ms step_avg:95.02ms
+step:1117/1705 train_time:106140ms step_avg:95.02ms
+step:1118/1705 train_time:106235ms step_avg:95.02ms
+step:1119/1705 train_time:106329ms step_avg:95.02ms
+step:1120/1705 train_time:106423ms step_avg:95.02ms
+step:1121/1705 train_time:106518ms step_avg:95.02ms
+step:1122/1705 train_time:106611ms step_avg:95.02ms
+step:1123/1705 train_time:106705ms step_avg:95.02ms
+step:1124/1705 train_time:106800ms step_avg:95.02ms
+step:1125/1705 train_time:106895ms step_avg:95.02ms
+step:1125/1705 val_loss:3.4373 train_time:106990ms step_avg:95.10ms
+step:1126/1705 train_time:107012ms step_avg:95.04ms
+step:1127/1705 train_time:107091ms step_avg:95.02ms
+step:1128/1705 train_time:107191ms step_avg:95.03ms
+step:1129/1705 train_time:107287ms step_avg:95.03ms
+step:1130/1705 train_time:107381ms step_avg:95.03ms
+step:1131/1705 train_time:107475ms step_avg:95.03ms
+step:1132/1705 train_time:107568ms step_avg:95.02ms
+step:1133/1705 train_time:107662ms step_avg:95.02ms
+step:1134/1705 train_time:107755ms step_avg:95.02ms
+step:1135/1705 train_time:107848ms step_avg:95.02ms
+step:1136/1705 train_time:107943ms step_avg:95.02ms
+step:1137/1705 train_time:108039ms step_avg:95.02ms
+step:1138/1705 train_time:108137ms step_avg:95.02ms
+step:1139/1705 train_time:108233ms step_avg:95.02ms
+step:1140/1705 train_time:108328ms step_avg:95.02ms
+step:1141/1705 train_time:108424ms step_avg:95.03ms
+step:1142/1705 train_time:108518ms step_avg:95.02ms
+step:1143/1705 train_time:108612ms step_avg:95.02ms
+step:1144/1705 train_time:108708ms step_avg:95.02ms
+step:1145/1705 train_time:108802ms step_avg:95.02ms
+step:1146/1705 train_time:108897ms step_avg:95.02ms
+step:1147/1705 train_time:108992ms step_avg:95.02ms
+step:1148/1705 train_time:109088ms step_avg:95.02ms
+step:1149/1705 train_time:109184ms step_avg:95.03ms
+step:1150/1705 train_time:109281ms step_avg:95.03ms
+step:1151/1705 train_time:109376ms step_avg:95.03ms
+step:1152/1705 train_time:109470ms step_avg:95.03ms
+step:1153/1705 train_time:109565ms step_avg:95.03ms
+step:1154/1705 train_time:109660ms step_avg:95.03ms
+step:1155/1705 train_time:109754ms step_avg:95.03ms
+step:1156/1705 train_time:109850ms step_avg:95.03ms
+step:1157/1705 train_time:109945ms step_avg:95.03ms
+step:1158/1705 train_time:110042ms step_avg:95.03ms
+step:1159/1705 train_time:110137ms step_avg:95.03ms
+step:1160/1705 train_time:110233ms step_avg:95.03ms
+step:1161/1705 train_time:110329ms step_avg:95.03ms
+step:1162/1705 train_time:110425ms step_avg:95.03ms
+step:1163/1705 train_time:110520ms step_avg:95.03ms
+step:1164/1705 train_time:110615ms step_avg:95.03ms
+step:1165/1705 train_time:110709ms step_avg:95.03ms
+step:1166/1705 train_time:110805ms step_avg:95.03ms
+step:1167/1705 train_time:110900ms step_avg:95.03ms
+step:1168/1705 train_time:110995ms step_avg:95.03ms
+step:1169/1705 train_time:111091ms step_avg:95.03ms
+step:1170/1705 train_time:111187ms step_avg:95.03ms
+step:1171/1705 train_time:111283ms step_avg:95.03ms
+step:1172/1705 train_time:111380ms step_avg:95.03ms
+step:1173/1705 train_time:111475ms step_avg:95.03ms
+step:1174/1705 train_time:111569ms step_avg:95.03ms
+step:1175/1705 train_time:111664ms step_avg:95.03ms
+step:1176/1705 train_time:111760ms step_avg:95.03ms
+step:1177/1705 train_time:111855ms step_avg:95.03ms
+step:1178/1705 train_time:111950ms step_avg:95.03ms
+step:1179/1705 train_time:112045ms step_avg:95.03ms
+step:1180/1705 train_time:112140ms step_avg:95.03ms
+step:1181/1705 train_time:112236ms step_avg:95.04ms
+step:1182/1705 train_time:112331ms step_avg:95.03ms
+step:1183/1705 train_time:112427ms step_avg:95.04ms
+step:1184/1705 train_time:112523ms step_avg:95.04ms
+step:1185/1705 train_time:112618ms step_avg:95.04ms
+step:1186/1705 train_time:112713ms step_avg:95.04ms
+step:1187/1705 train_time:112808ms step_avg:95.04ms
+step:1188/1705 train_time:112903ms step_avg:95.04ms
+step:1189/1705 train_time:112999ms step_avg:95.04ms
+step:1190/1705 train_time:113094ms step_avg:95.04ms
+step:1191/1705 train_time:113189ms step_avg:95.04ms
+step:1192/1705 train_time:113285ms step_avg:95.04ms
+step:1193/1705 train_time:113380ms step_avg:95.04ms
+step:1194/1705 train_time:113476ms step_avg:95.04ms
+step:1195/1705 train_time:113570ms step_avg:95.04ms
+step:1196/1705 train_time:113665ms step_avg:95.04ms
+step:1197/1705 train_time:113761ms step_avg:95.04ms
+step:1198/1705 train_time:113856ms step_avg:95.04ms
+step:1199/1705 train_time:113950ms step_avg:95.04ms
+step:1200/1705 train_time:114046ms step_avg:95.04ms
+step:1201/1705 train_time:114142ms step_avg:95.04ms
+step:1202/1705 train_time:114237ms step_avg:95.04ms
+step:1203/1705 train_time:114332ms step_avg:95.04ms
+step:1204/1705 train_time:114427ms step_avg:95.04ms
+step:1205/1705 train_time:114523ms step_avg:95.04ms
+step:1206/1705 train_time:114620ms step_avg:95.04ms
+step:1207/1705 train_time:114715ms step_avg:95.04ms
+step:1208/1705 train_time:114810ms step_avg:95.04ms
+step:1209/1705 train_time:114905ms step_avg:95.04ms
+step:1210/1705 train_time:115001ms step_avg:95.04ms
+step:1211/1705 train_time:115096ms step_avg:95.04ms
+step:1212/1705 train_time:115191ms step_avg:95.04ms
+step:1213/1705 train_time:115286ms step_avg:95.04ms
+step:1214/1705 train_time:115381ms step_avg:95.04ms
+step:1215/1705 train_time:115477ms step_avg:95.04ms
+step:1216/1705 train_time:115571ms step_avg:95.04ms
+step:1217/1705 train_time:115667ms step_avg:95.04ms
+step:1218/1705 train_time:115764ms step_avg:95.04ms
+step:1219/1705 train_time:115860ms step_avg:95.05ms
+step:1220/1705 train_time:115956ms step_avg:95.05ms
+step:1221/1705 train_time:116051ms step_avg:95.05ms
+step:1222/1705 train_time:116146ms step_avg:95.05ms
+step:1223/1705 train_time:116242ms step_avg:95.05ms
+step:1224/1705 train_time:116336ms step_avg:95.05ms
+step:1225/1705 train_time:116431ms step_avg:95.05ms
+step:1226/1705 train_time:116527ms step_avg:95.05ms
+step:1227/1705 train_time:116622ms step_avg:95.05ms
+step:1228/1705 train_time:116718ms step_avg:95.05ms
+step:1229/1705 train_time:116813ms step_avg:95.05ms
+step:1230/1705 train_time:116908ms step_avg:95.05ms
+step:1231/1705 train_time:117004ms step_avg:95.05ms
+step:1232/1705 train_time:117100ms step_avg:95.05ms
+step:1233/1705 train_time:117195ms step_avg:95.05ms
+step:1234/1705 train_time:117289ms step_avg:95.05ms
+step:1235/1705 train_time:117385ms step_avg:95.05ms
+step:1236/1705 train_time:117481ms step_avg:95.05ms
+step:1237/1705 train_time:117576ms step_avg:95.05ms
+step:1238/1705 train_time:117672ms step_avg:95.05ms
+step:1239/1705 train_time:117768ms step_avg:95.05ms
+step:1240/1705 train_time:117863ms step_avg:95.05ms
+step:1241/1705 train_time:117958ms step_avg:95.05ms
+step:1242/1705 train_time:118052ms step_avg:95.05ms
+step:1243/1705 train_time:118147ms step_avg:95.05ms
+step:1244/1705 train_time:118243ms step_avg:95.05ms
+step:1245/1705 train_time:118338ms step_avg:95.05ms
+step:1246/1705 train_time:118433ms step_avg:95.05ms
+step:1247/1705 train_time:118529ms step_avg:95.05ms
+step:1248/1705 train_time:118625ms step_avg:95.05ms
+step:1249/1705 train_time:118720ms step_avg:95.05ms
+step:1250/1705 train_time:118815ms step_avg:95.05ms
+step:1250/1705 val_loss:3.3888 train_time:118911ms step_avg:95.13ms
+step:1251/1705 train_time:118932ms step_avg:95.07ms
+step:1252/1705 train_time:119014ms step_avg:95.06ms
+step:1253/1705 train_time:119112ms step_avg:95.06ms
+step:1254/1705 train_time:119206ms step_avg:95.06ms
+step:1255/1705 train_time:119300ms step_avg:95.06ms
+step:1256/1705 train_time:119394ms step_avg:95.06ms
+step:1257/1705 train_time:119488ms step_avg:95.06ms
+step:1258/1705 train_time:119583ms step_avg:95.06ms
+step:1259/1705 train_time:119677ms step_avg:95.06ms
+step:1260/1705 train_time:119770ms step_avg:95.06ms
+step:1261/1705 train_time:119867ms step_avg:95.06ms
+step:1262/1705 train_time:119966ms step_avg:95.06ms
+step:1263/1705 train_time:120065ms step_avg:95.06ms
+step:1264/1705 train_time:120161ms step_avg:95.06ms
+step:1265/1705 train_time:120256ms step_avg:95.06ms
+step:1266/1705 train_time:120350ms step_avg:95.06ms
+step:1267/1705 train_time:120445ms step_avg:95.06ms
+step:1268/1705 train_time:120539ms step_avg:95.06ms
+step:1269/1705 train_time:120632ms step_avg:95.06ms
+step:1270/1705 train_time:120727ms step_avg:95.06ms
+step:1271/1705 train_time:120823ms step_avg:95.06ms
+step:1272/1705 train_time:120918ms step_avg:95.06ms
+step:1273/1705 train_time:121015ms step_avg:95.06ms
+step:1274/1705 train_time:121423ms step_avg:95.31ms
+step:1275/1705 train_time:121493ms step_avg:95.29ms
+step:1276/1705 train_time:121588ms step_avg:95.29ms
+step:1277/1705 train_time:121682ms step_avg:95.29ms
+step:1278/1705 train_time:121776ms step_avg:95.29ms
+step:1279/1705 train_time:121869ms step_avg:95.28ms
+step:1280/1705 train_time:121963ms step_avg:95.28ms
+step:1281/1705 train_time:122057ms step_avg:95.28ms
+step:1282/1705 train_time:122151ms step_avg:95.28ms
+step:1283/1705 train_time:122245ms step_avg:95.28ms
+step:1284/1705 train_time:122347ms step_avg:95.29ms
+step:1285/1705 train_time:122445ms step_avg:95.29ms
+step:1286/1705 train_time:122544ms step_avg:95.29ms
+step:1287/1705 train_time:122640ms step_avg:95.29ms
+step:1288/1705 train_time:122735ms step_avg:95.29ms
+step:1289/1705 train_time:122829ms step_avg:95.29ms
+step:1290/1705 train_time:122924ms step_avg:95.29ms
+step:1291/1705 train_time:123018ms step_avg:95.29ms
+step:1292/1705 train_time:123112ms step_avg:95.29ms
+step:1293/1705 train_time:123207ms step_avg:95.29ms
+step:1294/1705 train_time:123303ms step_avg:95.29ms
+step:1295/1705 train_time:123400ms step_avg:95.29ms
+step:1296/1705 train_time:123497ms step_avg:95.29ms
+step:1297/1705 train_time:123592ms step_avg:95.29ms
+step:1298/1705 train_time:123688ms step_avg:95.29ms
+step:1299/1705 train_time:123785ms step_avg:95.29ms
+step:1300/1705 train_time:123880ms step_avg:95.29ms
+step:1301/1705 train_time:123976ms step_avg:95.29ms
+step:1302/1705 train_time:124069ms step_avg:95.29ms
+step:1303/1705 train_time:124165ms step_avg:95.29ms
+step:1304/1705 train_time:124260ms step_avg:95.29ms
+step:1305/1705 train_time:124355ms step_avg:95.29ms
+step:1306/1705 train_time:124452ms step_avg:95.29ms
+step:1307/1705 train_time:124549ms step_avg:95.29ms
+step:1308/1705 train_time:124645ms step_avg:95.29ms
+step:1309/1705 train_time:124741ms step_avg:95.29ms
+step:1310/1705 train_time:124836ms step_avg:95.29ms
+step:1311/1705 train_time:124931ms step_avg:95.29ms
+step:1312/1705 train_time:125025ms step_avg:95.29ms
+step:1313/1705 train_time:125121ms step_avg:95.29ms
+step:1314/1705 train_time:125216ms step_avg:95.29ms
+step:1315/1705 train_time:125310ms step_avg:95.29ms
+step:1316/1705 train_time:125406ms step_avg:95.29ms
+step:1317/1705 train_time:125502ms step_avg:95.29ms
+step:1318/1705 train_time:125597ms step_avg:95.29ms
+step:1319/1705 train_time:125692ms step_avg:95.29ms
+step:1320/1705 train_time:125788ms step_avg:95.29ms
+step:1321/1705 train_time:125884ms step_avg:95.29ms
+step:1322/1705 train_time:125979ms step_avg:95.29ms
+step:1323/1705 train_time:126073ms step_avg:95.29ms
+step:1324/1705 train_time:126168ms step_avg:95.29ms
+step:1325/1705 train_time:126263ms step_avg:95.29ms
+step:1326/1705 train_time:126359ms step_avg:95.29ms
+step:1327/1705 train_time:126454ms step_avg:95.29ms
+step:1328/1705 train_time:126550ms step_avg:95.29ms
+step:1329/1705 train_time:126647ms step_avg:95.30ms
+step:1330/1705 train_time:126742ms step_avg:95.30ms
+step:1331/1705 train_time:126838ms step_avg:95.29ms
+step:1332/1705 train_time:126932ms step_avg:95.29ms
+step:1333/1705 train_time:127028ms step_avg:95.29ms
+step:1334/1705 train_time:127123ms step_avg:95.29ms
+step:1335/1705 train_time:127218ms step_avg:95.29ms
+step:1336/1705 train_time:127312ms step_avg:95.29ms
+step:1337/1705 train_time:127408ms step_avg:95.29ms
+step:1338/1705 train_time:127504ms step_avg:95.29ms
+step:1339/1705 train_time:127600ms step_avg:95.30ms
+step:1340/1705 train_time:127696ms step_avg:95.30ms
+step:1341/1705 train_time:127791ms step_avg:95.30ms
+step:1342/1705 train_time:127886ms step_avg:95.30ms
+step:1343/1705 train_time:127982ms step_avg:95.30ms
+step:1344/1705 train_time:128076ms step_avg:95.29ms
+step:1345/1705 train_time:128170ms step_avg:95.29ms
+step:1346/1705 train_time:128266ms step_avg:95.29ms
+step:1347/1705 train_time:128362ms step_avg:95.29ms
+step:1348/1705 train_time:128457ms step_avg:95.29ms
+step:1349/1705 train_time:128553ms step_avg:95.30ms
+step:1350/1705 train_time:128649ms step_avg:95.30ms
+step:1351/1705 train_time:128745ms step_avg:95.30ms
+step:1352/1705 train_time:128841ms step_avg:95.30ms
+step:1353/1705 train_time:128937ms step_avg:95.30ms
+step:1354/1705 train_time:129032ms step_avg:95.30ms
+step:1355/1705 train_time:129127ms step_avg:95.30ms
+step:1356/1705 train_time:129223ms step_avg:95.30ms
+step:1357/1705 train_time:129319ms step_avg:95.30ms
+step:1358/1705 train_time:129413ms step_avg:95.30ms
+step:1359/1705 train_time:129509ms step_avg:95.30ms
+step:1360/1705 train_time:129605ms step_avg:95.30ms
+step:1361/1705 train_time:129699ms step_avg:95.30ms
+step:1362/1705 train_time:129795ms step_avg:95.30ms
+step:1363/1705 train_time:129890ms step_avg:95.30ms
+step:1364/1705 train_time:129985ms step_avg:95.30ms
+step:1365/1705 train_time:130080ms step_avg:95.30ms
+step:1366/1705 train_time:130174ms step_avg:95.30ms
+step:1367/1705 train_time:130269ms step_avg:95.30ms
+step:1368/1705 train_time:130365ms step_avg:95.30ms
+step:1369/1705 train_time:130461ms step_avg:95.30ms
+step:1370/1705 train_time:130557ms step_avg:95.30ms
+step:1371/1705 train_time:130653ms step_avg:95.30ms
+step:1372/1705 train_time:130748ms step_avg:95.30ms
+step:1373/1705 train_time:130844ms step_avg:95.30ms
+step:1374/1705 train_time:130939ms step_avg:95.30ms
+step:1375/1705 train_time:131034ms step_avg:95.30ms
+step:1375/1705 val_loss:3.3520 train_time:131130ms step_avg:95.37ms
+step:1376/1705 train_time:131151ms step_avg:95.31ms
+step:1377/1705 train_time:131233ms step_avg:95.30ms
+step:1378/1705 train_time:131335ms step_avg:95.31ms
+step:1379/1705 train_time:131431ms step_avg:95.31ms
+step:1380/1705 train_time:131524ms step_avg:95.31ms
+step:1381/1705 train_time:131619ms step_avg:95.31ms
+step:1382/1705 train_time:131713ms step_avg:95.31ms
+step:1383/1705 train_time:131808ms step_avg:95.31ms
+step:1384/1705 train_time:131901ms step_avg:95.30ms
+step:1385/1705 train_time:131996ms step_avg:95.30ms
+step:1386/1705 train_time:132091ms step_avg:95.30ms
+step:1387/1705 train_time:132188ms step_avg:95.30ms
+step:1388/1705 train_time:132286ms step_avg:95.31ms
+step:1389/1705 train_time:132382ms step_avg:95.31ms
+step:1390/1705 train_time:132478ms step_avg:95.31ms
+step:1391/1705 train_time:132573ms step_avg:95.31ms
+step:1392/1705 train_time:132667ms step_avg:95.31ms
+step:1393/1705 train_time:132761ms step_avg:95.31ms
+step:1394/1705 train_time:132856ms step_avg:95.31ms
+step:1395/1705 train_time:132951ms step_avg:95.31ms
+step:1396/1705 train_time:133045ms step_avg:95.30ms
+step:1397/1705 train_time:133140ms step_avg:95.30ms
+step:1398/1705 train_time:133237ms step_avg:95.31ms
+step:1399/1705 train_time:133335ms step_avg:95.31ms
+step:1400/1705 train_time:133431ms step_avg:95.31ms
+step:1401/1705 train_time:133527ms step_avg:95.31ms
+step:1402/1705 train_time:133621ms step_avg:95.31ms
+step:1403/1705 train_time:133715ms step_avg:95.31ms
+step:1404/1705 train_time:133810ms step_avg:95.31ms
+step:1405/1705 train_time:133905ms step_avg:95.31ms
+step:1406/1705 train_time:134000ms step_avg:95.31ms
+step:1407/1705 train_time:134096ms step_avg:95.31ms
+step:1408/1705 train_time:134192ms step_avg:95.31ms
+step:1409/1705 train_time:134288ms step_avg:95.31ms
+step:1410/1705 train_time:134384ms step_avg:95.31ms
+step:1411/1705 train_time:134479ms step_avg:95.31ms
+step:1412/1705 train_time:134575ms step_avg:95.31ms
+step:1413/1705 train_time:134671ms step_avg:95.31ms
+step:1414/1705 train_time:134765ms step_avg:95.31ms
+step:1415/1705 train_time:134860ms step_avg:95.31ms
+step:1416/1705 train_time:134955ms step_avg:95.31ms
+step:1417/1705 train_time:135050ms step_avg:95.31ms
+step:1418/1705 train_time:135144ms step_avg:95.31ms
+step:1419/1705 train_time:135240ms step_avg:95.31ms
+step:1420/1705 train_time:135337ms step_avg:95.31ms
+step:1421/1705 train_time:135433ms step_avg:95.31ms
+step:1422/1705 train_time:135527ms step_avg:95.31ms
+step:1423/1705 train_time:135622ms step_avg:95.31ms
+step:1424/1705 train_time:135718ms step_avg:95.31ms
+step:1425/1705 train_time:135813ms step_avg:95.31ms
+step:1426/1705 train_time:135907ms step_avg:95.31ms
+step:1427/1705 train_time:136002ms step_avg:95.31ms
+step:1428/1705 train_time:136097ms step_avg:95.31ms
+step:1429/1705 train_time:136193ms step_avg:95.31ms
+step:1430/1705 train_time:136290ms step_avg:95.31ms
+step:1431/1705 train_time:136385ms step_avg:95.31ms
+step:1432/1705 train_time:136480ms step_avg:95.31ms
+step:1433/1705 train_time:136576ms step_avg:95.31ms
+step:1434/1705 train_time:136672ms step_avg:95.31ms
+step:1435/1705 train_time:136767ms step_avg:95.31ms
+step:1436/1705 train_time:136862ms step_avg:95.31ms
+step:1437/1705 train_time:136958ms step_avg:95.31ms
+step:1438/1705 train_time:137052ms step_avg:95.31ms
+step:1439/1705 train_time:137147ms step_avg:95.31ms
+step:1440/1705 train_time:137243ms step_avg:95.31ms
+step:1441/1705 train_time:137338ms step_avg:95.31ms
+step:1442/1705 train_time:137434ms step_avg:95.31ms
+step:1443/1705 train_time:137530ms step_avg:95.31ms
+step:1444/1705 train_time:137625ms step_avg:95.31ms
+step:1445/1705 train_time:137720ms step_avg:95.31ms
+step:1446/1705 train_time:137815ms step_avg:95.31ms
+step:1447/1705 train_time:137911ms step_avg:95.31ms
+step:1448/1705 train_time:138007ms step_avg:95.31ms
+step:1449/1705 train_time:138102ms step_avg:95.31ms
+step:1450/1705 train_time:138198ms step_avg:95.31ms
+step:1451/1705 train_time:138293ms step_avg:95.31ms
+step:1452/1705 train_time:138388ms step_avg:95.31ms
+step:1453/1705 train_time:138483ms step_avg:95.31ms
+step:1454/1705 train_time:138579ms step_avg:95.31ms
+step:1455/1705 train_time:138675ms step_avg:95.31ms
+step:1456/1705 train_time:138772ms step_avg:95.31ms
+step:1457/1705 train_time:138869ms step_avg:95.31ms
+step:1458/1705 train_time:138964ms step_avg:95.31ms
+step:1459/1705 train_time:139059ms step_avg:95.31ms
+step:1460/1705 train_time:139154ms step_avg:95.31ms
+step:1461/1705 train_time:139250ms step_avg:95.31ms
+step:1462/1705 train_time:139345ms step_avg:95.31ms
+step:1463/1705 train_time:139440ms step_avg:95.31ms
+step:1464/1705 train_time:139535ms step_avg:95.31ms
+step:1465/1705 train_time:139630ms step_avg:95.31ms
+step:1466/1705 train_time:139725ms step_avg:95.31ms
+step:1467/1705 train_time:139821ms step_avg:95.31ms
+step:1468/1705 train_time:139917ms step_avg:95.31ms
+step:1469/1705 train_time:140013ms step_avg:95.31ms
+step:1470/1705 train_time:140108ms step_avg:95.31ms
+step:1471/1705 train_time:140203ms step_avg:95.31ms
+step:1472/1705 train_time:140298ms step_avg:95.31ms
+step:1473/1705 train_time:140395ms step_avg:95.31ms
+step:1474/1705 train_time:140490ms step_avg:95.31ms
+step:1475/1705 train_time:140585ms step_avg:95.31ms
+step:1476/1705 train_time:140681ms step_avg:95.31ms
+step:1477/1705 train_time:140776ms step_avg:95.31ms
+step:1478/1705 train_time:140872ms step_avg:95.31ms
+step:1479/1705 train_time:140967ms step_avg:95.31ms
+step:1480/1705 train_time:141063ms step_avg:95.31ms
+step:1481/1705 train_time:141159ms step_avg:95.31ms
+step:1482/1705 train_time:141255ms step_avg:95.31ms
+step:1483/1705 train_time:141351ms step_avg:95.31ms
+step:1484/1705 train_time:141445ms step_avg:95.31ms
+step:1485/1705 train_time:141723ms step_avg:95.44ms
+step:1486/1705 train_time:141887ms step_avg:95.48ms
+step:1487/1705 train_time:141980ms step_avg:95.48ms
+step:1488/1705 train_time:142074ms step_avg:95.48ms
+step:1489/1705 train_time:142168ms step_avg:95.48ms
+step:1490/1705 train_time:142262ms step_avg:95.48ms
+step:1491/1705 train_time:142357ms step_avg:95.48ms
+step:1492/1705 train_time:142451ms step_avg:95.48ms
+step:1493/1705 train_time:142545ms step_avg:95.48ms
+step:1494/1705 train_time:142640ms step_avg:95.48ms
+step:1495/1705 train_time:142740ms step_avg:95.48ms
+step:1496/1705 train_time:142842ms step_avg:95.48ms
+step:1497/1705 train_time:142939ms step_avg:95.48ms
+step:1498/1705 train_time:143035ms step_avg:95.48ms
+step:1499/1705 train_time:143131ms step_avg:95.48ms
+step:1500/1705 train_time:143224ms step_avg:95.48ms
+step:1500/1705 val_loss:3.3197 train_time:143319ms step_avg:95.55ms
+step:1501/1705 train_time:143340ms step_avg:95.50ms
+step:1502/1705 train_time:143420ms step_avg:95.49ms
+step:1503/1705 train_time:143518ms step_avg:95.49ms
+step:1504/1705 train_time:143613ms step_avg:95.49ms
+step:1505/1705 train_time:143709ms step_avg:95.49ms
+step:1506/1705 train_time:143803ms step_avg:95.49ms
+step:1507/1705 train_time:143896ms step_avg:95.49ms
+step:1508/1705 train_time:143990ms step_avg:95.48ms
+step:1509/1705 train_time:144085ms step_avg:95.48ms
+step:1510/1705 train_time:144179ms step_avg:95.48ms
+step:1511/1705 train_time:144274ms step_avg:95.48ms
+step:1512/1705 train_time:144372ms step_avg:95.48ms
+step:1513/1705 train_time:144470ms step_avg:95.49ms
+step:1514/1705 train_time:144569ms step_avg:95.49ms
+step:1515/1705 train_time:144665ms step_avg:95.49ms
+step:1516/1705 train_time:144760ms step_avg:95.49ms
+step:1517/1705 train_time:144853ms step_avg:95.49ms
+step:1518/1705 train_time:144948ms step_avg:95.49ms
+step:1519/1705 train_time:145042ms step_avg:95.49ms
+step:1520/1705 train_time:145137ms step_avg:95.48ms
+step:1521/1705 train_time:145231ms step_avg:95.48ms
+step:1522/1705 train_time:145327ms step_avg:95.48ms
+step:1523/1705 train_time:145425ms step_avg:95.49ms
+step:1524/1705 train_time:145522ms step_avg:95.49ms
+step:1525/1705 train_time:145618ms step_avg:95.49ms
+step:1526/1705 train_time:145713ms step_avg:95.49ms
+step:1527/1705 train_time:145808ms step_avg:95.49ms
+step:1528/1705 train_time:145904ms step_avg:95.49ms
+step:1529/1705 train_time:145999ms step_avg:95.49ms
+step:1530/1705 train_time:146093ms step_avg:95.49ms
+step:1531/1705 train_time:146187ms step_avg:95.48ms
+step:1532/1705 train_time:146283ms step_avg:95.48ms
+step:1533/1705 train_time:146379ms step_avg:95.49ms
+step:1534/1705 train_time:146474ms step_avg:95.49ms
+step:1535/1705 train_time:146571ms step_avg:95.49ms
+step:1536/1705 train_time:146666ms step_avg:95.49ms
+step:1537/1705 train_time:146762ms step_avg:95.49ms
+step:1538/1705 train_time:146857ms step_avg:95.49ms
+step:1539/1705 train_time:146951ms step_avg:95.48ms
+step:1540/1705 train_time:147046ms step_avg:95.48ms
+step:1541/1705 train_time:147142ms step_avg:95.48ms
+step:1542/1705 train_time:147237ms step_avg:95.48ms
+step:1543/1705 train_time:147332ms step_avg:95.48ms
+step:1544/1705 train_time:147428ms step_avg:95.48ms
+step:1545/1705 train_time:147525ms step_avg:95.49ms
+step:1546/1705 train_time:147622ms step_avg:95.49ms
+step:1547/1705 train_time:147717ms step_avg:95.49ms
+step:1548/1705 train_time:147811ms step_avg:95.49ms
+step:1549/1705 train_time:147907ms step_avg:95.49ms
+step:1550/1705 train_time:148003ms step_avg:95.49ms
+step:1551/1705 train_time:148098ms step_avg:95.49ms
+step:1552/1705 train_time:148192ms step_avg:95.48ms
+step:1553/1705 train_time:148289ms step_avg:95.49ms
+step:1554/1705 train_time:148384ms step_avg:95.49ms
+step:1555/1705 train_time:148480ms step_avg:95.49ms
+step:1556/1705 train_time:148575ms step_avg:95.49ms
+step:1557/1705 train_time:148671ms step_avg:95.49ms
+step:1558/1705 train_time:148766ms step_avg:95.49ms
+step:1559/1705 train_time:148862ms step_avg:95.49ms
+step:1560/1705 train_time:148958ms step_avg:95.49ms
+step:1561/1705 train_time:149053ms step_avg:95.49ms
+step:1562/1705 train_time:149148ms step_avg:95.49ms
+step:1563/1705 train_time:149244ms step_avg:95.49ms
+step:1564/1705 train_time:149340ms step_avg:95.49ms
+step:1565/1705 train_time:149436ms step_avg:95.49ms
+step:1566/1705 train_time:149531ms step_avg:95.49ms
+step:1567/1705 train_time:149626ms step_avg:95.49ms
+step:1568/1705 train_time:149721ms step_avg:95.49ms
+step:1569/1705 train_time:149816ms step_avg:95.49ms
+step:1570/1705 train_time:149912ms step_avg:95.49ms
+step:1571/1705 train_time:150007ms step_avg:95.49ms
+step:1572/1705 train_time:150102ms step_avg:95.48ms
+step:1573/1705 train_time:150197ms step_avg:95.48ms
+step:1574/1705 train_time:150292ms step_avg:95.48ms
+step:1575/1705 train_time:150387ms step_avg:95.48ms
+step:1576/1705 train_time:150482ms step_avg:95.48ms
+step:1577/1705 train_time:150577ms step_avg:95.48ms
+step:1578/1705 train_time:150673ms step_avg:95.48ms
+step:1579/1705 train_time:150768ms step_avg:95.48ms
+step:1580/1705 train_time:150864ms step_avg:95.48ms
+step:1581/1705 train_time:150959ms step_avg:95.48ms
+step:1582/1705 train_time:151054ms step_avg:95.48ms
+step:1583/1705 train_time:151149ms step_avg:95.48ms
+step:1584/1705 train_time:151245ms step_avg:95.48ms
+step:1585/1705 train_time:151342ms step_avg:95.48ms
+step:1586/1705 train_time:151437ms step_avg:95.48ms
+step:1587/1705 train_time:151531ms step_avg:95.48ms
+step:1588/1705 train_time:151627ms step_avg:95.48ms
+step:1589/1705 train_time:151723ms step_avg:95.48ms
+step:1590/1705 train_time:151818ms step_avg:95.48ms
+step:1591/1705 train_time:151913ms step_avg:95.48ms
+step:1592/1705 train_time:152009ms step_avg:95.48ms
+step:1593/1705 train_time:152104ms step_avg:95.48ms
+step:1594/1705 train_time:152200ms step_avg:95.48ms
+step:1595/1705 train_time:152294ms step_avg:95.48ms
+step:1596/1705 train_time:152389ms step_avg:95.48ms
+step:1597/1705 train_time:152485ms step_avg:95.48ms
+step:1598/1705 train_time:152581ms step_avg:95.48ms
+step:1599/1705 train_time:152677ms step_avg:95.48ms
+step:1600/1705 train_time:152772ms step_avg:95.48ms
+step:1601/1705 train_time:152867ms step_avg:95.48ms
+step:1602/1705 train_time:152963ms step_avg:95.48ms
+step:1603/1705 train_time:153058ms step_avg:95.48ms
+step:1604/1705 train_time:153153ms step_avg:95.48ms
+step:1605/1705 train_time:153248ms step_avg:95.48ms
+step:1606/1705 train_time:153344ms step_avg:95.48ms
+step:1607/1705 train_time:153440ms step_avg:95.48ms
+step:1608/1705 train_time:153534ms step_avg:95.48ms
+step:1609/1705 train_time:153629ms step_avg:95.48ms
+step:1610/1705 train_time:153726ms step_avg:95.48ms
+step:1611/1705 train_time:153821ms step_avg:95.48ms
+step:1612/1705 train_time:153916ms step_avg:95.48ms
+step:1613/1705 train_time:154012ms step_avg:95.48ms
+step:1614/1705 train_time:154107ms step_avg:95.48ms
+step:1615/1705 train_time:154203ms step_avg:95.48ms
+step:1616/1705 train_time:154298ms step_avg:95.48ms
+step:1617/1705 train_time:154393ms step_avg:95.48ms
+step:1618/1705 train_time:154488ms step_avg:95.48ms
+step:1619/1705 train_time:154585ms step_avg:95.48ms
+step:1620/1705 train_time:154680ms step_avg:95.48ms
+step:1621/1705 train_time:154775ms step_avg:95.48ms
+step:1622/1705 train_time:154870ms step_avg:95.48ms
+step:1623/1705 train_time:154966ms step_avg:95.48ms
+step:1624/1705 train_time:155062ms step_avg:95.48ms
+step:1625/1705 train_time:155158ms step_avg:95.48ms
+step:1625/1705 val_loss:3.2922 train_time:155253ms step_avg:95.54ms
+step:1626/1705 train_time:155275ms step_avg:95.50ms
+step:1627/1705 train_time:155355ms step_avg:95.49ms
+step:1628/1705 train_time:155457ms step_avg:95.49ms
+step:1629/1705 train_time:155553ms step_avg:95.49ms
+step:1630/1705 train_time:155647ms step_avg:95.49ms
+step:1631/1705 train_time:155741ms step_avg:95.49ms
+step:1632/1705 train_time:155836ms step_avg:95.49ms
+step:1633/1705 train_time:155930ms step_avg:95.49ms
+step:1634/1705 train_time:156024ms step_avg:95.49ms
+step:1635/1705 train_time:156119ms step_avg:95.49ms
+step:1636/1705 train_time:156215ms step_avg:95.49ms
+step:1637/1705 train_time:156313ms step_avg:95.49ms
+step:1638/1705 train_time:156411ms step_avg:95.49ms
+step:1639/1705 train_time:156508ms step_avg:95.49ms
+step:1640/1705 train_time:156604ms step_avg:95.49ms
+step:1641/1705 train_time:156700ms step_avg:95.49ms
+step:1642/1705 train_time:156795ms step_avg:95.49ms
+step:1643/1705 train_time:156889ms step_avg:95.49ms
+step:1644/1705 train_time:156984ms step_avg:95.49ms
+step:1645/1705 train_time:157079ms step_avg:95.49ms
+step:1646/1705 train_time:157174ms step_avg:95.49ms
+step:1647/1705 train_time:157269ms step_avg:95.49ms
+step:1648/1705 train_time:157365ms step_avg:95.49ms
+step:1649/1705 train_time:157461ms step_avg:95.49ms
+step:1650/1705 train_time:157557ms step_avg:95.49ms
+step:1651/1705 train_time:157652ms step_avg:95.49ms
+step:1652/1705 train_time:157747ms step_avg:95.49ms
+step:1653/1705 train_time:157843ms step_avg:95.49ms
+step:1654/1705 train_time:157937ms step_avg:95.49ms
+step:1655/1705 train_time:158032ms step_avg:95.49ms
+step:1656/1705 train_time:158126ms step_avg:95.49ms
+step:1657/1705 train_time:158222ms step_avg:95.49ms
+step:1658/1705 train_time:158318ms step_avg:95.49ms
+step:1659/1705 train_time:158416ms step_avg:95.49ms
+step:1660/1705 train_time:158512ms step_avg:95.49ms
+step:1661/1705 train_time:158607ms step_avg:95.49ms
+step:1662/1705 train_time:158702ms step_avg:95.49ms
+step:1663/1705 train_time:158798ms step_avg:95.49ms
+step:1664/1705 train_time:158894ms step_avg:95.49ms
+step:1665/1705 train_time:158988ms step_avg:95.49ms
+step:1666/1705 train_time:159083ms step_avg:95.49ms
+step:1667/1705 train_time:159179ms step_avg:95.49ms
+step:1668/1705 train_time:159274ms step_avg:95.49ms
+step:1669/1705 train_time:159369ms step_avg:95.49ms
+step:1670/1705 train_time:159465ms step_avg:95.49ms
+step:1671/1705 train_time:159560ms step_avg:95.49ms
+step:1672/1705 train_time:159656ms step_avg:95.49ms
+step:1673/1705 train_time:159752ms step_avg:95.49ms
+step:1674/1705 train_time:159847ms step_avg:95.49ms
+step:1675/1705 train_time:159943ms step_avg:95.49ms
+step:1676/1705 train_time:160039ms step_avg:95.49ms
+step:1677/1705 train_time:160134ms step_avg:95.49ms
+step:1678/1705 train_time:160229ms step_avg:95.49ms
+step:1679/1705 train_time:160324ms step_avg:95.49ms
+step:1680/1705 train_time:160421ms step_avg:95.49ms
+step:1681/1705 train_time:160516ms step_avg:95.49ms
+step:1682/1705 train_time:160612ms step_avg:95.49ms
+step:1683/1705 train_time:160707ms step_avg:95.49ms
+step:1684/1705 train_time:160803ms step_avg:95.49ms
+step:1685/1705 train_time:160900ms step_avg:95.49ms
+step:1686/1705 train_time:160995ms step_avg:95.49ms
+step:1687/1705 train_time:161090ms step_avg:95.49ms
+step:1688/1705 train_time:161185ms step_avg:95.49ms
+step:1689/1705 train_time:161280ms step_avg:95.49ms
+step:1690/1705 train_time:161376ms step_avg:95.49ms
+step:1691/1705 train_time:161471ms step_avg:95.49ms
+step:1692/1705 train_time:161566ms step_avg:95.49ms
+step:1693/1705 train_time:161661ms step_avg:95.49ms
+step:1694/1705 train_time:161757ms step_avg:95.49ms
+step:1695/1705 train_time:161853ms step_avg:95.49ms
+step:1696/1705 train_time:161948ms step_avg:95.49ms
+step:1697/1705 train_time:162043ms step_avg:95.49ms
+step:1698/1705 train_time:162363ms step_avg:95.62ms
+step:1699/1705 train_time:162483ms step_avg:95.63ms
+step:1700/1705 train_time:162575ms step_avg:95.63ms
+step:1701/1705 train_time:162669ms step_avg:95.63ms
+step:1702/1705 train_time:162763ms step_avg:95.63ms
+step:1703/1705 train_time:162857ms step_avg:95.63ms
+step:1704/1705 train_time:162952ms step_avg:95.63ms
+step:1705/1705 train_time:163046ms
step_avg:95.63ms +step:1705/1705 val_loss:3.2778 train_time:163141ms step_avg:95.68ms +peak memory allocated: 33848 MiB reserved: 48936 MiB diff --git a/records/090525_SkipMLPBlocks/5ab34e6e-f1db-4ceb-a639-9186a26a48f5.txt b/records/090525_SkipMLPBlocks/5ab34e6e-f1db-4ceb-a639-9186a26a48f5.txt new file mode 100644 index 000000000..3c0cce51c --- /dev/null +++ b/records/090525_SkipMLPBlocks/5ab34e6e-f1db-4ceb-a639-9186a26a48f5.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, 
grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + 
tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, 
mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
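+
+    Example (an illustrative sketch, not part of the original record; it mirrors the
+    optimizer construction further down this script and assumes torch.distributed is
+    already initialized, since step() reduce-scatters gradients across ranks):
+
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters()
+                                if p.ndim >= 2 and "embed" not in n]
+        optimizer = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+        loss.backward()
+        optimizer.step()
+        model.zero_grad(set_to_none=True)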
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
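+        # e.g. next_multiple_of_n(50257, n=128) == 50304 == 393 * 128, so the lm_head
+        # output dimension below stays a multiple of 128 for tensor-core friendly shapes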
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+        assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning-of-Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # last document was too long to account for _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1705 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
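+# Illustrative arithmetic (not part of the original log): train_batch_size = 2048*24*8
+# = 393216 tokens per optimizer step, and each rank's micro-batch is
+# train_batch_size // grad_accum_steps // world_size = 393216 // 8 = 49152 tokens for
+# every valid world_size, since grad_accum_steps * world_size == 8. Fewer GPUs simply
+# run more gradient-accumulation micro-steps before each optimizer step.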
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
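+# Worked examples for the two schedules defined above (illustrative, derived from the code):
+# - get_lr: the multiplier is 1.0 for the first 55% of training (x < 1 - cooldown_frac),
+#   then decays linearly toward 0.1; e.g. at x = 0.775, w = (1 - 0.775) / 0.45 = 0.5,
+#   giving 0.5 * 1.0 + 0.5 * 0.1 = 0.55.
+# - get_ws: with ws_schedule = (3, 7, 11) and num_iterations = 1705, ws_idx =
+#   int(3 * step / 1706): steps 0-568 use ws=3, 569-1137 use ws=7, 1138-1705 use ws=11.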
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:18:13 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 36C P0 123W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 31C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 37C P0 122W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 31C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 31C P0 115W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 35C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 30C P0 122W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 47592 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 47593 C /usr/bin/python3 610MiB | +| 0 N/A N/A 47594 C /usr/bin/python3 610MiB | +| 0 N/A N/A 47595 C /usr/bin/python3 610MiB | +| 0 N/A N/A 47596 C /usr/bin/python3 610MiB | +| 0 N/A N/A 47597 C /usr/bin/python3 610MiB | +| 0 N/A N/A 47598 C /usr/bin/python3 610MiB | +| 0 N/A N/A 47599 C /usr/bin/python3 610MiB | +| 1 N/A N/A 47593 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 47594 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 47595 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 47596 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 47597 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 47598 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 47599 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1705 train_time:408ms step_avg:407.67ms +step:2/1705 train_time:429ms step_avg:214.72ms +step:3/1705 train_time:497ms step_avg:165.70ms +step:4/1705 train_time:588ms step_avg:146.96ms +step:5/1705 
train_time:679ms step_avg:135.84ms +step:6/1705 train_time:772ms step_avg:128.59ms +step:7/1705 train_time:863ms step_avg:123.24ms +step:8/1705 train_time:955ms step_avg:119.35ms +step:9/1705 train_time:1047ms step_avg:116.30ms +step:10/1705 train_time:1139ms step_avg:113.93ms +step:11/1705 train_time:1231ms step_avg:111.95ms +step:12/1705 train_time:1325ms step_avg:110.38ms +step:13/1705 train_time:1419ms step_avg:109.15ms +step:14/1705 train_time:1513ms step_avg:108.09ms +step:15/1705 train_time:1607ms step_avg:107.14ms +step:16/1705 train_time:1699ms step_avg:106.18ms +step:17/1705 train_time:1791ms step_avg:105.38ms +step:18/1705 train_time:1884ms step_avg:104.66ms +step:19/1705 train_time:1976ms step_avg:103.99ms +step:20/1705 train_time:2069ms step_avg:103.45ms +step:21/1705 train_time:2161ms step_avg:102.93ms +step:22/1705 train_time:2255ms step_avg:102.50ms +step:23/1705 train_time:2349ms step_avg:102.15ms +step:24/1705 train_time:2443ms step_avg:101.78ms +step:25/1705 train_time:2537ms step_avg:101.47ms +step:26/1705 train_time:2630ms step_avg:101.16ms +step:27/1705 train_time:2722ms step_avg:100.83ms +step:28/1705 train_time:2815ms step_avg:100.54ms +step:29/1705 train_time:2908ms step_avg:100.27ms +step:30/1705 train_time:3000ms step_avg:99.99ms +step:31/1705 train_time:3092ms step_avg:99.76ms +step:32/1705 train_time:3186ms step_avg:99.56ms +step:33/1705 train_time:3279ms step_avg:99.35ms +step:34/1705 train_time:3373ms step_avg:99.20ms +step:35/1705 train_time:3466ms step_avg:99.04ms +step:36/1705 train_time:3559ms step_avg:98.87ms +step:37/1705 train_time:3653ms step_avg:98.73ms +step:38/1705 train_time:3746ms step_avg:98.59ms +step:39/1705 train_time:3839ms step_avg:98.42ms +step:40/1705 train_time:3932ms step_avg:98.29ms +step:41/1705 train_time:4024ms step_avg:98.15ms +step:42/1705 train_time:4117ms step_avg:98.03ms +step:43/1705 train_time:4211ms step_avg:97.92ms +step:44/1705 train_time:4303ms step_avg:97.80ms +step:45/1705 train_time:4397ms step_avg:97.70ms +step:46/1705 train_time:4490ms step_avg:97.61ms +step:47/1705 train_time:4583ms step_avg:97.51ms +step:48/1705 train_time:4676ms step_avg:97.42ms +step:49/1705 train_time:4770ms step_avg:97.34ms +step:50/1705 train_time:4862ms step_avg:97.24ms +step:51/1705 train_time:4955ms step_avg:97.17ms +step:52/1705 train_time:5049ms step_avg:97.09ms +step:53/1705 train_time:5142ms step_avg:97.02ms +step:54/1705 train_time:5235ms step_avg:96.95ms +step:55/1705 train_time:5328ms step_avg:96.87ms +step:56/1705 train_time:5420ms step_avg:96.79ms +step:57/1705 train_time:5514ms step_avg:96.74ms +step:58/1705 train_time:5607ms step_avg:96.67ms +step:59/1705 train_time:5699ms step_avg:96.59ms +step:60/1705 train_time:5792ms step_avg:96.54ms +step:61/1705 train_time:5885ms step_avg:96.47ms +step:62/1705 train_time:5977ms step_avg:96.40ms +step:63/1705 train_time:6070ms step_avg:96.36ms +step:64/1705 train_time:6162ms step_avg:96.28ms +step:65/1705 train_time:6255ms step_avg:96.23ms +step:66/1705 train_time:6349ms step_avg:96.19ms +step:67/1705 train_time:6441ms step_avg:96.14ms +step:68/1705 train_time:6535ms step_avg:96.10ms +step:69/1705 train_time:6628ms step_avg:96.06ms +step:70/1705 train_time:6720ms step_avg:96.01ms +step:71/1705 train_time:6813ms step_avg:95.96ms +step:72/1705 train_time:6906ms step_avg:95.92ms +step:73/1705 train_time:6999ms step_avg:95.87ms +step:74/1705 train_time:7092ms step_avg:95.84ms +step:75/1705 train_time:7185ms step_avg:95.80ms +step:76/1705 train_time:7278ms step_avg:95.76ms +step:77/1705 
train_time:7371ms step_avg:95.73ms +step:78/1705 train_time:7464ms step_avg:95.70ms +step:79/1705 train_time:7557ms step_avg:95.66ms +step:80/1705 train_time:7651ms step_avg:95.64ms +step:81/1705 train_time:7746ms step_avg:95.63ms +step:82/1705 train_time:7838ms step_avg:95.59ms +step:83/1705 train_time:7931ms step_avg:95.55ms +step:84/1705 train_time:8024ms step_avg:95.52ms +step:85/1705 train_time:8117ms step_avg:95.49ms +step:86/1705 train_time:8210ms step_avg:95.47ms +step:87/1705 train_time:8302ms step_avg:95.43ms +step:88/1705 train_time:8395ms step_avg:95.40ms +step:89/1705 train_time:8489ms step_avg:95.38ms +step:90/1705 train_time:8582ms step_avg:95.35ms +step:91/1705 train_time:8676ms step_avg:95.35ms +step:92/1705 train_time:8768ms step_avg:95.31ms +step:93/1705 train_time:8860ms step_avg:95.27ms +step:94/1705 train_time:8953ms step_avg:95.25ms +step:95/1705 train_time:9046ms step_avg:95.22ms +step:96/1705 train_time:9138ms step_avg:95.19ms +step:97/1705 train_time:9230ms step_avg:95.16ms +step:98/1705 train_time:9322ms step_avg:95.13ms +step:99/1705 train_time:9415ms step_avg:95.10ms +step:100/1705 train_time:9509ms step_avg:95.09ms +step:101/1705 train_time:9601ms step_avg:95.06ms +step:102/1705 train_time:9695ms step_avg:95.05ms +step:103/1705 train_time:9788ms step_avg:95.03ms +step:104/1705 train_time:9881ms step_avg:95.01ms +step:105/1705 train_time:9974ms step_avg:94.99ms +step:106/1705 train_time:10067ms step_avg:94.97ms +step:107/1705 train_time:10159ms step_avg:94.94ms +step:108/1705 train_time:10253ms step_avg:94.94ms +step:109/1705 train_time:10346ms step_avg:94.92ms +step:110/1705 train_time:10438ms step_avg:94.90ms +step:111/1705 train_time:10531ms step_avg:94.88ms +step:112/1705 train_time:10624ms step_avg:94.85ms +step:113/1705 train_time:10716ms step_avg:94.84ms +step:114/1705 train_time:10810ms step_avg:94.82ms +step:115/1705 train_time:10902ms step_avg:94.80ms +step:116/1705 train_time:10995ms step_avg:94.79ms +step:117/1705 train_time:11088ms step_avg:94.77ms +step:118/1705 train_time:11180ms step_avg:94.75ms +step:119/1705 train_time:11273ms step_avg:94.73ms +step:120/1705 train_time:11366ms step_avg:94.72ms +step:121/1705 train_time:11458ms step_avg:94.69ms +step:122/1705 train_time:11552ms step_avg:94.69ms +step:123/1705 train_time:11646ms step_avg:94.68ms +step:124/1705 train_time:11738ms step_avg:94.66ms +step:125/1705 train_time:11831ms step_avg:94.65ms +step:125/1705 val_loss:4.3026 train_time:11924ms step_avg:95.39ms +step:126/1705 train_time:11948ms step_avg:94.83ms +step:127/1705 train_time:12023ms step_avg:94.67ms +step:128/1705 train_time:12126ms step_avg:94.74ms +step:129/1705 train_time:12222ms step_avg:94.74ms +step:130/1705 train_time:12314ms step_avg:94.72ms +step:131/1705 train_time:12406ms step_avg:94.70ms +step:132/1705 train_time:12498ms step_avg:94.68ms +step:133/1705 train_time:12590ms step_avg:94.66ms +step:134/1705 train_time:12682ms step_avg:94.64ms +step:135/1705 train_time:12774ms step_avg:94.62ms +step:136/1705 train_time:12867ms step_avg:94.61ms +step:137/1705 train_time:12959ms step_avg:94.59ms +step:138/1705 train_time:13054ms step_avg:94.59ms +step:139/1705 train_time:13151ms step_avg:94.61ms +step:140/1705 train_time:13244ms step_avg:94.60ms +step:141/1705 train_time:13337ms step_avg:94.59ms +step:142/1705 train_time:13429ms step_avg:94.57ms +step:143/1705 train_time:13522ms step_avg:94.56ms +step:144/1705 train_time:13614ms step_avg:94.54ms +step:145/1705 train_time:13706ms step_avg:94.53ms +step:146/1705 train_time:13798ms 
step_avg:94.51ms +step:147/1705 train_time:13890ms step_avg:94.49ms +step:148/1705 train_time:13982ms step_avg:94.48ms +step:149/1705 train_time:14075ms step_avg:94.46ms +step:150/1705 train_time:14169ms step_avg:94.46ms +step:151/1705 train_time:14263ms step_avg:94.46ms +step:152/1705 train_time:14356ms step_avg:94.45ms +step:153/1705 train_time:14449ms step_avg:94.44ms +step:154/1705 train_time:14542ms step_avg:94.43ms +step:155/1705 train_time:14634ms step_avg:94.41ms +step:156/1705 train_time:14727ms step_avg:94.40ms +step:157/1705 train_time:14819ms step_avg:94.39ms +step:158/1705 train_time:14911ms step_avg:94.37ms +step:159/1705 train_time:15004ms step_avg:94.37ms +step:160/1705 train_time:15098ms step_avg:94.36ms +step:161/1705 train_time:15191ms step_avg:94.36ms +step:162/1705 train_time:15285ms step_avg:94.35ms +step:163/1705 train_time:15378ms step_avg:94.35ms +step:164/1705 train_time:15471ms step_avg:94.33ms +step:165/1705 train_time:15564ms step_avg:94.33ms +step:166/1705 train_time:15656ms step_avg:94.31ms +step:167/1705 train_time:15749ms step_avg:94.30ms +step:168/1705 train_time:15841ms step_avg:94.29ms +step:169/1705 train_time:15933ms step_avg:94.28ms +step:170/1705 train_time:16026ms step_avg:94.27ms +step:171/1705 train_time:16119ms step_avg:94.26ms +step:172/1705 train_time:16212ms step_avg:94.25ms +step:173/1705 train_time:16306ms step_avg:94.25ms +step:174/1705 train_time:16398ms step_avg:94.24ms +step:175/1705 train_time:16491ms step_avg:94.24ms +step:176/1705 train_time:16584ms step_avg:94.23ms +step:177/1705 train_time:16677ms step_avg:94.22ms +step:178/1705 train_time:16769ms step_avg:94.21ms +step:179/1705 train_time:16862ms step_avg:94.20ms +step:180/1705 train_time:16954ms step_avg:94.19ms +step:181/1705 train_time:17048ms step_avg:94.19ms +step:182/1705 train_time:17140ms step_avg:94.18ms +step:183/1705 train_time:17232ms step_avg:94.16ms +step:184/1705 train_time:17326ms step_avg:94.16ms +step:185/1705 train_time:17418ms step_avg:94.15ms +step:186/1705 train_time:17511ms step_avg:94.15ms +step:187/1705 train_time:17604ms step_avg:94.14ms +step:188/1705 train_time:17697ms step_avg:94.13ms +step:189/1705 train_time:17789ms step_avg:94.12ms +step:190/1705 train_time:17882ms step_avg:94.11ms +step:191/1705 train_time:17973ms step_avg:94.10ms +step:192/1705 train_time:18067ms step_avg:94.10ms +step:193/1705 train_time:18160ms step_avg:94.09ms +step:194/1705 train_time:18251ms step_avg:94.08ms +step:195/1705 train_time:18344ms step_avg:94.07ms +step:196/1705 train_time:18437ms step_avg:94.07ms +step:197/1705 train_time:18530ms step_avg:94.06ms +step:198/1705 train_time:18623ms step_avg:94.05ms +step:199/1705 train_time:18715ms step_avg:94.04ms +step:200/1705 train_time:18807ms step_avg:94.03ms +step:201/1705 train_time:18900ms step_avg:94.03ms +step:202/1705 train_time:18992ms step_avg:94.02ms +step:203/1705 train_time:19084ms step_avg:94.01ms +step:204/1705 train_time:19177ms step_avg:94.00ms +step:205/1705 train_time:19270ms step_avg:94.00ms +step:206/1705 train_time:19363ms step_avg:94.00ms +step:207/1705 train_time:19456ms step_avg:93.99ms +step:208/1705 train_time:19548ms step_avg:93.98ms +step:209/1705 train_time:19641ms step_avg:93.97ms +step:210/1705 train_time:19733ms step_avg:93.97ms +step:211/1705 train_time:19826ms step_avg:93.96ms +step:212/1705 train_time:19918ms step_avg:93.95ms +step:213/1705 train_time:20202ms step_avg:94.84ms +step:214/1705 train_time:20330ms step_avg:95.00ms +step:215/1705 train_time:20421ms step_avg:94.98ms +step:216/1705 
train_time:20513ms step_avg:94.97ms
+step:217/1705 train_time:20604ms step_avg:94.95ms
+step:218/1705 train_time:20696ms step_avg:94.94ms
+step:219/1705 train_time:20788ms step_avg:94.92ms
+step:220/1705 train_time:20880ms step_avg:94.91ms
+step:221/1705 train_time:20972ms step_avg:94.90ms
+step:222/1705 train_time:21064ms step_avg:94.88ms
+step:223/1705 train_time:21157ms step_avg:94.87ms
+step:224/1705 train_time:21253ms step_avg:94.88ms
+step:225/1705 train_time:21351ms step_avg:94.89ms
+step:226/1705 train_time:21444ms step_avg:94.89ms
+step:227/1705 train_time:21537ms step_avg:94.88ms
+step:228/1705 train_time:21629ms step_avg:94.86ms
+step:229/1705 train_time:21721ms step_avg:94.85ms
+step:230/1705 train_time:21813ms step_avg:94.84ms
+step:231/1705 train_time:21906ms step_avg:94.83ms
+step:232/1705 train_time:21998ms step_avg:94.82ms
+step:233/1705 train_time:22090ms step_avg:94.81ms
+step:234/1705 train_time:22184ms step_avg:94.80ms
+step:235/1705 train_time:22278ms step_avg:94.80ms
+step:236/1705 train_time:22371ms step_avg:94.79ms
+step:237/1705 train_time:22465ms step_avg:94.79ms
+step:238/1705 train_time:22558ms step_avg:94.78ms
+step:239/1705 train_time:22650ms step_avg:94.77ms
+step:240/1705 train_time:22744ms step_avg:94.77ms
+step:241/1705 train_time:22836ms step_avg:94.76ms
+step:242/1705 train_time:22928ms step_avg:94.75ms
+step:243/1705 train_time:23021ms step_avg:94.73ms
+step:244/1705 train_time:23113ms step_avg:94.73ms
+step:245/1705 train_time:23207ms step_avg:94.72ms
+step:246/1705 train_time:23300ms step_avg:94.72ms
+step:247/1705 train_time:23393ms step_avg:94.71ms
+step:248/1705 train_time:23487ms step_avg:94.71ms
+step:249/1705 train_time:23580ms step_avg:94.70ms
+step:250/1705 train_time:23673ms step_avg:94.69ms
+step:250/1705 val_loss:3.9817 train_time:23766ms step_avg:95.06ms
+step:251/1705 train_time:23789ms step_avg:94.78ms
+step:252/1705 train_time:23864ms step_avg:94.70ms
+step:253/1705 train_time:23965ms step_avg:94.72ms
+step:254/1705 train_time:24059ms step_avg:94.72ms
+step:255/1705 train_time:24151ms step_avg:94.71ms
+step:256/1705 train_time:24243ms step_avg:94.70ms
+step:257/1705 train_time:24335ms step_avg:94.69ms
+step:258/1705 train_time:24427ms step_avg:94.68ms
+step:259/1705 train_time:24518ms step_avg:94.67ms
+step:260/1705 train_time:24610ms step_avg:94.65ms
+step:261/1705 train_time:24702ms step_avg:94.64ms
+step:262/1705 train_time:24796ms step_avg:94.64ms
+step:263/1705 train_time:24891ms step_avg:94.64ms
+step:264/1705 train_time:24984ms step_avg:94.64ms
+step:265/1705 train_time:25078ms step_avg:94.63ms
+step:266/1705 train_time:25171ms step_avg:94.63ms
+step:267/1705 train_time:25263ms step_avg:94.62ms
+step:268/1705 train_time:25355ms step_avg:94.61ms
+step:269/1705 train_time:25447ms step_avg:94.60ms
+step:270/1705 train_time:25540ms step_avg:94.59ms
+step:271/1705 train_time:25632ms step_avg:94.58ms
+step:272/1705 train_time:25724ms step_avg:94.57ms
+step:273/1705 train_time:25818ms step_avg:94.57ms
+step:274/1705 train_time:25912ms step_avg:94.57ms
+step:275/1705 train_time:26005ms step_avg:94.56ms
+step:276/1705 train_time:26099ms step_avg:94.56ms
+step:277/1705 train_time:26192ms step_avg:94.55ms
+step:278/1705 train_time:26283ms step_avg:94.54ms
+step:279/1705 train_time:26376ms step_avg:94.54ms
+step:280/1705 train_time:26467ms step_avg:94.53ms
+step:281/1705 train_time:26559ms step_avg:94.52ms
+step:282/1705 train_time:26652ms step_avg:94.51ms
+step:283/1705 train_time:26744ms step_avg:94.50ms
+step:284/1705 train_time:26837ms step_avg:94.50ms
+step:285/1705 train_time:26930ms step_avg:94.49ms
+step:286/1705 train_time:27023ms step_avg:94.49ms
+step:287/1705 train_time:27116ms step_avg:94.48ms
+step:288/1705 train_time:27208ms step_avg:94.47ms
+step:289/1705 train_time:27302ms step_avg:94.47ms
+step:290/1705 train_time:27394ms step_avg:94.46ms
+step:291/1705 train_time:27486ms step_avg:94.45ms
+step:292/1705 train_time:27579ms step_avg:94.45ms
+step:293/1705 train_time:27671ms step_avg:94.44ms
+step:294/1705 train_time:27763ms step_avg:94.43ms
+step:295/1705 train_time:27857ms step_avg:94.43ms
+step:296/1705 train_time:27950ms step_avg:94.43ms
+step:297/1705 train_time:28044ms step_avg:94.42ms
+step:298/1705 train_time:28138ms step_avg:94.42ms
+step:299/1705 train_time:28229ms step_avg:94.41ms
+step:300/1705 train_time:28322ms step_avg:94.41ms
+step:301/1705 train_time:28415ms step_avg:94.40ms
+step:302/1705 train_time:28507ms step_avg:94.40ms
+step:303/1705 train_time:28601ms step_avg:94.39ms
+step:304/1705 train_time:28693ms step_avg:94.38ms
+step:305/1705 train_time:28785ms step_avg:94.38ms
+step:306/1705 train_time:28879ms step_avg:94.37ms
+step:307/1705 train_time:28971ms step_avg:94.37ms
+step:308/1705 train_time:29064ms step_avg:94.36ms
+step:309/1705 train_time:29157ms step_avg:94.36ms
+step:310/1705 train_time:29250ms step_avg:94.35ms
+step:311/1705 train_time:29343ms step_avg:94.35ms
+step:312/1705 train_time:29436ms step_avg:94.35ms
+step:313/1705 train_time:29530ms step_avg:94.34ms
+step:314/1705 train_time:29622ms step_avg:94.34ms
+step:315/1705 train_time:29714ms step_avg:94.33ms
+step:316/1705 train_time:29807ms step_avg:94.33ms
+step:317/1705 train_time:29901ms step_avg:94.32ms
+step:318/1705 train_time:29993ms step_avg:94.32ms
+step:319/1705 train_time:30085ms step_avg:94.31ms
+step:320/1705 train_time:30179ms step_avg:94.31ms
+step:321/1705 train_time:30272ms step_avg:94.30ms
+step:322/1705 train_time:30364ms step_avg:94.30ms
+step:323/1705 train_time:30457ms step_avg:94.29ms
+step:324/1705 train_time:30549ms step_avg:94.29ms
+step:325/1705 train_time:30643ms step_avg:94.29ms
+step:326/1705 train_time:30736ms step_avg:94.28ms
+step:327/1705 train_time:30828ms step_avg:94.28ms
+step:328/1705 train_time:30921ms step_avg:94.27ms
+step:329/1705 train_time:31014ms step_avg:94.27ms
+step:330/1705 train_time:31106ms step_avg:94.26ms
+step:331/1705 train_time:31199ms step_avg:94.26ms
+step:332/1705 train_time:31293ms step_avg:94.26ms
+step:333/1705 train_time:31385ms step_avg:94.25ms
+step:334/1705 train_time:31478ms step_avg:94.25ms
+step:335/1705 train_time:31570ms step_avg:94.24ms
+step:336/1705 train_time:31663ms step_avg:94.24ms
+step:337/1705 train_time:31756ms step_avg:94.23ms
+step:338/1705 train_time:31849ms step_avg:94.23ms
+step:339/1705 train_time:31941ms step_avg:94.22ms
+step:340/1705 train_time:32034ms step_avg:94.22ms
+step:341/1705 train_time:32126ms step_avg:94.21ms
+step:342/1705 train_time:32219ms step_avg:94.21ms
+step:343/1705 train_time:32311ms step_avg:94.20ms
+step:344/1705 train_time:32404ms step_avg:94.20ms
+step:345/1705 train_time:32498ms step_avg:94.20ms
+step:346/1705 train_time:32590ms step_avg:94.19ms
+step:347/1705 train_time:32683ms step_avg:94.19ms
+step:348/1705 train_time:32776ms step_avg:94.18ms
+step:349/1705 train_time:32868ms step_avg:94.18ms
+step:350/1705 train_time:32962ms step_avg:94.18ms
+step:351/1705 train_time:33056ms step_avg:94.18ms
+step:352/1705 train_time:33148ms step_avg:94.17ms
+step:353/1705 train_time:33242ms step_avg:94.17ms
+step:354/1705 train_time:33335ms step_avg:94.17ms
+step:355/1705 train_time:33427ms step_avg:94.16ms
+step:356/1705 train_time:33520ms step_avg:94.16ms
+step:357/1705 train_time:33613ms step_avg:94.15ms
+step:358/1705 train_time:33706ms step_avg:94.15ms
+step:359/1705 train_time:33798ms step_avg:94.14ms
+step:360/1705 train_time:33891ms step_avg:94.14ms
+step:361/1705 train_time:33983ms step_avg:94.14ms
+step:362/1705 train_time:34076ms step_avg:94.13ms
+step:363/1705 train_time:34168ms step_avg:94.13ms
+step:364/1705 train_time:34261ms step_avg:94.12ms
+step:365/1705 train_time:34354ms step_avg:94.12ms
+step:366/1705 train_time:34446ms step_avg:94.11ms
+step:367/1705 train_time:34539ms step_avg:94.11ms
+step:368/1705 train_time:34632ms step_avg:94.11ms
+step:369/1705 train_time:34724ms step_avg:94.10ms
+step:370/1705 train_time:34818ms step_avg:94.10ms
+step:371/1705 train_time:34910ms step_avg:94.10ms
+step:372/1705 train_time:35003ms step_avg:94.09ms
+step:373/1705 train_time:35096ms step_avg:94.09ms
+step:374/1705 train_time:35189ms step_avg:94.09ms
+step:375/1705 train_time:35282ms step_avg:94.08ms
+step:375/1705 val_loss:3.8213 train_time:35375ms step_avg:94.33ms
+step:376/1705 train_time:35398ms step_avg:94.14ms
+step:377/1705 train_time:35473ms step_avg:94.09ms
+step:378/1705 train_time:35574ms step_avg:94.11ms
+step:379/1705 train_time:35667ms step_avg:94.11ms
+step:380/1705 train_time:35759ms step_avg:94.10ms
+step:381/1705 train_time:35851ms step_avg:94.10ms
+step:382/1705 train_time:35943ms step_avg:94.09ms
+step:383/1705 train_time:36034ms step_avg:94.08ms
+step:384/1705 train_time:36126ms step_avg:94.08ms
+step:385/1705 train_time:36217ms step_avg:94.07ms
+step:386/1705 train_time:36309ms step_avg:94.06ms
+step:387/1705 train_time:36402ms step_avg:94.06ms
+step:388/1705 train_time:36496ms step_avg:94.06ms
+step:389/1705 train_time:36592ms step_avg:94.07ms
+step:390/1705 train_time:36685ms step_avg:94.06ms
+step:391/1705 train_time:36777ms step_avg:94.06ms
+step:392/1705 train_time:36870ms step_avg:94.06ms
+step:393/1705 train_time:36963ms step_avg:94.05ms
+step:394/1705 train_time:37055ms step_avg:94.05ms
+step:395/1705 train_time:37148ms step_avg:94.04ms
+step:396/1705 train_time:37239ms step_avg:94.04ms
+step:397/1705 train_time:37331ms step_avg:94.03ms
+step:398/1705 train_time:37424ms step_avg:94.03ms
+step:399/1705 train_time:37518ms step_avg:94.03ms
+step:400/1705 train_time:37613ms step_avg:94.03ms
+step:401/1705 train_time:37706ms step_avg:94.03ms
+step:402/1705 train_time:37798ms step_avg:94.02ms
+step:403/1705 train_time:37891ms step_avg:94.02ms
+step:404/1705 train_time:37983ms step_avg:94.02ms
+step:405/1705 train_time:38075ms step_avg:94.01ms
+step:406/1705 train_time:38168ms step_avg:94.01ms
+step:407/1705 train_time:38260ms step_avg:94.00ms
+step:408/1705 train_time:38353ms step_avg:94.00ms
+step:409/1705 train_time:38446ms step_avg:94.00ms
+step:410/1705 train_time:38538ms step_avg:94.00ms
+step:411/1705 train_time:38632ms step_avg:94.00ms
+step:412/1705 train_time:38725ms step_avg:93.99ms
+step:413/1705 train_time:38818ms step_avg:93.99ms
+step:414/1705 train_time:38911ms step_avg:93.99ms
+step:415/1705 train_time:39004ms step_avg:93.99ms
+step:416/1705 train_time:39096ms step_avg:93.98ms
+step:417/1705 train_time:39188ms step_avg:93.98ms
+step:418/1705 train_time:39280ms step_avg:93.97ms
+step:419/1705 train_time:39374ms step_avg:93.97ms
+step:420/1705 train_time:39466ms step_avg:93.97ms
+step:421/1705 train_time:39559ms step_avg:93.96ms
+step:422/1705 train_time:39652ms step_avg:93.96ms
+step:423/1705 train_time:39745ms step_avg:93.96ms
+step:424/1705 train_time:39837ms step_avg:93.96ms
+step:425/1705 train_time:40139ms step_avg:94.44ms
+step:426/1705 train_time:40218ms step_avg:94.41ms
+step:427/1705 train_time:40310ms step_avg:94.40ms
+step:428/1705 train_time:40402ms step_avg:94.40ms
+step:429/1705 train_time:40494ms step_avg:94.39ms
+step:430/1705 train_time:40586ms step_avg:94.39ms
+step:431/1705 train_time:40677ms step_avg:94.38ms
+step:432/1705 train_time:40770ms step_avg:94.37ms
+step:433/1705 train_time:40861ms step_avg:94.37ms
+step:434/1705 train_time:40953ms step_avg:94.36ms
+step:435/1705 train_time:41049ms step_avg:94.36ms
+step:436/1705 train_time:41144ms step_avg:94.37ms
+step:437/1705 train_time:41237ms step_avg:94.36ms
+step:438/1705 train_time:41331ms step_avg:94.36ms
+step:439/1705 train_time:41423ms step_avg:94.36ms
+step:440/1705 train_time:41516ms step_avg:94.36ms
+step:441/1705 train_time:41608ms step_avg:94.35ms
+step:442/1705 train_time:41700ms step_avg:94.34ms
+step:443/1705 train_time:41792ms step_avg:94.34ms
+step:444/1705 train_time:41884ms step_avg:94.33ms
+step:445/1705 train_time:41976ms step_avg:94.33ms
+step:446/1705 train_time:42070ms step_avg:94.33ms
+step:447/1705 train_time:42164ms step_avg:94.33ms
+step:448/1705 train_time:42256ms step_avg:94.32ms
+step:449/1705 train_time:42349ms step_avg:94.32ms
+step:450/1705 train_time:42443ms step_avg:94.32ms
+step:451/1705 train_time:42535ms step_avg:94.31ms
+step:452/1705 train_time:42628ms step_avg:94.31ms
+step:453/1705 train_time:42720ms step_avg:94.30ms
+step:454/1705 train_time:42813ms step_avg:94.30ms
+step:455/1705 train_time:42905ms step_avg:94.30ms
+step:456/1705 train_time:42997ms step_avg:94.29ms
+step:457/1705 train_time:43091ms step_avg:94.29ms
+step:458/1705 train_time:43184ms step_avg:94.29ms
+step:459/1705 train_time:43277ms step_avg:94.29ms
+step:460/1705 train_time:43370ms step_avg:94.28ms
+step:461/1705 train_time:43463ms step_avg:94.28ms
+step:462/1705 train_time:43555ms step_avg:94.28ms
+step:463/1705 train_time:43648ms step_avg:94.27ms
+step:464/1705 train_time:43741ms step_avg:94.27ms
+step:465/1705 train_time:43834ms step_avg:94.27ms
+step:466/1705 train_time:43926ms step_avg:94.26ms
+step:467/1705 train_time:44019ms step_avg:94.26ms
+step:468/1705 train_time:44112ms step_avg:94.26ms
+step:469/1705 train_time:44205ms step_avg:94.25ms
+step:470/1705 train_time:44297ms step_avg:94.25ms
+step:471/1705 train_time:44390ms step_avg:94.25ms
+step:472/1705 train_time:44483ms step_avg:94.24ms
+step:473/1705 train_time:44575ms step_avg:94.24ms
+step:474/1705 train_time:44668ms step_avg:94.24ms
+step:475/1705 train_time:44760ms step_avg:94.23ms
+step:476/1705 train_time:44853ms step_avg:94.23ms
+step:477/1705 train_time:44946ms step_avg:94.23ms
+step:478/1705 train_time:45038ms step_avg:94.22ms
+step:479/1705 train_time:45131ms step_avg:94.22ms
+step:480/1705 train_time:45224ms step_avg:94.22ms
+step:481/1705 train_time:45317ms step_avg:94.21ms
+step:482/1705 train_time:45411ms step_avg:94.21ms
+step:483/1705 train_time:45504ms step_avg:94.21ms
+step:484/1705 train_time:45596ms step_avg:94.21ms
+step:485/1705 train_time:45689ms step_avg:94.20ms
+step:486/1705 train_time:45782ms step_avg:94.20ms
+step:487/1705 train_time:45875ms step_avg:94.20ms
+step:488/1705 train_time:45968ms step_avg:94.20ms
+step:489/1705 train_time:46060ms step_avg:94.19ms
+step:490/1705 train_time:46153ms step_avg:94.19ms
+step:491/1705 train_time:46245ms step_avg:94.19ms
+step:492/1705 train_time:46338ms step_avg:94.18ms
+step:493/1705 train_time:46431ms step_avg:94.18ms
+step:494/1705 train_time:46524ms step_avg:94.18ms
+step:495/1705 train_time:46616ms step_avg:94.17ms
+step:496/1705 train_time:46710ms step_avg:94.17ms
+step:497/1705 train_time:46803ms step_avg:94.17ms
+step:498/1705 train_time:46896ms step_avg:94.17ms
+step:499/1705 train_time:46989ms step_avg:94.17ms
+step:500/1705 train_time:47081ms step_avg:94.16ms
+step:500/1705 val_loss:3.7198 train_time:47174ms step_avg:94.35ms
+step:501/1705 train_time:47198ms step_avg:94.21ms
+step:502/1705 train_time:47272ms step_avg:94.17ms
+step:503/1705 train_time:47369ms step_avg:94.17ms
+step:504/1705 train_time:47463ms step_avg:94.17ms
+step:505/1705 train_time:47556ms step_avg:94.17ms
+step:506/1705 train_time:47648ms step_avg:94.17ms
+step:507/1705 train_time:47740ms step_avg:94.16ms
+step:508/1705 train_time:47832ms step_avg:94.16ms
+step:509/1705 train_time:47924ms step_avg:94.15ms
+step:510/1705 train_time:48016ms step_avg:94.15ms
+step:511/1705 train_time:48108ms step_avg:94.14ms
+step:512/1705 train_time:48202ms step_avg:94.14ms
+step:513/1705 train_time:48298ms step_avg:94.15ms
+step:514/1705 train_time:48391ms step_avg:94.15ms
+step:515/1705 train_time:48485ms step_avg:94.15ms
+step:516/1705 train_time:48579ms step_avg:94.14ms
+step:517/1705 train_time:48671ms step_avg:94.14ms
+step:518/1705 train_time:48763ms step_avg:94.14ms
+step:519/1705 train_time:48855ms step_avg:94.13ms
+step:520/1705 train_time:48947ms step_avg:94.13ms
+step:521/1705 train_time:49039ms step_avg:94.12ms
+step:522/1705 train_time:49131ms step_avg:94.12ms
+step:523/1705 train_time:49224ms step_avg:94.12ms
+step:524/1705 train_time:49318ms step_avg:94.12ms
+step:525/1705 train_time:49411ms step_avg:94.12ms
+step:526/1705 train_time:49505ms step_avg:94.12ms
+step:527/1705 train_time:49599ms step_avg:94.12ms
+step:528/1705 train_time:49692ms step_avg:94.11ms
+step:529/1705 train_time:49785ms step_avg:94.11ms
+step:530/1705 train_time:49877ms step_avg:94.11ms
+step:531/1705 train_time:49969ms step_avg:94.10ms
+step:532/1705 train_time:50061ms step_avg:94.10ms
+step:533/1705 train_time:50154ms step_avg:94.10ms
+step:534/1705 train_time:50246ms step_avg:94.09ms
+step:535/1705 train_time:50340ms step_avg:94.09ms
+step:536/1705 train_time:50433ms step_avg:94.09ms
+step:537/1705 train_time:50527ms step_avg:94.09ms
+step:538/1705 train_time:50620ms step_avg:94.09ms
+step:539/1705 train_time:50713ms step_avg:94.09ms
+step:540/1705 train_time:50806ms step_avg:94.08ms
+step:541/1705 train_time:50898ms step_avg:94.08ms
+step:542/1705 train_time:50990ms step_avg:94.08ms
+step:543/1705 train_time:51083ms step_avg:94.07ms
+step:544/1705 train_time:51175ms step_avg:94.07ms
+step:545/1705 train_time:51267ms step_avg:94.07ms
+step:546/1705 train_time:51361ms step_avg:94.07ms
+step:547/1705 train_time:51453ms step_avg:94.06ms
+step:548/1705 train_time:51546ms step_avg:94.06ms
+step:549/1705 train_time:51640ms step_avg:94.06ms
+step:550/1705 train_time:51733ms step_avg:94.06ms
+step:551/1705 train_time:51825ms step_avg:94.06ms
+step:552/1705 train_time:51919ms step_avg:94.06ms
+step:553/1705 train_time:52011ms step_avg:94.05ms
+step:554/1705 train_time:52104ms step_avg:94.05ms
+step:555/1705 train_time:52197ms step_avg:94.05ms
+step:556/1705 train_time:52289ms step_avg:94.04ms
+step:557/1705 train_time:52382ms step_avg:94.04ms
+step:558/1705 train_time:52476ms step_avg:94.04ms
+step:559/1705 train_time:52569ms step_avg:94.04ms
+step:560/1705 train_time:52662ms step_avg:94.04ms
+step:561/1705 train_time:52757ms step_avg:94.04ms
+step:562/1705 train_time:52849ms step_avg:94.04ms
+step:563/1705 train_time:52942ms step_avg:94.04ms
+step:564/1705 train_time:53034ms step_avg:94.03ms
+step:565/1705 train_time:53127ms step_avg:94.03ms
+step:566/1705 train_time:53220ms step_avg:94.03ms
+step:567/1705 train_time:53312ms step_avg:94.02ms
+step:568/1705 train_time:53405ms step_avg:94.02ms
+step:569/1705 train_time:53499ms step_avg:94.02ms
+step:570/1705 train_time:53592ms step_avg:94.02ms
+step:571/1705 train_time:53686ms step_avg:94.02ms
+step:572/1705 train_time:53781ms step_avg:94.02ms
+step:573/1705 train_time:53876ms step_avg:94.02ms
+step:574/1705 train_time:53969ms step_avg:94.02ms
+step:575/1705 train_time:54063ms step_avg:94.02ms
+step:576/1705 train_time:54159ms step_avg:94.03ms
+step:577/1705 train_time:54253ms step_avg:94.03ms
+step:578/1705 train_time:54346ms step_avg:94.02ms
+step:579/1705 train_time:54440ms step_avg:94.02ms
+step:580/1705 train_time:54534ms step_avg:94.02ms
+step:581/1705 train_time:54628ms step_avg:94.02ms
+step:582/1705 train_time:54723ms step_avg:94.03ms
+step:583/1705 train_time:54817ms step_avg:94.03ms
+step:584/1705 train_time:54911ms step_avg:94.03ms
+step:585/1705 train_time:55005ms step_avg:94.03ms
+step:586/1705 train_time:55099ms step_avg:94.03ms
+step:587/1705 train_time:55194ms step_avg:94.03ms
+step:588/1705 train_time:55287ms step_avg:94.03ms
+step:589/1705 train_time:55382ms step_avg:94.03ms
+step:590/1705 train_time:55476ms step_avg:94.03ms
+step:591/1705 train_time:55570ms step_avg:94.03ms
+step:592/1705 train_time:55664ms step_avg:94.03ms
+step:593/1705 train_time:55759ms step_avg:94.03ms
+step:594/1705 train_time:55853ms step_avg:94.03ms
+step:595/1705 train_time:55947ms step_avg:94.03ms
+step:596/1705 train_time:56041ms step_avg:94.03ms
+step:597/1705 train_time:56135ms step_avg:94.03ms
+step:598/1705 train_time:56229ms step_avg:94.03ms
+step:599/1705 train_time:56323ms step_avg:94.03ms
+step:600/1705 train_time:56418ms step_avg:94.03ms
+step:601/1705 train_time:56512ms step_avg:94.03ms
+step:602/1705 train_time:56606ms step_avg:94.03ms
+step:603/1705 train_time:56700ms step_avg:94.03ms
+step:604/1705 train_time:56795ms step_avg:94.03ms
+step:605/1705 train_time:56888ms step_avg:94.03ms
+step:606/1705 train_time:56983ms step_avg:94.03ms
+step:607/1705 train_time:57077ms step_avg:94.03ms
+step:608/1705 train_time:57171ms step_avg:94.03ms
+step:609/1705 train_time:57265ms step_avg:94.03ms
+step:610/1705 train_time:57359ms step_avg:94.03ms
+step:611/1705 train_time:57453ms step_avg:94.03ms
+step:612/1705 train_time:57547ms step_avg:94.03ms
+step:613/1705 train_time:57642ms step_avg:94.03ms
+step:614/1705 train_time:57736ms step_avg:94.03ms
+step:615/1705 train_time:57830ms step_avg:94.03ms
+step:616/1705 train_time:57924ms step_avg:94.03ms
+step:617/1705 train_time:58018ms step_avg:94.03ms
+step:618/1705 train_time:58112ms step_avg:94.03ms
+step:619/1705 train_time:58206ms step_avg:94.03ms
+step:620/1705 train_time:58301ms step_avg:94.03ms
+step:621/1705 train_time:58394ms step_avg:94.03ms
+step:622/1705 train_time:58488ms step_avg:94.03ms
+step:623/1705 train_time:58582ms step_avg:94.03ms
+step:624/1705 train_time:58676ms step_avg:94.03ms
+step:625/1705 train_time:58770ms step_avg:94.03ms
+step:625/1705 val_loss:3.6221 train_time:58864ms step_avg:94.18ms
+step:626/1705 train_time:58889ms step_avg:94.07ms
+step:627/1705 train_time:58971ms step_avg:94.05ms
+step:628/1705 train_time:59072ms step_avg:94.06ms
+step:629/1705 train_time:59167ms step_avg:94.07ms
+step:630/1705 train_time:59260ms step_avg:94.06ms
+step:631/1705 train_time:59353ms step_avg:94.06ms
+step:632/1705 train_time:59446ms step_avg:94.06ms
+step:633/1705 train_time:59539ms step_avg:94.06ms
+step:634/1705 train_time:59632ms step_avg:94.06ms
+step:635/1705 train_time:59725ms step_avg:94.06ms
+step:636/1705 train_time:59819ms step_avg:94.05ms
+step:637/1705 train_time:59915ms step_avg:94.06ms
+step:638/1705 train_time:60012ms step_avg:94.06ms
+step:639/1705 train_time:60277ms step_avg:94.33ms
+step:640/1705 train_time:60427ms step_avg:94.42ms
+step:641/1705 train_time:60519ms step_avg:94.41ms
+step:642/1705 train_time:60612ms step_avg:94.41ms
+step:643/1705 train_time:60705ms step_avg:94.41ms
+step:644/1705 train_time:60798ms step_avg:94.41ms
+step:645/1705 train_time:60891ms step_avg:94.40ms
+step:646/1705 train_time:60984ms step_avg:94.40ms
+step:647/1705 train_time:61076ms step_avg:94.40ms
+step:648/1705 train_time:61169ms step_avg:94.40ms
+step:649/1705 train_time:61265ms step_avg:94.40ms
+step:650/1705 train_time:61362ms step_avg:94.40ms
+step:651/1705 train_time:61460ms step_avg:94.41ms
+step:652/1705 train_time:61553ms step_avg:94.41ms
+step:653/1705 train_time:61647ms step_avg:94.41ms
+step:654/1705 train_time:61740ms step_avg:94.40ms
+step:655/1705 train_time:61834ms step_avg:94.40ms
+step:656/1705 train_time:61927ms step_avg:94.40ms
+step:657/1705 train_time:62020ms step_avg:94.40ms
+step:658/1705 train_time:62112ms step_avg:94.40ms
+step:659/1705 train_time:62206ms step_avg:94.40ms
+step:660/1705 train_time:62302ms step_avg:94.40ms
+step:661/1705 train_time:62397ms step_avg:94.40ms
+step:662/1705 train_time:62491ms step_avg:94.40ms
+step:663/1705 train_time:62586ms step_avg:94.40ms
+step:664/1705 train_time:62680ms step_avg:94.40ms
+step:665/1705 train_time:62774ms step_avg:94.40ms
+step:666/1705 train_time:62867ms step_avg:94.39ms
+step:667/1705 train_time:62961ms step_avg:94.39ms
+step:668/1705 train_time:63054ms step_avg:94.39ms
+step:669/1705 train_time:63147ms step_avg:94.39ms
+step:670/1705 train_time:63241ms step_avg:94.39ms
+step:671/1705 train_time:63334ms step_avg:94.39ms
+step:672/1705 train_time:63429ms step_avg:94.39ms
+step:673/1705 train_time:63523ms step_avg:94.39ms
+step:674/1705 train_time:63617ms step_avg:94.39ms
+step:675/1705 train_time:63710ms step_avg:94.39ms
+step:676/1705 train_time:63805ms step_avg:94.39ms
+step:677/1705 train_time:63899ms step_avg:94.39ms
+step:678/1705 train_time:63993ms step_avg:94.39ms
+step:679/1705 train_time:64086ms step_avg:94.38ms
+step:680/1705 train_time:64180ms step_avg:94.38ms
+step:681/1705 train_time:64273ms step_avg:94.38ms
+step:682/1705 train_time:64367ms step_avg:94.38ms
+step:683/1705 train_time:64461ms step_avg:94.38ms
+step:684/1705 train_time:64556ms step_avg:94.38ms
+step:685/1705 train_time:64650ms step_avg:94.38ms
+step:686/1705 train_time:64744ms step_avg:94.38ms
+step:687/1705 train_time:64838ms step_avg:94.38ms
+step:688/1705 train_time:64931ms step_avg:94.38ms
+step:689/1705 train_time:65025ms step_avg:94.38ms
+step:690/1705 train_time:65119ms step_avg:94.37ms
+step:691/1705 train_time:65212ms step_avg:94.37ms
+step:692/1705 train_time:65307ms step_avg:94.37ms
+step:693/1705 train_time:65401ms step_avg:94.37ms
+step:694/1705 train_time:65494ms step_avg:94.37ms
+step:695/1705 train_time:65588ms step_avg:94.37ms
+step:696/1705 train_time:65683ms step_avg:94.37ms
+step:697/1705 train_time:65778ms step_avg:94.37ms
+step:698/1705 train_time:65871ms step_avg:94.37ms
+step:699/1705 train_time:65965ms step_avg:94.37ms
+step:700/1705 train_time:66059ms step_avg:94.37ms
+step:701/1705 train_time:66153ms step_avg:94.37ms
+step:702/1705 train_time:66247ms step_avg:94.37ms
+step:703/1705 train_time:66341ms step_avg:94.37ms
+step:704/1705 train_time:66434ms step_avg:94.37ms
+step:705/1705 train_time:66527ms step_avg:94.37ms
+step:706/1705 train_time:66622ms step_avg:94.37ms
+step:707/1705 train_time:66717ms step_avg:94.37ms
+step:708/1705 train_time:66810ms step_avg:94.36ms
+step:709/1705 train_time:66905ms step_avg:94.37ms
+step:710/1705 train_time:66999ms step_avg:94.37ms
+step:711/1705 train_time:67092ms step_avg:94.36ms
+step:712/1705 train_time:67186ms step_avg:94.36ms
+step:713/1705 train_time:67280ms step_avg:94.36ms
+step:714/1705 train_time:67373ms step_avg:94.36ms
+step:715/1705 train_time:67467ms step_avg:94.36ms
+step:716/1705 train_time:67561ms step_avg:94.36ms
+step:717/1705 train_time:67655ms step_avg:94.36ms
+step:718/1705 train_time:67748ms step_avg:94.36ms
+step:719/1705 train_time:67843ms step_avg:94.36ms
+step:720/1705 train_time:67937ms step_avg:94.36ms
+step:721/1705 train_time:68030ms step_avg:94.36ms
+step:722/1705 train_time:68125ms step_avg:94.36ms
+step:723/1705 train_time:68218ms step_avg:94.35ms
+step:724/1705 train_time:68312ms step_avg:94.35ms
+step:725/1705 train_time:68406ms step_avg:94.35ms
+step:726/1705 train_time:68500ms step_avg:94.35ms
+step:727/1705 train_time:68594ms step_avg:94.35ms
+step:728/1705 train_time:68688ms step_avg:94.35ms
+step:729/1705 train_time:68782ms step_avg:94.35ms
+step:730/1705 train_time:68876ms step_avg:94.35ms
+step:731/1705 train_time:68970ms step_avg:94.35ms
+step:732/1705 train_time:69065ms step_avg:94.35ms
+step:733/1705 train_time:69159ms step_avg:94.35ms
+step:734/1705 train_time:69252ms step_avg:94.35ms
+step:735/1705 train_time:69346ms step_avg:94.35ms
+step:736/1705 train_time:69440ms step_avg:94.35ms
+step:737/1705 train_time:69534ms step_avg:94.35ms
+step:738/1705 train_time:69629ms step_avg:94.35ms
+step:739/1705 train_time:69724ms step_avg:94.35ms
+step:740/1705 train_time:69818ms step_avg:94.35ms
+step:741/1705 train_time:69911ms step_avg:94.35ms
+step:742/1705 train_time:70006ms step_avg:94.35ms
+step:743/1705 train_time:70099ms step_avg:94.35ms
+step:744/1705 train_time:70193ms step_avg:94.35ms
+step:745/1705 train_time:70288ms step_avg:94.35ms
+step:746/1705 train_time:70382ms step_avg:94.35ms
+step:747/1705 train_time:70475ms step_avg:94.34ms
+step:748/1705 train_time:70569ms step_avg:94.34ms
+step:749/1705 train_time:70664ms step_avg:94.34ms
+step:750/1705 train_time:70757ms step_avg:94.34ms
+step:750/1705 val_loss:3.5651 train_time:70852ms step_avg:94.47ms
+step:751/1705 train_time:70875ms step_avg:94.37ms
+step:752/1705 train_time:70954ms step_avg:94.35ms
+step:753/1705 train_time:71052ms step_avg:94.36ms
+step:754/1705 train_time:71147ms step_avg:94.36ms
+step:755/1705 train_time:71241ms step_avg:94.36ms
+step:756/1705 train_time:71334ms step_avg:94.36ms
+step:757/1705 train_time:71428ms step_avg:94.36ms
+step:758/1705 train_time:71521ms step_avg:94.35ms
+step:759/1705 train_time:71614ms step_avg:94.35ms
+step:760/1705 train_time:71707ms step_avg:94.35ms
+step:761/1705 train_time:71800ms step_avg:94.35ms
+step:762/1705 train_time:71895ms step_avg:94.35ms
+step:763/1705 train_time:71991ms step_avg:94.35ms
+step:764/1705 train_time:72087ms step_avg:94.35ms
+step:765/1705 train_time:72182ms step_avg:94.36ms
+step:766/1705 train_time:72277ms step_avg:94.36ms
+step:767/1705 train_time:72370ms step_avg:94.36ms
+step:768/1705 train_time:72464ms step_avg:94.35ms
+step:769/1705 train_time:72557ms step_avg:94.35ms
+step:770/1705 train_time:72650ms step_avg:94.35ms
+step:771/1705 train_time:72744ms step_avg:94.35ms
+step:772/1705 train_time:72838ms step_avg:94.35ms
+step:773/1705 train_time:72932ms step_avg:94.35ms
+step:774/1705 train_time:73027ms step_avg:94.35ms
+step:775/1705 train_time:73122ms step_avg:94.35ms
+step:776/1705 train_time:73216ms step_avg:94.35ms
+step:777/1705 train_time:73310ms step_avg:94.35ms
+step:778/1705 train_time:73404ms step_avg:94.35ms
+step:779/1705 train_time:73497ms step_avg:94.35ms
+step:780/1705 train_time:73590ms step_avg:94.35ms
+step:781/1705 train_time:73684ms step_avg:94.35ms
+step:782/1705 train_time:73779ms step_avg:94.35ms
+step:783/1705 train_time:73873ms step_avg:94.35ms
+step:784/1705 train_time:73967ms step_avg:94.35ms
+step:785/1705 train_time:74062ms step_avg:94.35ms
+step:786/1705 train_time:74156ms step_avg:94.35ms
+step:787/1705 train_time:74250ms step_avg:94.35ms
+step:788/1705 train_time:74346ms step_avg:94.35ms
+step:789/1705 train_time:74440ms step_avg:94.35ms
+step:790/1705 train_time:74533ms step_avg:94.35ms
+step:791/1705 train_time:74627ms step_avg:94.34ms
+step:792/1705 train_time:74721ms step_avg:94.34ms
+step:793/1705 train_time:74815ms step_avg:94.34ms
+step:794/1705 train_time:74909ms step_avg:94.34ms
+step:795/1705 train_time:75004ms step_avg:94.35ms
+step:796/1705 train_time:75099ms step_avg:94.34ms
+step:797/1705 train_time:75192ms step_avg:94.34ms
+step:798/1705 train_time:75287ms step_avg:94.34ms
+step:799/1705 train_time:75382ms step_avg:94.35ms
+step:800/1705 train_time:75475ms step_avg:94.34ms
+step:801/1705 train_time:75569ms step_avg:94.34ms
+step:802/1705 train_time:75663ms step_avg:94.34ms
+step:803/1705 train_time:75758ms step_avg:94.34ms
+step:804/1705 train_time:75851ms step_avg:94.34ms
+step:805/1705 train_time:75946ms step_avg:94.34ms
+step:806/1705 train_time:76040ms step_avg:94.34ms
+step:807/1705 train_time:76134ms step_avg:94.34ms
+step:808/1705 train_time:76228ms step_avg:94.34ms
+step:809/1705 train_time:76324ms step_avg:94.34ms
+step:810/1705 train_time:76418ms step_avg:94.34ms
+step:811/1705 train_time:76511ms step_avg:94.34ms
+step:812/1705 train_time:76605ms step_avg:94.34ms
+step:813/1705 train_time:76699ms step_avg:94.34ms
+step:814/1705 train_time:76793ms step_avg:94.34ms
+step:815/1705 train_time:76888ms step_avg:94.34ms
+step:816/1705 train_time:76982ms step_avg:94.34ms
+step:817/1705 train_time:77077ms step_avg:94.34ms
+step:818/1705 train_time:77171ms step_avg:94.34ms
+step:819/1705 train_time:77265ms step_avg:94.34ms
+step:820/1705 train_time:77360ms step_avg:94.34ms
+step:821/1705 train_time:77453ms step_avg:94.34ms
+step:822/1705 train_time:77547ms step_avg:94.34ms
+step:823/1705 train_time:77641ms step_avg:94.34ms
+step:824/1705 train_time:77735ms step_avg:94.34ms
+step:825/1705 train_time:77829ms step_avg:94.34ms
+step:826/1705 train_time:77924ms step_avg:94.34ms
+step:827/1705 train_time:78019ms step_avg:94.34ms
+step:828/1705 train_time:78113ms step_avg:94.34ms
+step:829/1705 train_time:78207ms step_avg:94.34ms
+step:830/1705 train_time:78301ms step_avg:94.34ms
+step:831/1705 train_time:78395ms step_avg:94.34ms
+step:832/1705 train_time:78489ms step_avg:94.34ms
+step:833/1705 train_time:78583ms step_avg:94.34ms
+step:834/1705 train_time:78678ms step_avg:94.34ms
+step:835/1705 train_time:78772ms step_avg:94.34ms
+step:836/1705 train_time:78865ms step_avg:94.34ms
+step:837/1705 train_time:78959ms step_avg:94.34ms
+step:838/1705 train_time:79052ms step_avg:94.33ms
+step:839/1705 train_time:79147ms step_avg:94.33ms
+step:840/1705 train_time:79242ms step_avg:94.34ms
+step:841/1705 train_time:79336ms step_avg:94.34ms
+step:842/1705 train_time:79430ms step_avg:94.34ms
+step:843/1705 train_time:79524ms step_avg:94.33ms
+step:844/1705 train_time:79619ms step_avg:94.34ms
+step:845/1705 train_time:79713ms step_avg:94.33ms
+step:846/1705 train_time:79807ms step_avg:94.33ms
+step:847/1705 train_time:79901ms step_avg:94.33ms
+step:848/1705 train_time:79995ms step_avg:94.33ms
+step:849/1705 train_time:80089ms step_avg:94.33ms
+step:850/1705 train_time:80184ms step_avg:94.33ms
+step:851/1705 train_time:80459ms step_avg:94.55ms
+step:852/1705 train_time:80595ms step_avg:94.60ms
+step:853/1705 train_time:80688ms step_avg:94.59ms
+step:854/1705 train_time:80782ms step_avg:94.59ms
+step:855/1705 train_time:80874ms step_avg:94.59ms
+step:856/1705 train_time:80968ms step_avg:94.59ms
+step:857/1705 train_time:81061ms step_avg:94.59ms
+step:858/1705 train_time:81154ms step_avg:94.58ms
+step:859/1705 train_time:81247ms step_avg:94.58ms
+step:860/1705 train_time:81341ms step_avg:94.58ms
+step:861/1705 train_time:81436ms step_avg:94.58ms
+step:862/1705 train_time:81534ms step_avg:94.59ms
+step:863/1705 train_time:81630ms step_avg:94.59ms
+step:864/1705 train_time:81725ms step_avg:94.59ms
+step:865/1705 train_time:81819ms step_avg:94.59ms
+step:866/1705 train_time:81912ms step_avg:94.59ms
+step:867/1705 train_time:82006ms step_avg:94.59ms
+step:868/1705 train_time:82099ms step_avg:94.58ms
+step:869/1705 train_time:82192ms step_avg:94.58ms
+step:870/1705 train_time:82285ms step_avg:94.58ms
+step:871/1705 train_time:82379ms step_avg:94.58ms
+step:872/1705 train_time:82474ms step_avg:94.58ms
+step:873/1705 train_time:82570ms step_avg:94.58ms
+step:874/1705 train_time:82665ms step_avg:94.58ms
+step:875/1705 train_time:82759ms step_avg:94.58ms
+step:875/1705 val_loss:3.5242 train_time:82854ms step_avg:94.69ms
+step:876/1705 train_time:82878ms step_avg:94.61ms
+step:877/1705 train_time:82955ms step_avg:94.59ms
+step:878/1705 train_time:83052ms step_avg:94.59ms
+step:879/1705 train_time:83148ms step_avg:94.59ms
+step:880/1705 train_time:83241ms step_avg:94.59ms
+step:881/1705 train_time:83334ms step_avg:94.59ms
+step:882/1705 train_time:83427ms step_avg:94.59ms
+step:883/1705 train_time:83520ms step_avg:94.59ms
+step:884/1705 train_time:83613ms step_avg:94.58ms
+step:885/1705 train_time:83706ms step_avg:94.58ms
+step:886/1705 train_time:83801ms step_avg:94.58ms
+step:887/1705 train_time:83897ms step_avg:94.58ms
+step:888/1705 train_time:83993ms step_avg:94.59ms
+step:889/1705 train_time:84088ms step_avg:94.59ms
+step:890/1705 train_time:84182ms step_avg:94.59ms
+step:891/1705 train_time:84277ms step_avg:94.59ms
+step:892/1705 train_time:84371ms step_avg:94.59ms
+step:893/1705 train_time:84465ms step_avg:94.59ms
+step:894/1705 train_time:84558ms step_avg:94.58ms
+step:895/1705 train_time:84651ms step_avg:94.58ms
+step:896/1705 train_time:84745ms step_avg:94.58ms
+step:897/1705 train_time:84839ms step_avg:94.58ms
+step:898/1705 train_time:84933ms step_avg:94.58ms
+step:899/1705 train_time:85029ms step_avg:94.58ms
+step:900/1705 train_time:85123ms step_avg:94.58ms
+step:901/1705 train_time:85218ms step_avg:94.58ms
+step:902/1705 train_time:85312ms step_avg:94.58ms
+step:903/1705 train_time:85406ms step_avg:94.58ms
+step:904/1705 train_time:85500ms step_avg:94.58ms
+step:905/1705 train_time:85593ms step_avg:94.58ms
+step:906/1705 train_time:85687ms step_avg:94.58ms
+step:907/1705 train_time:85781ms step_avg:94.58ms
+step:908/1705 train_time:85875ms step_avg:94.58ms
+step:909/1705 train_time:85970ms step_avg:94.58ms
+step:910/1705 train_time:86065ms step_avg:94.58ms
+step:911/1705 train_time:86160ms step_avg:94.58ms
+step:912/1705 train_time:86254ms step_avg:94.58ms
+step:913/1705 train_time:86348ms step_avg:94.58ms
+step:914/1705 train_time:86441ms step_avg:94.57ms
+step:915/1705 train_time:86536ms step_avg:94.57ms
+step:916/1705 train_time:86629ms step_avg:94.57ms
+step:917/1705 train_time:86724ms step_avg:94.57ms
+step:918/1705 train_time:86818ms step_avg:94.57ms
+step:919/1705 train_time:86912ms step_avg:94.57ms
+step:920/1705 train_time:87007ms step_avg:94.57ms
+step:921/1705 train_time:87102ms step_avg:94.57ms
+step:922/1705 train_time:87196ms step_avg:94.57ms
+step:923/1705 train_time:87289ms step_avg:94.57ms
+step:924/1705 train_time:87384ms step_avg:94.57ms
+step:925/1705 train_time:87478ms step_avg:94.57ms
+step:926/1705 train_time:87571ms step_avg:94.57ms
+step:927/1705 train_time:87666ms step_avg:94.57ms
+step:928/1705 train_time:87761ms step_avg:94.57ms
+step:929/1705 train_time:87855ms step_avg:94.57ms
+step:930/1705 train_time:87948ms step_avg:94.57ms
+step:931/1705 train_time:88043ms step_avg:94.57ms
+step:932/1705 train_time:88138ms step_avg:94.57ms
+step:933/1705 train_time:88232ms step_avg:94.57ms
+step:934/1705 train_time:88326ms step_avg:94.57ms
+step:935/1705 train_time:88421ms step_avg:94.57ms
+step:936/1705 train_time:88515ms step_avg:94.57ms
+step:937/1705 train_time:88608ms step_avg:94.57ms
+step:938/1705 train_time:88703ms step_avg:94.57ms
+step:939/1705 train_time:88797ms step_avg:94.57ms
+step:940/1705 train_time:88891ms step_avg:94.56ms
+step:941/1705 train_time:88985ms step_avg:94.56ms
+step:942/1705 train_time:89081ms step_avg:94.57ms
+step:943/1705 train_time:89175ms step_avg:94.56ms
+step:944/1705 train_time:89269ms step_avg:94.56ms
+step:945/1705 train_time:89363ms step_avg:94.56ms
+step:946/1705 train_time:89458ms step_avg:94.56ms
+step:947/1705 train_time:89552ms step_avg:94.56ms
+step:948/1705 train_time:89646ms step_avg:94.56ms
+step:949/1705 train_time:89741ms step_avg:94.56ms
+step:950/1705 train_time:89835ms step_avg:94.56ms
+step:951/1705 train_time:89929ms step_avg:94.56ms
+step:952/1705 train_time:90024ms step_avg:94.56ms
+step:953/1705 train_time:90118ms step_avg:94.56ms
+step:954/1705 train_time:90212ms step_avg:94.56ms
+step:955/1705 train_time:90306ms step_avg:94.56ms
+step:956/1705 train_time:90400ms step_avg:94.56ms
+step:957/1705 train_time:90495ms step_avg:94.56ms
+step:958/1705 train_time:90589ms step_avg:94.56ms
+step:959/1705 train_time:90683ms step_avg:94.56ms
+step:960/1705 train_time:90777ms step_avg:94.56ms
+step:961/1705 train_time:90871ms step_avg:94.56ms
+step:962/1705 train_time:90966ms step_avg:94.56ms
+step:963/1705 train_time:91060ms step_avg:94.56ms
+step:964/1705 train_time:91155ms step_avg:94.56ms
+step:965/1705 train_time:91249ms step_avg:94.56ms
+step:966/1705 train_time:91344ms step_avg:94.56ms
+step:967/1705 train_time:91439ms step_avg:94.56ms
+step:968/1705 train_time:91532ms step_avg:94.56ms
+step:969/1705 train_time:91626ms step_avg:94.56ms
+step:970/1705 train_time:91722ms step_avg:94.56ms
+step:971/1705 train_time:91815ms step_avg:94.56ms
+step:972/1705 train_time:91909ms step_avg:94.56ms
+step:973/1705 train_time:92004ms step_avg:94.56ms
+step:974/1705 train_time:92099ms step_avg:94.56ms
+step:975/1705 train_time:92192ms step_avg:94.56ms
+step:976/1705 train_time:92287ms step_avg:94.56ms
+step:977/1705 train_time:92382ms step_avg:94.56ms
+step:978/1705 train_time:92476ms step_avg:94.56ms
+step:979/1705 train_time:92570ms step_avg:94.56ms
+step:980/1705 train_time:92664ms step_avg:94.56ms
+step:981/1705 train_time:92759ms step_avg:94.56ms
+step:982/1705 train_time:92853ms step_avg:94.56ms
+step:983/1705 train_time:92947ms step_avg:94.55ms
+step:984/1705 train_time:93041ms step_avg:94.55ms
+step:985/1705 train_time:93136ms step_avg:94.55ms
+step:986/1705 train_time:93230ms step_avg:94.55ms
+step:987/1705 train_time:93324ms step_avg:94.55ms
+step:988/1705 train_time:93419ms step_avg:94.55ms
+step:989/1705 train_time:93512ms step_avg:94.55ms
+step:990/1705 train_time:93607ms step_avg:94.55ms
+step:991/1705 train_time:93702ms step_avg:94.55ms
+step:992/1705 train_time:93796ms step_avg:94.55ms
+step:993/1705 train_time:93889ms step_avg:94.55ms
+step:994/1705 train_time:93984ms step_avg:94.55ms
+step:995/1705 train_time:94078ms step_avg:94.55ms
+step:996/1705 train_time:94172ms step_avg:94.55ms
+step:997/1705 train_time:94266ms step_avg:94.55ms
+step:998/1705 train_time:94360ms step_avg:94.55ms
+step:999/1705 train_time:94455ms step_avg:94.55ms
+step:1000/1705 train_time:94549ms step_avg:94.55ms
+step:1000/1705 val_loss:3.4846 train_time:94644ms step_avg:94.64ms
+step:1001/1705 train_time:94667ms step_avg:94.57ms
+step:1002/1705 train_time:94744ms step_avg:94.55ms
+step:1003/1705 train_time:94842ms step_avg:94.56ms
+step:1004/1705 train_time:94938ms step_avg:94.56ms
+step:1005/1705 train_time:95031ms step_avg:94.56ms
+step:1006/1705 train_time:95124ms step_avg:94.56ms
+step:1007/1705 train_time:95218ms step_avg:94.56ms
+step:1008/1705 train_time:95311ms step_avg:94.55ms
+step:1009/1705 train_time:95405ms step_avg:94.55ms
+step:1010/1705 train_time:95498ms step_avg:94.55ms
+step:1011/1705 train_time:95592ms step_avg:94.55ms
+step:1012/1705 train_time:95687ms step_avg:94.55ms
+step:1013/1705 train_time:95784ms step_avg:94.55ms
+step:1014/1705 train_time:95879ms step_avg:94.56ms
+step:1015/1705 train_time:95974ms step_avg:94.56ms
+step:1016/1705 train_time:96067ms step_avg:94.55ms
+step:1017/1705 train_time:96162ms step_avg:94.55ms
+step:1018/1705 train_time:96256ms step_avg:94.55ms
+step:1019/1705 train_time:96349ms step_avg:94.55ms
+step:1020/1705 train_time:96443ms step_avg:94.55ms
+step:1021/1705 train_time:96537ms step_avg:94.55ms
+step:1022/1705 train_time:96633ms step_avg:94.55ms
+step:1023/1705 train_time:96727ms step_avg:94.55ms
+step:1024/1705 train_time:96821ms step_avg:94.55ms
+step:1025/1705 train_time:96918ms step_avg:94.55ms
+step:1026/1705 train_time:97013ms step_avg:94.55ms
+step:1027/1705 train_time:97106ms step_avg:94.55ms
+step:1028/1705 train_time:97201ms step_avg:94.55ms
+step:1029/1705 train_time:97295ms step_avg:94.55ms
+step:1030/1705 train_time:97388ms step_avg:94.55ms
+step:1031/1705 train_time:97482ms step_avg:94.55ms
+step:1032/1705 train_time:97577ms step_avg:94.55ms
+step:1033/1705 train_time:97671ms step_avg:94.55ms
+step:1034/1705 train_time:97765ms step_avg:94.55ms
+step:1035/1705 train_time:97860ms step_avg:94.55ms
+step:1036/1705 train_time:97956ms step_avg:94.55ms
+step:1037/1705 train_time:98049ms step_avg:94.55ms
+step:1038/1705 train_time:98143ms step_avg:94.55ms
+step:1039/1705 train_time:98238ms step_avg:94.55ms
+step:1040/1705 train_time:98331ms step_avg:94.55ms
+step:1041/1705 train_time:98425ms step_avg:94.55ms
+step:1042/1705 train_time:98519ms step_avg:94.55ms
+step:1043/1705 train_time:98613ms step_avg:94.55ms
+step:1044/1705 train_time:98706ms step_avg:94.55ms
+step:1045/1705 train_time:98800ms step_avg:94.55ms
+step:1046/1705 train_time:98896ms step_avg:94.55ms
+step:1047/1705 train_time:98991ms step_avg:94.55ms
+step:1048/1705 train_time:99085ms step_avg:94.55ms
+step:1049/1705 train_time:99179ms step_avg:94.55ms
+step:1050/1705 train_time:99274ms step_avg:94.55ms
+step:1051/1705 train_time:99367ms step_avg:94.55ms
+step:1052/1705 train_time:99461ms step_avg:94.54ms
+step:1053/1705 train_time:99556ms step_avg:94.54ms
+step:1054/1705 train_time:99651ms step_avg:94.55ms
+step:1055/1705 train_time:99744ms step_avg:94.54ms
+step:1056/1705 train_time:99839ms step_avg:94.54ms
+step:1057/1705 train_time:99935ms step_avg:94.55ms
+step:1058/1705 train_time:100029ms step_avg:94.55ms
+step:1059/1705 train_time:100123ms step_avg:94.54ms
+step:1060/1705 train_time:100219ms step_avg:94.55ms
+step:1061/1705 train_time:100314ms step_avg:94.55ms
+step:1062/1705 train_time:100589ms step_avg:94.72ms
+step:1063/1705 train_time:100735ms step_avg:94.76ms
+step:1064/1705 train_time:100827ms step_avg:94.76ms
+step:1065/1705 train_time:100921ms step_avg:94.76ms
+step:1066/1705 train_time:101014ms step_avg:94.76ms
+step:1067/1705 train_time:101108ms step_avg:94.76ms
+step:1068/1705 train_time:101201ms step_avg:94.76ms
+step:1069/1705 train_time:101294ms step_avg:94.76ms
+step:1070/1705 train_time:101387ms step_avg:94.75ms
+step:1071/1705 train_time:101481ms step_avg:94.75ms
+step:1072/1705 train_time:101580ms step_avg:94.76ms
+step:1073/1705 train_time:101678ms step_avg:94.76ms
+step:1074/1705 train_time:101773ms step_avg:94.76ms
+step:1075/1705 train_time:101867ms step_avg:94.76ms
+step:1076/1705 train_time:101961ms step_avg:94.76ms
+step:1077/1705 train_time:102056ms step_avg:94.76ms
+step:1078/1705 train_time:102149ms step_avg:94.76ms
+step:1079/1705 train_time:102243ms step_avg:94.76ms
+step:1080/1705 train_time:102337ms step_avg:94.76ms
+step:1081/1705 train_time:102430ms step_avg:94.75ms
+step:1082/1705 train_time:102525ms step_avg:94.75ms
+step:1083/1705 train_time:102622ms step_avg:94.76ms
+step:1084/1705 train_time:102720ms step_avg:94.76ms
+step:1085/1705 train_time:102815ms step_avg:94.76ms
+step:1086/1705 train_time:102909ms step_avg:94.76ms
+step:1087/1705 train_time:103004ms step_avg:94.76ms
+step:1088/1705 train_time:103098ms step_avg:94.76ms
+step:1089/1705 train_time:103192ms step_avg:94.76ms
+step:1090/1705 train_time:103285ms step_avg:94.76ms
+step:1091/1705 train_time:103379ms step_avg:94.76ms
+step:1092/1705 train_time:103474ms step_avg:94.76ms
+step:1093/1705 train_time:103568ms step_avg:94.76ms
+step:1094/1705 train_time:103663ms step_avg:94.76ms
+step:1095/1705 train_time:103758ms step_avg:94.76ms
+step:1096/1705 train_time:103853ms step_avg:94.76ms
+step:1097/1705 train_time:103947ms step_avg:94.76ms
+step:1098/1705 train_time:104042ms step_avg:94.76ms
+step:1099/1705 train_time:104136ms step_avg:94.76ms
+step:1100/1705 train_time:104230ms step_avg:94.75ms
+step:1101/1705 train_time:104324ms step_avg:94.75ms
+step:1102/1705 train_time:104419ms step_avg:94.75ms
+step:1103/1705 train_time:104513ms step_avg:94.75ms
+step:1104/1705 train_time:104607ms step_avg:94.75ms
+step:1105/1705 train_time:104701ms step_avg:94.75ms
+step:1106/1705 train_time:104796ms step_avg:94.75ms
+step:1107/1705 train_time:104891ms step_avg:94.75ms
+step:1108/1705 train_time:104984ms step_avg:94.75ms
+step:1109/1705 train_time:105079ms step_avg:94.75ms
+step:1110/1705 train_time:105174ms step_avg:94.75ms
+step:1111/1705 train_time:105268ms step_avg:94.75ms
+step:1112/1705 train_time:105362ms step_avg:94.75ms
+step:1113/1705 train_time:105457ms step_avg:94.75ms
+step:1114/1705 train_time:105551ms step_avg:94.75ms
+step:1115/1705 train_time:105645ms step_avg:94.75ms
+step:1116/1705 train_time:105740ms step_avg:94.75ms
+step:1117/1705 train_time:105834ms step_avg:94.75ms
+step:1118/1705 train_time:105929ms step_avg:94.75ms
+step:1119/1705 train_time:106024ms step_avg:94.75ms
+step:1120/1705 train_time:106118ms step_avg:94.75ms
+step:1121/1705 train_time:106214ms step_avg:94.75ms
+step:1122/1705 train_time:106307ms step_avg:94.75ms
+step:1123/1705 train_time:106401ms step_avg:94.75ms
+step:1124/1705 train_time:106496ms step_avg:94.75ms
+step:1125/1705 train_time:106590ms step_avg:94.75ms
+step:1125/1705 val_loss:3.4377 train_time:106684ms step_avg:94.83ms
+step:1126/1705 train_time:106707ms step_avg:94.77ms
+step:1127/1705 train_time:106786ms step_avg:94.75ms
+step:1128/1705 train_time:106887ms step_avg:94.76ms
+step:1129/1705 train_time:106981ms step_avg:94.76ms
+step:1130/1705 train_time:107075ms step_avg:94.76ms
+step:1131/1705 train_time:107169ms step_avg:94.76ms
+step:1132/1705 train_time:107261ms step_avg:94.75ms
+step:1133/1705 train_time:107355ms step_avg:94.75ms
+step:1134/1705 train_time:107449ms step_avg:94.75ms
+step:1135/1705 train_time:107542ms step_avg:94.75ms
+step:1136/1705 train_time:107636ms step_avg:94.75ms
+step:1137/1705 train_time:107733ms step_avg:94.75ms
+step:1138/1705 train_time:107831ms step_avg:94.75ms
+step:1139/1705 train_time:107926ms step_avg:94.76ms
+step:1140/1705 train_time:108021ms step_avg:94.76ms
+step:1141/1705 train_time:108117ms step_avg:94.76ms
+step:1142/1705 train_time:108211ms step_avg:94.76ms
+step:1143/1705 train_time:108305ms step_avg:94.75ms
+step:1144/1705 train_time:108400ms step_avg:94.75ms
+step:1145/1705 train_time:108494ms step_avg:94.75ms
+step:1146/1705 train_time:108588ms step_avg:94.75ms
+step:1147/1705 train_time:108684ms step_avg:94.76ms
+step:1148/1705 train_time:108782ms step_avg:94.76ms
+step:1149/1705 train_time:108879ms step_avg:94.76ms
+step:1150/1705 train_time:108975ms step_avg:94.76ms
+step:1151/1705 train_time:109070ms step_avg:94.76ms
+step:1152/1705 train_time:109163ms step_avg:94.76ms
+step:1153/1705 train_time:109258ms step_avg:94.76ms
+step:1154/1705 train_time:109353ms step_avg:94.76ms
+step:1155/1705 train_time:109447ms step_avg:94.76ms
+step:1156/1705 train_time:109542ms step_avg:94.76ms
+step:1157/1705 train_time:109637ms step_avg:94.76ms
+step:1158/1705 train_time:109734ms step_avg:94.76ms
+step:1159/1705 train_time:109831ms step_avg:94.76ms
+step:1160/1705 train_time:109927ms step_avg:94.76ms
+step:1161/1705 train_time:110022ms step_avg:94.77ms
+step:1162/1705 train_time:110117ms step_avg:94.76ms
+step:1163/1705 train_time:110212ms step_avg:94.76ms
+step:1164/1705 train_time:110305ms step_avg:94.76ms
+step:1165/1705 train_time:110400ms step_avg:94.76ms
+step:1166/1705 train_time:110495ms step_avg:94.76ms
+step:1167/1705 train_time:110590ms step_avg:94.76ms
+step:1168/1705 train_time:110684ms step_avg:94.76ms
+step:1169/1705 train_time:110781ms step_avg:94.77ms
+step:1170/1705 train_time:110878ms step_avg:94.77ms
+step:1171/1705 train_time:110974ms step_avg:94.77ms
+step:1172/1705 train_time:111069ms step_avg:94.77ms
+step:1173/1705 train_time:111164ms step_avg:94.77ms
+step:1174/1705 train_time:111259ms step_avg:94.77ms
+step:1175/1705 train_time:111354ms step_avg:94.77ms
+step:1176/1705 train_time:111449ms step_avg:94.77ms
+step:1177/1705 train_time:111544ms step_avg:94.77ms
+step:1178/1705 train_time:111639ms step_avg:94.77ms
+step:1179/1705 train_time:111734ms step_avg:94.77ms
+step:1180/1705 train_time:111830ms step_avg:94.77ms
+step:1181/1705 train_time:111925ms step_avg:94.77ms
+step:1182/1705 train_time:112021ms step_avg:94.77ms
+step:1183/1705 train_time:112117ms step_avg:94.77ms
+step:1184/1705 train_time:112212ms step_avg:94.77ms
+step:1185/1705 train_time:112306ms step_avg:94.77ms
+step:1186/1705 train_time:112402ms step_avg:94.77ms
+step:1187/1705 train_time:112496ms step_avg:94.77ms
+step:1188/1705 train_time:112591ms step_avg:94.77ms
+step:1189/1705 train_time:112686ms step_avg:94.77ms
+step:1190/1705 train_time:112782ms step_avg:94.77ms
+step:1191/1705 train_time:112879ms step_avg:94.78ms
+step:1192/1705 train_time:112975ms step_avg:94.78ms
+step:1193/1705 train_time:113070ms step_avg:94.78ms
+step:1194/1705 train_time:113164ms step_avg:94.78ms
+step:1195/1705 train_time:113260ms step_avg:94.78ms
+step:1196/1705 train_time:113355ms step_avg:94.78ms
+step:1197/1705 train_time:113450ms step_avg:94.78ms
+step:1198/1705 train_time:113544ms step_avg:94.78ms
+step:1199/1705 train_time:113640ms step_avg:94.78ms
+step:1200/1705 train_time:113735ms step_avg:94.78ms
+step:1201/1705 train_time:113831ms step_avg:94.78ms
+step:1202/1705 train_time:113925ms step_avg:94.78ms
+step:1203/1705 train_time:114021ms step_avg:94.78ms
+step:1204/1705 train_time:114117ms step_avg:94.78ms
+step:1205/1705 train_time:114213ms step_avg:94.78ms
+step:1206/1705 train_time:114308ms step_avg:94.78ms
+step:1207/1705 train_time:114402ms step_avg:94.78ms
+step:1208/1705 train_time:114498ms step_avg:94.78ms
+step:1209/1705 train_time:114592ms step_avg:94.78ms
+step:1210/1705 train_time:114687ms step_avg:94.78ms
+step:1211/1705 train_time:114782ms step_avg:94.78ms
+step:1212/1705 train_time:114878ms step_avg:94.78ms
+step:1213/1705 train_time:114973ms step_avg:94.78ms
+step:1214/1705 train_time:115067ms step_avg:94.78ms
+step:1215/1705 train_time:115162ms step_avg:94.78ms
+step:1216/1705 train_time:115259ms step_avg:94.79ms
+step:1217/1705 train_time:115355ms step_avg:94.79ms
+step:1218/1705 train_time:115449ms step_avg:94.79ms
+step:1219/1705 train_time:115544ms step_avg:94.79ms
+step:1220/1705 train_time:115639ms step_avg:94.79ms
+step:1221/1705 train_time:115735ms step_avg:94.79ms
+step:1222/1705 train_time:115830ms step_avg:94.79ms
+step:1223/1705 train_time:115924ms step_avg:94.79ms
+step:1224/1705 train_time:116020ms step_avg:94.79ms
+step:1225/1705 train_time:116116ms step_avg:94.79ms
+step:1226/1705 train_time:116212ms step_avg:94.79ms
+step:1227/1705 train_time:116306ms step_avg:94.79ms
+step:1228/1705 train_time:116401ms step_avg:94.79ms
+step:1229/1705 train_time:116496ms step_avg:94.79ms
+step:1230/1705 train_time:116591ms step_avg:94.79ms
+step:1231/1705 train_time:116686ms step_avg:94.79ms
+step:1232/1705 train_time:116782ms step_avg:94.79ms
+step:1233/1705 train_time:116878ms step_avg:94.79ms
+step:1234/1705 train_time:116972ms step_avg:94.79ms
+step:1235/1705 train_time:117067ms step_avg:94.79ms
+step:1236/1705 train_time:117163ms step_avg:94.79ms
+step:1237/1705 train_time:117259ms step_avg:94.79ms
+step:1238/1705 train_time:117354ms step_avg:94.79ms
+step:1239/1705 train_time:117449ms step_avg:94.79ms
+step:1240/1705 train_time:117543ms step_avg:94.79ms
+step:1241/1705 train_time:117638ms step_avg:94.79ms
+step:1242/1705 train_time:117733ms step_avg:94.79ms
+step:1243/1705 train_time:117829ms step_avg:94.79ms
+step:1244/1705 train_time:117923ms step_avg:94.79ms
+step:1245/1705 train_time:118019ms step_avg:94.79ms
+step:1246/1705 train_time:118114ms step_avg:94.79ms
+step:1247/1705 train_time:118209ms step_avg:94.79ms
+step:1248/1705 train_time:118304ms step_avg:94.79ms
+step:1249/1705 train_time:118398ms step_avg:94.79ms
+step:1250/1705 train_time:118494ms step_avg:94.80ms
+step:1250/1705 val_loss:3.3891 train_time:118588ms step_avg:94.87ms
+step:1251/1705 train_time:118612ms step_avg:94.81ms
+step:1252/1705 train_time:118691ms step_avg:94.80ms
+step:1253/1705 train_time:118790ms step_avg:94.80ms
+step:1254/1705 train_time:118886ms step_avg:94.81ms
+step:1255/1705 train_time:118980ms step_avg:94.80ms
+step:1256/1705 train_time:119074ms step_avg:94.80ms
+step:1257/1705 train_time:119168ms step_avg:94.80ms
+step:1258/1705 train_time:119261ms step_avg:94.80ms
+step:1259/1705 train_time:119355ms step_avg:94.80ms
+step:1260/1705 train_time:119449ms step_avg:94.80ms
+step:1261/1705 train_time:119546ms step_avg:94.80ms
+step:1262/1705 train_time:119644ms step_avg:94.81ms
+step:1263/1705 train_time:119741ms step_avg:94.81ms
+step:1264/1705 train_time:119836ms step_avg:94.81ms
+step:1265/1705 train_time:119930ms step_avg:94.81ms
+step:1266/1705 train_time:120025ms step_avg:94.81ms
+step:1267/1705 train_time:120120ms step_avg:94.81ms
+step:1268/1705 train_time:120214ms step_avg:94.81ms
+step:1269/1705 train_time:120307ms step_avg:94.80ms
+step:1270/1705 train_time:120401ms step_avg:94.80ms
+step:1271/1705 train_time:120496ms step_avg:94.80ms
+step:1272/1705 train_time:120591ms step_avg:94.80ms
+step:1273/1705 train_time:120688ms step_avg:94.81ms
+step:1274/1705 train_time:121028ms step_avg:95.00ms
+step:1275/1705 train_time:121097ms step_avg:94.98ms
+step:1276/1705 train_time:121189ms step_avg:94.98ms
+step:1277/1705 train_time:121283ms step_avg:94.98ms
+step:1278/1705 train_time:121377ms step_avg:94.97ms
+step:1279/1705 train_time:121470ms step_avg:94.97ms
+step:1280/1705 train_time:121564ms step_avg:94.97ms
+step:1281/1705 train_time:121657ms step_avg:94.97ms
+step:1282/1705 train_time:121751ms step_avg:94.97ms
+step:1283/1705 train_time:121845ms step_avg:94.97ms
+step:1284/1705 train_time:121942ms step_avg:94.97ms
+step:1285/1705 train_time:122041ms step_avg:94.97ms
+step:1286/1705 train_time:122138ms step_avg:94.97ms
+step:1287/1705 train_time:122233ms step_avg:94.97ms
+step:1288/1705 train_time:122327ms step_avg:94.97ms
+step:1289/1705 train_time:122421ms step_avg:94.97ms
+step:1290/1705 train_time:122516ms step_avg:94.97ms
+step:1291/1705 train_time:122609ms step_avg:94.97ms
+step:1292/1705 train_time:122703ms step_avg:94.97ms
+step:1293/1705 train_time:122797ms step_avg:94.97ms
+step:1294/1705 train_time:122891ms step_avg:94.97ms
+step:1295/1705 train_time:122987ms step_avg:94.97ms
+step:1296/1705 train_time:123084ms step_avg:94.97ms
+step:1297/1705 train_time:123180ms step_avg:94.97ms
+step:1298/1705 train_time:123275ms step_avg:94.97ms
+step:1299/1705 train_time:123371ms step_avg:94.97ms
+step:1300/1705 train_time:123465ms step_avg:94.97ms
+step:1301/1705 train_time:123561ms step_avg:94.97ms
+step:1302/1705 train_time:123654ms step_avg:94.97ms
+step:1303/1705 train_time:123748ms step_avg:94.97ms
+step:1304/1705 train_time:123843ms step_avg:94.97ms
+step:1305/1705 train_time:123938ms step_avg:94.97ms
+step:1306/1705 train_time:124033ms step_avg:94.97ms
+step:1307/1705 train_time:124128ms step_avg:94.97ms
+step:1308/1705 train_time:124224ms step_avg:94.97ms
+step:1309/1705 train_time:124320ms step_avg:94.97ms
+step:1310/1705 train_time:124415ms step_avg:94.97ms
+step:1311/1705 train_time:124509ms step_avg:94.97ms
+step:1312/1705 train_time:124604ms step_avg:94.97ms
+step:1313/1705 train_time:124698ms step_avg:94.97ms
+step:1314/1705 train_time:124792ms step_avg:94.97ms
+step:1315/1705 train_time:124887ms step_avg:94.97ms
+step:1316/1705 train_time:124983ms step_avg:94.97ms
+step:1317/1705 train_time:125078ms step_avg:94.97ms
+step:1318/1705 train_time:125174ms step_avg:94.97ms
+step:1319/1705 train_time:125269ms step_avg:94.97ms
+step:1320/1705 train_time:125364ms step_avg:94.97ms
+step:1321/1705 train_time:125459ms step_avg:94.97ms
+step:1322/1705 train_time:125553ms step_avg:94.97ms
+step:1323/1705 train_time:125647ms step_avg:94.97ms
+step:1324/1705 train_time:125742ms step_avg:94.97ms
+step:1325/1705 train_time:125835ms step_avg:94.97ms
+step:1326/1705 train_time:125929ms step_avg:94.97ms
+step:1327/1705 train_time:126025ms step_avg:94.97ms
+step:1328/1705 train_time:126121ms step_avg:94.97ms
+step:1329/1705 train_time:126215ms step_avg:94.97ms
+step:1330/1705 train_time:126309ms step_avg:94.97ms
+step:1331/1705 train_time:126405ms step_avg:94.97ms
+step:1332/1705 train_time:126500ms step_avg:94.97ms
+step:1333/1705 train_time:126595ms step_avg:94.97ms
+step:1334/1705 train_time:126688ms step_avg:94.97ms
+step:1335/1705 train_time:126784ms step_avg:94.97ms
+step:1336/1705 train_time:126879ms step_avg:94.97ms
+step:1337/1705 train_time:126975ms step_avg:94.97ms
+step:1338/1705 train_time:127069ms step_avg:94.97ms
+step:1339/1705 train_time:127165ms step_avg:94.97ms
+step:1340/1705 train_time:127260ms step_avg:94.97ms
+step:1341/1705 train_time:127355ms step_avg:94.97ms
+step:1342/1705 train_time:127449ms step_avg:94.97ms
+step:1343/1705 train_time:127545ms step_avg:94.97ms
+step:1344/1705 train_time:127639ms step_avg:94.97ms
+step:1345/1705 train_time:127734ms step_avg:94.97ms
+step:1346/1705 train_time:127828ms step_avg:94.97ms
+step:1347/1705 train_time:127923ms step_avg:94.97ms
+step:1348/1705 train_time:128018ms step_avg:94.97ms
+step:1349/1705 train_time:128113ms step_avg:94.97ms
+step:1350/1705 train_time:128207ms step_avg:94.97ms
+step:1351/1705 train_time:128304ms step_avg:94.97ms
+step:1352/1705 train_time:128401ms step_avg:94.97ms
+step:1353/1705 train_time:128496ms step_avg:94.97ms
+step:1354/1705 train_time:128590ms step_avg:94.97ms
+step:1355/1705 train_time:128686ms step_avg:94.97ms
+step:1356/1705 train_time:128781ms step_avg:94.97ms
+step:1357/1705 train_time:128875ms step_avg:94.97ms
+step:1358/1705 train_time:128968ms step_avg:94.97ms
+step:1359/1705 train_time:129063ms step_avg:94.97ms
+step:1360/1705 train_time:129159ms step_avg:94.97ms
+step:1361/1705 train_time:129253ms step_avg:94.97ms
+step:1362/1705 train_time:129348ms step_avg:94.97ms
+step:1363/1705 train_time:129444ms step_avg:94.97ms
+step:1364/1705 train_time:129539ms step_avg:94.97ms
+step:1365/1705 train_time:129633ms step_avg:94.97ms
+step:1366/1705 train_time:129728ms step_avg:94.97ms
+step:1367/1705 train_time:129823ms step_avg:94.97ms
+step:1368/1705 train_time:129918ms step_avg:94.97ms
+step:1369/1705 train_time:130012ms step_avg:94.97ms
+step:1370/1705 train_time:130107ms step_avg:94.97ms
+step:1371/1705 train_time:130203ms step_avg:94.97ms
+step:1372/1705 train_time:130299ms step_avg:94.97ms
+step:1373/1705 train_time:130393ms step_avg:94.97ms
+step:1374/1705 train_time:130488ms step_avg:94.97ms
+step:1375/1705 train_time:130584ms step_avg:94.97ms
+step:1375/1705 val_loss:3.3522 train_time:130680ms step_avg:95.04ms
+step:1376/1705 train_time:130702ms step_avg:94.99ms
+step:1377/1705 train_time:130782ms step_avg:94.98ms
+step:1378/1705 train_time:130883ms step_avg:94.98ms
+step:1379/1705 train_time:130978ms step_avg:94.98ms
+step:1380/1705 train_time:131072ms step_avg:94.98ms
+step:1381/1705 train_time:131166ms step_avg:94.98ms
+step:1382/1705 train_time:131260ms step_avg:94.98ms
+step:1383/1705 train_time:131354ms step_avg:94.98ms
+step:1384/1705 train_time:131448ms step_avg:94.98ms
+step:1385/1705 train_time:131541ms step_avg:94.98ms
+step:1386/1705 train_time:131635ms step_avg:94.97ms
+step:1387/1705 train_time:131732ms step_avg:94.98ms
+step:1388/1705 train_time:131831ms step_avg:94.98ms
+step:1389/1705 train_time:131928ms step_avg:94.98ms
+step:1390/1705 train_time:132024ms step_avg:94.98ms
+step:1391/1705 train_time:132118ms step_avg:94.98ms
+step:1392/1705 train_time:132212ms step_avg:94.98ms
+step:1393/1705 train_time:132307ms step_avg:94.98ms
+step:1394/1705 train_time:132400ms step_avg:94.98ms
+step:1395/1705 train_time:132494ms step_avg:94.98ms
+step:1396/1705 train_time:132588ms step_avg:94.98ms
+step:1397/1705 train_time:132684ms step_avg:94.98ms
+step:1398/1705 train_time:132779ms step_avg:94.98ms
+step:1399/1705 train_time:132874ms step_avg:94.98ms
+step:1400/1705 train_time:132970ms step_avg:94.98ms
+step:1401/1705 train_time:133066ms step_avg:94.98ms
+step:1402/1705 train_time:133162ms step_avg:94.98ms
+step:1403/1705 train_time:133255ms step_avg:94.98ms
+step:1404/1705 train_time:133349ms step_avg:94.98ms
+step:1405/1705 train_time:133445ms step_avg:94.98ms
+step:1406/1705 train_time:133539ms step_avg:94.98ms
+step:1407/1705 train_time:133633ms step_avg:94.98ms
+step:1408/1705 train_time:133729ms step_avg:94.98ms
+step:1409/1705 train_time:133826ms step_avg:94.98ms
+step:1410/1705 train_time:133921ms step_avg:94.98ms
+step:1411/1705 train_time:134016ms step_avg:94.98ms
+step:1412/1705 train_time:134112ms step_avg:94.98ms
+step:1413/1705 train_time:134207ms step_avg:94.98ms
+step:1414/1705 train_time:134301ms step_avg:94.98ms
+step:1415/1705 train_time:134394ms step_avg:94.98ms
+step:1416/1705 train_time:134489ms step_avg:94.98ms
+step:1417/1705 train_time:134584ms step_avg:94.98ms
+step:1418/1705 train_time:134678ms step_avg:94.98ms
+step:1419/1705 train_time:134773ms step_avg:94.98ms
+step:1420/1705 train_time:134869ms step_avg:94.98ms
+step:1421/1705 train_time:134964ms step_avg:94.98ms
+step:1422/1705 train_time:135060ms step_avg:94.98ms
+step:1423/1705 train_time:135155ms step_avg:94.98ms
+step:1424/1705 train_time:135250ms step_avg:94.98ms
+step:1425/1705 train_time:135345ms step_avg:94.98ms
+step:1426/1705 train_time:135439ms step_avg:94.98ms
+step:1427/1705 train_time:135533ms step_avg:94.98ms
+step:1428/1705 train_time:135628ms step_avg:94.98ms
+step:1429/1705 train_time:135723ms step_avg:94.98ms
+step:1430/1705 train_time:135819ms step_avg:94.98ms
+step:1431/1705 train_time:135914ms step_avg:94.98ms
+step:1432/1705 train_time:136010ms step_avg:94.98ms
+step:1433/1705 train_time:136107ms step_avg:94.98ms
+step:1434/1705 train_time:136202ms step_avg:94.98ms
+step:1435/1705 train_time:136296ms step_avg:94.98ms
+step:1436/1705 train_time:136392ms step_avg:94.98ms
+step:1437/1705 train_time:136487ms step_avg:94.98ms
+step:1438/1705 train_time:136581ms step_avg:94.98ms
+step:1439/1705 train_time:136675ms step_avg:94.98ms
+step:1440/1705 train_time:136770ms step_avg:94.98ms
+step:1441/1705 train_time:136865ms step_avg:94.98ms
+step:1442/1705 train_time:136961ms step_avg:94.98ms
+step:1443/1705 train_time:137055ms step_avg:94.98ms
+step:1444/1705 train_time:137151ms step_avg:94.98ms
+step:1445/1705 train_time:137246ms step_avg:94.98ms
+step:1446/1705 train_time:137341ms step_avg:94.98ms
+step:1447/1705 train_time:137436ms step_avg:94.98ms
+step:1448/1705 train_time:137531ms step_avg:94.98ms
+step:1449/1705 train_time:137626ms step_avg:94.98ms
+step:1450/1705 train_time:137721ms step_avg:94.98ms
+step:1451/1705 train_time:137815ms step_avg:94.98ms
+step:1452/1705 train_time:137911ms step_avg:94.98ms
+step:1453/1705 train_time:138005ms step_avg:94.98ms
+step:1454/1705 train_time:138100ms step_avg:94.98ms
+step:1455/1705 train_time:138195ms step_avg:94.98ms
+step:1456/1705 train_time:138291ms step_avg:94.98ms
+step:1457/1705 train_time:138386ms step_avg:94.98ms
+step:1458/1705 train_time:138482ms step_avg:94.98ms
+step:1459/1705 train_time:138576ms step_avg:94.98ms
+step:1460/1705 train_time:138671ms step_avg:94.98ms
+step:1461/1705 train_time:138767ms step_avg:94.98ms
+step:1462/1705 train_time:138862ms step_avg:94.98ms
+step:1463/1705 train_time:138956ms step_avg:94.98ms
+step:1464/1705 train_time:139052ms step_avg:94.98ms
+step:1465/1705 train_time:139147ms step_avg:94.98ms
+step:1466/1705 train_time:139243ms step_avg:94.98ms
+step:1467/1705 train_time:139337ms step_avg:94.98ms
+step:1468/1705 train_time:139433ms step_avg:94.98ms
+step:1469/1705 train_time:139529ms step_avg:94.98ms
+step:1470/1705 train_time:139623ms step_avg:94.98ms
+step:1471/1705 train_time:139717ms step_avg:94.98ms
+step:1472/1705 train_time:139813ms step_avg:94.98ms
+step:1473/1705 train_time:139908ms step_avg:94.98ms
+step:1474/1705 train_time:140003ms step_avg:94.98ms
+step:1475/1705 train_time:140098ms step_avg:94.98ms
+step:1476/1705 train_time:140194ms step_avg:94.98ms
+step:1477/1705 train_time:140289ms step_avg:94.98ms
+step:1478/1705 train_time:140385ms step_avg:94.98ms
+step:1479/1705 train_time:140480ms step_avg:94.98ms
+step:1480/1705 train_time:140575ms step_avg:94.98ms
+step:1481/1705 train_time:140670ms step_avg:94.98ms
+step:1482/1705 train_time:140766ms step_avg:94.98ms
+step:1483/1705 train_time:140861ms step_avg:94.98ms
+step:1484/1705 train_time:140955ms step_avg:94.98ms
+step:1485/1705 train_time:141256ms step_avg:95.12ms
+step:1486/1705 train_time:141332ms step_avg:95.11ms
+step:1487/1705 train_time:141425ms step_avg:95.11ms
+step:1488/1705 train_time:141519ms step_avg:95.11ms
+step:1489/1705 train_time:141612ms step_avg:95.11ms
+step:1490/1705 train_time:141706ms step_avg:95.10ms
+step:1491/1705 train_time:141800ms step_avg:95.10ms
+step:1492/1705 train_time:141894ms step_avg:95.10ms
+step:1493/1705 train_time:141988ms step_avg:95.10ms
+step:1494/1705 train_time:142082ms step_avg:95.10ms
+step:1495/1705 train_time:142181ms step_avg:95.10ms
+step:1496/1705 train_time:142279ms step_avg:95.11ms
+step:1497/1705 train_time:142375ms step_avg:95.11ms
+step:1498/1705 train_time:142470ms step_avg:95.11ms
+step:1499/1705 train_time:142565ms step_avg:95.11ms
+step:1500/1705 train_time:142659ms step_avg:95.11ms
+step:1500/1705 val_loss:3.3196 train_time:142753ms step_avg:95.17ms
+step:1501/1705 train_time:142776ms step_avg:95.12ms
+step:1502/1705 train_time:142854ms step_avg:95.11ms
+step:1503/1705 train_time:142953ms step_avg:95.11ms
+step:1504/1705 train_time:143048ms step_avg:95.11ms
+step:1505/1705 train_time:143141ms step_avg:95.11ms
+step:1506/1705 train_time:143235ms step_avg:95.11ms
+step:1507/1705 train_time:143329ms step_avg:95.11ms
+step:1508/1705 train_time:143423ms step_avg:95.11ms
+step:1509/1705 train_time:143517ms step_avg:95.11ms
+step:1510/1705 train_time:143612ms step_avg:95.11ms
+step:1511/1705 train_time:143705ms step_avg:95.11ms
+step:1512/1705 train_time:143803ms step_avg:95.11ms
+step:1513/1705 train_time:143901ms step_avg:95.11ms
+step:1514/1705 train_time:143999ms step_avg:95.11ms
+step:1515/1705 train_time:144095ms step_avg:95.11ms
+step:1516/1705 train_time:144189ms step_avg:95.11ms
+step:1517/1705 train_time:144283ms step_avg:95.11ms
+step:1518/1705 train_time:144377ms step_avg:95.11ms
+step:1519/1705 train_time:144470ms step_avg:95.11ms
+step:1520/1705 train_time:144564ms step_avg:95.11ms
+step:1521/1705 train_time:144659ms step_avg:95.11ms
+step:1522/1705 train_time:144754ms step_avg:95.11ms
+step:1523/1705 train_time:144851ms step_avg:95.11ms
+step:1524/1705 train_time:144947ms step_avg:95.11ms
+step:1525/1705 train_time:145042ms step_avg:95.11ms
+step:1526/1705 train_time:145138ms step_avg:95.11ms
+step:1527/1705 train_time:145232ms step_avg:95.11ms
+step:1528/1705 train_time:145327ms step_avg:95.11ms
+step:1529/1705 train_time:145421ms step_avg:95.11ms
+step:1530/1705 train_time:145516ms step_avg:95.11ms
+step:1531/1705 train_time:145610ms step_avg:95.11ms
+step:1532/1705 train_time:145704ms step_avg:95.11ms
+step:1533/1705 train_time:145800ms step_avg:95.11ms
+step:1534/1705 train_time:145897ms step_avg:95.11ms
+step:1535/1705 train_time:145993ms step_avg:95.11ms
+step:1536/1705 train_time:146088ms step_avg:95.11ms
+step:1537/1705 train_time:146183ms step_avg:95.11ms
+step:1538/1705 train_time:146279ms step_avg:95.11ms
+step:1539/1705 train_time:146373ms step_avg:95.11ms
+step:1540/1705 train_time:146467ms step_avg:95.11ms
+step:1541/1705 train_time:146562ms step_avg:95.11ms
+step:1542/1705 train_time:146657ms step_avg:95.11ms
+step:1543/1705 train_time:146753ms step_avg:95.11ms
+step:1544/1705 train_time:146847ms step_avg:95.11ms
+step:1545/1705 train_time:146944ms step_avg:95.11ms
+step:1546/1705 train_time:147040ms step_avg:95.11ms
+step:1547/1705 train_time:147136ms step_avg:95.11ms
+step:1548/1705 train_time:147231ms step_avg:95.11ms
+step:1549/1705 train_time:147324ms step_avg:95.11ms
+step:1550/1705 train_time:147419ms step_avg:95.11ms
+step:1551/1705 train_time:147515ms step_avg:95.11ms
+step:1552/1705 train_time:147610ms step_avg:95.11ms
+step:1553/1705 train_time:147704ms step_avg:95.11ms
+step:1554/1705 train_time:147800ms step_avg:95.11ms
+step:1555/1705 train_time:147896ms step_avg:95.11ms
+step:1556/1705 train_time:147991ms step_avg:95.11ms
+step:1557/1705 train_time:148086ms step_avg:95.11ms
+step:1558/1705 train_time:148182ms step_avg:95.11ms
+step:1559/1705 train_time:148277ms step_avg:95.11ms
+step:1560/1705 train_time:148371ms step_avg:95.11ms
+step:1561/1705 train_time:148465ms step_avg:95.11ms
+step:1562/1705 train_time:148561ms step_avg:95.11ms
+step:1563/1705 train_time:148657ms step_avg:95.11ms
+step:1564/1705 train_time:148752ms step_avg:95.11ms
+step:1565/1705 train_time:148846ms step_avg:95.11ms
+step:1566/1705 train_time:148941ms step_avg:95.11ms
+step:1567/1705 train_time:149037ms step_avg:95.11ms
+step:1568/1705 train_time:149132ms step_avg:95.11ms
+step:1569/1705 train_time:149226ms step_avg:95.11ms
+step:1570/1705 train_time:149322ms step_avg:95.11ms
+step:1571/1705 train_time:149417ms step_avg:95.11ms
+step:1572/1705 train_time:149512ms step_avg:95.11ms
+step:1573/1705 train_time:149607ms step_avg:95.11ms +step:1574/1705 train_time:149702ms step_avg:95.11ms +step:1575/1705 train_time:149796ms step_avg:95.11ms +step:1576/1705 train_time:149891ms step_avg:95.11ms +step:1577/1705 train_time:149987ms step_avg:95.11ms +step:1578/1705 train_time:150082ms step_avg:95.11ms +step:1579/1705 train_time:150177ms step_avg:95.11ms +step:1580/1705 train_time:150273ms step_avg:95.11ms +step:1581/1705 train_time:150368ms step_avg:95.11ms +step:1582/1705 train_time:150463ms step_avg:95.11ms +step:1583/1705 train_time:150558ms step_avg:95.11ms +step:1584/1705 train_time:150653ms step_avg:95.11ms +step:1585/1705 train_time:150747ms step_avg:95.11ms +step:1586/1705 train_time:150843ms step_avg:95.11ms +step:1587/1705 train_time:150938ms step_avg:95.11ms +step:1588/1705 train_time:151034ms step_avg:95.11ms +step:1589/1705 train_time:151129ms step_avg:95.11ms +step:1590/1705 train_time:151223ms step_avg:95.11ms +step:1591/1705 train_time:151319ms step_avg:95.11ms +step:1592/1705 train_time:151414ms step_avg:95.11ms +step:1593/1705 train_time:151509ms step_avg:95.11ms +step:1594/1705 train_time:151605ms step_avg:95.11ms +step:1595/1705 train_time:151700ms step_avg:95.11ms +step:1596/1705 train_time:151796ms step_avg:95.11ms +step:1597/1705 train_time:151891ms step_avg:95.11ms +step:1598/1705 train_time:151986ms step_avg:95.11ms +step:1599/1705 train_time:152081ms step_avg:95.11ms +step:1600/1705 train_time:152176ms step_avg:95.11ms +step:1601/1705 train_time:152271ms step_avg:95.11ms +step:1602/1705 train_time:152365ms step_avg:95.11ms +step:1603/1705 train_time:152460ms step_avg:95.11ms +step:1604/1705 train_time:152556ms step_avg:95.11ms +step:1605/1705 train_time:152651ms step_avg:95.11ms +step:1606/1705 train_time:152746ms step_avg:95.11ms +step:1607/1705 train_time:152841ms step_avg:95.11ms +step:1608/1705 train_time:152937ms step_avg:95.11ms +step:1609/1705 train_time:153032ms step_avg:95.11ms +step:1610/1705 train_time:153126ms step_avg:95.11ms +step:1611/1705 train_time:153221ms step_avg:95.11ms +step:1612/1705 train_time:153317ms step_avg:95.11ms +step:1613/1705 train_time:153412ms step_avg:95.11ms +step:1614/1705 train_time:153506ms step_avg:95.11ms +step:1615/1705 train_time:153601ms step_avg:95.11ms +step:1616/1705 train_time:153698ms step_avg:95.11ms +step:1617/1705 train_time:153793ms step_avg:95.11ms +step:1618/1705 train_time:153887ms step_avg:95.11ms +step:1619/1705 train_time:153984ms step_avg:95.11ms +step:1620/1705 train_time:154079ms step_avg:95.11ms +step:1621/1705 train_time:154174ms step_avg:95.11ms +step:1622/1705 train_time:154269ms step_avg:95.11ms +step:1623/1705 train_time:154364ms step_avg:95.11ms +step:1624/1705 train_time:154459ms step_avg:95.11ms +step:1625/1705 train_time:154555ms step_avg:95.11ms +step:1625/1705 val_loss:3.2916 train_time:154650ms step_avg:95.17ms +step:1626/1705 train_time:154673ms step_avg:95.12ms +step:1627/1705 train_time:154751ms step_avg:95.11ms +step:1628/1705 train_time:154853ms step_avg:95.12ms +step:1629/1705 train_time:154949ms step_avg:95.12ms +step:1630/1705 train_time:155046ms step_avg:95.12ms +step:1631/1705 train_time:155141ms step_avg:95.12ms +step:1632/1705 train_time:155234ms step_avg:95.12ms +step:1633/1705 train_time:155329ms step_avg:95.12ms +step:1634/1705 train_time:155424ms step_avg:95.12ms +step:1635/1705 train_time:155517ms step_avg:95.12ms +step:1636/1705 train_time:155612ms step_avg:95.12ms +step:1637/1705 train_time:155710ms step_avg:95.12ms +step:1638/1705 train_time:155809ms 
step_avg:95.12ms +step:1639/1705 train_time:155906ms step_avg:95.12ms +step:1640/1705 train_time:156003ms step_avg:95.12ms +step:1641/1705 train_time:156099ms step_avg:95.12ms +step:1642/1705 train_time:156193ms step_avg:95.12ms +step:1643/1705 train_time:156288ms step_avg:95.12ms +step:1644/1705 train_time:156382ms step_avg:95.12ms +step:1645/1705 train_time:156476ms step_avg:95.12ms +step:1646/1705 train_time:156570ms step_avg:95.12ms +step:1647/1705 train_time:156666ms step_avg:95.12ms +step:1648/1705 train_time:156764ms step_avg:95.12ms +step:1649/1705 train_time:156861ms step_avg:95.12ms +step:1650/1705 train_time:156956ms step_avg:95.12ms +step:1651/1705 train_time:157053ms step_avg:95.13ms +step:1652/1705 train_time:157148ms step_avg:95.13ms +step:1653/1705 train_time:157243ms step_avg:95.13ms +step:1654/1705 train_time:157337ms step_avg:95.13ms +step:1655/1705 train_time:157431ms step_avg:95.12ms +step:1656/1705 train_time:157526ms step_avg:95.12ms +step:1657/1705 train_time:157621ms step_avg:95.12ms +step:1658/1705 train_time:157716ms step_avg:95.12ms +step:1659/1705 train_time:157812ms step_avg:95.13ms +step:1660/1705 train_time:157908ms step_avg:95.13ms +step:1661/1705 train_time:158004ms step_avg:95.13ms +step:1662/1705 train_time:158100ms step_avg:95.13ms +step:1663/1705 train_time:158194ms step_avg:95.13ms +step:1664/1705 train_time:158289ms step_avg:95.13ms +step:1665/1705 train_time:158384ms step_avg:95.13ms +step:1666/1705 train_time:158479ms step_avg:95.13ms +step:1667/1705 train_time:158573ms step_avg:95.12ms +step:1668/1705 train_time:158669ms step_avg:95.13ms +step:1669/1705 train_time:158765ms step_avg:95.13ms +step:1670/1705 train_time:158860ms step_avg:95.13ms +step:1671/1705 train_time:158956ms step_avg:95.13ms +step:1672/1705 train_time:159051ms step_avg:95.13ms +step:1673/1705 train_time:159147ms step_avg:95.13ms +step:1674/1705 train_time:159242ms step_avg:95.13ms +step:1675/1705 train_time:159337ms step_avg:95.13ms +step:1676/1705 train_time:159431ms step_avg:95.13ms +step:1677/1705 train_time:159527ms step_avg:95.13ms +step:1678/1705 train_time:159621ms step_avg:95.13ms +step:1679/1705 train_time:159715ms step_avg:95.13ms +step:1680/1705 train_time:159811ms step_avg:95.13ms +step:1681/1705 train_time:159907ms step_avg:95.13ms +step:1682/1705 train_time:160005ms step_avg:95.13ms +step:1683/1705 train_time:160101ms step_avg:95.13ms +step:1684/1705 train_time:160195ms step_avg:95.13ms +step:1685/1705 train_time:160291ms step_avg:95.13ms +step:1686/1705 train_time:160386ms step_avg:95.13ms +step:1687/1705 train_time:160481ms step_avg:95.13ms +step:1688/1705 train_time:160576ms step_avg:95.13ms +step:1689/1705 train_time:160670ms step_avg:95.13ms +step:1690/1705 train_time:160767ms step_avg:95.13ms +step:1691/1705 train_time:160862ms step_avg:95.13ms +step:1692/1705 train_time:160957ms step_avg:95.13ms +step:1693/1705 train_time:161053ms step_avg:95.13ms +step:1694/1705 train_time:161149ms step_avg:95.13ms +step:1695/1705 train_time:161244ms step_avg:95.13ms +step:1696/1705 train_time:161339ms step_avg:95.13ms +step:1697/1705 train_time:161435ms step_avg:95.13ms +step:1698/1705 train_time:161785ms step_avg:95.28ms +step:1699/1705 train_time:161876ms step_avg:95.28ms +step:1700/1705 train_time:161969ms step_avg:95.28ms +step:1701/1705 train_time:162063ms step_avg:95.28ms +step:1702/1705 train_time:162156ms step_avg:95.27ms +step:1703/1705 train_time:162251ms step_avg:95.27ms +step:1704/1705 train_time:162345ms step_avg:95.27ms +step:1705/1705 train_time:162440ms 
step_avg:95.27ms +step:1705/1705 val_loss:3.2776 train_time:162534ms step_avg:95.33ms +peak memory allocated: 33992 MiB reserved: 49376 MiB diff --git a/records/090525_SkipMLPBlocks/70af20aa-f602-4cc1-85e9-430a1664f62e.txt b/records/090525_SkipMLPBlocks/70af20aa-f602-4cc1-85e9-430a1664f62e.txt new file mode 100644 index 000000000..0bbc9fad7 --- /dev/null +++ b/records/090525_SkipMLPBlocks/70af20aa-f602-4cc1-85e9-430a1664f62e.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, 
grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + 
tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, 
mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
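+
+    In sketch form, the update that step() below applies to each 2D parameter p
+    with gradient g is (ignoring the cross-rank sharding and the per-parameter
+    lr_mul/wd_mul multipliers):
+
+        buf = momentum * buf + (1 - momentum) * g          # SGD-momentum buffer
+        g   = momentum * buf + (1 - momentum) * g          # Nesterov-style blend
+        p   = (1 - lr * wd) * p - lr * max(1, rows / cols)**0.5 * NS5(g)
+
+    where NS5 is the five-iteration Newton-Schulz orthogonalization defined above.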
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
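+        # e.g. next_multiple_of_n(50257, n=128) == 50304 == 393 * 128, the padding
+        # applied to vocab_size at the top of this __init__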
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of position {cur}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. 
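+                # (BOSFinder.next_batch raises StopIteration once fewer BOS-started
+                # documents remain in the shard than the batch needs.)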
+ tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world_size * grad_accum_steps" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1705 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc.
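+
+# Worked example of the batch accounting above (the default 8-GPU configuration):
+# train_batch_size = 2048 * 24 * 8 = 393,216 tokens per optimizer step; with
+# world_size == 8 we get grad_accum_steps == 8 // 8 == 1, so each rank consumes
+# 393,216 / 8 = 49,152 training tokens per step (plus one token for the shifted
+# targets), packed into BOS-aligned sequences of at most
+# train_max_seq_len = 128 * 16 = 2,048 tokens.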
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
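+
+# Note on the schedules defined above, with the default num_iterations == 1705 and
+# cooldown_frac == 0.45: get_lr holds the multiplier at 1.0 until step/1705 reaches
+# 1 - 0.45 = 0.55 (around step 938), then anneals linearly down to 0.1 at the final
+# step; get_ws advances the attention window through (3, 7, 11) blocks, one value
+# per third of training.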
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:01:46 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 130W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 45C P0 128W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 44C P0 131W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 123W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 79057 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 79058 C /usr/bin/python3 610MiB | +| 0 N/A N/A 79059 C /usr/bin/python3 610MiB | +| 0 N/A N/A 79060 C /usr/bin/python3 610MiB | +| 0 N/A N/A 79061 C /usr/bin/python3 610MiB | +| 0 N/A N/A 79062 C /usr/bin/python3 610MiB | +| 0 N/A N/A 79063 C /usr/bin/python3 610MiB | +| 0 N/A N/A 79064 C /usr/bin/python3 610MiB | +| 1 N/A N/A 79058 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 79059 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 79060 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 79061 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 79062 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 79063 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 79064 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1705 train_time:399ms step_avg:398.52ms +step:2/1705 train_time:418ms step_avg:209.22ms +step:3/1705 train_time:488ms step_avg:162.52ms +step:4/1705 train_time:579ms step_avg:144.70ms +step:5/1705 
train_time:670ms step_avg:134.07ms +step:6/1705 train_time:763ms step_avg:127.11ms +step:7/1705 train_time:855ms step_avg:122.10ms +step:8/1705 train_time:948ms step_avg:118.47ms +step:9/1705 train_time:1040ms step_avg:115.56ms +step:10/1705 train_time:1133ms step_avg:113.27ms +step:11/1705 train_time:1226ms step_avg:111.42ms +step:12/1705 train_time:1320ms step_avg:110.01ms +step:13/1705 train_time:1415ms step_avg:108.87ms +step:14/1705 train_time:1509ms step_avg:107.77ms +step:15/1705 train_time:1603ms step_avg:106.85ms +step:16/1705 train_time:1696ms step_avg:105.97ms +step:17/1705 train_time:1788ms step_avg:105.16ms +step:18/1705 train_time:1880ms step_avg:104.47ms +step:19/1705 train_time:1973ms step_avg:103.85ms +step:20/1705 train_time:2066ms step_avg:103.28ms +step:21/1705 train_time:2158ms step_avg:102.78ms +step:22/1705 train_time:2251ms step_avg:102.31ms +step:23/1705 train_time:2346ms step_avg:101.98ms +step:24/1705 train_time:2440ms step_avg:101.66ms +step:25/1705 train_time:2534ms step_avg:101.37ms +step:26/1705 train_time:2628ms step_avg:101.07ms +step:27/1705 train_time:2721ms step_avg:100.79ms +step:28/1705 train_time:2814ms step_avg:100.50ms +step:29/1705 train_time:2907ms step_avg:100.25ms +step:30/1705 train_time:3000ms step_avg:100.01ms +step:31/1705 train_time:3093ms step_avg:99.77ms +step:32/1705 train_time:3186ms step_avg:99.55ms +step:33/1705 train_time:3279ms step_avg:99.37ms +step:34/1705 train_time:3373ms step_avg:99.19ms +step:35/1705 train_time:3467ms step_avg:99.05ms +step:36/1705 train_time:3561ms step_avg:98.92ms +step:37/1705 train_time:3654ms step_avg:98.77ms +step:38/1705 train_time:3748ms step_avg:98.63ms +step:39/1705 train_time:3842ms step_avg:98.50ms +step:40/1705 train_time:3935ms step_avg:98.37ms +step:41/1705 train_time:4028ms step_avg:98.23ms +step:42/1705 train_time:4121ms step_avg:98.12ms +step:43/1705 train_time:4213ms step_avg:97.98ms +step:44/1705 train_time:4307ms step_avg:97.88ms +step:45/1705 train_time:4400ms step_avg:97.77ms +step:46/1705 train_time:4493ms step_avg:97.67ms +step:47/1705 train_time:4586ms step_avg:97.58ms +step:48/1705 train_time:4680ms step_avg:97.49ms +step:49/1705 train_time:4773ms step_avg:97.41ms +step:50/1705 train_time:4866ms step_avg:97.33ms +step:51/1705 train_time:4960ms step_avg:97.25ms +step:52/1705 train_time:5052ms step_avg:97.15ms +step:53/1705 train_time:5146ms step_avg:97.09ms +step:54/1705 train_time:5239ms step_avg:97.02ms +step:55/1705 train_time:5332ms step_avg:96.94ms +step:56/1705 train_time:5426ms step_avg:96.89ms +step:57/1705 train_time:5519ms step_avg:96.82ms +step:58/1705 train_time:5613ms step_avg:96.77ms +step:59/1705 train_time:5706ms step_avg:96.71ms +step:60/1705 train_time:5799ms step_avg:96.66ms +step:61/1705 train_time:5892ms step_avg:96.59ms +step:62/1705 train_time:5985ms step_avg:96.53ms +step:63/1705 train_time:6078ms step_avg:96.48ms +step:64/1705 train_time:6171ms step_avg:96.42ms +step:65/1705 train_time:6264ms step_avg:96.37ms +step:66/1705 train_time:6358ms step_avg:96.33ms +step:67/1705 train_time:6450ms step_avg:96.27ms +step:68/1705 train_time:6544ms step_avg:96.24ms +step:69/1705 train_time:6637ms step_avg:96.19ms +step:70/1705 train_time:6731ms step_avg:96.15ms +step:71/1705 train_time:6825ms step_avg:96.12ms +step:72/1705 train_time:6917ms step_avg:96.07ms +step:73/1705 train_time:7010ms step_avg:96.02ms +step:74/1705 train_time:7104ms step_avg:96.00ms +step:75/1705 train_time:7197ms step_avg:95.96ms +step:76/1705 train_time:7290ms step_avg:95.92ms +step:77/1705 
train_time:7383ms step_avg:95.89ms +step:78/1705 train_time:7477ms step_avg:95.85ms +step:79/1705 train_time:7570ms step_avg:95.82ms +step:80/1705 train_time:7663ms step_avg:95.79ms +step:81/1705 train_time:7757ms step_avg:95.76ms +step:82/1705 train_time:7849ms step_avg:95.72ms +step:83/1705 train_time:7943ms step_avg:95.70ms +step:84/1705 train_time:8036ms step_avg:95.66ms +step:85/1705 train_time:8129ms step_avg:95.64ms +step:86/1705 train_time:8223ms step_avg:95.62ms +step:87/1705 train_time:8317ms step_avg:95.60ms +step:88/1705 train_time:8410ms step_avg:95.56ms +step:89/1705 train_time:8503ms step_avg:95.54ms +step:90/1705 train_time:8596ms step_avg:95.51ms +step:91/1705 train_time:8689ms step_avg:95.49ms +step:92/1705 train_time:8783ms step_avg:95.47ms +step:93/1705 train_time:8876ms step_avg:95.44ms +step:94/1705 train_time:8969ms step_avg:95.41ms +step:95/1705 train_time:9063ms step_avg:95.40ms +step:96/1705 train_time:9155ms step_avg:95.37ms +step:97/1705 train_time:9249ms step_avg:95.35ms +step:98/1705 train_time:9343ms step_avg:95.33ms +step:99/1705 train_time:9437ms step_avg:95.32ms +step:100/1705 train_time:9529ms step_avg:95.29ms +step:101/1705 train_time:9622ms step_avg:95.27ms +step:102/1705 train_time:9715ms step_avg:95.24ms +step:103/1705 train_time:9807ms step_avg:95.22ms +step:104/1705 train_time:9901ms step_avg:95.20ms +step:105/1705 train_time:9993ms step_avg:95.18ms +step:106/1705 train_time:10086ms step_avg:95.15ms +step:107/1705 train_time:10179ms step_avg:95.13ms +step:108/1705 train_time:10272ms step_avg:95.11ms +step:109/1705 train_time:10366ms step_avg:95.10ms +step:110/1705 train_time:10458ms step_avg:95.07ms +step:111/1705 train_time:10551ms step_avg:95.05ms +step:112/1705 train_time:10645ms step_avg:95.04ms +step:113/1705 train_time:10737ms step_avg:95.02ms +step:114/1705 train_time:10830ms step_avg:95.00ms +step:115/1705 train_time:10924ms step_avg:94.99ms +step:116/1705 train_time:11017ms step_avg:94.97ms +step:117/1705 train_time:11109ms step_avg:94.95ms +step:118/1705 train_time:11203ms step_avg:94.94ms +step:119/1705 train_time:11296ms step_avg:94.92ms +step:120/1705 train_time:11388ms step_avg:94.90ms +step:121/1705 train_time:11481ms step_avg:94.89ms +step:122/1705 train_time:11574ms step_avg:94.87ms +step:123/1705 train_time:11666ms step_avg:94.85ms +step:124/1705 train_time:11759ms step_avg:94.83ms +step:125/1705 train_time:11853ms step_avg:94.83ms +step:125/1705 val_loss:4.2975 train_time:11947ms step_avg:95.57ms +step:126/1705 train_time:11976ms step_avg:95.05ms +step:127/1705 train_time:12046ms step_avg:94.85ms +step:128/1705 train_time:12150ms step_avg:94.92ms +step:129/1705 train_time:12243ms step_avg:94.90ms +step:130/1705 train_time:12335ms step_avg:94.88ms +step:131/1705 train_time:12427ms step_avg:94.86ms +step:132/1705 train_time:12519ms step_avg:94.84ms +step:133/1705 train_time:12611ms step_avg:94.82ms +step:134/1705 train_time:12703ms step_avg:94.80ms +step:135/1705 train_time:12795ms step_avg:94.78ms +step:136/1705 train_time:12887ms step_avg:94.76ms +step:137/1705 train_time:12979ms step_avg:94.74ms +step:138/1705 train_time:13075ms step_avg:94.75ms +step:139/1705 train_time:13171ms step_avg:94.76ms +step:140/1705 train_time:13265ms step_avg:94.75ms +step:141/1705 train_time:13358ms step_avg:94.74ms +step:142/1705 train_time:13451ms step_avg:94.73ms +step:143/1705 train_time:13543ms step_avg:94.71ms +step:144/1705 train_time:13635ms step_avg:94.69ms +step:145/1705 train_time:13728ms step_avg:94.67ms +step:146/1705 train_time:13819ms 
step_avg:94.65ms +step:147/1705 train_time:13912ms step_avg:94.64ms +step:148/1705 train_time:14005ms step_avg:94.63ms +step:149/1705 train_time:14099ms step_avg:94.62ms +step:150/1705 train_time:14193ms step_avg:94.62ms +step:151/1705 train_time:14286ms step_avg:94.61ms +step:152/1705 train_time:14379ms step_avg:94.60ms +step:153/1705 train_time:14472ms step_avg:94.59ms +step:154/1705 train_time:14566ms step_avg:94.58ms +step:155/1705 train_time:14658ms step_avg:94.57ms +step:156/1705 train_time:14751ms step_avg:94.56ms +step:157/1705 train_time:14843ms step_avg:94.54ms +step:158/1705 train_time:14936ms step_avg:94.53ms +step:159/1705 train_time:15028ms step_avg:94.52ms +step:160/1705 train_time:15121ms step_avg:94.51ms +step:161/1705 train_time:15214ms step_avg:94.50ms +step:162/1705 train_time:15308ms step_avg:94.50ms +step:163/1705 train_time:15402ms step_avg:94.49ms +step:164/1705 train_time:15495ms step_avg:94.48ms +step:165/1705 train_time:15587ms step_avg:94.47ms +step:166/1705 train_time:15679ms step_avg:94.45ms +step:167/1705 train_time:15772ms step_avg:94.44ms +step:168/1705 train_time:15864ms step_avg:94.43ms +step:169/1705 train_time:15956ms step_avg:94.42ms +step:170/1705 train_time:16049ms step_avg:94.41ms +step:171/1705 train_time:16143ms step_avg:94.40ms +step:172/1705 train_time:16237ms step_avg:94.40ms +step:173/1705 train_time:16330ms step_avg:94.39ms +step:174/1705 train_time:16422ms step_avg:94.38ms +step:175/1705 train_time:16515ms step_avg:94.37ms +step:176/1705 train_time:16608ms step_avg:94.37ms +step:177/1705 train_time:16701ms step_avg:94.35ms +step:178/1705 train_time:16794ms step_avg:94.35ms +step:179/1705 train_time:16886ms step_avg:94.34ms +step:180/1705 train_time:16978ms step_avg:94.32ms +step:181/1705 train_time:17072ms step_avg:94.32ms +step:182/1705 train_time:17164ms step_avg:94.31ms +step:183/1705 train_time:17257ms step_avg:94.30ms +step:184/1705 train_time:17351ms step_avg:94.30ms +step:185/1705 train_time:17444ms step_avg:94.29ms +step:186/1705 train_time:17536ms step_avg:94.28ms +step:187/1705 train_time:17629ms step_avg:94.27ms +step:188/1705 train_time:17722ms step_avg:94.26ms +step:189/1705 train_time:17815ms step_avg:94.26ms +step:190/1705 train_time:17907ms step_avg:94.25ms +step:191/1705 train_time:18000ms step_avg:94.24ms +step:192/1705 train_time:18093ms step_avg:94.23ms +step:193/1705 train_time:18186ms step_avg:94.23ms +step:194/1705 train_time:18279ms step_avg:94.22ms +step:195/1705 train_time:18372ms step_avg:94.22ms +step:196/1705 train_time:18465ms step_avg:94.21ms +step:197/1705 train_time:18558ms step_avg:94.20ms +step:198/1705 train_time:18650ms step_avg:94.19ms +step:199/1705 train_time:18742ms step_avg:94.18ms +step:200/1705 train_time:18836ms step_avg:94.18ms +step:201/1705 train_time:18929ms step_avg:94.17ms +step:202/1705 train_time:19021ms step_avg:94.16ms +step:203/1705 train_time:19114ms step_avg:94.16ms +step:204/1705 train_time:19207ms step_avg:94.15ms +step:205/1705 train_time:19300ms step_avg:94.15ms +step:206/1705 train_time:19393ms step_avg:94.14ms +step:207/1705 train_time:19485ms step_avg:94.13ms +step:208/1705 train_time:19578ms step_avg:94.13ms +step:209/1705 train_time:19671ms step_avg:94.12ms +step:210/1705 train_time:19764ms step_avg:94.11ms +step:211/1705 train_time:19857ms step_avg:94.11ms +step:212/1705 train_time:19951ms step_avg:94.11ms +step:213/1705 train_time:20230ms step_avg:94.98ms +step:214/1705 train_time:20369ms step_avg:95.18ms +step:215/1705 train_time:20460ms step_avg:95.16ms +step:216/1705 
train_time:20552ms step_avg:95.15ms +step:217/1705 train_time:20644ms step_avg:95.13ms +step:218/1705 train_time:20736ms step_avg:95.12ms +step:219/1705 train_time:20827ms step_avg:95.10ms +step:220/1705 train_time:20919ms step_avg:95.09ms +step:221/1705 train_time:21011ms step_avg:95.07ms +step:222/1705 train_time:21103ms step_avg:95.06ms +step:223/1705 train_time:21196ms step_avg:95.05ms +step:224/1705 train_time:21292ms step_avg:95.06ms +step:225/1705 train_time:21389ms step_avg:95.06ms +step:226/1705 train_time:21482ms step_avg:95.05ms +step:227/1705 train_time:21574ms step_avg:95.04ms +step:228/1705 train_time:21666ms step_avg:95.03ms +step:229/1705 train_time:21758ms step_avg:95.01ms +step:230/1705 train_time:21851ms step_avg:95.00ms +step:231/1705 train_time:21943ms step_avg:94.99ms +step:232/1705 train_time:22035ms step_avg:94.98ms +step:233/1705 train_time:22127ms step_avg:94.96ms +step:234/1705 train_time:22219ms step_avg:94.95ms +step:235/1705 train_time:22313ms step_avg:94.95ms +step:236/1705 train_time:22407ms step_avg:94.94ms +step:237/1705 train_time:22500ms step_avg:94.94ms +step:238/1705 train_time:22593ms step_avg:94.93ms +step:239/1705 train_time:22685ms step_avg:94.92ms +step:240/1705 train_time:22777ms step_avg:94.90ms +step:241/1705 train_time:22870ms step_avg:94.89ms +step:242/1705 train_time:22961ms step_avg:94.88ms +step:243/1705 train_time:23054ms step_avg:94.87ms +step:244/1705 train_time:23146ms step_avg:94.86ms +step:245/1705 train_time:23238ms step_avg:94.85ms +step:246/1705 train_time:23332ms step_avg:94.84ms +step:247/1705 train_time:23425ms step_avg:94.84ms +step:248/1705 train_time:23519ms step_avg:94.83ms +step:249/1705 train_time:23612ms step_avg:94.83ms +step:250/1705 train_time:23705ms step_avg:94.82ms +step:250/1705 val_loss:3.9686 train_time:23798ms step_avg:95.19ms +step:251/1705 train_time:23819ms step_avg:94.90ms +step:252/1705 train_time:23896ms step_avg:94.82ms +step:253/1705 train_time:23995ms step_avg:94.84ms +step:254/1705 train_time:24089ms step_avg:94.84ms +step:255/1705 train_time:24181ms step_avg:94.83ms +step:256/1705 train_time:24273ms step_avg:94.82ms +step:257/1705 train_time:24365ms step_avg:94.80ms +step:258/1705 train_time:24457ms step_avg:94.79ms +step:259/1705 train_time:24549ms step_avg:94.78ms +step:260/1705 train_time:24641ms step_avg:94.77ms +step:261/1705 train_time:24733ms step_avg:94.76ms +step:262/1705 train_time:24827ms step_avg:94.76ms +step:263/1705 train_time:24921ms step_avg:94.76ms +step:264/1705 train_time:25016ms step_avg:94.76ms +step:265/1705 train_time:25110ms step_avg:94.76ms +step:266/1705 train_time:25203ms step_avg:94.75ms +step:267/1705 train_time:25295ms step_avg:94.74ms +step:268/1705 train_time:25388ms step_avg:94.73ms +step:269/1705 train_time:25480ms step_avg:94.72ms +step:270/1705 train_time:25573ms step_avg:94.71ms +step:271/1705 train_time:25664ms step_avg:94.70ms +step:272/1705 train_time:25758ms step_avg:94.70ms +step:273/1705 train_time:25852ms step_avg:94.69ms +step:274/1705 train_time:25945ms step_avg:94.69ms +step:275/1705 train_time:26040ms step_avg:94.69ms +step:276/1705 train_time:26134ms step_avg:94.69ms +step:277/1705 train_time:26226ms step_avg:94.68ms +step:278/1705 train_time:26319ms step_avg:94.67ms +step:279/1705 train_time:26412ms step_avg:94.67ms +step:280/1705 train_time:26504ms step_avg:94.66ms +step:281/1705 train_time:26596ms step_avg:94.65ms +step:282/1705 train_time:26689ms step_avg:94.64ms +step:283/1705 train_time:26782ms step_avg:94.64ms +step:284/1705 train_time:26875ms 
step_avg:94.63ms +step:285/1705 train_time:26969ms step_avg:94.63ms +step:286/1705 train_time:27064ms step_avg:94.63ms +step:287/1705 train_time:27157ms step_avg:94.62ms +step:288/1705 train_time:27251ms step_avg:94.62ms +step:289/1705 train_time:27344ms step_avg:94.62ms +step:290/1705 train_time:27437ms step_avg:94.61ms +step:291/1705 train_time:27530ms step_avg:94.61ms +step:292/1705 train_time:27623ms step_avg:94.60ms +step:293/1705 train_time:27715ms step_avg:94.59ms +step:294/1705 train_time:27808ms step_avg:94.58ms +step:295/1705 train_time:27901ms step_avg:94.58ms +step:296/1705 train_time:27994ms step_avg:94.57ms +step:297/1705 train_time:28088ms step_avg:94.57ms +step:298/1705 train_time:28180ms step_avg:94.57ms +step:299/1705 train_time:28274ms step_avg:94.56ms +step:300/1705 train_time:28367ms step_avg:94.56ms +step:301/1705 train_time:28459ms step_avg:94.55ms +step:302/1705 train_time:28552ms step_avg:94.54ms +step:303/1705 train_time:28644ms step_avg:94.54ms +step:304/1705 train_time:28737ms step_avg:94.53ms +step:305/1705 train_time:28830ms step_avg:94.52ms +step:306/1705 train_time:28923ms step_avg:94.52ms +step:307/1705 train_time:29016ms step_avg:94.51ms +step:308/1705 train_time:29110ms step_avg:94.51ms +step:309/1705 train_time:29202ms step_avg:94.51ms +step:310/1705 train_time:29295ms step_avg:94.50ms +step:311/1705 train_time:29388ms step_avg:94.50ms +step:312/1705 train_time:29481ms step_avg:94.49ms +step:313/1705 train_time:29574ms step_avg:94.48ms +step:314/1705 train_time:29667ms step_avg:94.48ms +step:315/1705 train_time:29759ms step_avg:94.47ms +step:316/1705 train_time:29853ms step_avg:94.47ms +step:317/1705 train_time:29945ms step_avg:94.46ms +step:318/1705 train_time:30038ms step_avg:94.46ms +step:319/1705 train_time:30132ms step_avg:94.46ms +step:320/1705 train_time:30225ms step_avg:94.45ms +step:321/1705 train_time:30318ms step_avg:94.45ms +step:322/1705 train_time:30411ms step_avg:94.44ms +step:323/1705 train_time:30504ms step_avg:94.44ms +step:324/1705 train_time:30597ms step_avg:94.43ms +step:325/1705 train_time:30689ms step_avg:94.43ms +step:326/1705 train_time:30783ms step_avg:94.42ms +step:327/1705 train_time:30875ms step_avg:94.42ms +step:328/1705 train_time:30968ms step_avg:94.41ms +step:329/1705 train_time:31061ms step_avg:94.41ms +step:330/1705 train_time:31154ms step_avg:94.40ms +step:331/1705 train_time:31247ms step_avg:94.40ms +step:332/1705 train_time:31340ms step_avg:94.40ms +step:333/1705 train_time:31433ms step_avg:94.39ms +step:334/1705 train_time:31526ms step_avg:94.39ms +step:335/1705 train_time:31618ms step_avg:94.38ms +step:336/1705 train_time:31711ms step_avg:94.38ms +step:337/1705 train_time:31803ms step_avg:94.37ms +step:338/1705 train_time:31896ms step_avg:94.37ms +step:339/1705 train_time:31989ms step_avg:94.36ms +step:340/1705 train_time:32082ms step_avg:94.36ms +step:341/1705 train_time:32175ms step_avg:94.36ms +step:342/1705 train_time:32268ms step_avg:94.35ms +step:343/1705 train_time:32361ms step_avg:94.35ms +step:344/1705 train_time:32454ms step_avg:94.34ms +step:345/1705 train_time:32546ms step_avg:94.34ms +step:346/1705 train_time:32640ms step_avg:94.33ms +step:347/1705 train_time:32733ms step_avg:94.33ms +step:348/1705 train_time:32825ms step_avg:94.33ms +step:349/1705 train_time:32918ms step_avg:94.32ms +step:350/1705 train_time:33011ms step_avg:94.32ms +step:351/1705 train_time:33104ms step_avg:94.31ms +step:352/1705 train_time:33197ms step_avg:94.31ms +step:353/1705 train_time:33291ms step_avg:94.31ms +step:354/1705 
train_time:33384ms step_avg:94.30ms +step:355/1705 train_time:33477ms step_avg:94.30ms +step:356/1705 train_time:33570ms step_avg:94.30ms +step:357/1705 train_time:33663ms step_avg:94.29ms +step:358/1705 train_time:33756ms step_avg:94.29ms +step:359/1705 train_time:33850ms step_avg:94.29ms +step:360/1705 train_time:33943ms step_avg:94.29ms +step:361/1705 train_time:34036ms step_avg:94.28ms +step:362/1705 train_time:34129ms step_avg:94.28ms +step:363/1705 train_time:34222ms step_avg:94.28ms +step:364/1705 train_time:34315ms step_avg:94.27ms +step:365/1705 train_time:34408ms step_avg:94.27ms +step:366/1705 train_time:34501ms step_avg:94.27ms +step:367/1705 train_time:34594ms step_avg:94.26ms +step:368/1705 train_time:34687ms step_avg:94.26ms +step:369/1705 train_time:34780ms step_avg:94.25ms +step:370/1705 train_time:34873ms step_avg:94.25ms +step:371/1705 train_time:34966ms step_avg:94.25ms +step:372/1705 train_time:35059ms step_avg:94.24ms +step:373/1705 train_time:35152ms step_avg:94.24ms +step:374/1705 train_time:35245ms step_avg:94.24ms +step:375/1705 train_time:35339ms step_avg:94.24ms +step:375/1705 val_loss:3.8161 train_time:35433ms step_avg:94.49ms +step:376/1705 train_time:35453ms step_avg:94.29ms +step:377/1705 train_time:35530ms step_avg:94.24ms +step:378/1705 train_time:35628ms step_avg:94.25ms +step:379/1705 train_time:35722ms step_avg:94.25ms +step:380/1705 train_time:35814ms step_avg:94.25ms +step:381/1705 train_time:35906ms step_avg:94.24ms +step:382/1705 train_time:35998ms step_avg:94.24ms +step:383/1705 train_time:36091ms step_avg:94.23ms +step:384/1705 train_time:36183ms step_avg:94.23ms +step:385/1705 train_time:36275ms step_avg:94.22ms +step:386/1705 train_time:36368ms step_avg:94.22ms +step:387/1705 train_time:36461ms step_avg:94.22ms +step:388/1705 train_time:36556ms step_avg:94.22ms +step:389/1705 train_time:36651ms step_avg:94.22ms +step:390/1705 train_time:36744ms step_avg:94.22ms +step:391/1705 train_time:36837ms step_avg:94.21ms +step:392/1705 train_time:36929ms step_avg:94.21ms +step:393/1705 train_time:37022ms step_avg:94.20ms +step:394/1705 train_time:37114ms step_avg:94.20ms +step:395/1705 train_time:37207ms step_avg:94.20ms +step:396/1705 train_time:37300ms step_avg:94.19ms +step:397/1705 train_time:37393ms step_avg:94.19ms +step:398/1705 train_time:37487ms step_avg:94.19ms +step:399/1705 train_time:37581ms step_avg:94.19ms +step:400/1705 train_time:37675ms step_avg:94.19ms +step:401/1705 train_time:37769ms step_avg:94.19ms +step:402/1705 train_time:37861ms step_avg:94.18ms +step:403/1705 train_time:37953ms step_avg:94.18ms +step:404/1705 train_time:38046ms step_avg:94.17ms +step:405/1705 train_time:38138ms step_avg:94.17ms +step:406/1705 train_time:38231ms step_avg:94.17ms +step:407/1705 train_time:38324ms step_avg:94.16ms +step:408/1705 train_time:38417ms step_avg:94.16ms +step:409/1705 train_time:38511ms step_avg:94.16ms +step:410/1705 train_time:38604ms step_avg:94.16ms +step:411/1705 train_time:38698ms step_avg:94.15ms +step:412/1705 train_time:38792ms step_avg:94.15ms +step:413/1705 train_time:38885ms step_avg:94.15ms +step:414/1705 train_time:38978ms step_avg:94.15ms +step:415/1705 train_time:39070ms step_avg:94.14ms +step:416/1705 train_time:39162ms step_avg:94.14ms +step:417/1705 train_time:39255ms step_avg:94.14ms +step:418/1705 train_time:39349ms step_avg:94.14ms +step:419/1705 train_time:39441ms step_avg:94.13ms +step:420/1705 train_time:39535ms step_avg:94.13ms +step:421/1705 train_time:39629ms step_avg:94.13ms +step:422/1705 train_time:39722ms 
step_avg:94.13ms +step:423/1705 train_time:39815ms step_avg:94.13ms +step:424/1705 train_time:39909ms step_avg:94.12ms +step:425/1705 train_time:40236ms step_avg:94.67ms +step:426/1705 train_time:40305ms step_avg:94.61ms +step:427/1705 train_time:40396ms step_avg:94.60ms +step:428/1705 train_time:40489ms step_avg:94.60ms +step:429/1705 train_time:40581ms step_avg:94.60ms +step:430/1705 train_time:40673ms step_avg:94.59ms +step:431/1705 train_time:40766ms step_avg:94.58ms +step:432/1705 train_time:40858ms step_avg:94.58ms +step:433/1705 train_time:40949ms step_avg:94.57ms +step:434/1705 train_time:41042ms step_avg:94.57ms +step:435/1705 train_time:41135ms step_avg:94.56ms +step:436/1705 train_time:41233ms step_avg:94.57ms +step:437/1705 train_time:41331ms step_avg:94.58ms +step:438/1705 train_time:41424ms step_avg:94.58ms +step:439/1705 train_time:41518ms step_avg:94.57ms +step:440/1705 train_time:41611ms step_avg:94.57ms +step:441/1705 train_time:41703ms step_avg:94.56ms +step:442/1705 train_time:41795ms step_avg:94.56ms +step:443/1705 train_time:41888ms step_avg:94.55ms +step:444/1705 train_time:41979ms step_avg:94.55ms +step:445/1705 train_time:42071ms step_avg:94.54ms +step:446/1705 train_time:42164ms step_avg:94.54ms +step:447/1705 train_time:42258ms step_avg:94.54ms +step:448/1705 train_time:42352ms step_avg:94.54ms +step:449/1705 train_time:42445ms step_avg:94.53ms +step:450/1705 train_time:42539ms step_avg:94.53ms +step:451/1705 train_time:42632ms step_avg:94.53ms +step:452/1705 train_time:42725ms step_avg:94.52ms +step:453/1705 train_time:42819ms step_avg:94.52ms +step:454/1705 train_time:42911ms step_avg:94.52ms +step:455/1705 train_time:43003ms step_avg:94.51ms +step:456/1705 train_time:43096ms step_avg:94.51ms +step:457/1705 train_time:43190ms step_avg:94.51ms +step:458/1705 train_time:43283ms step_avg:94.50ms +step:459/1705 train_time:43377ms step_avg:94.50ms +step:460/1705 train_time:43471ms step_avg:94.50ms +step:461/1705 train_time:43564ms step_avg:94.50ms +step:462/1705 train_time:43657ms step_avg:94.50ms +step:463/1705 train_time:43750ms step_avg:94.49ms +step:464/1705 train_time:43843ms step_avg:94.49ms +step:465/1705 train_time:43935ms step_avg:94.48ms +step:466/1705 train_time:44028ms step_avg:94.48ms +step:467/1705 train_time:44120ms step_avg:94.48ms +step:468/1705 train_time:44213ms step_avg:94.47ms +step:469/1705 train_time:44307ms step_avg:94.47ms +step:470/1705 train_time:44400ms step_avg:94.47ms +step:471/1705 train_time:44493ms step_avg:94.47ms +step:472/1705 train_time:44587ms step_avg:94.46ms +step:473/1705 train_time:44680ms step_avg:94.46ms +step:474/1705 train_time:44773ms step_avg:94.46ms +step:475/1705 train_time:44866ms step_avg:94.46ms +step:476/1705 train_time:44959ms step_avg:94.45ms +step:477/1705 train_time:45051ms step_avg:94.45ms +step:478/1705 train_time:45144ms step_avg:94.44ms +step:479/1705 train_time:45237ms step_avg:94.44ms +step:480/1705 train_time:45330ms step_avg:94.44ms +step:481/1705 train_time:45423ms step_avg:94.43ms +step:482/1705 train_time:45517ms step_avg:94.43ms +step:483/1705 train_time:45611ms step_avg:94.43ms +step:484/1705 train_time:45704ms step_avg:94.43ms +step:485/1705 train_time:45797ms step_avg:94.43ms +step:486/1705 train_time:45890ms step_avg:94.42ms +step:487/1705 train_time:45983ms step_avg:94.42ms +step:488/1705 train_time:46076ms step_avg:94.42ms +step:489/1705 train_time:46169ms step_avg:94.42ms +step:490/1705 train_time:46262ms step_avg:94.41ms +step:491/1705 train_time:46355ms step_avg:94.41ms +step:492/1705 
train_time:46448ms step_avg:94.41ms +step:493/1705 train_time:46541ms step_avg:94.40ms +step:494/1705 train_time:46635ms step_avg:94.40ms +step:495/1705 train_time:46728ms step_avg:94.40ms +step:496/1705 train_time:46821ms step_avg:94.40ms +step:497/1705 train_time:46914ms step_avg:94.39ms +step:498/1705 train_time:47008ms step_avg:94.39ms +step:499/1705 train_time:47100ms step_avg:94.39ms +step:500/1705 train_time:47193ms step_avg:94.39ms +step:500/1705 val_loss:3.7129 train_time:47287ms step_avg:94.57ms +step:501/1705 train_time:47309ms step_avg:94.43ms +step:502/1705 train_time:47385ms step_avg:94.39ms +step:503/1705 train_time:47482ms step_avg:94.40ms +step:504/1705 train_time:47576ms step_avg:94.40ms +step:505/1705 train_time:47669ms step_avg:94.39ms +step:506/1705 train_time:47761ms step_avg:94.39ms +step:507/1705 train_time:47853ms step_avg:94.39ms +step:508/1705 train_time:47945ms step_avg:94.38ms +step:509/1705 train_time:48037ms step_avg:94.38ms +step:510/1705 train_time:48129ms step_avg:94.37ms +step:511/1705 train_time:48221ms step_avg:94.37ms +step:512/1705 train_time:48314ms step_avg:94.36ms +step:513/1705 train_time:48409ms step_avg:94.36ms +step:514/1705 train_time:48504ms step_avg:94.37ms +step:515/1705 train_time:48597ms step_avg:94.36ms +step:516/1705 train_time:48690ms step_avg:94.36ms +step:517/1705 train_time:48783ms step_avg:94.36ms +step:518/1705 train_time:48875ms step_avg:94.35ms +step:519/1705 train_time:48968ms step_avg:94.35ms +step:520/1705 train_time:49062ms step_avg:94.35ms +step:521/1705 train_time:49154ms step_avg:94.35ms +step:522/1705 train_time:49247ms step_avg:94.34ms +step:523/1705 train_time:49341ms step_avg:94.34ms +step:524/1705 train_time:49434ms step_avg:94.34ms +step:525/1705 train_time:49528ms step_avg:94.34ms +step:526/1705 train_time:49623ms step_avg:94.34ms +step:527/1705 train_time:49716ms step_avg:94.34ms +step:528/1705 train_time:49808ms step_avg:94.33ms +step:529/1705 train_time:49901ms step_avg:94.33ms +step:530/1705 train_time:49993ms step_avg:94.33ms +step:531/1705 train_time:50086ms step_avg:94.32ms +step:532/1705 train_time:50178ms step_avg:94.32ms +step:533/1705 train_time:50271ms step_avg:94.32ms +step:534/1705 train_time:50365ms step_avg:94.32ms +step:535/1705 train_time:50459ms step_avg:94.32ms +step:536/1705 train_time:50552ms step_avg:94.31ms +step:537/1705 train_time:50645ms step_avg:94.31ms +step:538/1705 train_time:50739ms step_avg:94.31ms +step:539/1705 train_time:50831ms step_avg:94.31ms +step:540/1705 train_time:50924ms step_avg:94.30ms +step:541/1705 train_time:51016ms step_avg:94.30ms +step:542/1705 train_time:51108ms step_avg:94.30ms +step:543/1705 train_time:51201ms step_avg:94.29ms +step:544/1705 train_time:51294ms step_avg:94.29ms +step:545/1705 train_time:51387ms step_avg:94.29ms +step:546/1705 train_time:51480ms step_avg:94.29ms +step:547/1705 train_time:51573ms step_avg:94.28ms +step:548/1705 train_time:51666ms step_avg:94.28ms +step:549/1705 train_time:51760ms step_avg:94.28ms +step:550/1705 train_time:51852ms step_avg:94.28ms +step:551/1705 train_time:51945ms step_avg:94.27ms +step:552/1705 train_time:52038ms step_avg:94.27ms +step:553/1705 train_time:52131ms step_avg:94.27ms +step:554/1705 train_time:52224ms step_avg:94.27ms +step:555/1705 train_time:52316ms step_avg:94.26ms +step:556/1705 train_time:52409ms step_avg:94.26ms +step:557/1705 train_time:52502ms step_avg:94.26ms +step:558/1705 train_time:52596ms step_avg:94.26ms +step:559/1705 train_time:52689ms step_avg:94.26ms +step:560/1705 train_time:52782ms 
step_avg:94.25ms +step:561/1705 train_time:52876ms step_avg:94.25ms +step:562/1705 train_time:52969ms step_avg:94.25ms +step:563/1705 train_time:53064ms step_avg:94.25ms +step:564/1705 train_time:53157ms step_avg:94.25ms +step:565/1705 train_time:53249ms step_avg:94.25ms +step:566/1705 train_time:53343ms step_avg:94.25ms +step:567/1705 train_time:53435ms step_avg:94.24ms +step:568/1705 train_time:53529ms step_avg:94.24ms +step:569/1705 train_time:53624ms step_avg:94.24ms +step:570/1705 train_time:53717ms step_avg:94.24ms +step:571/1705 train_time:53811ms step_avg:94.24ms +step:572/1705 train_time:53905ms step_avg:94.24ms +step:573/1705 train_time:53999ms step_avg:94.24ms +step:574/1705 train_time:54093ms step_avg:94.24ms +step:575/1705 train_time:54187ms step_avg:94.24ms +step:576/1705 train_time:54282ms step_avg:94.24ms +step:577/1705 train_time:54377ms step_avg:94.24ms +step:578/1705 train_time:54471ms step_avg:94.24ms +step:579/1705 train_time:54565ms step_avg:94.24ms +step:580/1705 train_time:54659ms step_avg:94.24ms +step:581/1705 train_time:54753ms step_avg:94.24ms +step:582/1705 train_time:54847ms step_avg:94.24ms +step:583/1705 train_time:54942ms step_avg:94.24ms +step:584/1705 train_time:55037ms step_avg:94.24ms +step:585/1705 train_time:55130ms step_avg:94.24ms +step:586/1705 train_time:55224ms step_avg:94.24ms +step:587/1705 train_time:55319ms step_avg:94.24ms +step:588/1705 train_time:55412ms step_avg:94.24ms +step:589/1705 train_time:55505ms step_avg:94.24ms +step:590/1705 train_time:55599ms step_avg:94.24ms +step:591/1705 train_time:55693ms step_avg:94.23ms +step:592/1705 train_time:55787ms step_avg:94.23ms +step:593/1705 train_time:55881ms step_avg:94.23ms +step:594/1705 train_time:55976ms step_avg:94.24ms +step:595/1705 train_time:56070ms step_avg:94.23ms +step:596/1705 train_time:56165ms step_avg:94.24ms +step:597/1705 train_time:56260ms step_avg:94.24ms +step:598/1705 train_time:56354ms step_avg:94.24ms +step:599/1705 train_time:56447ms step_avg:94.24ms +step:600/1705 train_time:56543ms step_avg:94.24ms +step:601/1705 train_time:56638ms step_avg:94.24ms +step:602/1705 train_time:56732ms step_avg:94.24ms +step:603/1705 train_time:56827ms step_avg:94.24ms +step:604/1705 train_time:56921ms step_avg:94.24ms +step:605/1705 train_time:57015ms step_avg:94.24ms +step:606/1705 train_time:57109ms step_avg:94.24ms +step:607/1705 train_time:57204ms step_avg:94.24ms +step:608/1705 train_time:57299ms step_avg:94.24ms +step:609/1705 train_time:57392ms step_avg:94.24ms +step:610/1705 train_time:57487ms step_avg:94.24ms +step:611/1705 train_time:57581ms step_avg:94.24ms +step:612/1705 train_time:57676ms step_avg:94.24ms +step:613/1705 train_time:57770ms step_avg:94.24ms +step:614/1705 train_time:57865ms step_avg:94.24ms +step:615/1705 train_time:57960ms step_avg:94.24ms +step:616/1705 train_time:58054ms step_avg:94.24ms +step:617/1705 train_time:58148ms step_avg:94.24ms +step:618/1705 train_time:58244ms step_avg:94.25ms +step:619/1705 train_time:58339ms step_avg:94.25ms +step:620/1705 train_time:58433ms step_avg:94.25ms +step:621/1705 train_time:58527ms step_avg:94.25ms +step:622/1705 train_time:58622ms step_avg:94.25ms +step:623/1705 train_time:58715ms step_avg:94.25ms +step:624/1705 train_time:58810ms step_avg:94.25ms +step:625/1705 train_time:58904ms step_avg:94.25ms +step:625/1705 val_loss:3.6156 train_time:58999ms step_avg:94.40ms +step:626/1705 train_time:59021ms step_avg:94.28ms +step:627/1705 train_time:59101ms step_avg:94.26ms +step:628/1705 train_time:59200ms step_avg:94.27ms 
+step:629/1705 train_time:59296ms step_avg:94.27ms +step:630/1705 train_time:59391ms step_avg:94.27ms +step:631/1705 train_time:59484ms step_avg:94.27ms +step:632/1705 train_time:59578ms step_avg:94.27ms +step:633/1705 train_time:59672ms step_avg:94.27ms +step:634/1705 train_time:59765ms step_avg:94.27ms +step:635/1705 train_time:59858ms step_avg:94.27ms +step:636/1705 train_time:59952ms step_avg:94.26ms +step:637/1705 train_time:60049ms step_avg:94.27ms +step:638/1705 train_time:60144ms step_avg:94.27ms +step:639/1705 train_time:60502ms step_avg:94.68ms +step:640/1705 train_time:60599ms step_avg:94.69ms +step:641/1705 train_time:60692ms step_avg:94.68ms +step:642/1705 train_time:60785ms step_avg:94.68ms +step:643/1705 train_time:60879ms step_avg:94.68ms +step:644/1705 train_time:60973ms step_avg:94.68ms +step:645/1705 train_time:61066ms step_avg:94.68ms +step:646/1705 train_time:61158ms step_avg:94.67ms +step:647/1705 train_time:61251ms step_avg:94.67ms +step:648/1705 train_time:61345ms step_avg:94.67ms +step:649/1705 train_time:61442ms step_avg:94.67ms +step:650/1705 train_time:61539ms step_avg:94.68ms +step:651/1705 train_time:61636ms step_avg:94.68ms +step:652/1705 train_time:61730ms step_avg:94.68ms +step:653/1705 train_time:61825ms step_avg:94.68ms +step:654/1705 train_time:61919ms step_avg:94.68ms +step:655/1705 train_time:62013ms step_avg:94.68ms +step:656/1705 train_time:62106ms step_avg:94.67ms +step:657/1705 train_time:62199ms step_avg:94.67ms +step:658/1705 train_time:62294ms step_avg:94.67ms +step:659/1705 train_time:62389ms step_avg:94.67ms +step:660/1705 train_time:62484ms step_avg:94.67ms +step:661/1705 train_time:62580ms step_avg:94.67ms +step:662/1705 train_time:62676ms step_avg:94.68ms +step:663/1705 train_time:62770ms step_avg:94.68ms +step:664/1705 train_time:62864ms step_avg:94.68ms +step:665/1705 train_time:62959ms step_avg:94.67ms +step:666/1705 train_time:63053ms step_avg:94.67ms +step:667/1705 train_time:63146ms step_avg:94.67ms +step:668/1705 train_time:63241ms step_avg:94.67ms +step:669/1705 train_time:63335ms step_avg:94.67ms +step:670/1705 train_time:63430ms step_avg:94.67ms +step:671/1705 train_time:63523ms step_avg:94.67ms +step:672/1705 train_time:63619ms step_avg:94.67ms +step:673/1705 train_time:63715ms step_avg:94.67ms +step:674/1705 train_time:63809ms step_avg:94.67ms +step:675/1705 train_time:63904ms step_avg:94.67ms +step:676/1705 train_time:63999ms step_avg:94.67ms +step:677/1705 train_time:64093ms step_avg:94.67ms +step:678/1705 train_time:64187ms step_avg:94.67ms +step:679/1705 train_time:64280ms step_avg:94.67ms +step:680/1705 train_time:64375ms step_avg:94.67ms +step:681/1705 train_time:64470ms step_avg:94.67ms +step:682/1705 train_time:64564ms step_avg:94.67ms +step:683/1705 train_time:64659ms step_avg:94.67ms +step:684/1705 train_time:64754ms step_avg:94.67ms +step:685/1705 train_time:64849ms step_avg:94.67ms +step:686/1705 train_time:64943ms step_avg:94.67ms +step:687/1705 train_time:65037ms step_avg:94.67ms +step:688/1705 train_time:65130ms step_avg:94.67ms +step:689/1705 train_time:65224ms step_avg:94.66ms +step:690/1705 train_time:65318ms step_avg:94.66ms +step:691/1705 train_time:65414ms step_avg:94.67ms +step:692/1705 train_time:65508ms step_avg:94.67ms +step:693/1705 train_time:65603ms step_avg:94.67ms +step:694/1705 train_time:65698ms step_avg:94.67ms +step:695/1705 train_time:65794ms step_avg:94.67ms +step:696/1705 train_time:65889ms step_avg:94.67ms +step:697/1705 train_time:65983ms step_avg:94.67ms +step:698/1705 train_time:66078ms 
step_avg:94.67ms +step:699/1705 train_time:66172ms step_avg:94.67ms +step:700/1705 train_time:66266ms step_avg:94.67ms +step:701/1705 train_time:66360ms step_avg:94.66ms +step:702/1705 train_time:66456ms step_avg:94.67ms +step:703/1705 train_time:66551ms step_avg:94.67ms +step:704/1705 train_time:66645ms step_avg:94.67ms +step:705/1705 train_time:66740ms step_avg:94.67ms +step:706/1705 train_time:66834ms step_avg:94.67ms +step:707/1705 train_time:66928ms step_avg:94.66ms +step:708/1705 train_time:67022ms step_avg:94.66ms +step:709/1705 train_time:67116ms step_avg:94.66ms +step:710/1705 train_time:67211ms step_avg:94.66ms +step:711/1705 train_time:67304ms step_avg:94.66ms +step:712/1705 train_time:67398ms step_avg:94.66ms +step:713/1705 train_time:67493ms step_avg:94.66ms +step:714/1705 train_time:67587ms step_avg:94.66ms +step:715/1705 train_time:67681ms step_avg:94.66ms +step:716/1705 train_time:67776ms step_avg:94.66ms +step:717/1705 train_time:67870ms step_avg:94.66ms +step:718/1705 train_time:67964ms step_avg:94.66ms +step:719/1705 train_time:68057ms step_avg:94.66ms +step:720/1705 train_time:68152ms step_avg:94.66ms +step:721/1705 train_time:68245ms step_avg:94.65ms +step:722/1705 train_time:68339ms step_avg:94.65ms +step:723/1705 train_time:68434ms step_avg:94.65ms +step:724/1705 train_time:68528ms step_avg:94.65ms +step:725/1705 train_time:68623ms step_avg:94.65ms +step:726/1705 train_time:68717ms step_avg:94.65ms +step:727/1705 train_time:68812ms step_avg:94.65ms +step:728/1705 train_time:68906ms step_avg:94.65ms +step:729/1705 train_time:69000ms step_avg:94.65ms +step:730/1705 train_time:69096ms step_avg:94.65ms +step:731/1705 train_time:69191ms step_avg:94.65ms +step:732/1705 train_time:69285ms step_avg:94.65ms +step:733/1705 train_time:69380ms step_avg:94.65ms +step:734/1705 train_time:69475ms step_avg:94.65ms +step:735/1705 train_time:69569ms step_avg:94.65ms +step:736/1705 train_time:69663ms step_avg:94.65ms +step:737/1705 train_time:69758ms step_avg:94.65ms +step:738/1705 train_time:69853ms step_avg:94.65ms +step:739/1705 train_time:69948ms step_avg:94.65ms +step:740/1705 train_time:70042ms step_avg:94.65ms +step:741/1705 train_time:70136ms step_avg:94.65ms +step:742/1705 train_time:70230ms step_avg:94.65ms +step:743/1705 train_time:70324ms step_avg:94.65ms +step:744/1705 train_time:70419ms step_avg:94.65ms +step:745/1705 train_time:70513ms step_avg:94.65ms +step:746/1705 train_time:70607ms step_avg:94.65ms +step:747/1705 train_time:70702ms step_avg:94.65ms +step:748/1705 train_time:70796ms step_avg:94.65ms +step:749/1705 train_time:70891ms step_avg:94.65ms +step:750/1705 train_time:70985ms step_avg:94.65ms +step:750/1705 val_loss:3.5630 train_time:71080ms step_avg:94.77ms +step:751/1705 train_time:71101ms step_avg:94.67ms +step:752/1705 train_time:71181ms step_avg:94.66ms +step:753/1705 train_time:71279ms step_avg:94.66ms +step:754/1705 train_time:71375ms step_avg:94.66ms +step:755/1705 train_time:71469ms step_avg:94.66ms +step:756/1705 train_time:71561ms step_avg:94.66ms +step:757/1705 train_time:71655ms step_avg:94.66ms +step:758/1705 train_time:71749ms step_avg:94.66ms +step:759/1705 train_time:71842ms step_avg:94.65ms +step:760/1705 train_time:71935ms step_avg:94.65ms +step:761/1705 train_time:72030ms step_avg:94.65ms +step:762/1705 train_time:72127ms step_avg:94.65ms +step:763/1705 train_time:72223ms step_avg:94.66ms +step:764/1705 train_time:72318ms step_avg:94.66ms +step:765/1705 train_time:72414ms step_avg:94.66ms +step:766/1705 train_time:72508ms step_avg:94.66ms 
+step:767/1705 train_time:72601ms step_avg:94.66ms +step:768/1705 train_time:72695ms step_avg:94.65ms +step:769/1705 train_time:72788ms step_avg:94.65ms +step:770/1705 train_time:72881ms step_avg:94.65ms +step:771/1705 train_time:72975ms step_avg:94.65ms +step:772/1705 train_time:73069ms step_avg:94.65ms +step:773/1705 train_time:73164ms step_avg:94.65ms +step:774/1705 train_time:73260ms step_avg:94.65ms +step:775/1705 train_time:73355ms step_avg:94.65ms +step:776/1705 train_time:73450ms step_avg:94.65ms +step:777/1705 train_time:73544ms step_avg:94.65ms +step:778/1705 train_time:73638ms step_avg:94.65ms +step:779/1705 train_time:73732ms step_avg:94.65ms +step:780/1705 train_time:73826ms step_avg:94.65ms +step:781/1705 train_time:73920ms step_avg:94.65ms +step:782/1705 train_time:74014ms step_avg:94.65ms +step:783/1705 train_time:74109ms step_avg:94.65ms +step:784/1705 train_time:74203ms step_avg:94.65ms +step:785/1705 train_time:74299ms step_avg:94.65ms +step:786/1705 train_time:74394ms step_avg:94.65ms +step:787/1705 train_time:74490ms step_avg:94.65ms +step:788/1705 train_time:74584ms step_avg:94.65ms +step:789/1705 train_time:74679ms step_avg:94.65ms +step:790/1705 train_time:74772ms step_avg:94.65ms +step:791/1705 train_time:74866ms step_avg:94.65ms +step:792/1705 train_time:74960ms step_avg:94.65ms +step:793/1705 train_time:75055ms step_avg:94.65ms +step:794/1705 train_time:75151ms step_avg:94.65ms +step:795/1705 train_time:75244ms step_avg:94.65ms +step:796/1705 train_time:75339ms step_avg:94.65ms +step:797/1705 train_time:75435ms step_avg:94.65ms +step:798/1705 train_time:75530ms step_avg:94.65ms +step:799/1705 train_time:75624ms step_avg:94.65ms +step:800/1705 train_time:75718ms step_avg:94.65ms +step:801/1705 train_time:75813ms step_avg:94.65ms +step:802/1705 train_time:75908ms step_avg:94.65ms +step:803/1705 train_time:76001ms step_avg:94.65ms +step:804/1705 train_time:76095ms step_avg:94.65ms +step:805/1705 train_time:76190ms step_avg:94.65ms +step:806/1705 train_time:76283ms step_avg:94.64ms +step:807/1705 train_time:76378ms step_avg:94.64ms +step:808/1705 train_time:76473ms step_avg:94.65ms +step:809/1705 train_time:76568ms step_avg:94.65ms +step:810/1705 train_time:76662ms step_avg:94.64ms +step:811/1705 train_time:76756ms step_avg:94.64ms +step:812/1705 train_time:76850ms step_avg:94.64ms +step:813/1705 train_time:76944ms step_avg:94.64ms +step:814/1705 train_time:77038ms step_avg:94.64ms +step:815/1705 train_time:77133ms step_avg:94.64ms +step:816/1705 train_time:77227ms step_avg:94.64ms +step:817/1705 train_time:77321ms step_avg:94.64ms +step:818/1705 train_time:77417ms step_avg:94.64ms +step:819/1705 train_time:77513ms step_avg:94.64ms +step:820/1705 train_time:77607ms step_avg:94.64ms +step:821/1705 train_time:77700ms step_avg:94.64ms +step:822/1705 train_time:77794ms step_avg:94.64ms +step:823/1705 train_time:77889ms step_avg:94.64ms +step:824/1705 train_time:77982ms step_avg:94.64ms +step:825/1705 train_time:78077ms step_avg:94.64ms +step:826/1705 train_time:78171ms step_avg:94.64ms +step:827/1705 train_time:78266ms step_avg:94.64ms +step:828/1705 train_time:78360ms step_avg:94.64ms +step:829/1705 train_time:78456ms step_avg:94.64ms +step:830/1705 train_time:78550ms step_avg:94.64ms +step:831/1705 train_time:78644ms step_avg:94.64ms +step:832/1705 train_time:78739ms step_avg:94.64ms +step:833/1705 train_time:78833ms step_avg:94.64ms +step:834/1705 train_time:78928ms step_avg:94.64ms +step:835/1705 train_time:79021ms step_avg:94.64ms +step:836/1705 train_time:79116ms 
step_avg:94.64ms +step:837/1705 train_time:79211ms step_avg:94.64ms +step:838/1705 train_time:79304ms step_avg:94.64ms +step:839/1705 train_time:79398ms step_avg:94.63ms +step:840/1705 train_time:79494ms step_avg:94.64ms +step:841/1705 train_time:79589ms step_avg:94.64ms +step:842/1705 train_time:79683ms step_avg:94.64ms +step:843/1705 train_time:79777ms step_avg:94.63ms +step:844/1705 train_time:79872ms step_avg:94.63ms +step:845/1705 train_time:79966ms step_avg:94.63ms +step:846/1705 train_time:80060ms step_avg:94.63ms +step:847/1705 train_time:80156ms step_avg:94.63ms +step:848/1705 train_time:80250ms step_avg:94.63ms +step:849/1705 train_time:80344ms step_avg:94.63ms +step:850/1705 train_time:80439ms step_avg:94.63ms +step:851/1705 train_time:80722ms step_avg:94.86ms +step:852/1705 train_time:80792ms step_avg:94.83ms +step:853/1705 train_time:80885ms step_avg:94.82ms +step:854/1705 train_time:80978ms step_avg:94.82ms +step:855/1705 train_time:81071ms step_avg:94.82ms +step:856/1705 train_time:81165ms step_avg:94.82ms +step:857/1705 train_time:81258ms step_avg:94.82ms +step:858/1705 train_time:81352ms step_avg:94.82ms +step:859/1705 train_time:81445ms step_avg:94.81ms +step:860/1705 train_time:81539ms step_avg:94.81ms +step:861/1705 train_time:81639ms step_avg:94.82ms +step:862/1705 train_time:81738ms step_avg:94.82ms +step:863/1705 train_time:81837ms step_avg:94.83ms +step:864/1705 train_time:81932ms step_avg:94.83ms +step:865/1705 train_time:82026ms step_avg:94.83ms +step:866/1705 train_time:82119ms step_avg:94.83ms +step:867/1705 train_time:82213ms step_avg:94.82ms +step:868/1705 train_time:82306ms step_avg:94.82ms +step:869/1705 train_time:82399ms step_avg:94.82ms +step:870/1705 train_time:82494ms step_avg:94.82ms +step:871/1705 train_time:82589ms step_avg:94.82ms +step:872/1705 train_time:82685ms step_avg:94.82ms +step:873/1705 train_time:82780ms step_avg:94.82ms +step:874/1705 train_time:82875ms step_avg:94.82ms +step:875/1705 train_time:82970ms step_avg:94.82ms +step:875/1705 val_loss:3.5205 train_time:83065ms step_avg:94.93ms +step:876/1705 train_time:83086ms step_avg:94.85ms +step:877/1705 train_time:83165ms step_avg:94.83ms +step:878/1705 train_time:83262ms step_avg:94.83ms +step:879/1705 train_time:83357ms step_avg:94.83ms +step:880/1705 train_time:83451ms step_avg:94.83ms +step:881/1705 train_time:83544ms step_avg:94.83ms +step:882/1705 train_time:83638ms step_avg:94.83ms +step:883/1705 train_time:83731ms step_avg:94.83ms +step:884/1705 train_time:83824ms step_avg:94.82ms +step:885/1705 train_time:83919ms step_avg:94.82ms +step:886/1705 train_time:84014ms step_avg:94.82ms +step:887/1705 train_time:84112ms step_avg:94.83ms +step:888/1705 train_time:84210ms step_avg:94.83ms +step:889/1705 train_time:84304ms step_avg:94.83ms +step:890/1705 train_time:84399ms step_avg:94.83ms +step:891/1705 train_time:84495ms step_avg:94.83ms +step:892/1705 train_time:84588ms step_avg:94.83ms +step:893/1705 train_time:84681ms step_avg:94.83ms +step:894/1705 train_time:84775ms step_avg:94.83ms +step:895/1705 train_time:84868ms step_avg:94.82ms +step:896/1705 train_time:84962ms step_avg:94.82ms +step:897/1705 train_time:85057ms step_avg:94.82ms +step:898/1705 train_time:85153ms step_avg:94.83ms +step:899/1705 train_time:85248ms step_avg:94.83ms +step:900/1705 train_time:85342ms step_avg:94.82ms +step:901/1705 train_time:85437ms step_avg:94.82ms +step:902/1705 train_time:85532ms step_avg:94.82ms +step:903/1705 train_time:85627ms step_avg:94.82ms +step:904/1705 train_time:85720ms step_avg:94.82ms 
+step:905/1705 train_time:85814ms step_avg:94.82ms +step:906/1705 train_time:85907ms step_avg:94.82ms +step:907/1705 train_time:86000ms step_avg:94.82ms +step:908/1705 train_time:86095ms step_avg:94.82ms +step:909/1705 train_time:86191ms step_avg:94.82ms +step:910/1705 train_time:86288ms step_avg:94.82ms +step:911/1705 train_time:86381ms step_avg:94.82ms +step:912/1705 train_time:86477ms step_avg:94.82ms +step:913/1705 train_time:86572ms step_avg:94.82ms +step:914/1705 train_time:86666ms step_avg:94.82ms +step:915/1705 train_time:86760ms step_avg:94.82ms +step:916/1705 train_time:86855ms step_avg:94.82ms +step:917/1705 train_time:86950ms step_avg:94.82ms +step:918/1705 train_time:87044ms step_avg:94.82ms +step:919/1705 train_time:87138ms step_avg:94.82ms +step:920/1705 train_time:87233ms step_avg:94.82ms +step:921/1705 train_time:87328ms step_avg:94.82ms +step:922/1705 train_time:87422ms step_avg:94.82ms +step:923/1705 train_time:87516ms step_avg:94.82ms +step:924/1705 train_time:87610ms step_avg:94.82ms +step:925/1705 train_time:87704ms step_avg:94.81ms +step:926/1705 train_time:87798ms step_avg:94.81ms +step:927/1705 train_time:87892ms step_avg:94.81ms +step:928/1705 train_time:87986ms step_avg:94.81ms +step:929/1705 train_time:88081ms step_avg:94.81ms +step:930/1705 train_time:88176ms step_avg:94.81ms +step:931/1705 train_time:88271ms step_avg:94.81ms +step:932/1705 train_time:88366ms step_avg:94.81ms +step:933/1705 train_time:88460ms step_avg:94.81ms +step:934/1705 train_time:88555ms step_avg:94.81ms +step:935/1705 train_time:88651ms step_avg:94.81ms +step:936/1705 train_time:88745ms step_avg:94.81ms +step:937/1705 train_time:88839ms step_avg:94.81ms +step:938/1705 train_time:88934ms step_avg:94.81ms +step:939/1705 train_time:89029ms step_avg:94.81ms +step:940/1705 train_time:89122ms step_avg:94.81ms +step:941/1705 train_time:89217ms step_avg:94.81ms +step:942/1705 train_time:89312ms step_avg:94.81ms +step:943/1705 train_time:89406ms step_avg:94.81ms +step:944/1705 train_time:89500ms step_avg:94.81ms +step:945/1705 train_time:89594ms step_avg:94.81ms +step:946/1705 train_time:89689ms step_avg:94.81ms +step:947/1705 train_time:89783ms step_avg:94.81ms +step:948/1705 train_time:89877ms step_avg:94.81ms +step:949/1705 train_time:89972ms step_avg:94.81ms +step:950/1705 train_time:90067ms step_avg:94.81ms +step:951/1705 train_time:90161ms step_avg:94.81ms +step:952/1705 train_time:90255ms step_avg:94.81ms +step:953/1705 train_time:90350ms step_avg:94.81ms +step:954/1705 train_time:90444ms step_avg:94.81ms +step:955/1705 train_time:90538ms step_avg:94.80ms +step:956/1705 train_time:90633ms step_avg:94.80ms +step:957/1705 train_time:90728ms step_avg:94.80ms +step:958/1705 train_time:90822ms step_avg:94.80ms +step:959/1705 train_time:90917ms step_avg:94.80ms +step:960/1705 train_time:91011ms step_avg:94.80ms +step:961/1705 train_time:91105ms step_avg:94.80ms +step:962/1705 train_time:91200ms step_avg:94.80ms +step:963/1705 train_time:91295ms step_avg:94.80ms +step:964/1705 train_time:91390ms step_avg:94.80ms +step:965/1705 train_time:91484ms step_avg:94.80ms +step:966/1705 train_time:91578ms step_avg:94.80ms +step:967/1705 train_time:91674ms step_avg:94.80ms +step:968/1705 train_time:91769ms step_avg:94.80ms +step:969/1705 train_time:91864ms step_avg:94.80ms +step:970/1705 train_time:91958ms step_avg:94.80ms +step:971/1705 train_time:92053ms step_avg:94.80ms +step:972/1705 train_time:92147ms step_avg:94.80ms +step:973/1705 train_time:92240ms step_avg:94.80ms +step:974/1705 train_time:92335ms 
step_avg:94.80ms +step:975/1705 train_time:92428ms step_avg:94.80ms +step:976/1705 train_time:92522ms step_avg:94.80ms +step:977/1705 train_time:92617ms step_avg:94.80ms +step:978/1705 train_time:92711ms step_avg:94.80ms +step:979/1705 train_time:92806ms step_avg:94.80ms +step:980/1705 train_time:92900ms step_avg:94.80ms +step:981/1705 train_time:92995ms step_avg:94.80ms +step:982/1705 train_time:93091ms step_avg:94.80ms +step:983/1705 train_time:93185ms step_avg:94.80ms +step:984/1705 train_time:93279ms step_avg:94.80ms +step:985/1705 train_time:93373ms step_avg:94.80ms +step:986/1705 train_time:93468ms step_avg:94.79ms +step:987/1705 train_time:93561ms step_avg:94.79ms +step:988/1705 train_time:93655ms step_avg:94.79ms +step:989/1705 train_time:93750ms step_avg:94.79ms +step:990/1705 train_time:93844ms step_avg:94.79ms +step:991/1705 train_time:93938ms step_avg:94.79ms +step:992/1705 train_time:94033ms step_avg:94.79ms +step:993/1705 train_time:94127ms step_avg:94.79ms +step:994/1705 train_time:94221ms step_avg:94.79ms +step:995/1705 train_time:94315ms step_avg:94.79ms +step:996/1705 train_time:94410ms step_avg:94.79ms +step:997/1705 train_time:94504ms step_avg:94.79ms +step:998/1705 train_time:94598ms step_avg:94.79ms +step:999/1705 train_time:94694ms step_avg:94.79ms +step:1000/1705 train_time:94789ms step_avg:94.79ms +step:1000/1705 val_loss:3.4826 train_time:94884ms step_avg:94.88ms +step:1001/1705 train_time:94905ms step_avg:94.81ms +step:1002/1705 train_time:94983ms step_avg:94.79ms +step:1003/1705 train_time:95080ms step_avg:94.80ms +step:1004/1705 train_time:95174ms step_avg:94.79ms +step:1005/1705 train_time:95268ms step_avg:94.79ms +step:1006/1705 train_time:95363ms step_avg:94.79ms +step:1007/1705 train_time:95456ms step_avg:94.79ms +step:1008/1705 train_time:95550ms step_avg:94.79ms +step:1009/1705 train_time:95643ms step_avg:94.79ms +step:1010/1705 train_time:95737ms step_avg:94.79ms +step:1011/1705 train_time:95833ms step_avg:94.79ms +step:1012/1705 train_time:95931ms step_avg:94.79ms +step:1013/1705 train_time:96029ms step_avg:94.80ms +step:1014/1705 train_time:96124ms step_avg:94.80ms +step:1015/1705 train_time:96217ms step_avg:94.80ms +step:1016/1705 train_time:96312ms step_avg:94.80ms +step:1017/1705 train_time:96406ms step_avg:94.79ms +step:1018/1705 train_time:96499ms step_avg:94.79ms +step:1019/1705 train_time:96593ms step_avg:94.79ms +step:1020/1705 train_time:96687ms step_avg:94.79ms +step:1021/1705 train_time:96781ms step_avg:94.79ms +step:1022/1705 train_time:96876ms step_avg:94.79ms +step:1023/1705 train_time:96972ms step_avg:94.79ms +step:1024/1705 train_time:97069ms step_avg:94.79ms +step:1025/1705 train_time:97164ms step_avg:94.79ms +step:1026/1705 train_time:97259ms step_avg:94.79ms +step:1027/1705 train_time:97353ms step_avg:94.79ms +step:1028/1705 train_time:97448ms step_avg:94.79ms +step:1029/1705 train_time:97542ms step_avg:94.79ms +step:1030/1705 train_time:97636ms step_avg:94.79ms +step:1031/1705 train_time:97730ms step_avg:94.79ms +step:1032/1705 train_time:97826ms step_avg:94.79ms +step:1033/1705 train_time:97920ms step_avg:94.79ms +step:1034/1705 train_time:98015ms step_avg:94.79ms +step:1035/1705 train_time:98111ms step_avg:94.79ms +step:1036/1705 train_time:98207ms step_avg:94.79ms +step:1037/1705 train_time:98301ms step_avg:94.79ms +step:1038/1705 train_time:98395ms step_avg:94.79ms +step:1039/1705 train_time:98489ms step_avg:94.79ms +step:1040/1705 train_time:98583ms step_avg:94.79ms +step:1041/1705 train_time:98677ms step_avg:94.79ms 
+step:1042/1705 train_time:98771ms step_avg:94.79ms +step:1043/1705 train_time:98866ms step_avg:94.79ms +step:1044/1705 train_time:98960ms step_avg:94.79ms +step:1045/1705 train_time:99055ms step_avg:94.79ms +step:1046/1705 train_time:99151ms step_avg:94.79ms +step:1047/1705 train_time:99246ms step_avg:94.79ms +step:1048/1705 train_time:99340ms step_avg:94.79ms +step:1049/1705 train_time:99434ms step_avg:94.79ms +step:1050/1705 train_time:99530ms step_avg:94.79ms +step:1051/1705 train_time:99624ms step_avg:94.79ms +step:1052/1705 train_time:99718ms step_avg:94.79ms +step:1053/1705 train_time:99812ms step_avg:94.79ms +step:1054/1705 train_time:99908ms step_avg:94.79ms +step:1055/1705 train_time:100002ms step_avg:94.79ms +step:1056/1705 train_time:100096ms step_avg:94.79ms +step:1057/1705 train_time:100192ms step_avg:94.79ms +step:1058/1705 train_time:100287ms step_avg:94.79ms +step:1059/1705 train_time:100381ms step_avg:94.79ms +step:1060/1705 train_time:100475ms step_avg:94.79ms +step:1061/1705 train_time:100569ms step_avg:94.79ms +step:1062/1705 train_time:100811ms step_avg:94.93ms +step:1063/1705 train_time:100990ms step_avg:95.00ms +step:1064/1705 train_time:101083ms step_avg:95.00ms +step:1065/1705 train_time:101176ms step_avg:95.00ms +step:1066/1705 train_time:101270ms step_avg:95.00ms +step:1067/1705 train_time:101363ms step_avg:95.00ms +step:1068/1705 train_time:101456ms step_avg:95.00ms +step:1069/1705 train_time:101550ms step_avg:95.00ms +step:1070/1705 train_time:101643ms step_avg:94.99ms +step:1071/1705 train_time:101736ms step_avg:94.99ms +step:1072/1705 train_time:101835ms step_avg:95.00ms +step:1073/1705 train_time:101934ms step_avg:95.00ms +step:1074/1705 train_time:102033ms step_avg:95.00ms +step:1075/1705 train_time:102130ms step_avg:95.00ms +step:1076/1705 train_time:102225ms step_avg:95.00ms +step:1077/1705 train_time:102318ms step_avg:95.00ms +step:1078/1705 train_time:102411ms step_avg:95.00ms +step:1079/1705 train_time:102504ms step_avg:95.00ms +step:1080/1705 train_time:102598ms step_avg:95.00ms +step:1081/1705 train_time:102691ms step_avg:95.00ms +step:1082/1705 train_time:102786ms step_avg:95.00ms +step:1083/1705 train_time:102881ms step_avg:95.00ms +step:1084/1705 train_time:102976ms step_avg:95.00ms +step:1085/1705 train_time:103071ms step_avg:95.00ms +step:1086/1705 train_time:103166ms step_avg:95.00ms +step:1087/1705 train_time:103260ms step_avg:95.00ms +step:1088/1705 train_time:103354ms step_avg:94.99ms +step:1089/1705 train_time:103447ms step_avg:94.99ms +step:1090/1705 train_time:103541ms step_avg:94.99ms +step:1091/1705 train_time:103635ms step_avg:94.99ms +step:1092/1705 train_time:103730ms step_avg:94.99ms +step:1093/1705 train_time:103825ms step_avg:94.99ms +step:1094/1705 train_time:103920ms step_avg:94.99ms +step:1095/1705 train_time:104015ms step_avg:94.99ms +step:1096/1705 train_time:104111ms step_avg:94.99ms +step:1097/1705 train_time:104207ms step_avg:94.99ms +step:1098/1705 train_time:104300ms step_avg:94.99ms +step:1099/1705 train_time:104395ms step_avg:94.99ms +step:1100/1705 train_time:104490ms step_avg:94.99ms +step:1101/1705 train_time:104584ms step_avg:94.99ms +step:1102/1705 train_time:104678ms step_avg:94.99ms +step:1103/1705 train_time:104772ms step_avg:94.99ms +step:1104/1705 train_time:104867ms step_avg:94.99ms +step:1105/1705 train_time:104961ms step_avg:94.99ms +step:1106/1705 train_time:105055ms step_avg:94.99ms +step:1107/1705 train_time:105151ms step_avg:94.99ms +step:1108/1705 train_time:105246ms step_avg:94.99ms +step:1109/1705 
train_time:105340ms step_avg:94.99ms +step:1110/1705 train_time:105434ms step_avg:94.99ms +step:1111/1705 train_time:105528ms step_avg:94.99ms +step:1112/1705 train_time:105624ms step_avg:94.99ms +step:1113/1705 train_time:105717ms step_avg:94.98ms +step:1114/1705 train_time:105813ms step_avg:94.98ms +step:1115/1705 train_time:105909ms step_avg:94.99ms +step:1116/1705 train_time:106004ms step_avg:94.99ms +step:1117/1705 train_time:106098ms step_avg:94.98ms +step:1118/1705 train_time:106194ms step_avg:94.99ms +step:1119/1705 train_time:106289ms step_avg:94.99ms +step:1120/1705 train_time:106382ms step_avg:94.98ms +step:1121/1705 train_time:106476ms step_avg:94.98ms +step:1122/1705 train_time:106571ms step_avg:94.98ms +step:1123/1705 train_time:106665ms step_avg:94.98ms +step:1124/1705 train_time:106758ms step_avg:94.98ms +step:1125/1705 train_time:106852ms step_avg:94.98ms +step:1125/1705 val_loss:3.4350 train_time:106948ms step_avg:95.07ms +step:1126/1705 train_time:106969ms step_avg:95.00ms +step:1127/1705 train_time:107048ms step_avg:94.98ms +step:1128/1705 train_time:107146ms step_avg:94.99ms +step:1129/1705 train_time:107241ms step_avg:94.99ms +step:1130/1705 train_time:107335ms step_avg:94.99ms +step:1131/1705 train_time:107429ms step_avg:94.99ms +step:1132/1705 train_time:107522ms step_avg:94.98ms +step:1133/1705 train_time:107615ms step_avg:94.98ms +step:1134/1705 train_time:107709ms step_avg:94.98ms +step:1135/1705 train_time:107802ms step_avg:94.98ms +step:1136/1705 train_time:107897ms step_avg:94.98ms +step:1137/1705 train_time:107993ms step_avg:94.98ms +step:1138/1705 train_time:108090ms step_avg:94.98ms +step:1139/1705 train_time:108187ms step_avg:94.98ms +step:1140/1705 train_time:108282ms step_avg:94.98ms +step:1141/1705 train_time:108377ms step_avg:94.98ms +step:1142/1705 train_time:108472ms step_avg:94.98ms +step:1143/1705 train_time:108567ms step_avg:94.98ms +step:1144/1705 train_time:108661ms step_avg:94.98ms +step:1145/1705 train_time:108756ms step_avg:94.98ms +step:1146/1705 train_time:108850ms step_avg:94.98ms +step:1147/1705 train_time:108945ms step_avg:94.98ms +step:1148/1705 train_time:109042ms step_avg:94.98ms +step:1149/1705 train_time:109137ms step_avg:94.98ms +step:1150/1705 train_time:109233ms step_avg:94.99ms +step:1151/1705 train_time:109329ms step_avg:94.99ms +step:1152/1705 train_time:109424ms step_avg:94.99ms +step:1153/1705 train_time:109518ms step_avg:94.99ms +step:1154/1705 train_time:109613ms step_avg:94.99ms +step:1155/1705 train_time:109708ms step_avg:94.99ms +step:1156/1705 train_time:109803ms step_avg:94.99ms +step:1157/1705 train_time:109898ms step_avg:94.99ms +step:1158/1705 train_time:109995ms step_avg:94.99ms +step:1159/1705 train_time:110092ms step_avg:94.99ms +step:1160/1705 train_time:110188ms step_avg:94.99ms +step:1161/1705 train_time:110283ms step_avg:94.99ms +step:1162/1705 train_time:110379ms step_avg:94.99ms +step:1163/1705 train_time:110474ms step_avg:94.99ms +step:1164/1705 train_time:110569ms step_avg:94.99ms +step:1165/1705 train_time:110664ms step_avg:94.99ms +step:1166/1705 train_time:110759ms step_avg:94.99ms +step:1167/1705 train_time:110854ms step_avg:94.99ms +step:1168/1705 train_time:110950ms step_avg:94.99ms +step:1169/1705 train_time:111045ms step_avg:94.99ms +step:1170/1705 train_time:111141ms step_avg:94.99ms +step:1171/1705 train_time:111236ms step_avg:94.99ms +step:1172/1705 train_time:111332ms step_avg:94.99ms +step:1173/1705 train_time:111429ms step_avg:94.99ms +step:1174/1705 train_time:111524ms step_avg:95.00ms 
+step:1175/1705 train_time:111619ms step_avg:94.99ms +step:1176/1705 train_time:111714ms step_avg:94.99ms +step:1177/1705 train_time:111808ms step_avg:94.99ms +step:1178/1705 train_time:111903ms step_avg:94.99ms +step:1179/1705 train_time:111999ms step_avg:95.00ms +step:1180/1705 train_time:112095ms step_avg:95.00ms +step:1181/1705 train_time:112191ms step_avg:95.00ms +step:1182/1705 train_time:112287ms step_avg:95.00ms +step:1183/1705 train_time:112382ms step_avg:95.00ms +step:1184/1705 train_time:112478ms step_avg:95.00ms +step:1185/1705 train_time:112573ms step_avg:95.00ms +step:1186/1705 train_time:112668ms step_avg:95.00ms +step:1187/1705 train_time:112762ms step_avg:95.00ms +step:1188/1705 train_time:112857ms step_avg:95.00ms +step:1189/1705 train_time:112953ms step_avg:95.00ms +step:1190/1705 train_time:113049ms step_avg:95.00ms +step:1191/1705 train_time:113144ms step_avg:95.00ms +step:1192/1705 train_time:113240ms step_avg:95.00ms +step:1193/1705 train_time:113335ms step_avg:95.00ms +step:1194/1705 train_time:113432ms step_avg:95.00ms +step:1195/1705 train_time:113528ms step_avg:95.00ms +step:1196/1705 train_time:113623ms step_avg:95.00ms +step:1197/1705 train_time:113718ms step_avg:95.00ms +step:1198/1705 train_time:113813ms step_avg:95.00ms +step:1199/1705 train_time:113909ms step_avg:95.00ms +step:1200/1705 train_time:114005ms step_avg:95.00ms +step:1201/1705 train_time:114100ms step_avg:95.00ms +step:1202/1705 train_time:114196ms step_avg:95.00ms +step:1203/1705 train_time:114291ms step_avg:95.01ms +step:1204/1705 train_time:114388ms step_avg:95.01ms +step:1205/1705 train_time:114484ms step_avg:95.01ms +step:1206/1705 train_time:114578ms step_avg:95.01ms +step:1207/1705 train_time:114673ms step_avg:95.01ms +step:1208/1705 train_time:114769ms step_avg:95.01ms +step:1209/1705 train_time:114864ms step_avg:95.01ms +step:1210/1705 train_time:114959ms step_avg:95.01ms +step:1211/1705 train_time:115054ms step_avg:95.01ms +step:1212/1705 train_time:115151ms step_avg:95.01ms +step:1213/1705 train_time:115247ms step_avg:95.01ms +step:1214/1705 train_time:115342ms step_avg:95.01ms +step:1215/1705 train_time:115438ms step_avg:95.01ms +step:1216/1705 train_time:115533ms step_avg:95.01ms +step:1217/1705 train_time:115630ms step_avg:95.01ms +step:1218/1705 train_time:115725ms step_avg:95.01ms +step:1219/1705 train_time:115820ms step_avg:95.01ms +step:1220/1705 train_time:115915ms step_avg:95.01ms +step:1221/1705 train_time:116011ms step_avg:95.01ms +step:1222/1705 train_time:116106ms step_avg:95.01ms +step:1223/1705 train_time:116201ms step_avg:95.01ms +step:1224/1705 train_time:116296ms step_avg:95.01ms +step:1225/1705 train_time:116392ms step_avg:95.01ms +step:1226/1705 train_time:116488ms step_avg:95.01ms +step:1227/1705 train_time:116582ms step_avg:95.01ms +step:1228/1705 train_time:116677ms step_avg:95.01ms +step:1229/1705 train_time:116772ms step_avg:95.01ms +step:1230/1705 train_time:116867ms step_avg:95.01ms +step:1231/1705 train_time:116962ms step_avg:95.01ms +step:1232/1705 train_time:117057ms step_avg:95.01ms +step:1233/1705 train_time:117153ms step_avg:95.01ms +step:1234/1705 train_time:117249ms step_avg:95.02ms +step:1235/1705 train_time:117344ms step_avg:95.02ms +step:1236/1705 train_time:117439ms step_avg:95.02ms +step:1237/1705 train_time:117534ms step_avg:95.02ms +step:1238/1705 train_time:117631ms step_avg:95.02ms +step:1239/1705 train_time:117726ms step_avg:95.02ms +step:1240/1705 train_time:117822ms step_avg:95.02ms +step:1241/1705 train_time:117916ms step_avg:95.02ms 
+step:1242/1705 train_time:118011ms step_avg:95.02ms +step:1243/1705 train_time:118107ms step_avg:95.02ms +step:1244/1705 train_time:118202ms step_avg:95.02ms +step:1245/1705 train_time:118297ms step_avg:95.02ms +step:1246/1705 train_time:118393ms step_avg:95.02ms +step:1247/1705 train_time:118488ms step_avg:95.02ms +step:1248/1705 train_time:118583ms step_avg:95.02ms +step:1249/1705 train_time:118679ms step_avg:95.02ms +step:1250/1705 train_time:118774ms step_avg:95.02ms +step:1250/1705 val_loss:3.3868 train_time:118871ms step_avg:95.10ms +step:1251/1705 train_time:118891ms step_avg:95.04ms +step:1252/1705 train_time:118979ms step_avg:95.03ms +step:1253/1705 train_time:119076ms step_avg:95.03ms +step:1254/1705 train_time:119170ms step_avg:95.03ms +step:1255/1705 train_time:119264ms step_avg:95.03ms +step:1256/1705 train_time:119358ms step_avg:95.03ms +step:1257/1705 train_time:119452ms step_avg:95.03ms +step:1258/1705 train_time:119547ms step_avg:95.03ms +step:1259/1705 train_time:119642ms step_avg:95.03ms +step:1260/1705 train_time:119735ms step_avg:95.03ms +step:1261/1705 train_time:119831ms step_avg:95.03ms +step:1262/1705 train_time:119930ms step_avg:95.03ms +step:1263/1705 train_time:120028ms step_avg:95.03ms +step:1264/1705 train_time:120123ms step_avg:95.03ms +step:1265/1705 train_time:120219ms step_avg:95.03ms +step:1266/1705 train_time:120313ms step_avg:95.03ms +step:1267/1705 train_time:120407ms step_avg:95.03ms +step:1268/1705 train_time:120501ms step_avg:95.03ms +step:1269/1705 train_time:120596ms step_avg:95.03ms +step:1270/1705 train_time:120690ms step_avg:95.03ms +step:1271/1705 train_time:120785ms step_avg:95.03ms +step:1272/1705 train_time:120882ms step_avg:95.03ms +step:1273/1705 train_time:120979ms step_avg:95.03ms +step:1274/1705 train_time:121347ms step_avg:95.25ms +step:1275/1705 train_time:121432ms step_avg:95.24ms +step:1276/1705 train_time:121526ms step_avg:95.24ms +step:1277/1705 train_time:121621ms step_avg:95.24ms +step:1278/1705 train_time:121715ms step_avg:95.24ms +step:1279/1705 train_time:121808ms step_avg:95.24ms +step:1280/1705 train_time:121902ms step_avg:95.24ms +step:1281/1705 train_time:121996ms step_avg:95.23ms +step:1282/1705 train_time:122090ms step_avg:95.23ms +step:1283/1705 train_time:122184ms step_avg:95.23ms +step:1284/1705 train_time:122288ms step_avg:95.24ms +step:1285/1705 train_time:122386ms step_avg:95.24ms +step:1286/1705 train_time:122482ms step_avg:95.24ms +step:1287/1705 train_time:122578ms step_avg:95.24ms +step:1288/1705 train_time:122672ms step_avg:95.24ms +step:1289/1705 train_time:122766ms step_avg:95.24ms +step:1290/1705 train_time:122861ms step_avg:95.24ms +step:1291/1705 train_time:122955ms step_avg:95.24ms +step:1292/1705 train_time:123049ms step_avg:95.24ms +step:1293/1705 train_time:123144ms step_avg:95.24ms +step:1294/1705 train_time:123241ms step_avg:95.24ms +step:1295/1705 train_time:123338ms step_avg:95.24ms +step:1296/1705 train_time:123436ms step_avg:95.24ms +step:1297/1705 train_time:123530ms step_avg:95.24ms +step:1298/1705 train_time:123626ms step_avg:95.24ms +step:1299/1705 train_time:123721ms step_avg:95.24ms +step:1300/1705 train_time:123815ms step_avg:95.24ms +step:1301/1705 train_time:123910ms step_avg:95.24ms +step:1302/1705 train_time:124005ms step_avg:95.24ms +step:1303/1705 train_time:124099ms step_avg:95.24ms +step:1304/1705 train_time:124194ms step_avg:95.24ms +step:1305/1705 train_time:124290ms step_avg:95.24ms +step:1306/1705 train_time:124386ms step_avg:95.24ms +step:1307/1705 train_time:124483ms 
step_avg:95.24ms +step:1308/1705 train_time:124579ms step_avg:95.24ms +step:1309/1705 train_time:124674ms step_avg:95.24ms +step:1310/1705 train_time:124768ms step_avg:95.24ms +step:1311/1705 train_time:124864ms step_avg:95.24ms +step:1312/1705 train_time:124958ms step_avg:95.24ms +step:1313/1705 train_time:125053ms step_avg:95.24ms +step:1314/1705 train_time:125148ms step_avg:95.24ms +step:1315/1705 train_time:125244ms step_avg:95.24ms +step:1316/1705 train_time:125340ms step_avg:95.24ms +step:1317/1705 train_time:125436ms step_avg:95.24ms +step:1318/1705 train_time:125531ms step_avg:95.24ms +step:1319/1705 train_time:125626ms step_avg:95.24ms +step:1320/1705 train_time:125721ms step_avg:95.24ms +step:1321/1705 train_time:125816ms step_avg:95.24ms +step:1322/1705 train_time:125910ms step_avg:95.24ms +step:1323/1705 train_time:126006ms step_avg:95.24ms +step:1324/1705 train_time:126101ms step_avg:95.24ms +step:1325/1705 train_time:126196ms step_avg:95.24ms +step:1326/1705 train_time:126291ms step_avg:95.24ms +step:1327/1705 train_time:126386ms step_avg:95.24ms +step:1328/1705 train_time:126483ms step_avg:95.24ms +step:1329/1705 train_time:126579ms step_avg:95.24ms +step:1330/1705 train_time:126674ms step_avg:95.24ms +step:1331/1705 train_time:126769ms step_avg:95.24ms +step:1332/1705 train_time:126865ms step_avg:95.24ms +step:1333/1705 train_time:126961ms step_avg:95.24ms +step:1334/1705 train_time:127056ms step_avg:95.24ms +step:1335/1705 train_time:127151ms step_avg:95.24ms +step:1336/1705 train_time:127246ms step_avg:95.24ms +step:1337/1705 train_time:127342ms step_avg:95.24ms +step:1338/1705 train_time:127437ms step_avg:95.24ms +step:1339/1705 train_time:127532ms step_avg:95.24ms +step:1340/1705 train_time:127628ms step_avg:95.24ms +step:1341/1705 train_time:127722ms step_avg:95.24ms +step:1342/1705 train_time:127818ms step_avg:95.24ms +step:1343/1705 train_time:127913ms step_avg:95.24ms +step:1344/1705 train_time:128007ms step_avg:95.24ms +step:1345/1705 train_time:128102ms step_avg:95.24ms +step:1346/1705 train_time:128198ms step_avg:95.24ms +step:1347/1705 train_time:128293ms step_avg:95.24ms +step:1348/1705 train_time:128388ms step_avg:95.24ms +step:1349/1705 train_time:128485ms step_avg:95.24ms +step:1350/1705 train_time:128582ms step_avg:95.25ms +step:1351/1705 train_time:128677ms step_avg:95.25ms +step:1352/1705 train_time:128772ms step_avg:95.25ms +step:1353/1705 train_time:128867ms step_avg:95.25ms +step:1354/1705 train_time:128963ms step_avg:95.25ms +step:1355/1705 train_time:129058ms step_avg:95.25ms +step:1356/1705 train_time:129152ms step_avg:95.25ms +step:1357/1705 train_time:129249ms step_avg:95.25ms +step:1358/1705 train_time:129343ms step_avg:95.25ms +step:1359/1705 train_time:129438ms step_avg:95.25ms +step:1360/1705 train_time:129533ms step_avg:95.24ms +step:1361/1705 train_time:129628ms step_avg:95.24ms +step:1362/1705 train_time:129723ms step_avg:95.24ms +step:1363/1705 train_time:129819ms step_avg:95.24ms +step:1364/1705 train_time:129914ms step_avg:95.25ms +step:1365/1705 train_time:130009ms step_avg:95.24ms +step:1366/1705 train_time:130104ms step_avg:95.24ms +step:1367/1705 train_time:130200ms step_avg:95.25ms +step:1368/1705 train_time:130295ms step_avg:95.24ms +step:1369/1705 train_time:130389ms step_avg:95.24ms +step:1370/1705 train_time:130484ms step_avg:95.24ms +step:1371/1705 train_time:130581ms step_avg:95.25ms +step:1372/1705 train_time:130677ms step_avg:95.25ms +step:1373/1705 train_time:130772ms step_avg:95.25ms +step:1374/1705 train_time:130867ms 
step_avg:95.25ms +step:1375/1705 train_time:130964ms step_avg:95.25ms +step:1375/1705 val_loss:3.3495 train_time:131060ms step_avg:95.32ms +step:1376/1705 train_time:131081ms step_avg:95.26ms +step:1377/1705 train_time:131162ms step_avg:95.25ms +step:1378/1705 train_time:131259ms step_avg:95.25ms +step:1379/1705 train_time:131355ms step_avg:95.25ms +step:1380/1705 train_time:131450ms step_avg:95.25ms +step:1381/1705 train_time:131544ms step_avg:95.25ms +step:1382/1705 train_time:131638ms step_avg:95.25ms +step:1383/1705 train_time:131733ms step_avg:95.25ms +step:1384/1705 train_time:131827ms step_avg:95.25ms +step:1385/1705 train_time:131922ms step_avg:95.25ms +step:1386/1705 train_time:132018ms step_avg:95.25ms +step:1387/1705 train_time:132117ms step_avg:95.25ms +step:1388/1705 train_time:132215ms step_avg:95.26ms +step:1389/1705 train_time:132313ms step_avg:95.26ms +step:1390/1705 train_time:132409ms step_avg:95.26ms +step:1391/1705 train_time:132504ms step_avg:95.26ms +step:1392/1705 train_time:132598ms step_avg:95.26ms +step:1393/1705 train_time:132692ms step_avg:95.26ms +step:1394/1705 train_time:132787ms step_avg:95.26ms +step:1395/1705 train_time:132881ms step_avg:95.26ms +step:1396/1705 train_time:132976ms step_avg:95.26ms +step:1397/1705 train_time:133073ms step_avg:95.26ms +step:1398/1705 train_time:133170ms step_avg:95.26ms +step:1399/1705 train_time:133268ms step_avg:95.26ms +step:1400/1705 train_time:133364ms step_avg:95.26ms +step:1401/1705 train_time:133458ms step_avg:95.26ms +step:1402/1705 train_time:133554ms step_avg:95.26ms +step:1403/1705 train_time:133649ms step_avg:95.26ms +step:1404/1705 train_time:133744ms step_avg:95.26ms +step:1405/1705 train_time:133838ms step_avg:95.26ms +step:1406/1705 train_time:133933ms step_avg:95.26ms +step:1407/1705 train_time:134028ms step_avg:95.26ms +step:1408/1705 train_time:134123ms step_avg:95.26ms +step:1409/1705 train_time:134221ms step_avg:95.26ms +step:1410/1705 train_time:134318ms step_avg:95.26ms +step:1411/1705 train_time:134414ms step_avg:95.26ms +step:1412/1705 train_time:134508ms step_avg:95.26ms +step:1413/1705 train_time:134602ms step_avg:95.26ms +step:1414/1705 train_time:134697ms step_avg:95.26ms +step:1415/1705 train_time:134793ms step_avg:95.26ms +step:1416/1705 train_time:134888ms step_avg:95.26ms +step:1417/1705 train_time:134983ms step_avg:95.26ms +step:1418/1705 train_time:135078ms step_avg:95.26ms +step:1419/1705 train_time:135175ms step_avg:95.26ms +step:1420/1705 train_time:135271ms step_avg:95.26ms +step:1421/1705 train_time:135366ms step_avg:95.26ms +step:1422/1705 train_time:135461ms step_avg:95.26ms +step:1423/1705 train_time:135556ms step_avg:95.26ms +step:1424/1705 train_time:135650ms step_avg:95.26ms +step:1425/1705 train_time:135746ms step_avg:95.26ms +step:1426/1705 train_time:135840ms step_avg:95.26ms +step:1427/1705 train_time:135935ms step_avg:95.26ms +step:1428/1705 train_time:136030ms step_avg:95.26ms +step:1429/1705 train_time:136126ms step_avg:95.26ms +step:1430/1705 train_time:136221ms step_avg:95.26ms +step:1431/1705 train_time:136317ms step_avg:95.26ms +step:1432/1705 train_time:136413ms step_avg:95.26ms +step:1433/1705 train_time:136509ms step_avg:95.26ms +step:1434/1705 train_time:136603ms step_avg:95.26ms +step:1435/1705 train_time:136699ms step_avg:95.26ms +step:1436/1705 train_time:136794ms step_avg:95.26ms +step:1437/1705 train_time:136889ms step_avg:95.26ms +step:1438/1705 train_time:136983ms step_avg:95.26ms +step:1439/1705 train_time:137079ms step_avg:95.26ms +step:1440/1705 
train_time:137175ms step_avg:95.26ms +step:1441/1705 train_time:137272ms step_avg:95.26ms +step:1442/1705 train_time:137367ms step_avg:95.26ms +step:1443/1705 train_time:137462ms step_avg:95.26ms +step:1444/1705 train_time:137557ms step_avg:95.26ms +step:1445/1705 train_time:137652ms step_avg:95.26ms +step:1446/1705 train_time:137747ms step_avg:95.26ms +step:1447/1705 train_time:137842ms step_avg:95.26ms +step:1448/1705 train_time:137937ms step_avg:95.26ms +step:1449/1705 train_time:138033ms step_avg:95.26ms +step:1450/1705 train_time:138129ms step_avg:95.26ms +step:1451/1705 train_time:138224ms step_avg:95.26ms +step:1452/1705 train_time:138319ms step_avg:95.26ms +step:1453/1705 train_time:138416ms step_avg:95.26ms +step:1454/1705 train_time:138512ms step_avg:95.26ms +step:1455/1705 train_time:138607ms step_avg:95.26ms +step:1456/1705 train_time:138701ms step_avg:95.26ms +step:1457/1705 train_time:138797ms step_avg:95.26ms +step:1458/1705 train_time:138892ms step_avg:95.26ms +step:1459/1705 train_time:138987ms step_avg:95.26ms +step:1460/1705 train_time:139082ms step_avg:95.26ms +step:1461/1705 train_time:139178ms step_avg:95.26ms +step:1462/1705 train_time:139274ms step_avg:95.26ms +step:1463/1705 train_time:139371ms step_avg:95.26ms +step:1464/1705 train_time:139466ms step_avg:95.26ms +step:1465/1705 train_time:139561ms step_avg:95.26ms +step:1466/1705 train_time:139656ms step_avg:95.26ms +step:1467/1705 train_time:139752ms step_avg:95.26ms +step:1468/1705 train_time:139848ms step_avg:95.26ms +step:1469/1705 train_time:139943ms step_avg:95.26ms +step:1470/1705 train_time:140040ms step_avg:95.27ms +step:1471/1705 train_time:140133ms step_avg:95.26ms +step:1472/1705 train_time:140228ms step_avg:95.26ms +step:1473/1705 train_time:140324ms step_avg:95.26ms +step:1474/1705 train_time:140420ms step_avg:95.26ms +step:1475/1705 train_time:140516ms step_avg:95.26ms +step:1476/1705 train_time:140611ms step_avg:95.26ms +step:1477/1705 train_time:140706ms step_avg:95.26ms +step:1478/1705 train_time:140801ms step_avg:95.26ms +step:1479/1705 train_time:140897ms step_avg:95.26ms +step:1480/1705 train_time:140993ms step_avg:95.27ms +step:1481/1705 train_time:141090ms step_avg:95.27ms +step:1482/1705 train_time:141185ms step_avg:95.27ms +step:1483/1705 train_time:141279ms step_avg:95.27ms +step:1484/1705 train_time:141376ms step_avg:95.27ms +step:1485/1705 train_time:141646ms step_avg:95.38ms +step:1486/1705 train_time:141838ms step_avg:95.45ms +step:1487/1705 train_time:141931ms step_avg:95.45ms +step:1488/1705 train_time:142025ms step_avg:95.45ms +step:1489/1705 train_time:142118ms step_avg:95.45ms +step:1490/1705 train_time:142213ms step_avg:95.45ms +step:1491/1705 train_time:142307ms step_avg:95.44ms +step:1492/1705 train_time:142401ms step_avg:95.44ms +step:1493/1705 train_time:142495ms step_avg:95.44ms +step:1494/1705 train_time:142590ms step_avg:95.44ms +step:1495/1705 train_time:142688ms step_avg:95.44ms +step:1496/1705 train_time:142787ms step_avg:95.45ms +step:1497/1705 train_time:142885ms step_avg:95.45ms +step:1498/1705 train_time:142980ms step_avg:95.45ms +step:1499/1705 train_time:143075ms step_avg:95.45ms +step:1500/1705 train_time:143169ms step_avg:95.45ms +step:1500/1705 val_loss:3.3171 train_time:143265ms step_avg:95.51ms +step:1501/1705 train_time:143286ms step_avg:95.46ms +step:1502/1705 train_time:143367ms step_avg:95.45ms +step:1503/1705 train_time:143467ms step_avg:95.45ms +step:1504/1705 train_time:143562ms step_avg:95.45ms +step:1505/1705 train_time:143656ms step_avg:95.45ms 
+step:1506/1705 train_time:143750ms step_avg:95.45ms +step:1507/1705 train_time:143845ms step_avg:95.45ms +step:1508/1705 train_time:143940ms step_avg:95.45ms +step:1509/1705 train_time:144033ms step_avg:95.45ms +step:1510/1705 train_time:144127ms step_avg:95.45ms +step:1511/1705 train_time:144225ms step_avg:95.45ms +step:1512/1705 train_time:144323ms step_avg:95.45ms +step:1513/1705 train_time:144420ms step_avg:95.45ms +step:1514/1705 train_time:144516ms step_avg:95.45ms +step:1515/1705 train_time:144611ms step_avg:95.45ms +step:1516/1705 train_time:144706ms step_avg:95.45ms +step:1517/1705 train_time:144800ms step_avg:95.45ms +step:1518/1705 train_time:144894ms step_avg:95.45ms +step:1519/1705 train_time:144989ms step_avg:95.45ms +step:1520/1705 train_time:145084ms step_avg:95.45ms +step:1521/1705 train_time:145179ms step_avg:95.45ms +step:1522/1705 train_time:145275ms step_avg:95.45ms +step:1523/1705 train_time:145371ms step_avg:95.45ms +step:1524/1705 train_time:145467ms step_avg:95.45ms +step:1525/1705 train_time:145565ms step_avg:95.45ms +step:1526/1705 train_time:145661ms step_avg:95.45ms +step:1527/1705 train_time:145757ms step_avg:95.45ms +step:1528/1705 train_time:145851ms step_avg:95.45ms +step:1529/1705 train_time:145946ms step_avg:95.45ms +step:1530/1705 train_time:146041ms step_avg:95.45ms +step:1531/1705 train_time:146136ms step_avg:95.45ms +step:1532/1705 train_time:146230ms step_avg:95.45ms +step:1533/1705 train_time:146326ms step_avg:95.45ms +step:1534/1705 train_time:146423ms step_avg:95.45ms +step:1535/1705 train_time:146519ms step_avg:95.45ms +step:1536/1705 train_time:146614ms step_avg:95.45ms +step:1537/1705 train_time:146709ms step_avg:95.45ms +step:1538/1705 train_time:146805ms step_avg:95.45ms +step:1539/1705 train_time:146899ms step_avg:95.45ms +step:1540/1705 train_time:146993ms step_avg:95.45ms +step:1541/1705 train_time:147088ms step_avg:95.45ms +step:1542/1705 train_time:147184ms step_avg:95.45ms +step:1543/1705 train_time:147280ms step_avg:95.45ms +step:1544/1705 train_time:147376ms step_avg:95.45ms +step:1545/1705 train_time:147472ms step_avg:95.45ms +step:1546/1705 train_time:147568ms step_avg:95.45ms +step:1547/1705 train_time:147663ms step_avg:95.45ms +step:1548/1705 train_time:147761ms step_avg:95.45ms +step:1549/1705 train_time:147857ms step_avg:95.45ms +step:1550/1705 train_time:147951ms step_avg:95.45ms +step:1551/1705 train_time:148047ms step_avg:95.45ms +step:1552/1705 train_time:148143ms step_avg:95.45ms +step:1553/1705 train_time:148238ms step_avg:95.45ms +step:1554/1705 train_time:148333ms step_avg:95.45ms +step:1555/1705 train_time:148428ms step_avg:95.45ms +step:1556/1705 train_time:148525ms step_avg:95.45ms +step:1557/1705 train_time:148621ms step_avg:95.45ms +step:1558/1705 train_time:148716ms step_avg:95.45ms +step:1559/1705 train_time:148811ms step_avg:95.45ms +step:1560/1705 train_time:148907ms step_avg:95.45ms +step:1561/1705 train_time:149003ms step_avg:95.45ms +step:1562/1705 train_time:149097ms step_avg:95.45ms +step:1563/1705 train_time:149193ms step_avg:95.45ms +step:1564/1705 train_time:149288ms step_avg:95.45ms +step:1565/1705 train_time:149385ms step_avg:95.45ms +step:1566/1705 train_time:149481ms step_avg:95.45ms +step:1567/1705 train_time:149576ms step_avg:95.45ms +step:1568/1705 train_time:149670ms step_avg:95.45ms +step:1569/1705 train_time:149766ms step_avg:95.45ms +step:1570/1705 train_time:149861ms step_avg:95.45ms +step:1571/1705 train_time:149956ms step_avg:95.45ms +step:1572/1705 train_time:150051ms step_avg:95.45ms 
+step:1573/1705 train_time:150147ms step_avg:95.45ms +step:1574/1705 train_time:150242ms step_avg:95.45ms +step:1575/1705 train_time:150337ms step_avg:95.45ms +step:1576/1705 train_time:150431ms step_avg:95.45ms +step:1577/1705 train_time:150527ms step_avg:95.45ms +step:1578/1705 train_time:150623ms step_avg:95.45ms +step:1579/1705 train_time:150717ms step_avg:95.45ms +step:1580/1705 train_time:150812ms step_avg:95.45ms +step:1581/1705 train_time:150908ms step_avg:95.45ms +step:1582/1705 train_time:151004ms step_avg:95.45ms +step:1583/1705 train_time:151101ms step_avg:95.45ms +step:1584/1705 train_time:151195ms step_avg:95.45ms +step:1585/1705 train_time:151290ms step_avg:95.45ms +step:1586/1705 train_time:151385ms step_avg:95.45ms +step:1587/1705 train_time:151481ms step_avg:95.45ms +step:1588/1705 train_time:151576ms step_avg:95.45ms +step:1589/1705 train_time:151671ms step_avg:95.45ms +step:1590/1705 train_time:151767ms step_avg:95.45ms +step:1591/1705 train_time:151863ms step_avg:95.45ms +step:1592/1705 train_time:151960ms step_avg:95.45ms +step:1593/1705 train_time:152055ms step_avg:95.45ms +step:1594/1705 train_time:152151ms step_avg:95.45ms +step:1595/1705 train_time:152247ms step_avg:95.45ms +step:1596/1705 train_time:152344ms step_avg:95.45ms +step:1597/1705 train_time:152439ms step_avg:95.45ms +step:1598/1705 train_time:152534ms step_avg:95.45ms +step:1599/1705 train_time:152629ms step_avg:95.45ms +step:1600/1705 train_time:152725ms step_avg:95.45ms +step:1601/1705 train_time:152819ms step_avg:95.45ms +step:1602/1705 train_time:152914ms step_avg:95.45ms +step:1603/1705 train_time:153010ms step_avg:95.45ms +step:1604/1705 train_time:153105ms step_avg:95.45ms +step:1605/1705 train_time:153201ms step_avg:95.45ms +step:1606/1705 train_time:153297ms step_avg:95.45ms +step:1607/1705 train_time:153391ms step_avg:95.45ms +step:1608/1705 train_time:153487ms step_avg:95.45ms +step:1609/1705 train_time:153583ms step_avg:95.45ms +step:1610/1705 train_time:153678ms step_avg:95.45ms +step:1611/1705 train_time:153773ms step_avg:95.45ms +step:1612/1705 train_time:153869ms step_avg:95.45ms +step:1613/1705 train_time:153965ms step_avg:95.45ms +step:1614/1705 train_time:154061ms step_avg:95.45ms +step:1615/1705 train_time:154157ms step_avg:95.45ms +step:1616/1705 train_time:154252ms step_avg:95.45ms +step:1617/1705 train_time:154348ms step_avg:95.45ms +step:1618/1705 train_time:154443ms step_avg:95.45ms +step:1619/1705 train_time:154539ms step_avg:95.45ms +step:1620/1705 train_time:154633ms step_avg:95.45ms +step:1621/1705 train_time:154728ms step_avg:95.45ms +step:1622/1705 train_time:154824ms step_avg:95.45ms +step:1623/1705 train_time:154919ms step_avg:95.45ms +step:1624/1705 train_time:155014ms step_avg:95.45ms +step:1625/1705 train_time:155111ms step_avg:95.45ms +step:1625/1705 val_loss:3.2893 train_time:155207ms step_avg:95.51ms +step:1626/1705 train_time:155228ms step_avg:95.47ms +step:1627/1705 train_time:155309ms step_avg:95.46ms +step:1628/1705 train_time:155410ms step_avg:95.46ms +step:1629/1705 train_time:155504ms step_avg:95.46ms +step:1630/1705 train_time:155598ms step_avg:95.46ms +step:1631/1705 train_time:155693ms step_avg:95.46ms +step:1632/1705 train_time:155787ms step_avg:95.46ms +step:1633/1705 train_time:155881ms step_avg:95.46ms +step:1634/1705 train_time:155975ms step_avg:95.46ms +step:1635/1705 train_time:156070ms step_avg:95.46ms +step:1636/1705 train_time:156165ms step_avg:95.46ms +step:1637/1705 train_time:156263ms step_avg:95.46ms +step:1638/1705 train_time:156363ms 
step_avg:95.46ms +step:1639/1705 train_time:156459ms step_avg:95.46ms +step:1640/1705 train_time:156555ms step_avg:95.46ms +step:1641/1705 train_time:156650ms step_avg:95.46ms +step:1642/1705 train_time:156745ms step_avg:95.46ms +step:1643/1705 train_time:156839ms step_avg:95.46ms +step:1644/1705 train_time:156934ms step_avg:95.46ms +step:1645/1705 train_time:157028ms step_avg:95.46ms +step:1646/1705 train_time:157123ms step_avg:95.46ms +step:1647/1705 train_time:157219ms step_avg:95.46ms +step:1648/1705 train_time:157317ms step_avg:95.46ms +step:1649/1705 train_time:157414ms step_avg:95.46ms +step:1650/1705 train_time:157510ms step_avg:95.46ms +step:1651/1705 train_time:157604ms step_avg:95.46ms +step:1652/1705 train_time:157699ms step_avg:95.46ms +step:1653/1705 train_time:157793ms step_avg:95.46ms +step:1654/1705 train_time:157887ms step_avg:95.46ms +step:1655/1705 train_time:157982ms step_avg:95.46ms +step:1656/1705 train_time:158077ms step_avg:95.46ms +step:1657/1705 train_time:158172ms step_avg:95.46ms +step:1658/1705 train_time:158268ms step_avg:95.46ms +step:1659/1705 train_time:158363ms step_avg:95.46ms +step:1660/1705 train_time:158461ms step_avg:95.46ms +step:1661/1705 train_time:158556ms step_avg:95.46ms +step:1662/1705 train_time:158652ms step_avg:95.46ms +step:1663/1705 train_time:158746ms step_avg:95.46ms +step:1664/1705 train_time:158841ms step_avg:95.46ms +step:1665/1705 train_time:158937ms step_avg:95.46ms +step:1666/1705 train_time:159032ms step_avg:95.46ms +step:1667/1705 train_time:159127ms step_avg:95.46ms +step:1668/1705 train_time:159222ms step_avg:95.46ms +step:1669/1705 train_time:159317ms step_avg:95.46ms +step:1670/1705 train_time:159413ms step_avg:95.46ms +step:1671/1705 train_time:159509ms step_avg:95.46ms +step:1672/1705 train_time:159604ms step_avg:95.46ms +step:1673/1705 train_time:159699ms step_avg:95.46ms +step:1674/1705 train_time:159794ms step_avg:95.46ms +step:1675/1705 train_time:159889ms step_avg:95.46ms +step:1676/1705 train_time:159983ms step_avg:95.46ms +step:1677/1705 train_time:160078ms step_avg:95.46ms +step:1678/1705 train_time:160173ms step_avg:95.45ms +step:1679/1705 train_time:160270ms step_avg:95.46ms +step:1680/1705 train_time:160366ms step_avg:95.46ms +step:1681/1705 train_time:160461ms step_avg:95.46ms +step:1682/1705 train_time:160557ms step_avg:95.46ms +step:1683/1705 train_time:160654ms step_avg:95.46ms +step:1684/1705 train_time:160749ms step_avg:95.46ms +step:1685/1705 train_time:160844ms step_avg:95.46ms +step:1686/1705 train_time:160939ms step_avg:95.46ms +step:1687/1705 train_time:161034ms step_avg:95.46ms +step:1688/1705 train_time:161129ms step_avg:95.46ms +step:1689/1705 train_time:161224ms step_avg:95.46ms +step:1690/1705 train_time:161319ms step_avg:95.46ms +step:1691/1705 train_time:161415ms step_avg:95.46ms +step:1692/1705 train_time:161512ms step_avg:95.46ms +step:1693/1705 train_time:161606ms step_avg:95.46ms +step:1694/1705 train_time:161701ms step_avg:95.46ms +step:1695/1705 train_time:161797ms step_avg:95.46ms +step:1696/1705 train_time:161893ms step_avg:95.46ms +step:1697/1705 train_time:161988ms step_avg:95.46ms +step:1698/1705 train_time:162279ms step_avg:95.57ms +step:1699/1705 train_time:162442ms step_avg:95.61ms +step:1700/1705 train_time:162536ms step_avg:95.61ms +step:1701/1705 train_time:162630ms step_avg:95.61ms +step:1702/1705 train_time:162723ms step_avg:95.61ms +step:1703/1705 train_time:162817ms step_avg:95.61ms +step:1704/1705 train_time:162912ms step_avg:95.61ms +step:1705/1705 train_time:163006ms 
step_avg:95.60ms +step:1705/1705 val_loss:3.2754 train_time:163100ms step_avg:95.66ms +peak memory allocated: 33750 MiB reserved: 48696 MiB diff --git a/records/090525_SkipMLPBlocks/8ac310eb-aa6a-4f5b-b298-8a0cbcb01398.txt b/records/090525_SkipMLPBlocks/8ac310eb-aa6a-4f5b-b298-8a0cbcb01398.txt new file mode 100644 index 000000000..e2f905a6f --- /dev/null +++ b/records/090525_SkipMLPBlocks/8ac310eb-aa6a-4f5b-b298-8a0cbcb01398.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, 
grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + 
tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, 
mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
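+ + A minimal usage sketch (hypothetical parameter selection, not taken from this record; assumes torch.distributed is initialized, as in the run below): + + matrix_params = [p for p in model.blocks.parameters() if p.ndim >= 2] + opt = Muon(matrix_params, lr=0.02, weight_decay=0.01, momentum=0.95) + loss.backward() + opt.step()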
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
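+ # e.g. next_multiple_of_n(50257, n=128) == 50304 == 393 * 128; this padded vocab is the (d_out, d_in) == (50304, 768) shape referenced in the FP8 backward comment earlier in this file.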
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
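# (tokens == BOS_ID).nonzero gives every index holding the document-start token (BOS_ID = 50256); it is kept as a CPU int64 numpy array so the per-batch scan in next_batch below is plain numpy indexing. +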
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead of bos_idx[{idx}]; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning of Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted: load the next shard and retry.
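+                # Note: if the shard list itself is exhausted, next(file_iter) raises
+                # StopIteration, which PEP 479 converts to a RuntimeError inside this
+                # generator, so running past the final shard fails loudly.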
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # trim the last document by one token to account for the _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local)
+            _targets = buf[1:].view(num_tokens_local)
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * new_grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1705 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
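+
+# Illustrative use of the .send() hook on distributed_data_generator above; a sketch with
+# hypothetical values, not executed in this run. After priming the generator with next(),
+# .send() delivers new (num_tokens, max_seq_len, grad_accum_steps) and returns the next batch:
+#   loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len)
+#   inputs, targets, cum_seqlens = next(loader)  # prime the generator
+#   inputs, targets, cum_seqlens = loader.send((args.train_batch_size, args.train_max_seq_len // 2, 1))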
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
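+
+# Worked example of the two schedules above under the defaults (num_iterations=1705,
+# cooldown_frac=0.45, ws_schedule=(3, 7, 11)):
+#   get_lr: the multiplier stays at 1.0 for steps 0..937, then decays linearly toward 0.1 by step 1705
+#   get_ws: returns 3 for steps 0..568, 7 for steps 569..1137, and 11 for steps 1138..1705
+# i.e. the attention window widens in thirds while the LR cools down over the last 45% of training.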
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:26:47 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 129W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 43C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 42C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 34C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 42C P0 129W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 33C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 89808 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 89809 C /usr/bin/python3 610MiB | +| 0 N/A N/A 89810 C /usr/bin/python3 610MiB | +| 0 N/A N/A 89811 C /usr/bin/python3 610MiB | +| 0 N/A N/A 89812 C /usr/bin/python3 610MiB | +| 0 N/A N/A 89813 C /usr/bin/python3 610MiB | +| 0 N/A N/A 89814 C /usr/bin/python3 610MiB | +| 0 N/A N/A 89815 C /usr/bin/python3 610MiB | +| 1 N/A N/A 89809 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 89810 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 89811 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 89812 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 89813 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 89814 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 89815 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1705 train_time:368ms step_avg:367.62ms +step:2/1705 train_time:389ms step_avg:194.68ms +step:3/1705 train_time:457ms step_avg:152.28ms +step:4/1705 train_time:548ms step_avg:137.00ms +step:5/1705 
train_time:640ms step_avg:128.06ms +step:6/1705 train_time:733ms step_avg:122.12ms +step:7/1705 train_time:825ms step_avg:117.86ms +step:8/1705 train_time:917ms step_avg:114.68ms +step:9/1705 train_time:1010ms step_avg:112.19ms +step:10/1705 train_time:1102ms step_avg:110.23ms +step:11/1705 train_time:1194ms step_avg:108.59ms +step:12/1705 train_time:1288ms step_avg:107.36ms +step:13/1705 train_time:1385ms step_avg:106.54ms +step:14/1705 train_time:1481ms step_avg:105.80ms +step:15/1705 train_time:1575ms step_avg:104.99ms +step:16/1705 train_time:1667ms step_avg:104.19ms +step:17/1705 train_time:1760ms step_avg:103.55ms +step:18/1705 train_time:1852ms step_avg:102.91ms +step:19/1705 train_time:1946ms step_avg:102.40ms +step:20/1705 train_time:2038ms step_avg:101.91ms +step:21/1705 train_time:2131ms step_avg:101.46ms +step:22/1705 train_time:2224ms step_avg:101.07ms +step:23/1705 train_time:2318ms step_avg:100.77ms +step:24/1705 train_time:2412ms step_avg:100.50ms +step:25/1705 train_time:2506ms step_avg:100.23ms +step:26/1705 train_time:2600ms step_avg:100.00ms +step:27/1705 train_time:2693ms step_avg:99.74ms +step:28/1705 train_time:2786ms step_avg:99.49ms +step:29/1705 train_time:2878ms step_avg:99.25ms +step:30/1705 train_time:2971ms step_avg:99.03ms +step:31/1705 train_time:3064ms step_avg:98.85ms +step:32/1705 train_time:3158ms step_avg:98.69ms +step:33/1705 train_time:3251ms step_avg:98.51ms +step:34/1705 train_time:3346ms step_avg:98.40ms +step:35/1705 train_time:3439ms step_avg:98.25ms +step:36/1705 train_time:3532ms step_avg:98.12ms +step:37/1705 train_time:3626ms step_avg:98.00ms +step:38/1705 train_time:3719ms step_avg:97.87ms +step:39/1705 train_time:3813ms step_avg:97.76ms +step:40/1705 train_time:3905ms step_avg:97.63ms +step:41/1705 train_time:3998ms step_avg:97.51ms +step:42/1705 train_time:4091ms step_avg:97.39ms +step:43/1705 train_time:4183ms step_avg:97.29ms +step:44/1705 train_time:4276ms step_avg:97.18ms +step:45/1705 train_time:4369ms step_avg:97.10ms +step:46/1705 train_time:4463ms step_avg:97.03ms +step:47/1705 train_time:4557ms step_avg:96.96ms +step:48/1705 train_time:4651ms step_avg:96.90ms +step:49/1705 train_time:4744ms step_avg:96.83ms +step:50/1705 train_time:4838ms step_avg:96.77ms +step:51/1705 train_time:4931ms step_avg:96.69ms +step:52/1705 train_time:5025ms step_avg:96.63ms +step:53/1705 train_time:5118ms step_avg:96.57ms +step:54/1705 train_time:5212ms step_avg:96.51ms +step:55/1705 train_time:5305ms step_avg:96.45ms +step:56/1705 train_time:5397ms step_avg:96.38ms +step:57/1705 train_time:5490ms step_avg:96.32ms +step:58/1705 train_time:5584ms step_avg:96.27ms +step:59/1705 train_time:5677ms step_avg:96.22ms +step:60/1705 train_time:5770ms step_avg:96.16ms +step:61/1705 train_time:5864ms step_avg:96.13ms +step:62/1705 train_time:5958ms step_avg:96.09ms +step:63/1705 train_time:6050ms step_avg:96.03ms +step:64/1705 train_time:6143ms step_avg:95.98ms +step:65/1705 train_time:6236ms step_avg:95.94ms +step:66/1705 train_time:6329ms step_avg:95.90ms +step:67/1705 train_time:6423ms step_avg:95.87ms +step:68/1705 train_time:6517ms step_avg:95.83ms +step:69/1705 train_time:6610ms step_avg:95.79ms +step:70/1705 train_time:6703ms step_avg:95.75ms +step:71/1705 train_time:6796ms step_avg:95.71ms +step:72/1705 train_time:6889ms step_avg:95.68ms +step:73/1705 train_time:6983ms step_avg:95.65ms +step:74/1705 train_time:7075ms step_avg:95.61ms +step:75/1705 train_time:7169ms step_avg:95.58ms +step:76/1705 train_time:7262ms step_avg:95.55ms +step:77/1705 
train_time:7355ms step_avg:95.51ms +step:78/1705 train_time:7447ms step_avg:95.48ms +step:79/1705 train_time:7541ms step_avg:95.46ms +step:80/1705 train_time:7634ms step_avg:95.43ms +step:81/1705 train_time:7727ms step_avg:95.40ms +step:82/1705 train_time:7821ms step_avg:95.38ms +step:83/1705 train_time:7914ms step_avg:95.35ms +step:84/1705 train_time:8007ms step_avg:95.32ms +step:85/1705 train_time:8101ms step_avg:95.30ms +step:86/1705 train_time:8192ms step_avg:95.26ms +step:87/1705 train_time:8286ms step_avg:95.24ms +step:88/1705 train_time:8378ms step_avg:95.21ms +step:89/1705 train_time:8472ms step_avg:95.19ms +step:90/1705 train_time:8565ms step_avg:95.17ms +step:91/1705 train_time:8659ms step_avg:95.16ms +step:92/1705 train_time:8752ms step_avg:95.13ms +step:93/1705 train_time:8845ms step_avg:95.11ms +step:94/1705 train_time:8939ms step_avg:95.10ms +step:95/1705 train_time:9032ms step_avg:95.08ms +step:96/1705 train_time:9126ms step_avg:95.06ms +step:97/1705 train_time:9219ms step_avg:95.04ms +step:98/1705 train_time:9312ms step_avg:95.02ms +step:99/1705 train_time:9404ms step_avg:94.99ms +step:100/1705 train_time:9497ms step_avg:94.97ms +step:101/1705 train_time:9591ms step_avg:94.96ms +step:102/1705 train_time:9684ms step_avg:94.94ms +step:103/1705 train_time:9777ms step_avg:94.93ms +step:104/1705 train_time:9869ms step_avg:94.90ms +step:105/1705 train_time:9963ms step_avg:94.89ms +step:106/1705 train_time:10057ms step_avg:94.88ms +step:107/1705 train_time:10149ms step_avg:94.85ms +step:108/1705 train_time:10243ms step_avg:94.84ms +step:109/1705 train_time:10336ms step_avg:94.82ms +step:110/1705 train_time:10429ms step_avg:94.81ms +step:111/1705 train_time:10523ms step_avg:94.80ms +step:112/1705 train_time:10616ms step_avg:94.78ms +step:113/1705 train_time:10709ms step_avg:94.77ms +step:114/1705 train_time:10803ms step_avg:94.76ms +step:115/1705 train_time:10896ms step_avg:94.75ms +step:116/1705 train_time:10989ms step_avg:94.74ms +step:117/1705 train_time:11084ms step_avg:94.73ms +step:118/1705 train_time:11176ms step_avg:94.71ms +step:119/1705 train_time:11268ms step_avg:94.69ms +step:120/1705 train_time:11361ms step_avg:94.68ms +step:121/1705 train_time:11455ms step_avg:94.67ms +step:122/1705 train_time:11548ms step_avg:94.65ms +step:123/1705 train_time:11641ms step_avg:94.64ms +step:124/1705 train_time:11733ms step_avg:94.62ms +step:125/1705 train_time:11827ms step_avg:94.61ms +step:125/1705 val_loss:4.2950 train_time:11920ms step_avg:95.36ms +step:126/1705 train_time:11943ms step_avg:94.79ms +step:127/1705 train_time:12020ms step_avg:94.65ms +step:128/1705 train_time:12125ms step_avg:94.73ms +step:129/1705 train_time:12219ms step_avg:94.72ms +step:130/1705 train_time:12312ms step_avg:94.71ms +step:131/1705 train_time:12404ms step_avg:94.69ms +step:132/1705 train_time:12497ms step_avg:94.67ms +step:133/1705 train_time:12589ms step_avg:94.65ms +step:134/1705 train_time:12680ms step_avg:94.63ms +step:135/1705 train_time:12773ms step_avg:94.61ms +step:136/1705 train_time:12864ms step_avg:94.59ms +step:137/1705 train_time:12957ms step_avg:94.57ms +step:138/1705 train_time:13052ms step_avg:94.58ms +step:139/1705 train_time:13146ms step_avg:94.58ms +step:140/1705 train_time:13240ms step_avg:94.57ms +step:141/1705 train_time:13334ms step_avg:94.56ms +step:142/1705 train_time:13426ms step_avg:94.55ms +step:143/1705 train_time:13518ms step_avg:94.53ms +step:144/1705 train_time:13611ms step_avg:94.52ms +step:145/1705 train_time:13703ms step_avg:94.50ms +step:146/1705 train_time:13795ms 
step_avg:94.49ms +step:147/1705 train_time:13887ms step_avg:94.47ms +step:148/1705 train_time:13980ms step_avg:94.46ms +step:149/1705 train_time:14074ms step_avg:94.46ms +step:150/1705 train_time:14168ms step_avg:94.46ms +step:151/1705 train_time:14262ms step_avg:94.45ms +step:152/1705 train_time:14355ms step_avg:94.44ms +step:153/1705 train_time:14448ms step_avg:94.43ms +step:154/1705 train_time:14541ms step_avg:94.42ms +step:155/1705 train_time:14633ms step_avg:94.41ms +step:156/1705 train_time:14726ms step_avg:94.40ms +step:157/1705 train_time:14818ms step_avg:94.38ms +step:158/1705 train_time:14910ms step_avg:94.37ms +step:159/1705 train_time:15003ms step_avg:94.36ms +step:160/1705 train_time:15097ms step_avg:94.36ms +step:161/1705 train_time:15192ms step_avg:94.36ms +step:162/1705 train_time:15286ms step_avg:94.36ms +step:163/1705 train_time:15378ms step_avg:94.34ms +step:164/1705 train_time:15472ms step_avg:94.34ms +step:165/1705 train_time:15565ms step_avg:94.33ms +step:166/1705 train_time:15656ms step_avg:94.31ms +step:167/1705 train_time:15749ms step_avg:94.30ms +step:168/1705 train_time:15841ms step_avg:94.29ms +step:169/1705 train_time:15934ms step_avg:94.29ms +step:170/1705 train_time:16027ms step_avg:94.28ms +step:171/1705 train_time:16120ms step_avg:94.27ms +step:172/1705 train_time:16213ms step_avg:94.26ms +step:173/1705 train_time:16306ms step_avg:94.26ms +step:174/1705 train_time:16399ms step_avg:94.25ms +step:175/1705 train_time:16493ms step_avg:94.25ms +step:176/1705 train_time:16586ms step_avg:94.24ms +step:177/1705 train_time:16678ms step_avg:94.23ms +step:178/1705 train_time:16771ms step_avg:94.22ms +step:179/1705 train_time:16863ms step_avg:94.21ms +step:180/1705 train_time:16956ms step_avg:94.20ms +step:181/1705 train_time:17049ms step_avg:94.19ms +step:182/1705 train_time:17142ms step_avg:94.19ms +step:183/1705 train_time:17236ms step_avg:94.18ms +step:184/1705 train_time:17329ms step_avg:94.18ms +step:185/1705 train_time:17422ms step_avg:94.17ms +step:186/1705 train_time:17515ms step_avg:94.17ms +step:187/1705 train_time:17608ms step_avg:94.16ms +step:188/1705 train_time:17700ms step_avg:94.15ms +step:189/1705 train_time:17793ms step_avg:94.14ms +step:190/1705 train_time:17885ms step_avg:94.13ms +step:191/1705 train_time:17978ms step_avg:94.12ms +step:192/1705 train_time:18070ms step_avg:94.11ms +step:193/1705 train_time:18163ms step_avg:94.11ms +step:194/1705 train_time:18256ms step_avg:94.10ms +step:195/1705 train_time:18349ms step_avg:94.10ms +step:196/1705 train_time:18442ms step_avg:94.09ms +step:197/1705 train_time:18535ms step_avg:94.09ms +step:198/1705 train_time:18628ms step_avg:94.08ms +step:199/1705 train_time:18722ms step_avg:94.08ms +step:200/1705 train_time:18815ms step_avg:94.07ms +step:201/1705 train_time:18908ms step_avg:94.07ms +step:202/1705 train_time:19000ms step_avg:94.06ms +step:203/1705 train_time:19093ms step_avg:94.06ms +step:204/1705 train_time:19186ms step_avg:94.05ms +step:205/1705 train_time:19278ms step_avg:94.04ms +step:206/1705 train_time:19371ms step_avg:94.03ms +step:207/1705 train_time:19464ms step_avg:94.03ms +step:208/1705 train_time:19556ms step_avg:94.02ms +step:209/1705 train_time:19649ms step_avg:94.01ms +step:210/1705 train_time:19742ms step_avg:94.01ms +step:211/1705 train_time:19835ms step_avg:94.00ms +step:212/1705 train_time:19928ms step_avg:94.00ms +step:213/1705 train_time:20218ms step_avg:94.92ms +step:214/1705 train_time:20310ms step_avg:94.91ms +step:215/1705 train_time:20402ms step_avg:94.89ms +step:216/1705 
train_time:20495ms step_avg:94.88ms +step:217/1705 train_time:20587ms step_avg:94.87ms +step:218/1705 train_time:20679ms step_avg:94.86ms +step:219/1705 train_time:20771ms step_avg:94.84ms +step:220/1705 train_time:20863ms step_avg:94.83ms +step:221/1705 train_time:20955ms step_avg:94.82ms +step:222/1705 train_time:21047ms step_avg:94.81ms +step:223/1705 train_time:21143ms step_avg:94.81ms +step:224/1705 train_time:21238ms step_avg:94.81ms +step:225/1705 train_time:21332ms step_avg:94.81ms +step:226/1705 train_time:21426ms step_avg:94.80ms +step:227/1705 train_time:21518ms step_avg:94.79ms +step:228/1705 train_time:21612ms step_avg:94.79ms +step:229/1705 train_time:21704ms step_avg:94.78ms +step:230/1705 train_time:21796ms step_avg:94.76ms +step:231/1705 train_time:21887ms step_avg:94.75ms +step:232/1705 train_time:21979ms step_avg:94.74ms +step:233/1705 train_time:22072ms step_avg:94.73ms +step:234/1705 train_time:22166ms step_avg:94.73ms +step:235/1705 train_time:22260ms step_avg:94.72ms +step:236/1705 train_time:22354ms step_avg:94.72ms +step:237/1705 train_time:22446ms step_avg:94.71ms +step:238/1705 train_time:22539ms step_avg:94.70ms +step:239/1705 train_time:22632ms step_avg:94.70ms +step:240/1705 train_time:22724ms step_avg:94.68ms +step:241/1705 train_time:22816ms step_avg:94.67ms +step:242/1705 train_time:22908ms step_avg:94.66ms +step:243/1705 train_time:23001ms step_avg:94.65ms +step:244/1705 train_time:23094ms step_avg:94.65ms +step:245/1705 train_time:23186ms step_avg:94.64ms +step:246/1705 train_time:23280ms step_avg:94.64ms +step:247/1705 train_time:23374ms step_avg:94.63ms +step:248/1705 train_time:23468ms step_avg:94.63ms +step:249/1705 train_time:23560ms step_avg:94.62ms +step:250/1705 train_time:23654ms step_avg:94.61ms +step:250/1705 val_loss:3.9737 train_time:23746ms step_avg:94.98ms +step:251/1705 train_time:23769ms step_avg:94.70ms +step:252/1705 train_time:23843ms step_avg:94.62ms +step:253/1705 train_time:23943ms step_avg:94.64ms +step:254/1705 train_time:24038ms step_avg:94.64ms +step:255/1705 train_time:24131ms step_avg:94.63ms +step:256/1705 train_time:24224ms step_avg:94.62ms +step:257/1705 train_time:24315ms step_avg:94.61ms +step:258/1705 train_time:24407ms step_avg:94.60ms +step:259/1705 train_time:24499ms step_avg:94.59ms +step:260/1705 train_time:24590ms step_avg:94.58ms +step:261/1705 train_time:24683ms step_avg:94.57ms +step:262/1705 train_time:24777ms step_avg:94.57ms +step:263/1705 train_time:24873ms step_avg:94.58ms +step:264/1705 train_time:24968ms step_avg:94.57ms +step:265/1705 train_time:25062ms step_avg:94.57ms +step:266/1705 train_time:25154ms step_avg:94.56ms +step:267/1705 train_time:25246ms step_avg:94.56ms +step:268/1705 train_time:25339ms step_avg:94.55ms +step:269/1705 train_time:25432ms step_avg:94.54ms +step:270/1705 train_time:25524ms step_avg:94.53ms +step:271/1705 train_time:25616ms step_avg:94.52ms +step:272/1705 train_time:25708ms step_avg:94.51ms +step:273/1705 train_time:25801ms step_avg:94.51ms +step:274/1705 train_time:25894ms step_avg:94.50ms +step:275/1705 train_time:25987ms step_avg:94.50ms +step:276/1705 train_time:26081ms step_avg:94.50ms +step:277/1705 train_time:26174ms step_avg:94.49ms +step:278/1705 train_time:26267ms step_avg:94.49ms +step:279/1705 train_time:26360ms step_avg:94.48ms +step:280/1705 train_time:26453ms step_avg:94.47ms +step:281/1705 train_time:26544ms step_avg:94.46ms +step:282/1705 train_time:26636ms step_avg:94.46ms +step:283/1705 train_time:26729ms step_avg:94.45ms +step:284/1705 train_time:26822ms 
step_avg:94.44ms +step:285/1705 train_time:26914ms step_avg:94.44ms +step:286/1705 train_time:27007ms step_avg:94.43ms +step:287/1705 train_time:27101ms step_avg:94.43ms +step:288/1705 train_time:27194ms step_avg:94.42ms +step:289/1705 train_time:27286ms step_avg:94.42ms +step:290/1705 train_time:27379ms step_avg:94.41ms +step:291/1705 train_time:27472ms step_avg:94.41ms +step:292/1705 train_time:27565ms step_avg:94.40ms +step:293/1705 train_time:27658ms step_avg:94.40ms +step:294/1705 train_time:27751ms step_avg:94.39ms +step:295/1705 train_time:27844ms step_avg:94.38ms +step:296/1705 train_time:27937ms step_avg:94.38ms +step:297/1705 train_time:28031ms step_avg:94.38ms +step:298/1705 train_time:28124ms step_avg:94.37ms +step:299/1705 train_time:28216ms step_avg:94.37ms +step:300/1705 train_time:28309ms step_avg:94.36ms +step:301/1705 train_time:28402ms step_avg:94.36ms +step:302/1705 train_time:28495ms step_avg:94.35ms +step:303/1705 train_time:28587ms step_avg:94.35ms +step:304/1705 train_time:28680ms step_avg:94.34ms +step:305/1705 train_time:28773ms step_avg:94.34ms +step:306/1705 train_time:28866ms step_avg:94.33ms +step:307/1705 train_time:28959ms step_avg:94.33ms +step:308/1705 train_time:29052ms step_avg:94.32ms +step:309/1705 train_time:29145ms step_avg:94.32ms +step:310/1705 train_time:29238ms step_avg:94.32ms +step:311/1705 train_time:29330ms step_avg:94.31ms +step:312/1705 train_time:29422ms step_avg:94.30ms +step:313/1705 train_time:29515ms step_avg:94.30ms +step:314/1705 train_time:29608ms step_avg:94.29ms +step:315/1705 train_time:29700ms step_avg:94.29ms +step:316/1705 train_time:29793ms step_avg:94.28ms +step:317/1705 train_time:29886ms step_avg:94.28ms +step:318/1705 train_time:29979ms step_avg:94.27ms +step:319/1705 train_time:30072ms step_avg:94.27ms +step:320/1705 train_time:30165ms step_avg:94.27ms +step:321/1705 train_time:30258ms step_avg:94.26ms +step:322/1705 train_time:30350ms step_avg:94.26ms +step:323/1705 train_time:30444ms step_avg:94.25ms +step:324/1705 train_time:30536ms step_avg:94.25ms +step:325/1705 train_time:30629ms step_avg:94.24ms +step:326/1705 train_time:30722ms step_avg:94.24ms +step:327/1705 train_time:30815ms step_avg:94.23ms +step:328/1705 train_time:30908ms step_avg:94.23ms +step:329/1705 train_time:31000ms step_avg:94.23ms +step:330/1705 train_time:31093ms step_avg:94.22ms +step:331/1705 train_time:31186ms step_avg:94.22ms +step:332/1705 train_time:31279ms step_avg:94.21ms +step:333/1705 train_time:31373ms step_avg:94.21ms +step:334/1705 train_time:31465ms step_avg:94.21ms +step:335/1705 train_time:31558ms step_avg:94.20ms +step:336/1705 train_time:31651ms step_avg:94.20ms +step:337/1705 train_time:31744ms step_avg:94.20ms +step:338/1705 train_time:31837ms step_avg:94.19ms +step:339/1705 train_time:31930ms step_avg:94.19ms +step:340/1705 train_time:32023ms step_avg:94.19ms +step:341/1705 train_time:32116ms step_avg:94.18ms +step:342/1705 train_time:32209ms step_avg:94.18ms +step:343/1705 train_time:32302ms step_avg:94.17ms +step:344/1705 train_time:32394ms step_avg:94.17ms +step:345/1705 train_time:32486ms step_avg:94.16ms +step:346/1705 train_time:32579ms step_avg:94.16ms +step:347/1705 train_time:32672ms step_avg:94.16ms +step:348/1705 train_time:32766ms step_avg:94.15ms +step:349/1705 train_time:32858ms step_avg:94.15ms +step:350/1705 train_time:32952ms step_avg:94.15ms +step:351/1705 train_time:33045ms step_avg:94.14ms +step:352/1705 train_time:33137ms step_avg:94.14ms +step:353/1705 train_time:33230ms step_avg:94.14ms +step:354/1705 
train_time:33323ms step_avg:94.13ms +step:355/1705 train_time:33416ms step_avg:94.13ms +step:356/1705 train_time:33509ms step_avg:94.13ms +step:357/1705 train_time:33602ms step_avg:94.12ms +step:358/1705 train_time:33695ms step_avg:94.12ms +step:359/1705 train_time:33788ms step_avg:94.12ms +step:360/1705 train_time:33881ms step_avg:94.11ms +step:361/1705 train_time:33975ms step_avg:94.11ms +step:362/1705 train_time:34068ms step_avg:94.11ms +step:363/1705 train_time:34161ms step_avg:94.11ms +step:364/1705 train_time:34254ms step_avg:94.11ms +step:365/1705 train_time:34346ms step_avg:94.10ms +step:366/1705 train_time:34439ms step_avg:94.10ms +step:367/1705 train_time:34532ms step_avg:94.09ms +step:368/1705 train_time:34625ms step_avg:94.09ms +step:369/1705 train_time:34718ms step_avg:94.09ms +step:370/1705 train_time:34811ms step_avg:94.08ms +step:371/1705 train_time:34904ms step_avg:94.08ms +step:372/1705 train_time:34997ms step_avg:94.08ms +step:373/1705 train_time:35090ms step_avg:94.08ms +step:374/1705 train_time:35183ms step_avg:94.07ms +step:375/1705 train_time:35276ms step_avg:94.07ms +step:375/1705 val_loss:3.8244 train_time:35369ms step_avg:94.32ms +step:376/1705 train_time:35392ms step_avg:94.13ms +step:377/1705 train_time:35468ms step_avg:94.08ms +step:378/1705 train_time:35566ms step_avg:94.09ms +step:379/1705 train_time:35659ms step_avg:94.09ms +step:380/1705 train_time:35752ms step_avg:94.08ms +step:381/1705 train_time:35844ms step_avg:94.08ms +step:382/1705 train_time:35936ms step_avg:94.07ms +step:383/1705 train_time:36029ms step_avg:94.07ms +step:384/1705 train_time:36121ms step_avg:94.06ms +step:385/1705 train_time:36213ms step_avg:94.06ms +step:386/1705 train_time:36307ms step_avg:94.06ms +step:387/1705 train_time:36401ms step_avg:94.06ms +step:388/1705 train_time:36497ms step_avg:94.06ms +step:389/1705 train_time:36591ms step_avg:94.06ms +step:390/1705 train_time:36684ms step_avg:94.06ms +step:391/1705 train_time:36776ms step_avg:94.06ms +step:392/1705 train_time:36869ms step_avg:94.05ms +step:393/1705 train_time:36961ms step_avg:94.05ms +step:394/1705 train_time:37054ms step_avg:94.05ms +step:395/1705 train_time:37147ms step_avg:94.04ms +step:396/1705 train_time:37239ms step_avg:94.04ms +step:397/1705 train_time:37332ms step_avg:94.04ms +step:398/1705 train_time:37426ms step_avg:94.03ms +step:399/1705 train_time:37520ms step_avg:94.03ms +step:400/1705 train_time:37613ms step_avg:94.03ms +step:401/1705 train_time:37707ms step_avg:94.03ms +step:402/1705 train_time:37800ms step_avg:94.03ms +step:403/1705 train_time:37893ms step_avg:94.03ms +step:404/1705 train_time:37987ms step_avg:94.03ms +step:405/1705 train_time:38079ms step_avg:94.02ms +step:406/1705 train_time:38172ms step_avg:94.02ms +step:407/1705 train_time:38264ms step_avg:94.01ms +step:408/1705 train_time:38357ms step_avg:94.01ms +step:409/1705 train_time:38450ms step_avg:94.01ms +step:410/1705 train_time:38543ms step_avg:94.01ms +step:411/1705 train_time:38637ms step_avg:94.01ms +step:412/1705 train_time:38730ms step_avg:94.01ms +step:413/1705 train_time:38823ms step_avg:94.00ms +step:414/1705 train_time:38918ms step_avg:94.01ms +step:415/1705 train_time:39011ms step_avg:94.00ms +step:416/1705 train_time:39104ms step_avg:94.00ms +step:417/1705 train_time:39197ms step_avg:94.00ms +step:418/1705 train_time:39290ms step_avg:93.99ms +step:419/1705 train_time:39382ms step_avg:93.99ms +step:420/1705 train_time:39474ms step_avg:93.99ms +step:421/1705 train_time:39568ms step_avg:93.99ms +step:422/1705 train_time:39661ms 
step_avg:93.98ms +step:423/1705 train_time:39755ms step_avg:93.98ms +step:424/1705 train_time:39848ms step_avg:93.98ms +step:425/1705 train_time:40130ms step_avg:94.42ms +step:426/1705 train_time:40211ms step_avg:94.39ms +step:427/1705 train_time:40302ms step_avg:94.38ms +step:428/1705 train_time:40394ms step_avg:94.38ms +step:429/1705 train_time:40486ms step_avg:94.37ms +step:430/1705 train_time:40579ms step_avg:94.37ms +step:431/1705 train_time:40671ms step_avg:94.36ms +step:432/1705 train_time:40763ms step_avg:94.36ms +step:433/1705 train_time:40854ms step_avg:94.35ms +step:434/1705 train_time:40947ms step_avg:94.35ms +step:435/1705 train_time:41043ms step_avg:94.35ms +step:436/1705 train_time:41138ms step_avg:94.35ms +step:437/1705 train_time:41232ms step_avg:94.35ms +step:438/1705 train_time:41327ms step_avg:94.35ms +step:439/1705 train_time:41421ms step_avg:94.35ms +step:440/1705 train_time:41513ms step_avg:94.35ms +step:441/1705 train_time:41605ms step_avg:94.34ms +step:442/1705 train_time:41697ms step_avg:94.34ms +step:443/1705 train_time:41789ms step_avg:94.33ms +step:444/1705 train_time:41882ms step_avg:94.33ms +step:445/1705 train_time:41975ms step_avg:94.33ms +step:446/1705 train_time:42069ms step_avg:94.32ms +step:447/1705 train_time:42162ms step_avg:94.32ms +step:448/1705 train_time:42256ms step_avg:94.32ms +step:449/1705 train_time:42350ms step_avg:94.32ms +step:450/1705 train_time:42442ms step_avg:94.32ms +step:451/1705 train_time:42536ms step_avg:94.31ms +step:452/1705 train_time:42629ms step_avg:94.31ms +step:453/1705 train_time:42721ms step_avg:94.31ms +step:454/1705 train_time:42813ms step_avg:94.30ms +step:455/1705 train_time:42906ms step_avg:94.30ms +step:456/1705 train_time:42999ms step_avg:94.30ms +step:457/1705 train_time:43091ms step_avg:94.29ms +step:458/1705 train_time:43185ms step_avg:94.29ms +step:459/1705 train_time:43278ms step_avg:94.29ms +step:460/1705 train_time:43372ms step_avg:94.29ms +step:461/1705 train_time:43465ms step_avg:94.28ms +step:462/1705 train_time:43557ms step_avg:94.28ms +step:463/1705 train_time:43651ms step_avg:94.28ms +step:464/1705 train_time:43744ms step_avg:94.28ms +step:465/1705 train_time:43836ms step_avg:94.27ms +step:466/1705 train_time:43930ms step_avg:94.27ms +step:467/1705 train_time:44023ms step_avg:94.27ms +step:468/1705 train_time:44116ms step_avg:94.27ms +step:469/1705 train_time:44210ms step_avg:94.26ms +step:470/1705 train_time:44303ms step_avg:94.26ms +step:471/1705 train_time:44396ms step_avg:94.26ms +step:472/1705 train_time:44489ms step_avg:94.26ms +step:473/1705 train_time:44582ms step_avg:94.25ms +step:474/1705 train_time:44675ms step_avg:94.25ms +step:475/1705 train_time:44768ms step_avg:94.25ms +step:476/1705 train_time:44861ms step_avg:94.25ms +step:477/1705 train_time:44954ms step_avg:94.24ms +step:478/1705 train_time:45047ms step_avg:94.24ms +step:479/1705 train_time:45140ms step_avg:94.24ms +step:480/1705 train_time:45233ms step_avg:94.23ms +step:481/1705 train_time:45325ms step_avg:94.23ms +step:482/1705 train_time:45418ms step_avg:94.23ms +step:483/1705 train_time:45511ms step_avg:94.23ms +step:484/1705 train_time:45605ms step_avg:94.23ms +step:485/1705 train_time:45698ms step_avg:94.22ms +step:486/1705 train_time:45790ms step_avg:94.22ms +step:487/1705 train_time:45883ms step_avg:94.22ms +step:488/1705 train_time:45975ms step_avg:94.21ms +step:489/1705 train_time:46069ms step_avg:94.21ms +step:490/1705 train_time:46161ms step_avg:94.21ms +step:491/1705 train_time:46255ms step_avg:94.20ms +step:492/1705 
train_time:46348ms step_avg:94.20ms +step:493/1705 train_time:46440ms step_avg:94.20ms +step:494/1705 train_time:46534ms step_avg:94.20ms +step:495/1705 train_time:46628ms step_avg:94.20ms +step:496/1705 train_time:46721ms step_avg:94.19ms +step:497/1705 train_time:46814ms step_avg:94.19ms +step:498/1705 train_time:46908ms step_avg:94.19ms +step:499/1705 train_time:47000ms step_avg:94.19ms +step:500/1705 train_time:47093ms step_avg:94.19ms +step:500/1705 val_loss:3.7168 train_time:47187ms step_avg:94.37ms +step:501/1705 train_time:47210ms step_avg:94.23ms +step:502/1705 train_time:47285ms step_avg:94.19ms +step:503/1705 train_time:47381ms step_avg:94.20ms +step:504/1705 train_time:47473ms step_avg:94.19ms +step:505/1705 train_time:47566ms step_avg:94.19ms +step:506/1705 train_time:47658ms step_avg:94.19ms +step:507/1705 train_time:47750ms step_avg:94.18ms +step:508/1705 train_time:47842ms step_avg:94.18ms +step:509/1705 train_time:47934ms step_avg:94.17ms +step:510/1705 train_time:48027ms step_avg:94.17ms +step:511/1705 train_time:48120ms step_avg:94.17ms +step:512/1705 train_time:48215ms step_avg:94.17ms +step:513/1705 train_time:48311ms step_avg:94.17ms +step:514/1705 train_time:48404ms step_avg:94.17ms +step:515/1705 train_time:48498ms step_avg:94.17ms +step:516/1705 train_time:48592ms step_avg:94.17ms +step:517/1705 train_time:48683ms step_avg:94.16ms +step:518/1705 train_time:48775ms step_avg:94.16ms +step:519/1705 train_time:48867ms step_avg:94.16ms +step:520/1705 train_time:48960ms step_avg:94.15ms +step:521/1705 train_time:49053ms step_avg:94.15ms +step:522/1705 train_time:49146ms step_avg:94.15ms +step:523/1705 train_time:49239ms step_avg:94.15ms +step:524/1705 train_time:49334ms step_avg:94.15ms +step:525/1705 train_time:49428ms step_avg:94.15ms +step:526/1705 train_time:49521ms step_avg:94.15ms +step:527/1705 train_time:49614ms step_avg:94.14ms +step:528/1705 train_time:49708ms step_avg:94.14ms +step:529/1705 train_time:49800ms step_avg:94.14ms +step:530/1705 train_time:49893ms step_avg:94.14ms +step:531/1705 train_time:49985ms step_avg:94.13ms +step:532/1705 train_time:50078ms step_avg:94.13ms +step:533/1705 train_time:50172ms step_avg:94.13ms +step:534/1705 train_time:50265ms step_avg:94.13ms +step:535/1705 train_time:50359ms step_avg:94.13ms +step:536/1705 train_time:50452ms step_avg:94.13ms +step:537/1705 train_time:50546ms step_avg:94.13ms +step:538/1705 train_time:50639ms step_avg:94.13ms +step:539/1705 train_time:50733ms step_avg:94.12ms +step:540/1705 train_time:50825ms step_avg:94.12ms +step:541/1705 train_time:50918ms step_avg:94.12ms +step:542/1705 train_time:51011ms step_avg:94.12ms +step:543/1705 train_time:51103ms step_avg:94.11ms +step:544/1705 train_time:51197ms step_avg:94.11ms +step:545/1705 train_time:51290ms step_avg:94.11ms +step:546/1705 train_time:51383ms step_avg:94.11ms +step:547/1705 train_time:51477ms step_avg:94.11ms +step:548/1705 train_time:51571ms step_avg:94.11ms +step:549/1705 train_time:51664ms step_avg:94.11ms +step:550/1705 train_time:51757ms step_avg:94.10ms +step:551/1705 train_time:51850ms step_avg:94.10ms +step:552/1705 train_time:51943ms step_avg:94.10ms +step:553/1705 train_time:52036ms step_avg:94.10ms +step:554/1705 train_time:52129ms step_avg:94.10ms +step:555/1705 train_time:52222ms step_avg:94.09ms +step:556/1705 train_time:52315ms step_avg:94.09ms +step:557/1705 train_time:52409ms step_avg:94.09ms +step:558/1705 train_time:52502ms step_avg:94.09ms +step:559/1705 train_time:52596ms step_avg:94.09ms +step:560/1705 train_time:52688ms 
step_avg:94.09ms +step:561/1705 train_time:52781ms step_avg:94.08ms +step:562/1705 train_time:52875ms step_avg:94.08ms +step:563/1705 train_time:52967ms step_avg:94.08ms +step:564/1705 train_time:53060ms step_avg:94.08ms +step:565/1705 train_time:53153ms step_avg:94.08ms +step:566/1705 train_time:53247ms step_avg:94.08ms +step:567/1705 train_time:53340ms step_avg:94.07ms +step:568/1705 train_time:53433ms step_avg:94.07ms +step:569/1705 train_time:53526ms step_avg:94.07ms +step:570/1705 train_time:53619ms step_avg:94.07ms +step:571/1705 train_time:53713ms step_avg:94.07ms +step:572/1705 train_time:53807ms step_avg:94.07ms +step:573/1705 train_time:53903ms step_avg:94.07ms +step:574/1705 train_time:53997ms step_avg:94.07ms +step:575/1705 train_time:54091ms step_avg:94.07ms +step:576/1705 train_time:54186ms step_avg:94.07ms +step:577/1705 train_time:54280ms step_avg:94.07ms +step:578/1705 train_time:54375ms step_avg:94.07ms +step:579/1705 train_time:54470ms step_avg:94.08ms +step:580/1705 train_time:54564ms step_avg:94.08ms +step:581/1705 train_time:54658ms step_avg:94.08ms +step:582/1705 train_time:54753ms step_avg:94.08ms +step:583/1705 train_time:54847ms step_avg:94.08ms +step:584/1705 train_time:54941ms step_avg:94.08ms +step:585/1705 train_time:55035ms step_avg:94.08ms +step:586/1705 train_time:55129ms step_avg:94.08ms +step:587/1705 train_time:55223ms step_avg:94.08ms +step:588/1705 train_time:55318ms step_avg:94.08ms +step:589/1705 train_time:55413ms step_avg:94.08ms +step:590/1705 train_time:55507ms step_avg:94.08ms +step:591/1705 train_time:55601ms step_avg:94.08ms +step:592/1705 train_time:55695ms step_avg:94.08ms +step:593/1705 train_time:55790ms step_avg:94.08ms +step:594/1705 train_time:55883ms step_avg:94.08ms +step:595/1705 train_time:55977ms step_avg:94.08ms +step:596/1705 train_time:56072ms step_avg:94.08ms +step:597/1705 train_time:56167ms step_avg:94.08ms +step:598/1705 train_time:56261ms step_avg:94.08ms +step:599/1705 train_time:56356ms step_avg:94.08ms +step:600/1705 train_time:56451ms step_avg:94.08ms +step:601/1705 train_time:56545ms step_avg:94.09ms +step:602/1705 train_time:56640ms step_avg:94.09ms +step:603/1705 train_time:56735ms step_avg:94.09ms +step:604/1705 train_time:56830ms step_avg:94.09ms +step:605/1705 train_time:56924ms step_avg:94.09ms +step:606/1705 train_time:57018ms step_avg:94.09ms +step:607/1705 train_time:57113ms step_avg:94.09ms +step:608/1705 train_time:57206ms step_avg:94.09ms +step:609/1705 train_time:57300ms step_avg:94.09ms +step:610/1705 train_time:57396ms step_avg:94.09ms +step:611/1705 train_time:57490ms step_avg:94.09ms +step:612/1705 train_time:57584ms step_avg:94.09ms +step:613/1705 train_time:57679ms step_avg:94.09ms +step:614/1705 train_time:57773ms step_avg:94.09ms +step:615/1705 train_time:57868ms step_avg:94.09ms +step:616/1705 train_time:57962ms step_avg:94.09ms +step:617/1705 train_time:58057ms step_avg:94.10ms +step:618/1705 train_time:58151ms step_avg:94.10ms +step:619/1705 train_time:58245ms step_avg:94.10ms +step:620/1705 train_time:58340ms step_avg:94.10ms +step:621/1705 train_time:58435ms step_avg:94.10ms +step:622/1705 train_time:58529ms step_avg:94.10ms +step:623/1705 train_time:58623ms step_avg:94.10ms +step:624/1705 train_time:58718ms step_avg:94.10ms +step:625/1705 train_time:58812ms step_avg:94.10ms +step:625/1705 val_loss:3.6168 train_time:58906ms step_avg:94.25ms +step:626/1705 train_time:58929ms step_avg:94.14ms +step:627/1705 train_time:59011ms step_avg:94.12ms +step:628/1705 train_time:59110ms step_avg:94.12ms 
+step:629/1705 train_time:59205ms step_avg:94.13ms +step:630/1705 train_time:59298ms step_avg:94.12ms +step:631/1705 train_time:59392ms step_avg:94.12ms +step:632/1705 train_time:59485ms step_avg:94.12ms +step:633/1705 train_time:59579ms step_avg:94.12ms +step:634/1705 train_time:59671ms step_avg:94.12ms +step:635/1705 train_time:59764ms step_avg:94.12ms +step:636/1705 train_time:59857ms step_avg:94.12ms +step:637/1705 train_time:59956ms step_avg:94.12ms +step:638/1705 train_time:60055ms step_avg:94.13ms +step:639/1705 train_time:60428ms step_avg:94.57ms +step:640/1705 train_time:60507ms step_avg:94.54ms +step:641/1705 train_time:60600ms step_avg:94.54ms +step:642/1705 train_time:60694ms step_avg:94.54ms +step:643/1705 train_time:60787ms step_avg:94.54ms +step:644/1705 train_time:60881ms step_avg:94.54ms +step:645/1705 train_time:60974ms step_avg:94.53ms +step:646/1705 train_time:61067ms step_avg:94.53ms +step:647/1705 train_time:61160ms step_avg:94.53ms +step:648/1705 train_time:61254ms step_avg:94.53ms +step:649/1705 train_time:61351ms step_avg:94.53ms +step:650/1705 train_time:61448ms step_avg:94.54ms +step:651/1705 train_time:61543ms step_avg:94.54ms +step:652/1705 train_time:61638ms step_avg:94.54ms +step:653/1705 train_time:61733ms step_avg:94.54ms +step:654/1705 train_time:61827ms step_avg:94.54ms +step:655/1705 train_time:61921ms step_avg:94.54ms +step:656/1705 train_time:62014ms step_avg:94.53ms +step:657/1705 train_time:62107ms step_avg:94.53ms +step:658/1705 train_time:62201ms step_avg:94.53ms +step:659/1705 train_time:62296ms step_avg:94.53ms +step:660/1705 train_time:62392ms step_avg:94.53ms +step:661/1705 train_time:62487ms step_avg:94.53ms +step:662/1705 train_time:62581ms step_avg:94.53ms +step:663/1705 train_time:62676ms step_avg:94.53ms +step:664/1705 train_time:62771ms step_avg:94.53ms +step:665/1705 train_time:62865ms step_avg:94.53ms +step:666/1705 train_time:62959ms step_avg:94.53ms +step:667/1705 train_time:63053ms step_avg:94.53ms +step:668/1705 train_time:63146ms step_avg:94.53ms +step:669/1705 train_time:63240ms step_avg:94.53ms +step:670/1705 train_time:63336ms step_avg:94.53ms +step:671/1705 train_time:63431ms step_avg:94.53ms +step:672/1705 train_time:63526ms step_avg:94.53ms +step:673/1705 train_time:63621ms step_avg:94.53ms +step:674/1705 train_time:63715ms step_avg:94.53ms +step:675/1705 train_time:63810ms step_avg:94.53ms +step:676/1705 train_time:63904ms step_avg:94.53ms +step:677/1705 train_time:63997ms step_avg:94.53ms +step:678/1705 train_time:64091ms step_avg:94.53ms +step:679/1705 train_time:64185ms step_avg:94.53ms +step:680/1705 train_time:64279ms step_avg:94.53ms +step:681/1705 train_time:64374ms step_avg:94.53ms +step:682/1705 train_time:64469ms step_avg:94.53ms +step:683/1705 train_time:64563ms step_avg:94.53ms +step:684/1705 train_time:64658ms step_avg:94.53ms +step:685/1705 train_time:64753ms step_avg:94.53ms +step:686/1705 train_time:64847ms step_avg:94.53ms +step:687/1705 train_time:64941ms step_avg:94.53ms +step:688/1705 train_time:65035ms step_avg:94.53ms +step:689/1705 train_time:65130ms step_avg:94.53ms +step:690/1705 train_time:65224ms step_avg:94.53ms +step:691/1705 train_time:65319ms step_avg:94.53ms +step:692/1705 train_time:65413ms step_avg:94.53ms +step:693/1705 train_time:65508ms step_avg:94.53ms +step:694/1705 train_time:65603ms step_avg:94.53ms +step:695/1705 train_time:65698ms step_avg:94.53ms +step:696/1705 train_time:65793ms step_avg:94.53ms +step:697/1705 train_time:65886ms step_avg:94.53ms +step:698/1705 train_time:65980ms 
step_avg:94.53ms +step:699/1705 train_time:66075ms step_avg:94.53ms +step:700/1705 train_time:66170ms step_avg:94.53ms +step:701/1705 train_time:66263ms step_avg:94.53ms +step:702/1705 train_time:66358ms step_avg:94.53ms +step:703/1705 train_time:66454ms step_avg:94.53ms +step:704/1705 train_time:66548ms step_avg:94.53ms +step:705/1705 train_time:66642ms step_avg:94.53ms +step:706/1705 train_time:66736ms step_avg:94.53ms +step:707/1705 train_time:66831ms step_avg:94.53ms +step:708/1705 train_time:66925ms step_avg:94.53ms +step:709/1705 train_time:67019ms step_avg:94.53ms +step:710/1705 train_time:67113ms step_avg:94.53ms +step:711/1705 train_time:67207ms step_avg:94.52ms +step:712/1705 train_time:67301ms step_avg:94.52ms +step:713/1705 train_time:67397ms step_avg:94.53ms +step:714/1705 train_time:67491ms step_avg:94.53ms +step:715/1705 train_time:67586ms step_avg:94.53ms +step:716/1705 train_time:67680ms step_avg:94.52ms +step:717/1705 train_time:67774ms step_avg:94.52ms +step:718/1705 train_time:67868ms step_avg:94.52ms +step:719/1705 train_time:67962ms step_avg:94.52ms +step:720/1705 train_time:68057ms step_avg:94.52ms +step:721/1705 train_time:68152ms step_avg:94.52ms +step:722/1705 train_time:68246ms step_avg:94.52ms +step:723/1705 train_time:68340ms step_avg:94.52ms +step:724/1705 train_time:68435ms step_avg:94.52ms +step:725/1705 train_time:68529ms step_avg:94.52ms +step:726/1705 train_time:68624ms step_avg:94.52ms +step:727/1705 train_time:68719ms step_avg:94.52ms +step:728/1705 train_time:68813ms step_avg:94.52ms +step:729/1705 train_time:68908ms step_avg:94.52ms +step:730/1705 train_time:69002ms step_avg:94.52ms +step:731/1705 train_time:69096ms step_avg:94.52ms +step:732/1705 train_time:69192ms step_avg:94.52ms +step:733/1705 train_time:69286ms step_avg:94.52ms +step:734/1705 train_time:69380ms step_avg:94.52ms +step:735/1705 train_time:69475ms step_avg:94.52ms +step:736/1705 train_time:69570ms step_avg:94.52ms +step:737/1705 train_time:69664ms step_avg:94.52ms +step:738/1705 train_time:69759ms step_avg:94.52ms +step:739/1705 train_time:69854ms step_avg:94.52ms +step:740/1705 train_time:69948ms step_avg:94.52ms +step:741/1705 train_time:70043ms step_avg:94.52ms +step:742/1705 train_time:70138ms step_avg:94.53ms +step:743/1705 train_time:70233ms step_avg:94.53ms +step:744/1705 train_time:70327ms step_avg:94.53ms +step:745/1705 train_time:70422ms step_avg:94.53ms +step:746/1705 train_time:70517ms step_avg:94.53ms +step:747/1705 train_time:70612ms step_avg:94.53ms +step:748/1705 train_time:70706ms step_avg:94.53ms +step:749/1705 train_time:70800ms step_avg:94.53ms +step:750/1705 train_time:70895ms step_avg:94.53ms +step:750/1705 val_loss:3.5649 train_time:70990ms step_avg:94.65ms +step:751/1705 train_time:71013ms step_avg:94.56ms +step:752/1705 train_time:71089ms step_avg:94.53ms +step:753/1705 train_time:71189ms step_avg:94.54ms +step:754/1705 train_time:71285ms step_avg:94.54ms +step:755/1705 train_time:71378ms step_avg:94.54ms +step:756/1705 train_time:71472ms step_avg:94.54ms +step:757/1705 train_time:71566ms step_avg:94.54ms +step:758/1705 train_time:71659ms step_avg:94.54ms +step:759/1705 train_time:71753ms step_avg:94.54ms +step:760/1705 train_time:71846ms step_avg:94.53ms +step:761/1705 train_time:71940ms step_avg:94.53ms +step:762/1705 train_time:72036ms step_avg:94.54ms +step:763/1705 train_time:72134ms step_avg:94.54ms +step:764/1705 train_time:72230ms step_avg:94.54ms +step:765/1705 train_time:72324ms step_avg:94.54ms +step:766/1705 train_time:72419ms step_avg:94.54ms 
+step:767/1705 train_time:72513ms step_avg:94.54ms +step:768/1705 train_time:72606ms step_avg:94.54ms +step:769/1705 train_time:72700ms step_avg:94.54ms +step:770/1705 train_time:72794ms step_avg:94.54ms +step:771/1705 train_time:72888ms step_avg:94.54ms +step:772/1705 train_time:72982ms step_avg:94.54ms +step:773/1705 train_time:73080ms step_avg:94.54ms +step:774/1705 train_time:73177ms step_avg:94.54ms +step:775/1705 train_time:73273ms step_avg:94.55ms +step:776/1705 train_time:73367ms step_avg:94.54ms +step:777/1705 train_time:73461ms step_avg:94.54ms +step:778/1705 train_time:73555ms step_avg:94.54ms +step:779/1705 train_time:73648ms step_avg:94.54ms +step:780/1705 train_time:73742ms step_avg:94.54ms +step:781/1705 train_time:73837ms step_avg:94.54ms +step:782/1705 train_time:73931ms step_avg:94.54ms +step:783/1705 train_time:74025ms step_avg:94.54ms +step:784/1705 train_time:74122ms step_avg:94.54ms +step:785/1705 train_time:74217ms step_avg:94.54ms +step:786/1705 train_time:74312ms step_avg:94.54ms +step:787/1705 train_time:74406ms step_avg:94.54ms +step:788/1705 train_time:74500ms step_avg:94.54ms +step:789/1705 train_time:74595ms step_avg:94.54ms +step:790/1705 train_time:74688ms step_avg:94.54ms +step:791/1705 train_time:74783ms step_avg:94.54ms +step:792/1705 train_time:74878ms step_avg:94.54ms +step:793/1705 train_time:74973ms step_avg:94.54ms +step:794/1705 train_time:75068ms step_avg:94.54ms +step:795/1705 train_time:75163ms step_avg:94.54ms +step:796/1705 train_time:75258ms step_avg:94.55ms +step:797/1705 train_time:75352ms step_avg:94.54ms +step:798/1705 train_time:75446ms step_avg:94.54ms +step:799/1705 train_time:75540ms step_avg:94.54ms +step:800/1705 train_time:75636ms step_avg:94.54ms +step:801/1705 train_time:75730ms step_avg:94.54ms +step:802/1705 train_time:75824ms step_avg:94.54ms +step:803/1705 train_time:75919ms step_avg:94.54ms +step:804/1705 train_time:76014ms step_avg:94.54ms +step:805/1705 train_time:76108ms step_avg:94.54ms +step:806/1705 train_time:76204ms step_avg:94.55ms +step:807/1705 train_time:76299ms step_avg:94.55ms +step:808/1705 train_time:76393ms step_avg:94.55ms +step:809/1705 train_time:76487ms step_avg:94.55ms +step:810/1705 train_time:76582ms step_avg:94.55ms +step:811/1705 train_time:76676ms step_avg:94.55ms +step:812/1705 train_time:76770ms step_avg:94.54ms +step:813/1705 train_time:76865ms step_avg:94.54ms +step:814/1705 train_time:76960ms step_avg:94.55ms +step:815/1705 train_time:77055ms step_avg:94.55ms +step:816/1705 train_time:77149ms step_avg:94.55ms +step:817/1705 train_time:77244ms step_avg:94.55ms +step:818/1705 train_time:77338ms step_avg:94.55ms +step:819/1705 train_time:77432ms step_avg:94.55ms +step:820/1705 train_time:77526ms step_avg:94.54ms +step:821/1705 train_time:77621ms step_avg:94.54ms +step:822/1705 train_time:77715ms step_avg:94.54ms +step:823/1705 train_time:77809ms step_avg:94.54ms +step:824/1705 train_time:77903ms step_avg:94.54ms +step:825/1705 train_time:77997ms step_avg:94.54ms +step:826/1705 train_time:78093ms step_avg:94.54ms +step:827/1705 train_time:78186ms step_avg:94.54ms +step:828/1705 train_time:78281ms step_avg:94.54ms +step:829/1705 train_time:78375ms step_avg:94.54ms +step:830/1705 train_time:78470ms step_avg:94.54ms +step:831/1705 train_time:78565ms step_avg:94.54ms +step:832/1705 train_time:78659ms step_avg:94.54ms +step:833/1705 train_time:78753ms step_avg:94.54ms +step:834/1705 train_time:78847ms step_avg:94.54ms +step:835/1705 train_time:78941ms step_avg:94.54ms +step:836/1705 train_time:79037ms 
step_avg:94.54ms +step:837/1705 train_time:79131ms step_avg:94.54ms +step:838/1705 train_time:79224ms step_avg:94.54ms +step:839/1705 train_time:79319ms step_avg:94.54ms +step:840/1705 train_time:79413ms step_avg:94.54ms +step:841/1705 train_time:79507ms step_avg:94.54ms +step:842/1705 train_time:79601ms step_avg:94.54ms +step:843/1705 train_time:79697ms step_avg:94.54ms +step:844/1705 train_time:79791ms step_avg:94.54ms +step:845/1705 train_time:79885ms step_avg:94.54ms +step:846/1705 train_time:79980ms step_avg:94.54ms +step:847/1705 train_time:80075ms step_avg:94.54ms +step:848/1705 train_time:80169ms step_avg:94.54ms +step:849/1705 train_time:80263ms step_avg:94.54ms +step:850/1705 train_time:80359ms step_avg:94.54ms +step:851/1705 train_time:80626ms step_avg:94.74ms +step:852/1705 train_time:80792ms step_avg:94.83ms +step:853/1705 train_time:80886ms step_avg:94.82ms +step:854/1705 train_time:80979ms step_avg:94.82ms +step:855/1705 train_time:81073ms step_avg:94.82ms +step:856/1705 train_time:81165ms step_avg:94.82ms +step:857/1705 train_time:81259ms step_avg:94.82ms +step:858/1705 train_time:81353ms step_avg:94.82ms +step:859/1705 train_time:81446ms step_avg:94.82ms +step:860/1705 train_time:81540ms step_avg:94.81ms +step:861/1705 train_time:81636ms step_avg:94.82ms +step:862/1705 train_time:81736ms step_avg:94.82ms +step:863/1705 train_time:81834ms step_avg:94.83ms +step:864/1705 train_time:81928ms step_avg:94.82ms +step:865/1705 train_time:82023ms step_avg:94.82ms +step:866/1705 train_time:82116ms step_avg:94.82ms +step:867/1705 train_time:82210ms step_avg:94.82ms +step:868/1705 train_time:82303ms step_avg:94.82ms +step:869/1705 train_time:82397ms step_avg:94.82ms +step:870/1705 train_time:82490ms step_avg:94.82ms +step:871/1705 train_time:82585ms step_avg:94.82ms +step:872/1705 train_time:82681ms step_avg:94.82ms +step:873/1705 train_time:82780ms step_avg:94.82ms +step:874/1705 train_time:82877ms step_avg:94.82ms +step:875/1705 train_time:82971ms step_avg:94.82ms +step:875/1705 val_loss:3.5231 train_time:83065ms step_avg:94.93ms +step:876/1705 train_time:83088ms step_avg:94.85ms +step:877/1705 train_time:83167ms step_avg:94.83ms +step:878/1705 train_time:83265ms step_avg:94.83ms +step:879/1705 train_time:83360ms step_avg:94.84ms +step:880/1705 train_time:83454ms step_avg:94.83ms +step:881/1705 train_time:83547ms step_avg:94.83ms +step:882/1705 train_time:83640ms step_avg:94.83ms +step:883/1705 train_time:83734ms step_avg:94.83ms +step:884/1705 train_time:83828ms step_avg:94.83ms +step:885/1705 train_time:83921ms step_avg:94.83ms +step:886/1705 train_time:84016ms step_avg:94.83ms +step:887/1705 train_time:84113ms step_avg:94.83ms +step:888/1705 train_time:84210ms step_avg:94.83ms +step:889/1705 train_time:84306ms step_avg:94.83ms +step:890/1705 train_time:84401ms step_avg:94.83ms +step:891/1705 train_time:84495ms step_avg:94.83ms +step:892/1705 train_time:84588ms step_avg:94.83ms +step:893/1705 train_time:84681ms step_avg:94.83ms +step:894/1705 train_time:84775ms step_avg:94.83ms +step:895/1705 train_time:84869ms step_avg:94.83ms +step:896/1705 train_time:84962ms step_avg:94.82ms +step:897/1705 train_time:85058ms step_avg:94.82ms +step:898/1705 train_time:85154ms step_avg:94.83ms +step:899/1705 train_time:85249ms step_avg:94.83ms +step:900/1705 train_time:85343ms step_avg:94.83ms +step:901/1705 train_time:85439ms step_avg:94.83ms +step:902/1705 train_time:85533ms step_avg:94.83ms +step:903/1705 train_time:85627ms step_avg:94.83ms +step:904/1705 train_time:85721ms step_avg:94.82ms 
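(The cumulative train_time column makes one-off stalls easy to spot: step:851 above costs 80626 - 80359 = 267 ms against the usual ~94 ms per step, which is what nudges step_avg from 94.54 to 94.74 ms. A sketch for recovering per-step times; the helper name is an assumption, not from the record:)

def marginal_times(cum):
    # cum: list of (step, cumulative_train_time_ms) pairs as printed in the log
    return [(s2, t2 - t1) for (_, t1), (s2, t2) in zip(cum, cum[1:])]

print(marginal_times([(850, 80359), (851, 80626), (852, 80792)]))
# -> [(851, 267), (852, 166)]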
+step:905/1705 train_time:85815ms step_avg:94.82ms +step:906/1705 train_time:85908ms step_avg:94.82ms +step:907/1705 train_time:86002ms step_avg:94.82ms +step:908/1705 train_time:86098ms step_avg:94.82ms +step:909/1705 train_time:86194ms step_avg:94.82ms +step:910/1705 train_time:86289ms step_avg:94.82ms +step:911/1705 train_time:86384ms step_avg:94.82ms +step:912/1705 train_time:86478ms step_avg:94.82ms +step:913/1705 train_time:86573ms step_avg:94.82ms +step:914/1705 train_time:86667ms step_avg:94.82ms +step:915/1705 train_time:86761ms step_avg:94.82ms +step:916/1705 train_time:86855ms step_avg:94.82ms +step:917/1705 train_time:86949ms step_avg:94.82ms +step:918/1705 train_time:87043ms step_avg:94.82ms +step:919/1705 train_time:87138ms step_avg:94.82ms +step:920/1705 train_time:87234ms step_avg:94.82ms +step:921/1705 train_time:87329ms step_avg:94.82ms +step:922/1705 train_time:87423ms step_avg:94.82ms +step:923/1705 train_time:87518ms step_avg:94.82ms +step:924/1705 train_time:87613ms step_avg:94.82ms +step:925/1705 train_time:87708ms step_avg:94.82ms +step:926/1705 train_time:87801ms step_avg:94.82ms +step:927/1705 train_time:87896ms step_avg:94.82ms +step:928/1705 train_time:87990ms step_avg:94.82ms +step:929/1705 train_time:88084ms step_avg:94.82ms +step:930/1705 train_time:88179ms step_avg:94.82ms +step:931/1705 train_time:88275ms step_avg:94.82ms +step:932/1705 train_time:88370ms step_avg:94.82ms +step:933/1705 train_time:88463ms step_avg:94.82ms +step:934/1705 train_time:88558ms step_avg:94.82ms +step:935/1705 train_time:88653ms step_avg:94.82ms +step:936/1705 train_time:88748ms step_avg:94.82ms +step:937/1705 train_time:88842ms step_avg:94.81ms +step:938/1705 train_time:88936ms step_avg:94.81ms +step:939/1705 train_time:89030ms step_avg:94.81ms +step:940/1705 train_time:89125ms step_avg:94.81ms +step:941/1705 train_time:89219ms step_avg:94.81ms +step:942/1705 train_time:89315ms step_avg:94.81ms +step:943/1705 train_time:89410ms step_avg:94.81ms +step:944/1705 train_time:89504ms step_avg:94.81ms +step:945/1705 train_time:89599ms step_avg:94.81ms +step:946/1705 train_time:89694ms step_avg:94.81ms +step:947/1705 train_time:89788ms step_avg:94.81ms +step:948/1705 train_time:89881ms step_avg:94.81ms +step:949/1705 train_time:89976ms step_avg:94.81ms +step:950/1705 train_time:90070ms step_avg:94.81ms +step:951/1705 train_time:90164ms step_avg:94.81ms +step:952/1705 train_time:90260ms step_avg:94.81ms +step:953/1705 train_time:90354ms step_avg:94.81ms +step:954/1705 train_time:90449ms step_avg:94.81ms +step:955/1705 train_time:90544ms step_avg:94.81ms +step:956/1705 train_time:90639ms step_avg:94.81ms +step:957/1705 train_time:90734ms step_avg:94.81ms +step:958/1705 train_time:90829ms step_avg:94.81ms +step:959/1705 train_time:90922ms step_avg:94.81ms +step:960/1705 train_time:91016ms step_avg:94.81ms +step:961/1705 train_time:91112ms step_avg:94.81ms +step:962/1705 train_time:91206ms step_avg:94.81ms +step:963/1705 train_time:91300ms step_avg:94.81ms +step:964/1705 train_time:91395ms step_avg:94.81ms +step:965/1705 train_time:91490ms step_avg:94.81ms +step:966/1705 train_time:91584ms step_avg:94.81ms +step:967/1705 train_time:91679ms step_avg:94.81ms +step:968/1705 train_time:91773ms step_avg:94.81ms +step:969/1705 train_time:91868ms step_avg:94.81ms +step:970/1705 train_time:91961ms step_avg:94.81ms +step:971/1705 train_time:92055ms step_avg:94.80ms +step:972/1705 train_time:92150ms step_avg:94.80ms +step:973/1705 train_time:92244ms step_avg:94.80ms +step:974/1705 train_time:92339ms 
step_avg:94.80ms +step:975/1705 train_time:92434ms step_avg:94.80ms +step:976/1705 train_time:92528ms step_avg:94.80ms +step:977/1705 train_time:92622ms step_avg:94.80ms +step:978/1705 train_time:92717ms step_avg:94.80ms +step:979/1705 train_time:92812ms step_avg:94.80ms +step:980/1705 train_time:92908ms step_avg:94.80ms +step:981/1705 train_time:93002ms step_avg:94.80ms +step:982/1705 train_time:93096ms step_avg:94.80ms +step:983/1705 train_time:93192ms step_avg:94.80ms +step:984/1705 train_time:93286ms step_avg:94.80ms +step:985/1705 train_time:93380ms step_avg:94.80ms +step:986/1705 train_time:93475ms step_avg:94.80ms +step:987/1705 train_time:93569ms step_avg:94.80ms +step:988/1705 train_time:93663ms step_avg:94.80ms +step:989/1705 train_time:93758ms step_avg:94.80ms +step:990/1705 train_time:93853ms step_avg:94.80ms +step:991/1705 train_time:93948ms step_avg:94.80ms +step:992/1705 train_time:94042ms step_avg:94.80ms +step:993/1705 train_time:94137ms step_avg:94.80ms +step:994/1705 train_time:94231ms step_avg:94.80ms +step:995/1705 train_time:94325ms step_avg:94.80ms +step:996/1705 train_time:94419ms step_avg:94.80ms +step:997/1705 train_time:94515ms step_avg:94.80ms +step:998/1705 train_time:94610ms step_avg:94.80ms +step:999/1705 train_time:94704ms step_avg:94.80ms +step:1000/1705 train_time:94798ms step_avg:94.80ms +step:1000/1705 val_loss:3.4852 train_time:94893ms step_avg:94.89ms +step:1001/1705 train_time:94916ms step_avg:94.82ms +step:1002/1705 train_time:94993ms step_avg:94.80ms +step:1003/1705 train_time:95089ms step_avg:94.80ms +step:1004/1705 train_time:95184ms step_avg:94.80ms +step:1005/1705 train_time:95277ms step_avg:94.80ms +step:1006/1705 train_time:95371ms step_avg:94.80ms +step:1007/1705 train_time:95464ms step_avg:94.80ms +step:1008/1705 train_time:95557ms step_avg:94.80ms +step:1009/1705 train_time:95651ms step_avg:94.80ms +step:1010/1705 train_time:95745ms step_avg:94.80ms +step:1011/1705 train_time:95840ms step_avg:94.80ms +step:1012/1705 train_time:95936ms step_avg:94.80ms +step:1013/1705 train_time:96034ms step_avg:94.80ms +step:1014/1705 train_time:96130ms step_avg:94.80ms +step:1015/1705 train_time:96224ms step_avg:94.80ms +step:1016/1705 train_time:96318ms step_avg:94.80ms +step:1017/1705 train_time:96412ms step_avg:94.80ms +step:1018/1705 train_time:96505ms step_avg:94.80ms +step:1019/1705 train_time:96599ms step_avg:94.80ms +step:1020/1705 train_time:96693ms step_avg:94.80ms +step:1021/1705 train_time:96788ms step_avg:94.80ms +step:1022/1705 train_time:96882ms step_avg:94.80ms +step:1023/1705 train_time:96979ms step_avg:94.80ms +step:1024/1705 train_time:97076ms step_avg:94.80ms +step:1025/1705 train_time:97170ms step_avg:94.80ms +step:1026/1705 train_time:97264ms step_avg:94.80ms +step:1027/1705 train_time:97358ms step_avg:94.80ms +step:1028/1705 train_time:97454ms step_avg:94.80ms +step:1029/1705 train_time:97547ms step_avg:94.80ms +step:1030/1705 train_time:97640ms step_avg:94.80ms +step:1031/1705 train_time:97735ms step_avg:94.80ms +step:1032/1705 train_time:97829ms step_avg:94.80ms +step:1033/1705 train_time:97923ms step_avg:94.79ms +step:1034/1705 train_time:98019ms step_avg:94.80ms +step:1035/1705 train_time:98114ms step_avg:94.80ms +step:1036/1705 train_time:98209ms step_avg:94.80ms +step:1037/1705 train_time:98303ms step_avg:94.80ms +step:1038/1705 train_time:98398ms step_avg:94.80ms +step:1039/1705 train_time:98492ms step_avg:94.80ms +step:1040/1705 train_time:98587ms step_avg:94.80ms +step:1041/1705 train_time:98681ms step_avg:94.79ms 
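(Consistency check on the columns above, using values copied from the log: step_avg is simply cumulative train_time divided by the step index. For the step-1000 validation row, 94893 ms / 1000 = 94.89 ms, matching the printed step_avg:94.89ms; the step right after a checkpoint advances train_time by only a few ms — 94916 - 94893 = 23 ms at step 1001 — so step_avg dips briefly before settling back.)

assert round(94893 / 1000, 2) == 94.89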
+step:1042/1705 train_time:98776ms step_avg:94.79ms +step:1043/1705 train_time:98870ms step_avg:94.79ms +step:1044/1705 train_time:98965ms step_avg:94.79ms +step:1045/1705 train_time:99060ms step_avg:94.79ms +step:1046/1705 train_time:99156ms step_avg:94.80ms +step:1047/1705 train_time:99250ms step_avg:94.79ms +step:1048/1705 train_time:99344ms step_avg:94.79ms +step:1049/1705 train_time:99439ms step_avg:94.79ms +step:1050/1705 train_time:99533ms step_avg:94.79ms +step:1051/1705 train_time:99627ms step_avg:94.79ms +step:1052/1705 train_time:99722ms step_avg:94.79ms +step:1053/1705 train_time:99816ms step_avg:94.79ms +step:1054/1705 train_time:99911ms step_avg:94.79ms +step:1055/1705 train_time:100005ms step_avg:94.79ms +step:1056/1705 train_time:100100ms step_avg:94.79ms +step:1057/1705 train_time:100195ms step_avg:94.79ms +step:1058/1705 train_time:100290ms step_avg:94.79ms +step:1059/1705 train_time:100384ms step_avg:94.79ms +step:1060/1705 train_time:100479ms step_avg:94.79ms +step:1061/1705 train_time:100574ms step_avg:94.79ms +step:1062/1705 train_time:100835ms step_avg:94.95ms +step:1063/1705 train_time:100938ms step_avg:94.96ms +step:1064/1705 train_time:101031ms step_avg:94.95ms +step:1065/1705 train_time:101124ms step_avg:94.95ms +step:1066/1705 train_time:101217ms step_avg:94.95ms +step:1067/1705 train_time:101311ms step_avg:94.95ms +step:1068/1705 train_time:101404ms step_avg:94.95ms +step:1069/1705 train_time:101498ms step_avg:94.95ms +step:1070/1705 train_time:101591ms step_avg:94.95ms +step:1071/1705 train_time:101685ms step_avg:94.94ms +step:1072/1705 train_time:101780ms step_avg:94.94ms +step:1073/1705 train_time:101879ms step_avg:94.95ms +step:1074/1705 train_time:101976ms step_avg:94.95ms +step:1075/1705 train_time:102072ms step_avg:94.95ms +step:1076/1705 train_time:102165ms step_avg:94.95ms +step:1077/1705 train_time:102259ms step_avg:94.95ms +step:1078/1705 train_time:102353ms step_avg:94.95ms +step:1079/1705 train_time:102447ms step_avg:94.95ms +step:1080/1705 train_time:102541ms step_avg:94.95ms +step:1081/1705 train_time:102634ms step_avg:94.94ms +step:1082/1705 train_time:102729ms step_avg:94.94ms +step:1083/1705 train_time:102824ms step_avg:94.94ms +step:1084/1705 train_time:102921ms step_avg:94.95ms +step:1085/1705 train_time:103018ms step_avg:94.95ms +step:1086/1705 train_time:103113ms step_avg:94.95ms +step:1087/1705 train_time:103207ms step_avg:94.95ms +step:1088/1705 train_time:103302ms step_avg:94.95ms +step:1089/1705 train_time:103396ms step_avg:94.95ms +step:1090/1705 train_time:103491ms step_avg:94.95ms +step:1091/1705 train_time:103585ms step_avg:94.94ms +step:1092/1705 train_time:103679ms step_avg:94.94ms +step:1093/1705 train_time:103774ms step_avg:94.94ms +step:1094/1705 train_time:103869ms step_avg:94.94ms +step:1095/1705 train_time:103964ms step_avg:94.94ms +step:1096/1705 train_time:104059ms step_avg:94.94ms +step:1097/1705 train_time:104155ms step_avg:94.95ms +step:1098/1705 train_time:104249ms step_avg:94.94ms +step:1099/1705 train_time:104343ms step_avg:94.94ms +step:1100/1705 train_time:104437ms step_avg:94.94ms +step:1101/1705 train_time:104531ms step_avg:94.94ms +step:1102/1705 train_time:104625ms step_avg:94.94ms +step:1103/1705 train_time:104719ms step_avg:94.94ms +step:1104/1705 train_time:104814ms step_avg:94.94ms +step:1105/1705 train_time:104909ms step_avg:94.94ms +step:1106/1705 train_time:105004ms step_avg:94.94ms +step:1107/1705 train_time:105098ms step_avg:94.94ms +step:1108/1705 train_time:105193ms step_avg:94.94ms +step:1109/1705 
train_time:105287ms step_avg:94.94ms +step:1110/1705 train_time:105382ms step_avg:94.94ms +step:1111/1705 train_time:105477ms step_avg:94.94ms +step:1112/1705 train_time:105571ms step_avg:94.94ms +step:1113/1705 train_time:105665ms step_avg:94.94ms +step:1114/1705 train_time:105760ms step_avg:94.94ms +step:1115/1705 train_time:105855ms step_avg:94.94ms +step:1116/1705 train_time:105949ms step_avg:94.94ms +step:1117/1705 train_time:106044ms step_avg:94.94ms +step:1118/1705 train_time:106139ms step_avg:94.94ms +step:1119/1705 train_time:106234ms step_avg:94.94ms +step:1120/1705 train_time:106328ms step_avg:94.94ms +step:1121/1705 train_time:106422ms step_avg:94.93ms +step:1122/1705 train_time:106517ms step_avg:94.94ms +step:1123/1705 train_time:106612ms step_avg:94.93ms +step:1124/1705 train_time:106705ms step_avg:94.93ms +step:1125/1705 train_time:106800ms step_avg:94.93ms +step:1125/1705 val_loss:3.4369 train_time:106895ms step_avg:95.02ms +step:1126/1705 train_time:106918ms step_avg:94.95ms +step:1127/1705 train_time:106995ms step_avg:94.94ms +step:1128/1705 train_time:107092ms step_avg:94.94ms +step:1129/1705 train_time:107187ms step_avg:94.94ms +step:1130/1705 train_time:107281ms step_avg:94.94ms +step:1131/1705 train_time:107374ms step_avg:94.94ms +step:1132/1705 train_time:107468ms step_avg:94.94ms +step:1133/1705 train_time:107562ms step_avg:94.94ms +step:1134/1705 train_time:107656ms step_avg:94.94ms +step:1135/1705 train_time:107750ms step_avg:94.93ms +step:1136/1705 train_time:107844ms step_avg:94.93ms +step:1137/1705 train_time:107940ms step_avg:94.93ms +step:1138/1705 train_time:108038ms step_avg:94.94ms +step:1139/1705 train_time:108134ms step_avg:94.94ms +step:1140/1705 train_time:108229ms step_avg:94.94ms +step:1141/1705 train_time:108324ms step_avg:94.94ms +step:1142/1705 train_time:108419ms step_avg:94.94ms +step:1143/1705 train_time:108513ms step_avg:94.94ms +step:1144/1705 train_time:108607ms step_avg:94.94ms +step:1145/1705 train_time:108701ms step_avg:94.94ms +step:1146/1705 train_time:108796ms step_avg:94.94ms +step:1147/1705 train_time:108891ms step_avg:94.94ms +step:1148/1705 train_time:108988ms step_avg:94.94ms +step:1149/1705 train_time:109084ms step_avg:94.94ms +step:1150/1705 train_time:109180ms step_avg:94.94ms +step:1151/1705 train_time:109277ms step_avg:94.94ms +step:1152/1705 train_time:109372ms step_avg:94.94ms +step:1153/1705 train_time:109466ms step_avg:94.94ms +step:1154/1705 train_time:109561ms step_avg:94.94ms +step:1155/1705 train_time:109656ms step_avg:94.94ms +step:1156/1705 train_time:109750ms step_avg:94.94ms +step:1157/1705 train_time:109845ms step_avg:94.94ms +step:1158/1705 train_time:109941ms step_avg:94.94ms +step:1159/1705 train_time:110038ms step_avg:94.94ms +step:1160/1705 train_time:110134ms step_avg:94.94ms +step:1161/1705 train_time:110230ms step_avg:94.94ms +step:1162/1705 train_time:110325ms step_avg:94.94ms +step:1163/1705 train_time:110421ms step_avg:94.95ms +step:1164/1705 train_time:110517ms step_avg:94.95ms +step:1165/1705 train_time:110611ms step_avg:94.95ms +step:1166/1705 train_time:110706ms step_avg:94.94ms +step:1167/1705 train_time:110801ms step_avg:94.95ms +step:1168/1705 train_time:110896ms step_avg:94.95ms +step:1169/1705 train_time:110992ms step_avg:94.95ms +step:1170/1705 train_time:111087ms step_avg:94.95ms +step:1171/1705 train_time:111183ms step_avg:94.95ms +step:1172/1705 train_time:111280ms step_avg:94.95ms +step:1173/1705 train_time:111376ms step_avg:94.95ms +step:1174/1705 train_time:111472ms step_avg:94.95ms 
+step:1175/1705 train_time:111567ms step_avg:94.95ms +step:1176/1705 train_time:111662ms step_avg:94.95ms +step:1177/1705 train_time:111756ms step_avg:94.95ms +step:1178/1705 train_time:111851ms step_avg:94.95ms +step:1179/1705 train_time:111946ms step_avg:94.95ms +step:1180/1705 train_time:112042ms step_avg:94.95ms +step:1181/1705 train_time:112137ms step_avg:94.95ms +step:1182/1705 train_time:112232ms step_avg:94.95ms +step:1183/1705 train_time:112328ms step_avg:94.95ms +step:1184/1705 train_time:112423ms step_avg:94.95ms +step:1185/1705 train_time:112519ms step_avg:94.95ms +step:1186/1705 train_time:112614ms step_avg:94.95ms +step:1187/1705 train_time:112709ms step_avg:94.95ms +step:1188/1705 train_time:112804ms step_avg:94.95ms +step:1189/1705 train_time:112900ms step_avg:94.95ms +step:1190/1705 train_time:112996ms step_avg:94.95ms +step:1191/1705 train_time:113091ms step_avg:94.95ms +step:1192/1705 train_time:113186ms step_avg:94.95ms +step:1193/1705 train_time:113282ms step_avg:94.96ms +step:1194/1705 train_time:113379ms step_avg:94.96ms +step:1195/1705 train_time:113474ms step_avg:94.96ms +step:1196/1705 train_time:113569ms step_avg:94.96ms +step:1197/1705 train_time:113664ms step_avg:94.96ms +step:1198/1705 train_time:113759ms step_avg:94.96ms +step:1199/1705 train_time:113854ms step_avg:94.96ms +step:1200/1705 train_time:113949ms step_avg:94.96ms +step:1201/1705 train_time:114044ms step_avg:94.96ms +step:1202/1705 train_time:114140ms step_avg:94.96ms +step:1203/1705 train_time:114235ms step_avg:94.96ms +step:1204/1705 train_time:114331ms step_avg:94.96ms +step:1205/1705 train_time:114426ms step_avg:94.96ms +step:1206/1705 train_time:114522ms step_avg:94.96ms +step:1207/1705 train_time:114617ms step_avg:94.96ms +step:1208/1705 train_time:114712ms step_avg:94.96ms +step:1209/1705 train_time:114807ms step_avg:94.96ms +step:1210/1705 train_time:114903ms step_avg:94.96ms +step:1211/1705 train_time:114998ms step_avg:94.96ms +step:1212/1705 train_time:115094ms step_avg:94.96ms +step:1213/1705 train_time:115189ms step_avg:94.96ms +step:1214/1705 train_time:115284ms step_avg:94.96ms +step:1215/1705 train_time:115381ms step_avg:94.96ms +step:1216/1705 train_time:115477ms step_avg:94.96ms +step:1217/1705 train_time:115573ms step_avg:94.97ms +step:1218/1705 train_time:115667ms step_avg:94.97ms +step:1219/1705 train_time:115762ms step_avg:94.96ms +step:1220/1705 train_time:115858ms step_avg:94.97ms +step:1221/1705 train_time:115953ms step_avg:94.97ms +step:1222/1705 train_time:116048ms step_avg:94.97ms +step:1223/1705 train_time:116142ms step_avg:94.97ms +step:1224/1705 train_time:116238ms step_avg:94.97ms +step:1225/1705 train_time:116335ms step_avg:94.97ms +step:1226/1705 train_time:116431ms step_avg:94.97ms +step:1227/1705 train_time:116526ms step_avg:94.97ms +step:1228/1705 train_time:116621ms step_avg:94.97ms +step:1229/1705 train_time:116717ms step_avg:94.97ms +step:1230/1705 train_time:116812ms step_avg:94.97ms +step:1231/1705 train_time:116907ms step_avg:94.97ms +step:1232/1705 train_time:117002ms step_avg:94.97ms +step:1233/1705 train_time:117098ms step_avg:94.97ms +step:1234/1705 train_time:117193ms step_avg:94.97ms +step:1235/1705 train_time:117287ms step_avg:94.97ms +step:1236/1705 train_time:117383ms step_avg:94.97ms +step:1237/1705 train_time:117479ms step_avg:94.97ms +step:1238/1705 train_time:117574ms step_avg:94.97ms +step:1239/1705 train_time:117669ms step_avg:94.97ms +step:1240/1705 train_time:117764ms step_avg:94.97ms +step:1241/1705 train_time:117859ms step_avg:94.97ms 
+step:1242/1705 train_time:117954ms step_avg:94.97ms +step:1243/1705 train_time:118049ms step_avg:94.97ms +step:1244/1705 train_time:118145ms step_avg:94.97ms +step:1245/1705 train_time:118240ms step_avg:94.97ms +step:1246/1705 train_time:118335ms step_avg:94.97ms +step:1247/1705 train_time:118431ms step_avg:94.97ms +step:1248/1705 train_time:118526ms step_avg:94.97ms +step:1249/1705 train_time:118621ms step_avg:94.97ms +step:1250/1705 train_time:118717ms step_avg:94.97ms +step:1250/1705 val_loss:3.3885 train_time:118813ms step_avg:95.05ms +step:1251/1705 train_time:118836ms step_avg:94.99ms +step:1252/1705 train_time:118909ms step_avg:94.98ms +step:1253/1705 train_time:119006ms step_avg:94.98ms +step:1254/1705 train_time:119102ms step_avg:94.98ms +step:1255/1705 train_time:119197ms step_avg:94.98ms +step:1256/1705 train_time:119291ms step_avg:94.98ms +step:1257/1705 train_time:119386ms step_avg:94.98ms +step:1258/1705 train_time:119480ms step_avg:94.98ms +step:1259/1705 train_time:119573ms step_avg:94.97ms +step:1260/1705 train_time:119667ms step_avg:94.97ms +step:1261/1705 train_time:119767ms step_avg:94.98ms +step:1262/1705 train_time:119865ms step_avg:94.98ms +step:1263/1705 train_time:119962ms step_avg:94.98ms +step:1264/1705 train_time:120057ms step_avg:94.98ms +step:1265/1705 train_time:120152ms step_avg:94.98ms +step:1266/1705 train_time:120246ms step_avg:94.98ms +step:1267/1705 train_time:120341ms step_avg:94.98ms +step:1268/1705 train_time:120436ms step_avg:94.98ms +step:1269/1705 train_time:120530ms step_avg:94.98ms +step:1270/1705 train_time:120625ms step_avg:94.98ms +step:1271/1705 train_time:120720ms step_avg:94.98ms +step:1272/1705 train_time:120817ms step_avg:94.98ms +step:1273/1705 train_time:120914ms step_avg:94.98ms +step:1274/1705 train_time:121289ms step_avg:95.20ms +step:1275/1705 train_time:121374ms step_avg:95.20ms +step:1276/1705 train_time:121468ms step_avg:95.19ms +step:1277/1705 train_time:121562ms step_avg:95.19ms +step:1278/1705 train_time:121656ms step_avg:95.19ms +step:1279/1705 train_time:121749ms step_avg:95.19ms +step:1280/1705 train_time:121844ms step_avg:95.19ms +step:1281/1705 train_time:121938ms step_avg:95.19ms +step:1282/1705 train_time:122033ms step_avg:95.19ms +step:1283/1705 train_time:122127ms step_avg:95.19ms +step:1284/1705 train_time:122226ms step_avg:95.19ms +step:1285/1705 train_time:122327ms step_avg:95.20ms +step:1286/1705 train_time:122423ms step_avg:95.20ms +step:1287/1705 train_time:122519ms step_avg:95.20ms +step:1288/1705 train_time:122614ms step_avg:95.20ms +step:1289/1705 train_time:122708ms step_avg:95.20ms +step:1290/1705 train_time:122803ms step_avg:95.20ms +step:1291/1705 train_time:122898ms step_avg:95.20ms +step:1292/1705 train_time:122991ms step_avg:95.19ms +step:1293/1705 train_time:123086ms step_avg:95.19ms +step:1294/1705 train_time:123183ms step_avg:95.20ms +step:1295/1705 train_time:123281ms step_avg:95.20ms +step:1296/1705 train_time:123377ms step_avg:95.20ms +step:1297/1705 train_time:123472ms step_avg:95.20ms +step:1298/1705 train_time:123568ms step_avg:95.20ms +step:1299/1705 train_time:123664ms step_avg:95.20ms +step:1300/1705 train_time:123760ms step_avg:95.20ms +step:1301/1705 train_time:123855ms step_avg:95.20ms +step:1302/1705 train_time:123948ms step_avg:95.20ms +step:1303/1705 train_time:124042ms step_avg:95.20ms +step:1304/1705 train_time:124138ms step_avg:95.20ms +step:1305/1705 train_time:124235ms step_avg:95.20ms +step:1306/1705 train_time:124331ms step_avg:95.20ms +step:1307/1705 train_time:124427ms 
step_avg:95.20ms +step:1308/1705 train_time:124523ms step_avg:95.20ms +step:1309/1705 train_time:124618ms step_avg:95.20ms +step:1310/1705 train_time:124713ms step_avg:95.20ms +step:1311/1705 train_time:124809ms step_avg:95.20ms +step:1312/1705 train_time:124904ms step_avg:95.20ms +step:1313/1705 train_time:124998ms step_avg:95.20ms +step:1314/1705 train_time:125093ms step_avg:95.20ms +step:1315/1705 train_time:125187ms step_avg:95.20ms +step:1316/1705 train_time:125285ms step_avg:95.20ms +step:1317/1705 train_time:125382ms step_avg:95.20ms +step:1318/1705 train_time:125478ms step_avg:95.20ms +step:1319/1705 train_time:125572ms step_avg:95.20ms +step:1320/1705 train_time:125668ms step_avg:95.20ms +step:1321/1705 train_time:125763ms step_avg:95.20ms +step:1322/1705 train_time:125859ms step_avg:95.20ms +step:1323/1705 train_time:125953ms step_avg:95.20ms +step:1324/1705 train_time:126048ms step_avg:95.20ms +step:1325/1705 train_time:126143ms step_avg:95.20ms +step:1326/1705 train_time:126238ms step_avg:95.20ms +step:1327/1705 train_time:126333ms step_avg:95.20ms +step:1328/1705 train_time:126429ms step_avg:95.20ms +step:1329/1705 train_time:126525ms step_avg:95.20ms +step:1330/1705 train_time:126621ms step_avg:95.20ms +step:1331/1705 train_time:126716ms step_avg:95.20ms +step:1332/1705 train_time:126811ms step_avg:95.20ms +step:1333/1705 train_time:126906ms step_avg:95.20ms +step:1334/1705 train_time:127001ms step_avg:95.20ms +step:1335/1705 train_time:127097ms step_avg:95.20ms +step:1336/1705 train_time:127192ms step_avg:95.20ms +step:1337/1705 train_time:127288ms step_avg:95.20ms +step:1338/1705 train_time:127383ms step_avg:95.20ms +step:1339/1705 train_time:127480ms step_avg:95.21ms +step:1340/1705 train_time:127576ms step_avg:95.21ms +step:1341/1705 train_time:127671ms step_avg:95.21ms +step:1342/1705 train_time:127766ms step_avg:95.21ms +step:1343/1705 train_time:127861ms step_avg:95.21ms +step:1344/1705 train_time:127955ms step_avg:95.20ms +step:1345/1705 train_time:128050ms step_avg:95.20ms +step:1346/1705 train_time:128146ms step_avg:95.21ms +step:1347/1705 train_time:128242ms step_avg:95.21ms +step:1348/1705 train_time:128337ms step_avg:95.21ms +step:1349/1705 train_time:128433ms step_avg:95.21ms +step:1350/1705 train_time:128528ms step_avg:95.21ms +step:1351/1705 train_time:128625ms step_avg:95.21ms +step:1352/1705 train_time:128720ms step_avg:95.21ms +step:1353/1705 train_time:128816ms step_avg:95.21ms +step:1354/1705 train_time:128911ms step_avg:95.21ms +step:1355/1705 train_time:129005ms step_avg:95.21ms +step:1356/1705 train_time:129101ms step_avg:95.21ms +step:1357/1705 train_time:129196ms step_avg:95.21ms +step:1358/1705 train_time:129291ms step_avg:95.21ms +step:1359/1705 train_time:129387ms step_avg:95.21ms +step:1360/1705 train_time:129483ms step_avg:95.21ms +step:1361/1705 train_time:129577ms step_avg:95.21ms +step:1362/1705 train_time:129672ms step_avg:95.21ms +step:1363/1705 train_time:129768ms step_avg:95.21ms +step:1364/1705 train_time:129863ms step_avg:95.21ms +step:1365/1705 train_time:129958ms step_avg:95.21ms +step:1366/1705 train_time:130053ms step_avg:95.21ms +step:1367/1705 train_time:130149ms step_avg:95.21ms +step:1368/1705 train_time:130244ms step_avg:95.21ms +step:1369/1705 train_time:130341ms step_avg:95.21ms +step:1370/1705 train_time:130436ms step_avg:95.21ms +step:1371/1705 train_time:130531ms step_avg:95.21ms +step:1372/1705 train_time:130626ms step_avg:95.21ms +step:1373/1705 train_time:130721ms step_avg:95.21ms +step:1374/1705 train_time:130817ms 
step_avg:95.21ms +step:1375/1705 train_time:130912ms step_avg:95.21ms +step:1375/1705 val_loss:3.3516 train_time:131007ms step_avg:95.28ms +step:1376/1705 train_time:131030ms step_avg:95.23ms +step:1377/1705 train_time:131111ms step_avg:95.22ms +step:1378/1705 train_time:131212ms step_avg:95.22ms +step:1379/1705 train_time:131307ms step_avg:95.22ms +step:1380/1705 train_time:131402ms step_avg:95.22ms +step:1381/1705 train_time:131497ms step_avg:95.22ms +step:1382/1705 train_time:131590ms step_avg:95.22ms +step:1383/1705 train_time:131685ms step_avg:95.22ms +step:1384/1705 train_time:131780ms step_avg:95.22ms +step:1385/1705 train_time:131874ms step_avg:95.22ms +step:1386/1705 train_time:131970ms step_avg:95.22ms +step:1387/1705 train_time:132070ms step_avg:95.22ms +step:1388/1705 train_time:132167ms step_avg:95.22ms +step:1389/1705 train_time:132264ms step_avg:95.22ms +step:1390/1705 train_time:132359ms step_avg:95.22ms +step:1391/1705 train_time:132454ms step_avg:95.22ms +step:1392/1705 train_time:132549ms step_avg:95.22ms +step:1393/1705 train_time:132643ms step_avg:95.22ms +step:1394/1705 train_time:132737ms step_avg:95.22ms +step:1395/1705 train_time:132832ms step_avg:95.22ms +step:1396/1705 train_time:132927ms step_avg:95.22ms +step:1397/1705 train_time:133023ms step_avg:95.22ms +step:1398/1705 train_time:133119ms step_avg:95.22ms +step:1399/1705 train_time:133216ms step_avg:95.22ms +step:1400/1705 train_time:133313ms step_avg:95.22ms +step:1401/1705 train_time:133408ms step_avg:95.22ms +step:1402/1705 train_time:133503ms step_avg:95.22ms +step:1403/1705 train_time:133598ms step_avg:95.22ms +step:1404/1705 train_time:133693ms step_avg:95.22ms +step:1405/1705 train_time:133788ms step_avg:95.22ms +step:1406/1705 train_time:133882ms step_avg:95.22ms +step:1407/1705 train_time:133977ms step_avg:95.22ms +step:1408/1705 train_time:134074ms step_avg:95.22ms +step:1409/1705 train_time:134170ms step_avg:95.22ms +step:1410/1705 train_time:134267ms step_avg:95.22ms +step:1411/1705 train_time:134362ms step_avg:95.22ms +step:1412/1705 train_time:134458ms step_avg:95.22ms +step:1413/1705 train_time:134552ms step_avg:95.22ms +step:1414/1705 train_time:134647ms step_avg:95.22ms +step:1415/1705 train_time:134743ms step_avg:95.22ms +step:1416/1705 train_time:134838ms step_avg:95.22ms +step:1417/1705 train_time:134932ms step_avg:95.22ms +step:1418/1705 train_time:135028ms step_avg:95.22ms +step:1419/1705 train_time:135124ms step_avg:95.22ms +step:1420/1705 train_time:135219ms step_avg:95.22ms +step:1421/1705 train_time:135316ms step_avg:95.23ms +step:1422/1705 train_time:135412ms step_avg:95.23ms +step:1423/1705 train_time:135507ms step_avg:95.23ms +step:1424/1705 train_time:135601ms step_avg:95.23ms +step:1425/1705 train_time:135696ms step_avg:95.23ms +step:1426/1705 train_time:135791ms step_avg:95.23ms +step:1427/1705 train_time:135887ms step_avg:95.23ms +step:1428/1705 train_time:135982ms step_avg:95.23ms +step:1429/1705 train_time:136077ms step_avg:95.23ms +step:1430/1705 train_time:136173ms step_avg:95.23ms +step:1431/1705 train_time:136271ms step_avg:95.23ms +step:1432/1705 train_time:136367ms step_avg:95.23ms +step:1433/1705 train_time:136462ms step_avg:95.23ms +step:1434/1705 train_time:136557ms step_avg:95.23ms +step:1435/1705 train_time:136652ms step_avg:95.23ms +step:1436/1705 train_time:136747ms step_avg:95.23ms +step:1437/1705 train_time:136844ms step_avg:95.23ms +step:1438/1705 train_time:136937ms step_avg:95.23ms +step:1439/1705 train_time:137031ms step_avg:95.23ms +step:1440/1705 
train_time:137127ms step_avg:95.23ms +step:1441/1705 train_time:137223ms step_avg:95.23ms +step:1442/1705 train_time:137319ms step_avg:95.23ms +step:1443/1705 train_time:137416ms step_avg:95.23ms +step:1444/1705 train_time:137511ms step_avg:95.23ms +step:1445/1705 train_time:137606ms step_avg:95.23ms +step:1446/1705 train_time:137701ms step_avg:95.23ms +step:1447/1705 train_time:137797ms step_avg:95.23ms +step:1448/1705 train_time:137892ms step_avg:95.23ms +step:1449/1705 train_time:137988ms step_avg:95.23ms +step:1450/1705 train_time:138084ms step_avg:95.23ms +step:1451/1705 train_time:138179ms step_avg:95.23ms +step:1452/1705 train_time:138275ms step_avg:95.23ms +step:1453/1705 train_time:138370ms step_avg:95.23ms +step:1454/1705 train_time:138466ms step_avg:95.23ms +step:1455/1705 train_time:138561ms step_avg:95.23ms +step:1456/1705 train_time:138655ms step_avg:95.23ms +step:1457/1705 train_time:138751ms step_avg:95.23ms +step:1458/1705 train_time:138847ms step_avg:95.23ms +step:1459/1705 train_time:138941ms step_avg:95.23ms +step:1460/1705 train_time:139037ms step_avg:95.23ms +step:1461/1705 train_time:139132ms step_avg:95.23ms +step:1462/1705 train_time:139230ms step_avg:95.23ms +step:1463/1705 train_time:139325ms step_avg:95.23ms +step:1464/1705 train_time:139420ms step_avg:95.23ms +step:1465/1705 train_time:139516ms step_avg:95.23ms +step:1466/1705 train_time:139611ms step_avg:95.23ms +step:1467/1705 train_time:139707ms step_avg:95.23ms +step:1468/1705 train_time:139802ms step_avg:95.23ms +step:1469/1705 train_time:139897ms step_avg:95.23ms +step:1470/1705 train_time:139992ms step_avg:95.23ms +step:1471/1705 train_time:140087ms step_avg:95.23ms +step:1472/1705 train_time:140183ms step_avg:95.23ms +step:1473/1705 train_time:140278ms step_avg:95.23ms +step:1474/1705 train_time:140372ms step_avg:95.23ms +step:1475/1705 train_time:140468ms step_avg:95.23ms +step:1476/1705 train_time:140563ms step_avg:95.23ms +step:1477/1705 train_time:140657ms step_avg:95.23ms +step:1478/1705 train_time:140754ms step_avg:95.23ms +step:1479/1705 train_time:140849ms step_avg:95.23ms +step:1480/1705 train_time:140944ms step_avg:95.23ms +step:1481/1705 train_time:141039ms step_avg:95.23ms +step:1482/1705 train_time:141134ms step_avg:95.23ms +step:1483/1705 train_time:141231ms step_avg:95.23ms +step:1484/1705 train_time:141326ms step_avg:95.23ms +step:1485/1705 train_time:141701ms step_avg:95.42ms +step:1486/1705 train_time:141777ms step_avg:95.41ms +step:1487/1705 train_time:141871ms step_avg:95.41ms +step:1488/1705 train_time:141965ms step_avg:95.41ms +step:1489/1705 train_time:142059ms step_avg:95.41ms +step:1490/1705 train_time:142153ms step_avg:95.40ms +step:1491/1705 train_time:142247ms step_avg:95.40ms +step:1492/1705 train_time:142340ms step_avg:95.40ms +step:1493/1705 train_time:142435ms step_avg:95.40ms +step:1494/1705 train_time:142529ms step_avg:95.40ms +step:1495/1705 train_time:142628ms step_avg:95.40ms +step:1496/1705 train_time:142729ms step_avg:95.41ms +step:1497/1705 train_time:142826ms step_avg:95.41ms +step:1498/1705 train_time:142921ms step_avg:95.41ms +step:1499/1705 train_time:143017ms step_avg:95.41ms +step:1500/1705 train_time:143112ms step_avg:95.41ms +step:1500/1705 val_loss:3.3198 train_time:143207ms step_avg:95.47ms +step:1501/1705 train_time:143230ms step_avg:95.42ms +step:1502/1705 train_time:143309ms step_avg:95.41ms +step:1503/1705 train_time:143408ms step_avg:95.41ms +step:1504/1705 train_time:143504ms step_avg:95.41ms +step:1505/1705 train_time:143598ms step_avg:95.41ms 
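(A summary sketch of the validation checkpoints printed so far in this record — no new measurements, just the logged values collected — showing the loss still falling steadily per 125-step interval:)

vals = [(750, 3.5649), (875, 3.5231), (1000, 3.4852), (1125, 3.4369),
        (1250, 3.3885), (1375, 3.3516), (1500, 3.3198)]
print([round(a - b, 4) for (_, a), (_, b) in zip(vals, vals[1:])])
# -> [0.0418, 0.0379, 0.0483, 0.0484, 0.0369, 0.0318]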
+step:1506/1705 train_time:143692ms step_avg:95.41ms +step:1507/1705 train_time:143787ms step_avg:95.41ms +step:1508/1705 train_time:143882ms step_avg:95.41ms +step:1509/1705 train_time:143976ms step_avg:95.41ms +step:1510/1705 train_time:144070ms step_avg:95.41ms +step:1511/1705 train_time:144167ms step_avg:95.41ms +step:1512/1705 train_time:144264ms step_avg:95.41ms +step:1513/1705 train_time:144363ms step_avg:95.41ms +step:1514/1705 train_time:144459ms step_avg:95.42ms +step:1515/1705 train_time:144555ms step_avg:95.42ms +step:1516/1705 train_time:144649ms step_avg:95.41ms +step:1517/1705 train_time:144744ms step_avg:95.41ms +step:1518/1705 train_time:144838ms step_avg:95.41ms +step:1519/1705 train_time:144932ms step_avg:95.41ms +step:1520/1705 train_time:145027ms step_avg:95.41ms +step:1521/1705 train_time:145123ms step_avg:95.41ms +step:1522/1705 train_time:145222ms step_avg:95.42ms +step:1523/1705 train_time:145319ms step_avg:95.42ms +step:1524/1705 train_time:145416ms step_avg:95.42ms +step:1525/1705 train_time:145511ms step_avg:95.42ms +step:1526/1705 train_time:145607ms step_avg:95.42ms +step:1527/1705 train_time:145702ms step_avg:95.42ms +step:1528/1705 train_time:145797ms step_avg:95.42ms +step:1529/1705 train_time:145891ms step_avg:95.42ms +step:1530/1705 train_time:145985ms step_avg:95.42ms +step:1531/1705 train_time:146080ms step_avg:95.41ms +step:1532/1705 train_time:146176ms step_avg:95.42ms +step:1533/1705 train_time:146272ms step_avg:95.42ms +step:1534/1705 train_time:146368ms step_avg:95.42ms +step:1535/1705 train_time:146463ms step_avg:95.42ms +step:1536/1705 train_time:146561ms step_avg:95.42ms +step:1537/1705 train_time:146656ms step_avg:95.42ms +step:1538/1705 train_time:146751ms step_avg:95.42ms +step:1539/1705 train_time:146846ms step_avg:95.42ms +step:1540/1705 train_time:146941ms step_avg:95.42ms +step:1541/1705 train_time:147036ms step_avg:95.42ms +step:1542/1705 train_time:147130ms step_avg:95.42ms +step:1543/1705 train_time:147226ms step_avg:95.42ms +step:1544/1705 train_time:147323ms step_avg:95.42ms +step:1545/1705 train_time:147420ms step_avg:95.42ms +step:1546/1705 train_time:147516ms step_avg:95.42ms +step:1547/1705 train_time:147613ms step_avg:95.42ms +step:1548/1705 train_time:147707ms step_avg:95.42ms +step:1549/1705 train_time:147803ms step_avg:95.42ms +step:1550/1705 train_time:147898ms step_avg:95.42ms +step:1551/1705 train_time:147994ms step_avg:95.42ms +step:1552/1705 train_time:148089ms step_avg:95.42ms +step:1553/1705 train_time:148183ms step_avg:95.42ms +step:1554/1705 train_time:148278ms step_avg:95.42ms +step:1555/1705 train_time:148374ms step_avg:95.42ms +step:1556/1705 train_time:148470ms step_avg:95.42ms +step:1557/1705 train_time:148567ms step_avg:95.42ms +step:1558/1705 train_time:148663ms step_avg:95.42ms +step:1559/1705 train_time:148759ms step_avg:95.42ms +step:1560/1705 train_time:148854ms step_avg:95.42ms +step:1561/1705 train_time:148948ms step_avg:95.42ms +step:1562/1705 train_time:149043ms step_avg:95.42ms +step:1563/1705 train_time:149138ms step_avg:95.42ms +step:1564/1705 train_time:149233ms step_avg:95.42ms +step:1565/1705 train_time:149328ms step_avg:95.42ms +step:1566/1705 train_time:149424ms step_avg:95.42ms +step:1567/1705 train_time:149520ms step_avg:95.42ms +step:1568/1705 train_time:149617ms step_avg:95.42ms +step:1569/1705 train_time:149713ms step_avg:95.42ms +step:1570/1705 train_time:149808ms step_avg:95.42ms +step:1571/1705 train_time:149902ms step_avg:95.42ms +step:1572/1705 train_time:149997ms step_avg:95.42ms 
+step:1573/1705 train_time:150092ms step_avg:95.42ms +step:1574/1705 train_time:150187ms step_avg:95.42ms +step:1575/1705 train_time:150282ms step_avg:95.42ms +step:1576/1705 train_time:150377ms step_avg:95.42ms +step:1577/1705 train_time:150472ms step_avg:95.42ms +step:1578/1705 train_time:150568ms step_avg:95.42ms +step:1579/1705 train_time:150664ms step_avg:95.42ms +step:1580/1705 train_time:150760ms step_avg:95.42ms +step:1581/1705 train_time:150855ms step_avg:95.42ms +step:1582/1705 train_time:150950ms step_avg:95.42ms +step:1583/1705 train_time:151045ms step_avg:95.42ms +step:1584/1705 train_time:151140ms step_avg:95.42ms +step:1585/1705 train_time:151236ms step_avg:95.42ms +step:1586/1705 train_time:151330ms step_avg:95.42ms +step:1587/1705 train_time:151426ms step_avg:95.42ms +step:1588/1705 train_time:151522ms step_avg:95.42ms +step:1589/1705 train_time:151618ms step_avg:95.42ms +step:1590/1705 train_time:151714ms step_avg:95.42ms +step:1591/1705 train_time:151809ms step_avg:95.42ms +step:1592/1705 train_time:151904ms step_avg:95.42ms +step:1593/1705 train_time:152000ms step_avg:95.42ms +step:1594/1705 train_time:152096ms step_avg:95.42ms +step:1595/1705 train_time:152191ms step_avg:95.42ms +step:1596/1705 train_time:152285ms step_avg:95.42ms +step:1597/1705 train_time:152381ms step_avg:95.42ms +step:1598/1705 train_time:152477ms step_avg:95.42ms +step:1599/1705 train_time:152572ms step_avg:95.42ms +step:1600/1705 train_time:152667ms step_avg:95.42ms +step:1601/1705 train_time:152763ms step_avg:95.42ms +step:1602/1705 train_time:152860ms step_avg:95.42ms +step:1603/1705 train_time:152955ms step_avg:95.42ms +step:1604/1705 train_time:153050ms step_avg:95.42ms +step:1605/1705 train_time:153145ms step_avg:95.42ms +step:1606/1705 train_time:153241ms step_avg:95.42ms +step:1607/1705 train_time:153338ms step_avg:95.42ms +step:1608/1705 train_time:153434ms step_avg:95.42ms +step:1609/1705 train_time:153529ms step_avg:95.42ms +step:1610/1705 train_time:153625ms step_avg:95.42ms +step:1611/1705 train_time:153721ms step_avg:95.42ms +step:1612/1705 train_time:153816ms step_avg:95.42ms +step:1613/1705 train_time:153913ms step_avg:95.42ms +step:1614/1705 train_time:154008ms step_avg:95.42ms +step:1615/1705 train_time:154103ms step_avg:95.42ms +step:1616/1705 train_time:154198ms step_avg:95.42ms +step:1617/1705 train_time:154293ms step_avg:95.42ms +step:1618/1705 train_time:154389ms step_avg:95.42ms +step:1619/1705 train_time:154484ms step_avg:95.42ms +step:1620/1705 train_time:154579ms step_avg:95.42ms +step:1621/1705 train_time:154674ms step_avg:95.42ms +step:1622/1705 train_time:154768ms step_avg:95.42ms +step:1623/1705 train_time:154864ms step_avg:95.42ms +step:1624/1705 train_time:154961ms step_avg:95.42ms +step:1625/1705 train_time:155057ms step_avg:95.42ms +step:1625/1705 val_loss:3.2921 train_time:155153ms step_avg:95.48ms +step:1626/1705 train_time:155176ms step_avg:95.43ms +step:1627/1705 train_time:155256ms step_avg:95.42ms +step:1628/1705 train_time:155355ms step_avg:95.43ms +step:1629/1705 train_time:155451ms step_avg:95.43ms +step:1630/1705 train_time:155545ms step_avg:95.43ms +step:1631/1705 train_time:155640ms step_avg:95.43ms +step:1632/1705 train_time:155734ms step_avg:95.43ms +step:1633/1705 train_time:155828ms step_avg:95.42ms +step:1634/1705 train_time:155922ms step_avg:95.42ms +step:1635/1705 train_time:156016ms step_avg:95.42ms +step:1636/1705 train_time:156113ms step_avg:95.42ms +step:1637/1705 train_time:156210ms step_avg:95.42ms +step:1638/1705 train_time:156309ms 
step_avg:95.43ms +step:1639/1705 train_time:156406ms step_avg:95.43ms +step:1640/1705 train_time:156502ms step_avg:95.43ms +step:1641/1705 train_time:156596ms step_avg:95.43ms +step:1642/1705 train_time:156691ms step_avg:95.43ms +step:1643/1705 train_time:156786ms step_avg:95.43ms +step:1644/1705 train_time:156881ms step_avg:95.43ms +step:1645/1705 train_time:156975ms step_avg:95.43ms +step:1646/1705 train_time:157071ms step_avg:95.43ms +step:1647/1705 train_time:157167ms step_avg:95.43ms +step:1648/1705 train_time:157264ms step_avg:95.43ms +step:1649/1705 train_time:157360ms step_avg:95.43ms +step:1650/1705 train_time:157456ms step_avg:95.43ms +step:1651/1705 train_time:157551ms step_avg:95.43ms +step:1652/1705 train_time:157646ms step_avg:95.43ms +step:1653/1705 train_time:157740ms step_avg:95.43ms +step:1654/1705 train_time:157835ms step_avg:95.43ms +step:1655/1705 train_time:157930ms step_avg:95.43ms +step:1656/1705 train_time:158025ms step_avg:95.43ms +step:1657/1705 train_time:158120ms step_avg:95.43ms +step:1658/1705 train_time:158216ms step_avg:95.43ms +step:1659/1705 train_time:158314ms step_avg:95.43ms +step:1660/1705 train_time:158410ms step_avg:95.43ms +step:1661/1705 train_time:158505ms step_avg:95.43ms +step:1662/1705 train_time:158600ms step_avg:95.43ms +step:1663/1705 train_time:158696ms step_avg:95.43ms +step:1664/1705 train_time:158791ms step_avg:95.43ms +step:1665/1705 train_time:158887ms step_avg:95.43ms +step:1666/1705 train_time:158982ms step_avg:95.43ms +step:1667/1705 train_time:159077ms step_avg:95.43ms +step:1668/1705 train_time:159173ms step_avg:95.43ms +step:1669/1705 train_time:159269ms step_avg:95.43ms +step:1670/1705 train_time:159364ms step_avg:95.43ms +step:1671/1705 train_time:159459ms step_avg:95.43ms +step:1672/1705 train_time:159554ms step_avg:95.43ms +step:1673/1705 train_time:159650ms step_avg:95.43ms +step:1674/1705 train_time:159746ms step_avg:95.43ms +step:1675/1705 train_time:159841ms step_avg:95.43ms +step:1676/1705 train_time:159936ms step_avg:95.43ms +step:1677/1705 train_time:160031ms step_avg:95.43ms +step:1678/1705 train_time:160127ms step_avg:95.43ms +step:1679/1705 train_time:160222ms step_avg:95.43ms +step:1680/1705 train_time:160317ms step_avg:95.43ms +step:1681/1705 train_time:160413ms step_avg:95.43ms +step:1682/1705 train_time:160509ms step_avg:95.43ms +step:1683/1705 train_time:160605ms step_avg:95.43ms +step:1684/1705 train_time:160700ms step_avg:95.43ms +step:1685/1705 train_time:160796ms step_avg:95.43ms +step:1686/1705 train_time:160892ms step_avg:95.43ms +step:1687/1705 train_time:160987ms step_avg:95.43ms +step:1688/1705 train_time:161082ms step_avg:95.43ms +step:1689/1705 train_time:161177ms step_avg:95.43ms +step:1690/1705 train_time:161273ms step_avg:95.43ms +step:1691/1705 train_time:161369ms step_avg:95.43ms +step:1692/1705 train_time:161464ms step_avg:95.43ms +step:1693/1705 train_time:161559ms step_avg:95.43ms +step:1694/1705 train_time:161655ms step_avg:95.43ms +step:1695/1705 train_time:161751ms step_avg:95.43ms +step:1696/1705 train_time:161847ms step_avg:95.43ms +step:1697/1705 train_time:161943ms step_avg:95.43ms +step:1698/1705 train_time:162176ms step_avg:95.51ms +step:1699/1705 train_time:162387ms step_avg:95.58ms +step:1700/1705 train_time:162480ms step_avg:95.58ms +step:1701/1705 train_time:162574ms step_avg:95.58ms +step:1702/1705 train_time:162669ms step_avg:95.58ms +step:1703/1705 train_time:162763ms step_avg:95.57ms +step:1704/1705 train_time:162857ms step_avg:95.57ms +step:1705/1705 train_time:162951ms 
step_avg:95.57ms +step:1705/1705 val_loss:3.2779 train_time:163046ms step_avg:95.63ms +peak memory allocated: 33750 MiB reserved: 49456 MiB diff --git a/records/090525_SkipMLPBlocks/cf25c17a-ae33-4c45-8478-3e4f177a9f26.txt b/records/090525_SkipMLPBlocks/cf25c17a-ae33-4c45-8478-3e4f177a9f26.txt new file mode 100644 index 000000000..8fa21c9f2 --- /dev/null +++ b/records/090525_SkipMLPBlocks/cf25c17a-ae33-4c45-8478-3e4f177a9f26.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, 
grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + 
tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, 
mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads
+        self.head_dim = head_dim
+        hdim = num_heads * head_dim
+        assert hdim == dim, "num_heads * head_dim must equal model_dim"
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng
+        # https://x.com/hi_tysam/status/1879699187107033311
+        self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim))
+        with torch.no_grad():
+            self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights
+            self.qkvo_w[3].zero_() # init output weights to zero
+        self.rotary = Rotary(head_dim, max_seq_len)
+        # scale the attention logits by a fixed constant, instead of the default head_dim**-0.5, by @leloykun
+        # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
+        self.attn_scale = 0.12
+
+        # sparse gated attention to enable a context-based no-op by @classiclarryd
+        self.attn_gate_dim = 12
+        self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads)
+        self.attn_gate.weight.detach().zero_()
+
+    def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int):
+        B, T = x.size(0), x.size(1) # batch size, sequence length
+        assert B == 1, "varlen sequences require B == 1"
+        assert T % 16 == 0
+
+        q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2)
+        q, k = norm(q), norm(k) # QK norm @Grad62304977
+        q, k = self.rotary(q), self.rotary(k)
+        if ve is not None:
+            v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977
+        else: # mid layers skip token value embeddings by @YouJiacheng
+            v = lambdas[0] * v
+
+        max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size))
+
+        # use flash_attn over flex_attn @varunneal.
flash_attn_varlen suggested by @YouJiacheng
+        y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len,
+                                   causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0))
+        y = y.view(B, T, self.num_heads, self.head_dim)
+        y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1)
+        y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side
+        y = F.linear(y, self.qkvo_w[3].type_as(y))
+        return y
+
+class MLP(nn.Module):
+    def __init__(self, dim: int):
+        super().__init__()
+        hdim = 4 * dim
+        # make both matrices have the same shape because the optimizer groups params by shape
+        # 2 matrices x 12 layers = 24 total (fewer once MLP blocks are skipped, see Block below), divisible by the 8-GPU world size
+        self.c_fc = nn.Parameter(torch.empty(dim, hdim))
+        self.c_proj = nn.Parameter(torch.empty(dim, hdim))
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        with torch.no_grad():
+            self.c_fc.uniform_(-bound, bound)
+            self.c_proj.zero_() # zero init suggested by @Grad62304977
+
+    def forward(self, x: Tensor):
+        x = F.linear(x, self.c_fc.T.type_as(x))
+        x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
+        x = F.linear(x, self.c_proj.type_as(x))
+        return x
+
+
+class Block(nn.Module):
+    def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int):
+        super().__init__()
+        # skip attention of blocks.7 (the 8th layer) by @YouJiacheng
+        self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None
+        SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks of the first and last layers by @EmelyanenkoK (layer indices run 0-11 here, so only block 0 matches)
+        self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim)
+
+    def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor,
+                seqlens: Tensor, bm_size: int):
+        x = lambdas[0] * x + lambdas[1] * x0
+        if self.attn is not None:
+            x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size)
+        if self.mlp is not None:
+            x = x + self.mlp(norm(x))
+        return x
+
+# -----------------------------------------------------------------------------
+# The main model
+
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+class GPT(nn.Module):
+    def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int):
+        super().__init__()
+        vocab_size = next_multiple_of_n(vocab_size, n=128)
+        self.embed = nn.Embedding(vocab_size, model_dim)
+        # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897
+        # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78
+        self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)])
+        self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)])
+        # there are only 50257 unique GPT-2 tokens; we extend to the nearest multiple of 128 for efficiency.
+        # suggested to me by @Grad62304977. this originates from Karpathy's experiments.
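+        # fp8 scale note: float8_e4m3fn saturates at 448, and the input to lm_head is rms-normed
+        # (see forward below), so x_s = sqrt(model_dim)/448 keeps the activation cast in range;
+        # w_s and grad_s likewise budget the weight and gradient ranges for the custom fp8 matmul.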
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
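# Each call to next_batch returns, per rank, (start, end) document slices that concatenate
+        # to exactly num_tokens_local + 1 tokens; the extra token feeds the shifted targets.
+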
self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a BOS token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted; load the next one on the next loop iteration.
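+                # note: any tokens after the last usable BOS in the exhausted shard are dropped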
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # _targets is shifted by one token, so the last document contributes one fewer position
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local)
+            _targets = buf[1:].view(num_tokens_local)
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1705 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
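+# With the defaults above, train_batch_size = 2048 * 24 * 8 = 393,216 tokens per optimizer step,
+# split across world_size * grad_accum_steps = 8 rank-level micro-batches of 49,152 tokens each.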
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
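+
+# Schedule recap for the run below: with num_iterations=1705 and cooldown_frac=0.45, get_lr holds
+# the learning rate at its initial value for the first ~938 steps, then decays it linearly to 0.1x;
+# get_ws steps the window schedule (3, 7, 11) over successive thirds of training.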
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:53:26 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 128W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 128W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 123W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 75912 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 75913 C /usr/bin/python3 610MiB | +| 0 N/A N/A 75914 C /usr/bin/python3 610MiB | +| 0 N/A N/A 75915 C /usr/bin/python3 610MiB | +| 0 N/A N/A 75916 C /usr/bin/python3 610MiB | +| 0 N/A N/A 75917 C /usr/bin/python3 610MiB | +| 0 N/A N/A 75918 C /usr/bin/python3 610MiB | +| 0 N/A N/A 75919 C /usr/bin/python3 610MiB | +| 1 N/A N/A 75913 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 75914 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 75915 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 75916 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 75917 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 75918 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 75919 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1705 train_time:355ms step_avg:355.30ms +step:2/1705 train_time:382ms step_avg:190.80ms +step:3/1705 train_time:444ms step_avg:148.16ms +step:4/1705 train_time:536ms step_avg:133.88ms +step:5/1705 
train_time:627ms step_avg:125.44ms +step:6/1705 train_time:719ms step_avg:119.82ms +step:7/1705 train_time:810ms step_avg:115.76ms +step:8/1705 train_time:903ms step_avg:112.87ms +step:9/1705 train_time:995ms step_avg:110.52ms +step:10/1705 train_time:1087ms step_avg:108.69ms +step:11/1705 train_time:1179ms step_avg:107.22ms +step:12/1705 train_time:1274ms step_avg:106.15ms +step:13/1705 train_time:1370ms step_avg:105.36ms +step:14/1705 train_time:1465ms step_avg:104.65ms +step:15/1705 train_time:1558ms step_avg:103.87ms +step:16/1705 train_time:1651ms step_avg:103.17ms +step:17/1705 train_time:1743ms step_avg:102.53ms +step:18/1705 train_time:1835ms step_avg:101.94ms +step:19/1705 train_time:1927ms step_avg:101.43ms +step:20/1705 train_time:2020ms step_avg:101.00ms +step:21/1705 train_time:2112ms step_avg:100.58ms +step:22/1705 train_time:2206ms step_avg:100.29ms +step:23/1705 train_time:2301ms step_avg:100.05ms +step:24/1705 train_time:2395ms step_avg:99.80ms +step:25/1705 train_time:2489ms step_avg:99.57ms +step:26/1705 train_time:2583ms step_avg:99.36ms +step:27/1705 train_time:2677ms step_avg:99.14ms +step:28/1705 train_time:2769ms step_avg:98.91ms +step:29/1705 train_time:2862ms step_avg:98.69ms +step:30/1705 train_time:2955ms step_avg:98.50ms +step:31/1705 train_time:3047ms step_avg:98.30ms +step:32/1705 train_time:3141ms step_avg:98.16ms +step:33/1705 train_time:3234ms step_avg:98.01ms +step:34/1705 train_time:3327ms step_avg:97.86ms +step:35/1705 train_time:3421ms step_avg:97.75ms +step:36/1705 train_time:3515ms step_avg:97.64ms +step:37/1705 train_time:3608ms step_avg:97.51ms +step:38/1705 train_time:3702ms step_avg:97.43ms +step:39/1705 train_time:3796ms step_avg:97.34ms +step:40/1705 train_time:3889ms step_avg:97.22ms +step:41/1705 train_time:3981ms step_avg:97.10ms +step:42/1705 train_time:4073ms step_avg:96.99ms +step:43/1705 train_time:4166ms step_avg:96.89ms +step:44/1705 train_time:4260ms step_avg:96.82ms +step:45/1705 train_time:4353ms step_avg:96.73ms +step:46/1705 train_time:4446ms step_avg:96.66ms +step:47/1705 train_time:4540ms step_avg:96.59ms +step:48/1705 train_time:4634ms step_avg:96.54ms +step:49/1705 train_time:4727ms step_avg:96.47ms +step:50/1705 train_time:4821ms step_avg:96.41ms +step:51/1705 train_time:4913ms step_avg:96.34ms +step:52/1705 train_time:5006ms step_avg:96.27ms +step:53/1705 train_time:5099ms step_avg:96.21ms +step:54/1705 train_time:5192ms step_avg:96.14ms +step:55/1705 train_time:5285ms step_avg:96.10ms +step:56/1705 train_time:5379ms step_avg:96.05ms +step:57/1705 train_time:5471ms step_avg:95.99ms +step:58/1705 train_time:5565ms step_avg:95.94ms +step:59/1705 train_time:5659ms step_avg:95.91ms +step:60/1705 train_time:5751ms step_avg:95.85ms +step:61/1705 train_time:5845ms step_avg:95.81ms +step:62/1705 train_time:5938ms step_avg:95.78ms +step:63/1705 train_time:6030ms step_avg:95.72ms +step:64/1705 train_time:6124ms step_avg:95.68ms +step:65/1705 train_time:6218ms step_avg:95.66ms +step:66/1705 train_time:6311ms step_avg:95.62ms +step:67/1705 train_time:6404ms step_avg:95.58ms +step:68/1705 train_time:6498ms step_avg:95.55ms +step:69/1705 train_time:6590ms step_avg:95.51ms +step:70/1705 train_time:6684ms step_avg:95.49ms +step:71/1705 train_time:6778ms step_avg:95.46ms +step:72/1705 train_time:6870ms step_avg:95.42ms +step:73/1705 train_time:6964ms step_avg:95.40ms +step:74/1705 train_time:7057ms step_avg:95.37ms +step:75/1705 train_time:7150ms step_avg:95.33ms +step:76/1705 train_time:7243ms step_avg:95.31ms +step:77/1705 
train_time:7337ms step_avg:95.28ms +step:78/1705 train_time:7429ms step_avg:95.25ms +step:79/1705 train_time:7523ms step_avg:95.22ms +step:80/1705 train_time:7616ms step_avg:95.20ms +step:81/1705 train_time:7709ms step_avg:95.17ms +step:82/1705 train_time:7802ms step_avg:95.15ms +step:83/1705 train_time:7896ms step_avg:95.13ms +step:84/1705 train_time:7989ms step_avg:95.10ms +step:85/1705 train_time:8084ms step_avg:95.10ms +step:86/1705 train_time:8177ms step_avg:95.08ms +step:87/1705 train_time:8270ms step_avg:95.06ms +step:88/1705 train_time:8364ms step_avg:95.04ms +step:89/1705 train_time:8457ms step_avg:95.02ms +step:90/1705 train_time:8550ms step_avg:95.00ms +step:91/1705 train_time:8643ms step_avg:94.98ms +step:92/1705 train_time:8736ms step_avg:94.96ms +step:93/1705 train_time:8829ms step_avg:94.93ms +step:94/1705 train_time:8922ms step_avg:94.92ms +step:95/1705 train_time:9016ms step_avg:94.90ms +step:96/1705 train_time:9108ms step_avg:94.88ms +step:97/1705 train_time:9201ms step_avg:94.86ms +step:98/1705 train_time:9294ms step_avg:94.84ms +step:99/1705 train_time:9388ms step_avg:94.83ms +step:100/1705 train_time:9481ms step_avg:94.81ms +step:101/1705 train_time:9574ms step_avg:94.79ms +step:102/1705 train_time:9667ms step_avg:94.78ms +step:103/1705 train_time:9761ms step_avg:94.77ms +step:104/1705 train_time:9853ms step_avg:94.74ms +step:105/1705 train_time:9947ms step_avg:94.73ms +step:106/1705 train_time:10040ms step_avg:94.71ms +step:107/1705 train_time:10132ms step_avg:94.69ms +step:108/1705 train_time:10226ms step_avg:94.68ms +step:109/1705 train_time:10319ms step_avg:94.67ms +step:110/1705 train_time:10412ms step_avg:94.65ms +step:111/1705 train_time:10505ms step_avg:94.64ms +step:112/1705 train_time:10599ms step_avg:94.63ms +step:113/1705 train_time:10692ms step_avg:94.62ms +step:114/1705 train_time:10785ms step_avg:94.61ms +step:115/1705 train_time:10878ms step_avg:94.60ms +step:116/1705 train_time:10971ms step_avg:94.58ms +step:117/1705 train_time:11064ms step_avg:94.56ms +step:118/1705 train_time:11156ms step_avg:94.55ms +step:119/1705 train_time:11249ms step_avg:94.53ms +step:120/1705 train_time:11342ms step_avg:94.52ms +step:121/1705 train_time:11435ms step_avg:94.51ms +step:122/1705 train_time:11528ms step_avg:94.49ms +step:123/1705 train_time:11621ms step_avg:94.48ms +step:124/1705 train_time:11715ms step_avg:94.47ms +step:125/1705 train_time:11807ms step_avg:94.46ms +step:125/1705 val_loss:4.3039 train_time:11901ms step_avg:95.21ms +step:126/1705 train_time:11925ms step_avg:94.65ms +step:127/1705 train_time:11999ms step_avg:94.48ms +step:128/1705 train_time:12101ms step_avg:94.54ms +step:129/1705 train_time:12195ms step_avg:94.54ms +step:130/1705 train_time:12289ms step_avg:94.53ms +step:131/1705 train_time:12383ms step_avg:94.52ms +step:132/1705 train_time:12473ms step_avg:94.50ms +step:133/1705 train_time:12565ms step_avg:94.48ms +step:134/1705 train_time:12657ms step_avg:94.46ms +step:135/1705 train_time:12749ms step_avg:94.44ms +step:136/1705 train_time:12842ms step_avg:94.42ms +step:137/1705 train_time:12935ms step_avg:94.41ms +step:138/1705 train_time:13030ms step_avg:94.42ms +step:139/1705 train_time:13126ms step_avg:94.43ms +step:140/1705 train_time:13220ms step_avg:94.43ms +step:141/1705 train_time:13313ms step_avg:94.42ms +step:142/1705 train_time:13406ms step_avg:94.41ms +step:143/1705 train_time:13498ms step_avg:94.39ms +step:144/1705 train_time:13591ms step_avg:94.38ms +step:145/1705 train_time:13683ms step_avg:94.37ms +step:146/1705 train_time:13775ms 
step_avg:94.35ms +step:147/1705 train_time:13869ms step_avg:94.35ms +step:148/1705 train_time:13962ms step_avg:94.33ms +step:149/1705 train_time:14055ms step_avg:94.33ms +step:150/1705 train_time:14149ms step_avg:94.33ms +step:151/1705 train_time:14243ms step_avg:94.33ms +step:152/1705 train_time:14336ms step_avg:94.31ms +step:153/1705 train_time:14429ms step_avg:94.31ms +step:154/1705 train_time:14522ms step_avg:94.30ms +step:155/1705 train_time:14614ms step_avg:94.28ms +step:156/1705 train_time:14707ms step_avg:94.28ms +step:157/1705 train_time:14800ms step_avg:94.26ms +step:158/1705 train_time:14892ms step_avg:94.25ms +step:159/1705 train_time:14985ms step_avg:94.25ms +step:160/1705 train_time:15079ms step_avg:94.24ms +step:161/1705 train_time:15172ms step_avg:94.24ms +step:162/1705 train_time:15266ms step_avg:94.24ms +step:163/1705 train_time:15360ms step_avg:94.23ms +step:164/1705 train_time:15452ms step_avg:94.22ms +step:165/1705 train_time:15545ms step_avg:94.21ms +step:166/1705 train_time:15637ms step_avg:94.20ms +step:167/1705 train_time:15730ms step_avg:94.19ms +step:168/1705 train_time:15822ms step_avg:94.18ms +step:169/1705 train_time:15915ms step_avg:94.17ms +step:170/1705 train_time:16008ms step_avg:94.17ms +step:171/1705 train_time:16101ms step_avg:94.16ms +step:172/1705 train_time:16194ms step_avg:94.15ms +step:173/1705 train_time:16289ms step_avg:94.15ms +step:174/1705 train_time:16382ms step_avg:94.15ms +step:175/1705 train_time:16475ms step_avg:94.14ms +step:176/1705 train_time:16567ms step_avg:94.13ms +step:177/1705 train_time:16660ms step_avg:94.12ms +step:178/1705 train_time:16753ms step_avg:94.12ms +step:179/1705 train_time:16846ms step_avg:94.11ms +step:180/1705 train_time:16938ms step_avg:94.10ms +step:181/1705 train_time:17032ms step_avg:94.10ms +step:182/1705 train_time:17125ms step_avg:94.10ms +step:183/1705 train_time:17218ms step_avg:94.09ms +step:184/1705 train_time:17311ms step_avg:94.08ms +step:185/1705 train_time:17404ms step_avg:94.07ms +step:186/1705 train_time:17496ms step_avg:94.07ms +step:187/1705 train_time:17589ms step_avg:94.06ms +step:188/1705 train_time:17682ms step_avg:94.05ms +step:189/1705 train_time:17775ms step_avg:94.05ms +step:190/1705 train_time:17867ms step_avg:94.04ms +step:191/1705 train_time:17960ms step_avg:94.03ms +step:192/1705 train_time:18053ms step_avg:94.03ms +step:193/1705 train_time:18147ms step_avg:94.03ms +step:194/1705 train_time:18241ms step_avg:94.02ms +step:195/1705 train_time:18334ms step_avg:94.02ms +step:196/1705 train_time:18427ms step_avg:94.02ms +step:197/1705 train_time:18519ms step_avg:94.01ms +step:198/1705 train_time:18612ms step_avg:94.00ms +step:199/1705 train_time:18705ms step_avg:94.00ms +step:200/1705 train_time:18797ms step_avg:93.99ms +step:201/1705 train_time:18890ms step_avg:93.98ms +step:202/1705 train_time:18983ms step_avg:93.97ms +step:203/1705 train_time:19076ms step_avg:93.97ms +step:204/1705 train_time:19169ms step_avg:93.97ms +step:205/1705 train_time:19263ms step_avg:93.96ms +step:206/1705 train_time:19356ms step_avg:93.96ms +step:207/1705 train_time:19450ms step_avg:93.96ms +step:208/1705 train_time:19544ms step_avg:93.96ms +step:209/1705 train_time:19637ms step_avg:93.96ms +step:210/1705 train_time:19730ms step_avg:93.95ms +step:211/1705 train_time:19823ms step_avg:93.95ms +step:212/1705 train_time:19916ms step_avg:93.94ms +step:213/1705 train_time:20170ms step_avg:94.70ms +step:214/1705 train_time:20340ms step_avg:95.05ms +step:215/1705 train_time:20432ms step_avg:95.03ms +step:216/1705 
train_time:20524ms step_avg:95.02ms +step:217/1705 train_time:20615ms step_avg:95.00ms +step:218/1705 train_time:20708ms step_avg:94.99ms +step:219/1705 train_time:20800ms step_avg:94.98ms +step:220/1705 train_time:20892ms step_avg:94.96ms +step:221/1705 train_time:20984ms step_avg:94.95ms +step:222/1705 train_time:21075ms step_avg:94.93ms +step:223/1705 train_time:21168ms step_avg:94.93ms +step:224/1705 train_time:21262ms step_avg:94.92ms +step:225/1705 train_time:21357ms step_avg:94.92ms +step:226/1705 train_time:21451ms step_avg:94.92ms +step:227/1705 train_time:21545ms step_avg:94.91ms +step:228/1705 train_time:21637ms step_avg:94.90ms +step:229/1705 train_time:21730ms step_avg:94.89ms +step:230/1705 train_time:21823ms step_avg:94.88ms +step:231/1705 train_time:21915ms step_avg:94.87ms +step:232/1705 train_time:22008ms step_avg:94.86ms +step:233/1705 train_time:22099ms step_avg:94.85ms +step:234/1705 train_time:22192ms step_avg:94.84ms +step:235/1705 train_time:22285ms step_avg:94.83ms +step:236/1705 train_time:22379ms step_avg:94.83ms +step:237/1705 train_time:22472ms step_avg:94.82ms +step:238/1705 train_time:22565ms step_avg:94.81ms +step:239/1705 train_time:22658ms step_avg:94.80ms +step:240/1705 train_time:22751ms step_avg:94.79ms +step:241/1705 train_time:22844ms step_avg:94.79ms +step:242/1705 train_time:22936ms step_avg:94.78ms +step:243/1705 train_time:23029ms step_avg:94.77ms +step:244/1705 train_time:23121ms step_avg:94.76ms +step:245/1705 train_time:23214ms step_avg:94.75ms +step:246/1705 train_time:23307ms step_avg:94.74ms +step:247/1705 train_time:23400ms step_avg:94.74ms +step:248/1705 train_time:23493ms step_avg:94.73ms +step:249/1705 train_time:23587ms step_avg:94.73ms +step:250/1705 train_time:23680ms step_avg:94.72ms +step:250/1705 val_loss:3.9663 train_time:23774ms step_avg:95.10ms +step:251/1705 train_time:23798ms step_avg:94.81ms +step:252/1705 train_time:23869ms step_avg:94.72ms +step:253/1705 train_time:23965ms step_avg:94.72ms +step:254/1705 train_time:24066ms step_avg:94.75ms +step:255/1705 train_time:24160ms step_avg:94.75ms +step:256/1705 train_time:24252ms step_avg:94.73ms +step:257/1705 train_time:24343ms step_avg:94.72ms +step:258/1705 train_time:24436ms step_avg:94.71ms +step:259/1705 train_time:24527ms step_avg:94.70ms +step:260/1705 train_time:24619ms step_avg:94.69ms +step:261/1705 train_time:24714ms step_avg:94.69ms +step:262/1705 train_time:24808ms step_avg:94.69ms +step:263/1705 train_time:24902ms step_avg:94.68ms +step:264/1705 train_time:24996ms step_avg:94.68ms +step:265/1705 train_time:25090ms step_avg:94.68ms +step:266/1705 train_time:25183ms step_avg:94.67ms +step:267/1705 train_time:25275ms step_avg:94.66ms +step:268/1705 train_time:25367ms step_avg:94.65ms +step:269/1705 train_time:25459ms step_avg:94.64ms +step:270/1705 train_time:25551ms step_avg:94.63ms +step:271/1705 train_time:25643ms step_avg:94.62ms +step:272/1705 train_time:25736ms step_avg:94.62ms +step:273/1705 train_time:25830ms step_avg:94.61ms +step:274/1705 train_time:25922ms step_avg:94.61ms +step:275/1705 train_time:26016ms step_avg:94.60ms +step:276/1705 train_time:26109ms step_avg:94.60ms +step:277/1705 train_time:26202ms step_avg:94.59ms +step:278/1705 train_time:26296ms step_avg:94.59ms +step:279/1705 train_time:26388ms step_avg:94.58ms +step:280/1705 train_time:26481ms step_avg:94.57ms +step:281/1705 train_time:26573ms step_avg:94.57ms +step:282/1705 train_time:26666ms step_avg:94.56ms +step:283/1705 train_time:26758ms step_avg:94.55ms +step:284/1705 train_time:26851ms 
step_avg:94.54ms +step:285/1705 train_time:26944ms step_avg:94.54ms +step:286/1705 train_time:27037ms step_avg:94.54ms +step:287/1705 train_time:27131ms step_avg:94.53ms +step:288/1705 train_time:27223ms step_avg:94.52ms +step:289/1705 train_time:27316ms step_avg:94.52ms +step:290/1705 train_time:27409ms step_avg:94.51ms +step:291/1705 train_time:27501ms step_avg:94.51ms +step:292/1705 train_time:27594ms step_avg:94.50ms +step:293/1705 train_time:27685ms step_avg:94.49ms +step:294/1705 train_time:27778ms step_avg:94.48ms +step:295/1705 train_time:27871ms step_avg:94.48ms +step:296/1705 train_time:27965ms step_avg:94.48ms +step:297/1705 train_time:28058ms step_avg:94.47ms +step:298/1705 train_time:28151ms step_avg:94.46ms +step:299/1705 train_time:28243ms step_avg:94.46ms +step:300/1705 train_time:28336ms step_avg:94.45ms +step:301/1705 train_time:28429ms step_avg:94.45ms +step:302/1705 train_time:28521ms step_avg:94.44ms +step:303/1705 train_time:28614ms step_avg:94.44ms +step:304/1705 train_time:28707ms step_avg:94.43ms +step:305/1705 train_time:28800ms step_avg:94.43ms +step:306/1705 train_time:28893ms step_avg:94.42ms +step:307/1705 train_time:28985ms step_avg:94.41ms +step:308/1705 train_time:29078ms step_avg:94.41ms +step:309/1705 train_time:29171ms step_avg:94.40ms +step:310/1705 train_time:29264ms step_avg:94.40ms +step:311/1705 train_time:29357ms step_avg:94.39ms +step:312/1705 train_time:29449ms step_avg:94.39ms +step:313/1705 train_time:29541ms step_avg:94.38ms +step:314/1705 train_time:29635ms step_avg:94.38ms +step:315/1705 train_time:29728ms step_avg:94.37ms +step:316/1705 train_time:29820ms step_avg:94.37ms +step:317/1705 train_time:29914ms step_avg:94.37ms +step:318/1705 train_time:30007ms step_avg:94.36ms +step:319/1705 train_time:30099ms step_avg:94.35ms +step:320/1705 train_time:30193ms step_avg:94.35ms +step:321/1705 train_time:30285ms step_avg:94.35ms +step:322/1705 train_time:30378ms step_avg:94.34ms +step:323/1705 train_time:30471ms step_avg:94.34ms +step:324/1705 train_time:30564ms step_avg:94.33ms +step:325/1705 train_time:30656ms step_avg:94.33ms +step:326/1705 train_time:30750ms step_avg:94.32ms +step:327/1705 train_time:30842ms step_avg:94.32ms +step:328/1705 train_time:30936ms step_avg:94.32ms +step:329/1705 train_time:31029ms step_avg:94.31ms +step:330/1705 train_time:31121ms step_avg:94.31ms +step:331/1705 train_time:31215ms step_avg:94.30ms +step:332/1705 train_time:31308ms step_avg:94.30ms +step:333/1705 train_time:31401ms step_avg:94.30ms +step:334/1705 train_time:31494ms step_avg:94.29ms +step:335/1705 train_time:31587ms step_avg:94.29ms +step:336/1705 train_time:31679ms step_avg:94.28ms +step:337/1705 train_time:31773ms step_avg:94.28ms +step:338/1705 train_time:31865ms step_avg:94.28ms +step:339/1705 train_time:31959ms step_avg:94.27ms +step:340/1705 train_time:32052ms step_avg:94.27ms +step:341/1705 train_time:32145ms step_avg:94.27ms +step:342/1705 train_time:32237ms step_avg:94.26ms +step:343/1705 train_time:32331ms step_avg:94.26ms +step:344/1705 train_time:32424ms step_avg:94.25ms +step:345/1705 train_time:32517ms step_avg:94.25ms +step:346/1705 train_time:32610ms step_avg:94.25ms +step:347/1705 train_time:32702ms step_avg:94.24ms +step:348/1705 train_time:32795ms step_avg:94.24ms +step:349/1705 train_time:32888ms step_avg:94.24ms +step:350/1705 train_time:32982ms step_avg:94.23ms +step:351/1705 train_time:33075ms step_avg:94.23ms +step:352/1705 train_time:33168ms step_avg:94.23ms +step:353/1705 train_time:33260ms step_avg:94.22ms +step:354/1705 
train_time:33354ms step_avg:94.22ms +step:355/1705 train_time:33446ms step_avg:94.22ms +step:356/1705 train_time:33539ms step_avg:94.21ms +step:357/1705 train_time:33633ms step_avg:94.21ms +step:358/1705 train_time:33726ms step_avg:94.21ms +step:359/1705 train_time:33819ms step_avg:94.20ms +step:360/1705 train_time:33912ms step_avg:94.20ms +step:361/1705 train_time:34005ms step_avg:94.20ms +step:362/1705 train_time:34098ms step_avg:94.19ms +step:363/1705 train_time:34191ms step_avg:94.19ms +step:364/1705 train_time:34283ms step_avg:94.19ms +step:365/1705 train_time:34376ms step_avg:94.18ms +step:366/1705 train_time:34469ms step_avg:94.18ms +step:367/1705 train_time:34561ms step_avg:94.17ms +step:368/1705 train_time:34655ms step_avg:94.17ms +step:369/1705 train_time:34748ms step_avg:94.17ms +step:370/1705 train_time:34840ms step_avg:94.16ms +step:371/1705 train_time:34935ms step_avg:94.16ms +step:372/1705 train_time:35028ms step_avg:94.16ms +step:373/1705 train_time:35120ms step_avg:94.16ms +step:374/1705 train_time:35214ms step_avg:94.15ms +step:375/1705 train_time:35307ms step_avg:94.15ms +step:375/1705 val_loss:3.8168 train_time:35400ms step_avg:94.40ms +step:376/1705 train_time:35422ms step_avg:94.21ms +step:377/1705 train_time:35499ms step_avg:94.16ms +step:378/1705 train_time:35597ms step_avg:94.17ms +step:379/1705 train_time:35691ms step_avg:94.17ms +step:380/1705 train_time:35784ms step_avg:94.17ms +step:381/1705 train_time:35875ms step_avg:94.16ms +step:382/1705 train_time:35967ms step_avg:94.16ms +step:383/1705 train_time:36059ms step_avg:94.15ms +step:384/1705 train_time:36151ms step_avg:94.14ms +step:385/1705 train_time:36243ms step_avg:94.14ms +step:386/1705 train_time:36336ms step_avg:94.13ms +step:387/1705 train_time:36430ms step_avg:94.13ms +step:388/1705 train_time:36524ms step_avg:94.13ms +step:389/1705 train_time:36617ms step_avg:94.13ms +step:390/1705 train_time:36711ms step_avg:94.13ms +step:391/1705 train_time:36805ms step_avg:94.13ms +step:392/1705 train_time:36897ms step_avg:94.12ms +step:393/1705 train_time:36990ms step_avg:94.12ms +step:394/1705 train_time:37083ms step_avg:94.12ms +step:395/1705 train_time:37175ms step_avg:94.11ms +step:396/1705 train_time:37267ms step_avg:94.11ms +step:397/1705 train_time:37360ms step_avg:94.11ms +step:398/1705 train_time:37453ms step_avg:94.10ms +step:399/1705 train_time:37548ms step_avg:94.10ms +step:400/1705 train_time:37641ms step_avg:94.10ms +step:401/1705 train_time:37734ms step_avg:94.10ms +step:402/1705 train_time:37828ms step_avg:94.10ms +step:403/1705 train_time:37921ms step_avg:94.10ms +step:404/1705 train_time:38013ms step_avg:94.09ms +step:405/1705 train_time:38106ms step_avg:94.09ms +step:406/1705 train_time:38197ms step_avg:94.08ms +step:407/1705 train_time:38290ms step_avg:94.08ms +step:408/1705 train_time:38382ms step_avg:94.07ms +step:409/1705 train_time:38475ms step_avg:94.07ms +step:410/1705 train_time:38569ms step_avg:94.07ms +step:411/1705 train_time:38663ms step_avg:94.07ms +step:412/1705 train_time:38756ms step_avg:94.07ms +step:413/1705 train_time:38849ms step_avg:94.07ms +step:414/1705 train_time:38942ms step_avg:94.06ms +step:415/1705 train_time:39035ms step_avg:94.06ms +step:416/1705 train_time:39127ms step_avg:94.06ms +step:417/1705 train_time:39219ms step_avg:94.05ms +step:418/1705 train_time:39312ms step_avg:94.05ms +step:419/1705 train_time:39405ms step_avg:94.05ms +step:420/1705 train_time:39498ms step_avg:94.04ms +step:421/1705 train_time:39592ms step_avg:94.04ms +step:422/1705 train_time:39685ms 
step_avg:94.04ms +step:423/1705 train_time:39778ms step_avg:94.04ms +step:424/1705 train_time:39871ms step_avg:94.04ms +step:425/1705 train_time:40155ms step_avg:94.48ms +step:426/1705 train_time:40268ms step_avg:94.53ms +step:427/1705 train_time:40359ms step_avg:94.52ms +step:428/1705 train_time:40451ms step_avg:94.51ms +step:429/1705 train_time:40543ms step_avg:94.51ms +step:430/1705 train_time:40635ms step_avg:94.50ms +step:431/1705 train_time:40727ms step_avg:94.49ms +step:432/1705 train_time:40819ms step_avg:94.49ms +step:433/1705 train_time:40911ms step_avg:94.48ms +step:434/1705 train_time:41003ms step_avg:94.48ms +step:435/1705 train_time:41097ms step_avg:94.48ms +step:436/1705 train_time:41192ms step_avg:94.48ms +step:437/1705 train_time:41288ms step_avg:94.48ms +step:438/1705 train_time:41381ms step_avg:94.48ms +step:439/1705 train_time:41474ms step_avg:94.47ms +step:440/1705 train_time:41567ms step_avg:94.47ms +step:441/1705 train_time:41659ms step_avg:94.47ms +step:442/1705 train_time:41752ms step_avg:94.46ms +step:443/1705 train_time:41844ms step_avg:94.46ms +step:444/1705 train_time:41936ms step_avg:94.45ms +step:445/1705 train_time:42029ms step_avg:94.45ms +step:446/1705 train_time:42123ms step_avg:94.45ms +step:447/1705 train_time:42216ms step_avg:94.44ms +step:448/1705 train_time:42310ms step_avg:94.44ms +step:449/1705 train_time:42404ms step_avg:94.44ms +step:450/1705 train_time:42496ms step_avg:94.44ms +step:451/1705 train_time:42590ms step_avg:94.43ms +step:452/1705 train_time:42683ms step_avg:94.43ms +step:453/1705 train_time:42775ms step_avg:94.43ms +step:454/1705 train_time:42867ms step_avg:94.42ms +step:455/1705 train_time:42960ms step_avg:94.42ms +step:456/1705 train_time:43053ms step_avg:94.41ms +step:457/1705 train_time:43146ms step_avg:94.41ms +step:458/1705 train_time:43239ms step_avg:94.41ms +step:459/1705 train_time:43332ms step_avg:94.41ms +step:460/1705 train_time:43426ms step_avg:94.40ms +step:461/1705 train_time:43518ms step_avg:94.40ms +step:462/1705 train_time:43612ms step_avg:94.40ms +step:463/1705 train_time:43705ms step_avg:94.40ms +step:464/1705 train_time:43797ms step_avg:94.39ms +step:465/1705 train_time:43890ms step_avg:94.39ms +step:466/1705 train_time:43983ms step_avg:94.38ms +step:467/1705 train_time:44076ms step_avg:94.38ms +step:468/1705 train_time:44170ms step_avg:94.38ms +step:469/1705 train_time:44263ms step_avg:94.38ms +step:470/1705 train_time:44356ms step_avg:94.37ms +step:471/1705 train_time:44449ms step_avg:94.37ms +step:472/1705 train_time:44542ms step_avg:94.37ms +step:473/1705 train_time:44635ms step_avg:94.37ms +step:474/1705 train_time:44728ms step_avg:94.36ms +step:475/1705 train_time:44822ms step_avg:94.36ms +step:476/1705 train_time:44914ms step_avg:94.36ms +step:477/1705 train_time:45008ms step_avg:94.36ms +step:478/1705 train_time:45101ms step_avg:94.35ms +step:479/1705 train_time:45194ms step_avg:94.35ms +step:480/1705 train_time:45287ms step_avg:94.35ms +step:481/1705 train_time:45381ms step_avg:94.35ms +step:482/1705 train_time:45474ms step_avg:94.34ms +step:483/1705 train_time:45567ms step_avg:94.34ms +step:484/1705 train_time:45660ms step_avg:94.34ms +step:485/1705 train_time:45752ms step_avg:94.33ms +step:486/1705 train_time:45845ms step_avg:94.33ms +step:487/1705 train_time:45938ms step_avg:94.33ms +step:488/1705 train_time:46031ms step_avg:94.33ms +step:489/1705 train_time:46125ms step_avg:94.32ms +step:490/1705 train_time:46217ms step_avg:94.32ms +step:491/1705 train_time:46311ms step_avg:94.32ms +step:492/1705 
train_time:46404ms step_avg:94.32ms +step:493/1705 train_time:46496ms step_avg:94.31ms +step:494/1705 train_time:46589ms step_avg:94.31ms +step:495/1705 train_time:46683ms step_avg:94.31ms +step:496/1705 train_time:46776ms step_avg:94.31ms +step:497/1705 train_time:46868ms step_avg:94.30ms +step:498/1705 train_time:46961ms step_avg:94.30ms +step:499/1705 train_time:47054ms step_avg:94.30ms +step:500/1705 train_time:47148ms step_avg:94.30ms +step:500/1705 val_loss:3.7144 train_time:47241ms step_avg:94.48ms +step:501/1705 train_time:47264ms step_avg:94.34ms +step:502/1705 train_time:47341ms step_avg:94.31ms +step:503/1705 train_time:47438ms step_avg:94.31ms +step:504/1705 train_time:47533ms step_avg:94.31ms +step:505/1705 train_time:47625ms step_avg:94.31ms +step:506/1705 train_time:47718ms step_avg:94.30ms +step:507/1705 train_time:47809ms step_avg:94.30ms +step:508/1705 train_time:47902ms step_avg:94.29ms +step:509/1705 train_time:47993ms step_avg:94.29ms +step:510/1705 train_time:48085ms step_avg:94.29ms +step:511/1705 train_time:48178ms step_avg:94.28ms +step:512/1705 train_time:48273ms step_avg:94.28ms +step:513/1705 train_time:48368ms step_avg:94.28ms +step:514/1705 train_time:48462ms step_avg:94.28ms +step:515/1705 train_time:48556ms step_avg:94.28ms +step:516/1705 train_time:48649ms step_avg:94.28ms +step:517/1705 train_time:48743ms step_avg:94.28ms +step:518/1705 train_time:48835ms step_avg:94.28ms +step:519/1705 train_time:48927ms step_avg:94.27ms +step:520/1705 train_time:49019ms step_avg:94.27ms +step:521/1705 train_time:49112ms step_avg:94.26ms +step:522/1705 train_time:49205ms step_avg:94.26ms +step:523/1705 train_time:49298ms step_avg:94.26ms +step:524/1705 train_time:49391ms step_avg:94.26ms +step:525/1705 train_time:49485ms step_avg:94.26ms +step:526/1705 train_time:49580ms step_avg:94.26ms +step:527/1705 train_time:49673ms step_avg:94.26ms +step:528/1705 train_time:49766ms step_avg:94.25ms +step:529/1705 train_time:49858ms step_avg:94.25ms +step:530/1705 train_time:49951ms step_avg:94.25ms +step:531/1705 train_time:50043ms step_avg:94.24ms +step:532/1705 train_time:50135ms step_avg:94.24ms +step:533/1705 train_time:50229ms step_avg:94.24ms +step:534/1705 train_time:50322ms step_avg:94.24ms +step:535/1705 train_time:50415ms step_avg:94.23ms +step:536/1705 train_time:50509ms step_avg:94.23ms +step:537/1705 train_time:50602ms step_avg:94.23ms +step:538/1705 train_time:50694ms step_avg:94.23ms +step:539/1705 train_time:50788ms step_avg:94.23ms +step:540/1705 train_time:50881ms step_avg:94.22ms +step:541/1705 train_time:50973ms step_avg:94.22ms +step:542/1705 train_time:51066ms step_avg:94.22ms +step:543/1705 train_time:51159ms step_avg:94.21ms +step:544/1705 train_time:51251ms step_avg:94.21ms +step:545/1705 train_time:51345ms step_avg:94.21ms +step:546/1705 train_time:51439ms step_avg:94.21ms +step:547/1705 train_time:51532ms step_avg:94.21ms +step:548/1705 train_time:51626ms step_avg:94.21ms +step:549/1705 train_time:51721ms step_avg:94.21ms +step:550/1705 train_time:51813ms step_avg:94.21ms +step:551/1705 train_time:51907ms step_avg:94.20ms +step:552/1705 train_time:52000ms step_avg:94.20ms +step:553/1705 train_time:52093ms step_avg:94.20ms +step:554/1705 train_time:52186ms step_avg:94.20ms +step:555/1705 train_time:52279ms step_avg:94.20ms +step:556/1705 train_time:52372ms step_avg:94.19ms +step:557/1705 train_time:52466ms step_avg:94.19ms +step:558/1705 train_time:52560ms step_avg:94.19ms +step:559/1705 train_time:52653ms step_avg:94.19ms +step:560/1705 train_time:52746ms 
step_avg:94.19ms +step:561/1705 train_time:52839ms step_avg:94.19ms +step:562/1705 train_time:52932ms step_avg:94.18ms +step:563/1705 train_time:53025ms step_avg:94.18ms +step:564/1705 train_time:53119ms step_avg:94.18ms +step:565/1705 train_time:53211ms step_avg:94.18ms +step:566/1705 train_time:53304ms step_avg:94.18ms +step:567/1705 train_time:53397ms step_avg:94.17ms +step:568/1705 train_time:53490ms step_avg:94.17ms +step:569/1705 train_time:53584ms step_avg:94.17ms +step:570/1705 train_time:53677ms step_avg:94.17ms +step:571/1705 train_time:53772ms step_avg:94.17ms +step:572/1705 train_time:53866ms step_avg:94.17ms +step:573/1705 train_time:53960ms step_avg:94.17ms +step:574/1705 train_time:54054ms step_avg:94.17ms +step:575/1705 train_time:54148ms step_avg:94.17ms +step:576/1705 train_time:54242ms step_avg:94.17ms +step:577/1705 train_time:54337ms step_avg:94.17ms +step:578/1705 train_time:54431ms step_avg:94.17ms +step:579/1705 train_time:54526ms step_avg:94.17ms +step:580/1705 train_time:54621ms step_avg:94.17ms +step:581/1705 train_time:54715ms step_avg:94.17ms +step:582/1705 train_time:54809ms step_avg:94.17ms +step:583/1705 train_time:54904ms step_avg:94.17ms +step:584/1705 train_time:54999ms step_avg:94.18ms +step:585/1705 train_time:55092ms step_avg:94.18ms +step:586/1705 train_time:55187ms step_avg:94.18ms +step:587/1705 train_time:55282ms step_avg:94.18ms +step:588/1705 train_time:55376ms step_avg:94.18ms +step:589/1705 train_time:55471ms step_avg:94.18ms +step:590/1705 train_time:55566ms step_avg:94.18ms +step:591/1705 train_time:55661ms step_avg:94.18ms +step:592/1705 train_time:55755ms step_avg:94.18ms +step:593/1705 train_time:55849ms step_avg:94.18ms +step:594/1705 train_time:55944ms step_avg:94.18ms +step:595/1705 train_time:56039ms step_avg:94.18ms +step:596/1705 train_time:56133ms step_avg:94.18ms +step:597/1705 train_time:56228ms step_avg:94.18ms +step:598/1705 train_time:56322ms step_avg:94.18ms +step:599/1705 train_time:56416ms step_avg:94.18ms +step:600/1705 train_time:56510ms step_avg:94.18ms +step:601/1705 train_time:56605ms step_avg:94.18ms +step:602/1705 train_time:56700ms step_avg:94.19ms +step:603/1705 train_time:56794ms step_avg:94.19ms +step:604/1705 train_time:56888ms step_avg:94.19ms +step:605/1705 train_time:56983ms step_avg:94.19ms +step:606/1705 train_time:57078ms step_avg:94.19ms +step:607/1705 train_time:57172ms step_avg:94.19ms +step:608/1705 train_time:57266ms step_avg:94.19ms +step:609/1705 train_time:57361ms step_avg:94.19ms +step:610/1705 train_time:57455ms step_avg:94.19ms +step:611/1705 train_time:57550ms step_avg:94.19ms +step:612/1705 train_time:57645ms step_avg:94.19ms +step:613/1705 train_time:57740ms step_avg:94.19ms +step:614/1705 train_time:57834ms step_avg:94.19ms +step:615/1705 train_time:57928ms step_avg:94.19ms +step:616/1705 train_time:58023ms step_avg:94.19ms +step:617/1705 train_time:58118ms step_avg:94.19ms +step:618/1705 train_time:58211ms step_avg:94.19ms +step:619/1705 train_time:58307ms step_avg:94.20ms +step:620/1705 train_time:58402ms step_avg:94.20ms +step:621/1705 train_time:58497ms step_avg:94.20ms +step:622/1705 train_time:58591ms step_avg:94.20ms +step:623/1705 train_time:58685ms step_avg:94.20ms +step:624/1705 train_time:58779ms step_avg:94.20ms +step:625/1705 train_time:58873ms step_avg:94.20ms +step:625/1705 val_loss:3.6166 train_time:58967ms step_avg:94.35ms +step:626/1705 train_time:58991ms step_avg:94.23ms +step:627/1705 train_time:59073ms step_avg:94.21ms +step:628/1705 train_time:59170ms step_avg:94.22ms 
+step:629/1705 train_time:59265ms step_avg:94.22ms +step:630/1705 train_time:59358ms step_avg:94.22ms +step:631/1705 train_time:59451ms step_avg:94.22ms +step:632/1705 train_time:59544ms step_avg:94.22ms +step:633/1705 train_time:59638ms step_avg:94.21ms +step:634/1705 train_time:59731ms step_avg:94.21ms +step:635/1705 train_time:59824ms step_avg:94.21ms +step:636/1705 train_time:59918ms step_avg:94.21ms +step:637/1705 train_time:60014ms step_avg:94.21ms +step:638/1705 train_time:60112ms step_avg:94.22ms +step:639/1705 train_time:60466ms step_avg:94.63ms +step:640/1705 train_time:60573ms step_avg:94.65ms +step:641/1705 train_time:60666ms step_avg:94.64ms +step:642/1705 train_time:60759ms step_avg:94.64ms +step:643/1705 train_time:60853ms step_avg:94.64ms +step:644/1705 train_time:60946ms step_avg:94.64ms +step:645/1705 train_time:61039ms step_avg:94.63ms +step:646/1705 train_time:61132ms step_avg:94.63ms +step:647/1705 train_time:61225ms step_avg:94.63ms +step:648/1705 train_time:61319ms step_avg:94.63ms +step:649/1705 train_time:61415ms step_avg:94.63ms +step:650/1705 train_time:61514ms step_avg:94.64ms +step:651/1705 train_time:61611ms step_avg:94.64ms +step:652/1705 train_time:61705ms step_avg:94.64ms +step:653/1705 train_time:61798ms step_avg:94.64ms +step:654/1705 train_time:61892ms step_avg:94.64ms +step:655/1705 train_time:61986ms step_avg:94.64ms +step:656/1705 train_time:62079ms step_avg:94.63ms +step:657/1705 train_time:62173ms step_avg:94.63ms +step:658/1705 train_time:62266ms step_avg:94.63ms +step:659/1705 train_time:62360ms step_avg:94.63ms +step:660/1705 train_time:62455ms step_avg:94.63ms +step:661/1705 train_time:62551ms step_avg:94.63ms +step:662/1705 train_time:62646ms step_avg:94.63ms +step:663/1705 train_time:62740ms step_avg:94.63ms +step:664/1705 train_time:62834ms step_avg:94.63ms +step:665/1705 train_time:62928ms step_avg:94.63ms +step:666/1705 train_time:63021ms step_avg:94.63ms +step:667/1705 train_time:63115ms step_avg:94.63ms +step:668/1705 train_time:63210ms step_avg:94.63ms +step:669/1705 train_time:63304ms step_avg:94.62ms +step:670/1705 train_time:63398ms step_avg:94.62ms +step:671/1705 train_time:63493ms step_avg:94.62ms +step:672/1705 train_time:63589ms step_avg:94.63ms +step:673/1705 train_time:63685ms step_avg:94.63ms +step:674/1705 train_time:63779ms step_avg:94.63ms +step:675/1705 train_time:63873ms step_avg:94.63ms +step:676/1705 train_time:63968ms step_avg:94.63ms +step:677/1705 train_time:64061ms step_avg:94.63ms +step:678/1705 train_time:64155ms step_avg:94.62ms +step:679/1705 train_time:64249ms step_avg:94.62ms +step:680/1705 train_time:64343ms step_avg:94.62ms +step:681/1705 train_time:64437ms step_avg:94.62ms +step:682/1705 train_time:64533ms step_avg:94.62ms +step:683/1705 train_time:64627ms step_avg:94.62ms +step:684/1705 train_time:64722ms step_avg:94.62ms +step:685/1705 train_time:64816ms step_avg:94.62ms +step:686/1705 train_time:64911ms step_avg:94.62ms +step:687/1705 train_time:65006ms step_avg:94.62ms +step:688/1705 train_time:65100ms step_avg:94.62ms +step:689/1705 train_time:65194ms step_avg:94.62ms +step:690/1705 train_time:65289ms step_avg:94.62ms +step:691/1705 train_time:65383ms step_avg:94.62ms +step:692/1705 train_time:65477ms step_avg:94.62ms +step:693/1705 train_time:65572ms step_avg:94.62ms +step:694/1705 train_time:65666ms step_avg:94.62ms +step:695/1705 train_time:65761ms step_avg:94.62ms +step:696/1705 train_time:65855ms step_avg:94.62ms +step:697/1705 train_time:65950ms step_avg:94.62ms +step:698/1705 train_time:66045ms 
step_avg:94.62ms +step:699/1705 train_time:66138ms step_avg:94.62ms +step:700/1705 train_time:66233ms step_avg:94.62ms +step:701/1705 train_time:66328ms step_avg:94.62ms +step:702/1705 train_time:66422ms step_avg:94.62ms +step:703/1705 train_time:66516ms step_avg:94.62ms +step:704/1705 train_time:66611ms step_avg:94.62ms +step:705/1705 train_time:66704ms step_avg:94.62ms +step:706/1705 train_time:66798ms step_avg:94.62ms +step:707/1705 train_time:66893ms step_avg:94.62ms +step:708/1705 train_time:66988ms step_avg:94.62ms +step:709/1705 train_time:67081ms step_avg:94.61ms +step:710/1705 train_time:67176ms step_avg:94.61ms +step:711/1705 train_time:67270ms step_avg:94.61ms +step:712/1705 train_time:67366ms step_avg:94.61ms +step:713/1705 train_time:67459ms step_avg:94.61ms +step:714/1705 train_time:67554ms step_avg:94.61ms +step:715/1705 train_time:67648ms step_avg:94.61ms +step:716/1705 train_time:67742ms step_avg:94.61ms +step:717/1705 train_time:67836ms step_avg:94.61ms +step:718/1705 train_time:67931ms step_avg:94.61ms +step:719/1705 train_time:68025ms step_avg:94.61ms +step:720/1705 train_time:68118ms step_avg:94.61ms +step:721/1705 train_time:68212ms step_avg:94.61ms +step:722/1705 train_time:68307ms step_avg:94.61ms +step:723/1705 train_time:68401ms step_avg:94.61ms +step:724/1705 train_time:68495ms step_avg:94.61ms +step:725/1705 train_time:68590ms step_avg:94.61ms +step:726/1705 train_time:68683ms step_avg:94.61ms +step:727/1705 train_time:68778ms step_avg:94.60ms +step:728/1705 train_time:68873ms step_avg:94.61ms +step:729/1705 train_time:68967ms step_avg:94.60ms +step:730/1705 train_time:69061ms step_avg:94.60ms +step:731/1705 train_time:69156ms step_avg:94.60ms +step:732/1705 train_time:69251ms step_avg:94.61ms +step:733/1705 train_time:69346ms step_avg:94.61ms +step:734/1705 train_time:69439ms step_avg:94.60ms +step:735/1705 train_time:69534ms step_avg:94.60ms +step:736/1705 train_time:69627ms step_avg:94.60ms +step:737/1705 train_time:69722ms step_avg:94.60ms +step:738/1705 train_time:69816ms step_avg:94.60ms +step:739/1705 train_time:69911ms step_avg:94.60ms +step:740/1705 train_time:70005ms step_avg:94.60ms +step:741/1705 train_time:70099ms step_avg:94.60ms +step:742/1705 train_time:70194ms step_avg:94.60ms +step:743/1705 train_time:70288ms step_avg:94.60ms +step:744/1705 train_time:70383ms step_avg:94.60ms +step:745/1705 train_time:70478ms step_avg:94.60ms +step:746/1705 train_time:70572ms step_avg:94.60ms +step:747/1705 train_time:70666ms step_avg:94.60ms +step:748/1705 train_time:70760ms step_avg:94.60ms +step:749/1705 train_time:70854ms step_avg:94.60ms +step:750/1705 train_time:70949ms step_avg:94.60ms +step:750/1705 val_loss:3.5638 train_time:71043ms step_avg:94.72ms +step:751/1705 train_time:71066ms step_avg:94.63ms +step:752/1705 train_time:71142ms step_avg:94.60ms +step:753/1705 train_time:71239ms step_avg:94.61ms +step:754/1705 train_time:71332ms step_avg:94.61ms +step:755/1705 train_time:71426ms step_avg:94.60ms +step:756/1705 train_time:71520ms step_avg:94.60ms +step:757/1705 train_time:71614ms step_avg:94.60ms +step:758/1705 train_time:71707ms step_avg:94.60ms +step:759/1705 train_time:71800ms step_avg:94.60ms +step:760/1705 train_time:71893ms step_avg:94.60ms +step:761/1705 train_time:71988ms step_avg:94.60ms +step:762/1705 train_time:72086ms step_avg:94.60ms +step:763/1705 train_time:72182ms step_avg:94.60ms +step:764/1705 train_time:72278ms step_avg:94.60ms +step:765/1705 train_time:72372ms step_avg:94.60ms +step:766/1705 train_time:72466ms step_avg:94.60ms 
+step:767/1705 train_time:72561ms step_avg:94.60ms +step:768/1705 train_time:72654ms step_avg:94.60ms +step:769/1705 train_time:72749ms step_avg:94.60ms +step:770/1705 train_time:72843ms step_avg:94.60ms +step:771/1705 train_time:72936ms step_avg:94.60ms +step:772/1705 train_time:73031ms step_avg:94.60ms +step:773/1705 train_time:73127ms step_avg:94.60ms +step:774/1705 train_time:73225ms step_avg:94.61ms +step:775/1705 train_time:73321ms step_avg:94.61ms +step:776/1705 train_time:73415ms step_avg:94.61ms +step:777/1705 train_time:73508ms step_avg:94.61ms +step:778/1705 train_time:73602ms step_avg:94.60ms +step:779/1705 train_time:73695ms step_avg:94.60ms +step:780/1705 train_time:73789ms step_avg:94.60ms +step:781/1705 train_time:73883ms step_avg:94.60ms +step:782/1705 train_time:73977ms step_avg:94.60ms +step:783/1705 train_time:74072ms step_avg:94.60ms +step:784/1705 train_time:74168ms step_avg:94.60ms +step:785/1705 train_time:74263ms step_avg:94.60ms +step:786/1705 train_time:74358ms step_avg:94.60ms +step:787/1705 train_time:74452ms step_avg:94.60ms +step:788/1705 train_time:74546ms step_avg:94.60ms +step:789/1705 train_time:74640ms step_avg:94.60ms +step:790/1705 train_time:74733ms step_avg:94.60ms +step:791/1705 train_time:74827ms step_avg:94.60ms +step:792/1705 train_time:74921ms step_avg:94.60ms +step:793/1705 train_time:75015ms step_avg:94.60ms +step:794/1705 train_time:75110ms step_avg:94.60ms +step:795/1705 train_time:75206ms step_avg:94.60ms +step:796/1705 train_time:75301ms step_avg:94.60ms +step:797/1705 train_time:75396ms step_avg:94.60ms +step:798/1705 train_time:75490ms step_avg:94.60ms +step:799/1705 train_time:75584ms step_avg:94.60ms +step:800/1705 train_time:75679ms step_avg:94.60ms +step:801/1705 train_time:75773ms step_avg:94.60ms +step:802/1705 train_time:75866ms step_avg:94.60ms +step:803/1705 train_time:75961ms step_avg:94.60ms +step:804/1705 train_time:76054ms step_avg:94.60ms +step:805/1705 train_time:76149ms step_avg:94.60ms +step:806/1705 train_time:76244ms step_avg:94.60ms +step:807/1705 train_time:76339ms step_avg:94.60ms +step:808/1705 train_time:76433ms step_avg:94.60ms +step:809/1705 train_time:76528ms step_avg:94.60ms +step:810/1705 train_time:76624ms step_avg:94.60ms +step:811/1705 train_time:76718ms step_avg:94.60ms +step:812/1705 train_time:76812ms step_avg:94.60ms +step:813/1705 train_time:76906ms step_avg:94.60ms +step:814/1705 train_time:77001ms step_avg:94.60ms +step:815/1705 train_time:77095ms step_avg:94.59ms +step:816/1705 train_time:77189ms step_avg:94.59ms +step:817/1705 train_time:77284ms step_avg:94.60ms +step:818/1705 train_time:77378ms step_avg:94.59ms +step:819/1705 train_time:77472ms step_avg:94.59ms +step:820/1705 train_time:77568ms step_avg:94.59ms +step:821/1705 train_time:77663ms step_avg:94.60ms +step:822/1705 train_time:77757ms step_avg:94.59ms +step:823/1705 train_time:77851ms step_avg:94.59ms +step:824/1705 train_time:77946ms step_avg:94.59ms +step:825/1705 train_time:78041ms step_avg:94.59ms +step:826/1705 train_time:78135ms step_avg:94.59ms +step:827/1705 train_time:78230ms step_avg:94.59ms +step:828/1705 train_time:78324ms step_avg:94.59ms +step:829/1705 train_time:78418ms step_avg:94.59ms +step:830/1705 train_time:78512ms step_avg:94.59ms +step:831/1705 train_time:78607ms step_avg:94.59ms +step:832/1705 train_time:78701ms step_avg:94.59ms +step:833/1705 train_time:78795ms step_avg:94.59ms +step:834/1705 train_time:78889ms step_avg:94.59ms +step:835/1705 train_time:78983ms step_avg:94.59ms +step:836/1705 train_time:79079ms 
step_avg:94.59ms +step:837/1705 train_time:79173ms step_avg:94.59ms +step:838/1705 train_time:79267ms step_avg:94.59ms +step:839/1705 train_time:79362ms step_avg:94.59ms +step:840/1705 train_time:79457ms step_avg:94.59ms +step:841/1705 train_time:79551ms step_avg:94.59ms +step:842/1705 train_time:79646ms step_avg:94.59ms +step:843/1705 train_time:79740ms step_avg:94.59ms +step:844/1705 train_time:79834ms step_avg:94.59ms +step:845/1705 train_time:79928ms step_avg:94.59ms +step:846/1705 train_time:80023ms step_avg:94.59ms +step:847/1705 train_time:80118ms step_avg:94.59ms +step:848/1705 train_time:80212ms step_avg:94.59ms +step:849/1705 train_time:80306ms step_avg:94.59ms +step:850/1705 train_time:80401ms step_avg:94.59ms +step:851/1705 train_time:80688ms step_avg:94.82ms +step:852/1705 train_time:80847ms step_avg:94.89ms +step:853/1705 train_time:80940ms step_avg:94.89ms +step:854/1705 train_time:81033ms step_avg:94.89ms +step:855/1705 train_time:81126ms step_avg:94.88ms +step:856/1705 train_time:81219ms step_avg:94.88ms +step:857/1705 train_time:81313ms step_avg:94.88ms +step:858/1705 train_time:81407ms step_avg:94.88ms +step:859/1705 train_time:81501ms step_avg:94.88ms +step:860/1705 train_time:81594ms step_avg:94.88ms +step:861/1705 train_time:81690ms step_avg:94.88ms +step:862/1705 train_time:81789ms step_avg:94.88ms +step:863/1705 train_time:81889ms step_avg:94.89ms +step:864/1705 train_time:81984ms step_avg:94.89ms +step:865/1705 train_time:82078ms step_avg:94.89ms +step:866/1705 train_time:82171ms step_avg:94.89ms +step:867/1705 train_time:82265ms step_avg:94.88ms +step:868/1705 train_time:82359ms step_avg:94.88ms +step:869/1705 train_time:82452ms step_avg:94.88ms +step:870/1705 train_time:82545ms step_avg:94.88ms +step:871/1705 train_time:82641ms step_avg:94.88ms +step:872/1705 train_time:82736ms step_avg:94.88ms +step:873/1705 train_time:82832ms step_avg:94.88ms +step:874/1705 train_time:82927ms step_avg:94.88ms +step:875/1705 train_time:83023ms step_avg:94.88ms +step:875/1705 val_loss:3.5241 train_time:83118ms step_avg:94.99ms +step:876/1705 train_time:83140ms step_avg:94.91ms +step:877/1705 train_time:83221ms step_avg:94.89ms +step:878/1705 train_time:83319ms step_avg:94.90ms +step:879/1705 train_time:83413ms step_avg:94.90ms +step:880/1705 train_time:83506ms step_avg:94.89ms +step:881/1705 train_time:83599ms step_avg:94.89ms +step:882/1705 train_time:83692ms step_avg:94.89ms +step:883/1705 train_time:83786ms step_avg:94.89ms +step:884/1705 train_time:83878ms step_avg:94.89ms +step:885/1705 train_time:83972ms step_avg:94.88ms +step:886/1705 train_time:84067ms step_avg:94.88ms +step:887/1705 train_time:84164ms step_avg:94.89ms +step:888/1705 train_time:84261ms step_avg:94.89ms +step:889/1705 train_time:84359ms step_avg:94.89ms +step:890/1705 train_time:84453ms step_avg:94.89ms +step:891/1705 train_time:84547ms step_avg:94.89ms +step:892/1705 train_time:84641ms step_avg:94.89ms +step:893/1705 train_time:84733ms step_avg:94.89ms +step:894/1705 train_time:84827ms step_avg:94.89ms +step:895/1705 train_time:84921ms step_avg:94.88ms +step:896/1705 train_time:85015ms step_avg:94.88ms +step:897/1705 train_time:85109ms step_avg:94.88ms +step:898/1705 train_time:85205ms step_avg:94.88ms +step:899/1705 train_time:85301ms step_avg:94.88ms +step:900/1705 train_time:85396ms step_avg:94.88ms +step:901/1705 train_time:85490ms step_avg:94.88ms +step:902/1705 train_time:85585ms step_avg:94.88ms +step:903/1705 train_time:85679ms step_avg:94.88ms +step:904/1705 train_time:85772ms step_avg:94.88ms 
+step:905/1705 train_time:85866ms step_avg:94.88ms +step:906/1705 train_time:85960ms step_avg:94.88ms +step:907/1705 train_time:86055ms step_avg:94.88ms +step:908/1705 train_time:86149ms step_avg:94.88ms +step:909/1705 train_time:86245ms step_avg:94.88ms +step:910/1705 train_time:86340ms step_avg:94.88ms +step:911/1705 train_time:86434ms step_avg:94.88ms +step:912/1705 train_time:86529ms step_avg:94.88ms +step:913/1705 train_time:86624ms step_avg:94.88ms +step:914/1705 train_time:86718ms step_avg:94.88ms +step:915/1705 train_time:86812ms step_avg:94.88ms +step:916/1705 train_time:86906ms step_avg:94.88ms +step:917/1705 train_time:87000ms step_avg:94.87ms +step:918/1705 train_time:87094ms step_avg:94.87ms +step:919/1705 train_time:87189ms step_avg:94.87ms +step:920/1705 train_time:87284ms step_avg:94.87ms +step:921/1705 train_time:87379ms step_avg:94.87ms +step:922/1705 train_time:87473ms step_avg:94.87ms +step:923/1705 train_time:87568ms step_avg:94.87ms +step:924/1705 train_time:87662ms step_avg:94.87ms +step:925/1705 train_time:87757ms step_avg:94.87ms +step:926/1705 train_time:87850ms step_avg:94.87ms +step:927/1705 train_time:87945ms step_avg:94.87ms +step:928/1705 train_time:88040ms step_avg:94.87ms +step:929/1705 train_time:88134ms step_avg:94.87ms +step:930/1705 train_time:88229ms step_avg:94.87ms +step:931/1705 train_time:88323ms step_avg:94.87ms +step:932/1705 train_time:88418ms step_avg:94.87ms +step:933/1705 train_time:88512ms step_avg:94.87ms +step:934/1705 train_time:88607ms step_avg:94.87ms +step:935/1705 train_time:88702ms step_avg:94.87ms +step:936/1705 train_time:88796ms step_avg:94.87ms +step:937/1705 train_time:88890ms step_avg:94.87ms +step:938/1705 train_time:88984ms step_avg:94.87ms +step:939/1705 train_time:89079ms step_avg:94.87ms +step:940/1705 train_time:89174ms step_avg:94.87ms +step:941/1705 train_time:89268ms step_avg:94.87ms +step:942/1705 train_time:89363ms step_avg:94.87ms +step:943/1705 train_time:89458ms step_avg:94.86ms +step:944/1705 train_time:89553ms step_avg:94.87ms +step:945/1705 train_time:89649ms step_avg:94.87ms +step:946/1705 train_time:89744ms step_avg:94.87ms +step:947/1705 train_time:89838ms step_avg:94.87ms +step:948/1705 train_time:89931ms step_avg:94.86ms +step:949/1705 train_time:90026ms step_avg:94.86ms +step:950/1705 train_time:90120ms step_avg:94.86ms +step:951/1705 train_time:90214ms step_avg:94.86ms +step:952/1705 train_time:90309ms step_avg:94.86ms +step:953/1705 train_time:90405ms step_avg:94.86ms +step:954/1705 train_time:90500ms step_avg:94.86ms +step:955/1705 train_time:90593ms step_avg:94.86ms +step:956/1705 train_time:90688ms step_avg:94.86ms +step:957/1705 train_time:90784ms step_avg:94.86ms +step:958/1705 train_time:90878ms step_avg:94.86ms +step:959/1705 train_time:90972ms step_avg:94.86ms +step:960/1705 train_time:91067ms step_avg:94.86ms +step:961/1705 train_time:91161ms step_avg:94.86ms +step:962/1705 train_time:91255ms step_avg:94.86ms +step:963/1705 train_time:91350ms step_avg:94.86ms +step:964/1705 train_time:91446ms step_avg:94.86ms +step:965/1705 train_time:91540ms step_avg:94.86ms +step:966/1705 train_time:91633ms step_avg:94.86ms +step:967/1705 train_time:91728ms step_avg:94.86ms +step:968/1705 train_time:91822ms step_avg:94.86ms +step:969/1705 train_time:91916ms step_avg:94.86ms +step:970/1705 train_time:92011ms step_avg:94.86ms +step:971/1705 train_time:92105ms step_avg:94.86ms +step:972/1705 train_time:92200ms step_avg:94.86ms +step:973/1705 train_time:92294ms step_avg:94.85ms +step:974/1705 train_time:92388ms 
step_avg:94.85ms +step:975/1705 train_time:92483ms step_avg:94.85ms +step:976/1705 train_time:92578ms step_avg:94.85ms +step:977/1705 train_time:92672ms step_avg:94.85ms +step:978/1705 train_time:92767ms step_avg:94.85ms +step:979/1705 train_time:92862ms step_avg:94.85ms +step:980/1705 train_time:92956ms step_avg:94.85ms +step:981/1705 train_time:93051ms step_avg:94.85ms +step:982/1705 train_time:93145ms step_avg:94.85ms +step:983/1705 train_time:93240ms step_avg:94.85ms +step:984/1705 train_time:93333ms step_avg:94.85ms +step:985/1705 train_time:93428ms step_avg:94.85ms +step:986/1705 train_time:93523ms step_avg:94.85ms +step:987/1705 train_time:93618ms step_avg:94.85ms +step:988/1705 train_time:93713ms step_avg:94.85ms +step:989/1705 train_time:93809ms step_avg:94.85ms +step:990/1705 train_time:93903ms step_avg:94.85ms +step:991/1705 train_time:93999ms step_avg:94.85ms +step:992/1705 train_time:94093ms step_avg:94.85ms +step:993/1705 train_time:94188ms step_avg:94.85ms +step:994/1705 train_time:94282ms step_avg:94.85ms +step:995/1705 train_time:94377ms step_avg:94.85ms +step:996/1705 train_time:94471ms step_avg:94.85ms +step:997/1705 train_time:94565ms step_avg:94.85ms +step:998/1705 train_time:94660ms step_avg:94.85ms +step:999/1705 train_time:94753ms step_avg:94.85ms +step:1000/1705 train_time:94848ms step_avg:94.85ms +step:1000/1705 val_loss:3.4849 train_time:94943ms step_avg:94.94ms +step:1001/1705 train_time:94967ms step_avg:94.87ms +step:1002/1705 train_time:95041ms step_avg:94.85ms +step:1003/1705 train_time:95142ms step_avg:94.86ms +step:1004/1705 train_time:95238ms step_avg:94.86ms +step:1005/1705 train_time:95332ms step_avg:94.86ms +step:1006/1705 train_time:95425ms step_avg:94.86ms +step:1007/1705 train_time:95519ms step_avg:94.85ms +step:1008/1705 train_time:95612ms step_avg:94.85ms +step:1009/1705 train_time:95706ms step_avg:94.85ms +step:1010/1705 train_time:95799ms step_avg:94.85ms +step:1011/1705 train_time:95893ms step_avg:94.85ms +step:1012/1705 train_time:95989ms step_avg:94.85ms +step:1013/1705 train_time:96085ms step_avg:94.85ms +step:1014/1705 train_time:96182ms step_avg:94.85ms +step:1015/1705 train_time:96279ms step_avg:94.86ms +step:1016/1705 train_time:96373ms step_avg:94.86ms +step:1017/1705 train_time:96467ms step_avg:94.85ms +step:1018/1705 train_time:96560ms step_avg:94.85ms +step:1019/1705 train_time:96653ms step_avg:94.85ms +step:1020/1705 train_time:96746ms step_avg:94.85ms +step:1021/1705 train_time:96840ms step_avg:94.85ms +step:1022/1705 train_time:96935ms step_avg:94.85ms +step:1023/1705 train_time:97030ms step_avg:94.85ms +step:1024/1705 train_time:97126ms step_avg:94.85ms +step:1025/1705 train_time:97223ms step_avg:94.85ms +step:1026/1705 train_time:97319ms step_avg:94.85ms +step:1027/1705 train_time:97413ms step_avg:94.85ms +step:1028/1705 train_time:97507ms step_avg:94.85ms +step:1029/1705 train_time:97601ms step_avg:94.85ms +step:1030/1705 train_time:97695ms step_avg:94.85ms +step:1031/1705 train_time:97788ms step_avg:94.85ms +step:1032/1705 train_time:97883ms step_avg:94.85ms +step:1033/1705 train_time:97978ms step_avg:94.85ms +step:1034/1705 train_time:98074ms step_avg:94.85ms +step:1035/1705 train_time:98168ms step_avg:94.85ms +step:1036/1705 train_time:98264ms step_avg:94.85ms +step:1037/1705 train_time:98359ms step_avg:94.85ms +step:1038/1705 train_time:98453ms step_avg:94.85ms +step:1039/1705 train_time:98547ms step_avg:94.85ms +step:1040/1705 train_time:98641ms step_avg:94.85ms +step:1041/1705 train_time:98735ms step_avg:94.85ms 
+step:1042/1705 train_time:98829ms step_avg:94.85ms +step:1043/1705 train_time:98923ms step_avg:94.84ms +step:1044/1705 train_time:99018ms step_avg:94.84ms +step:1045/1705 train_time:99113ms step_avg:94.85ms +step:1046/1705 train_time:99208ms step_avg:94.85ms +step:1047/1705 train_time:99303ms step_avg:94.85ms +step:1048/1705 train_time:99399ms step_avg:94.85ms +step:1049/1705 train_time:99494ms step_avg:94.85ms +step:1050/1705 train_time:99587ms step_avg:94.85ms +step:1051/1705 train_time:99682ms step_avg:94.84ms +step:1052/1705 train_time:99776ms step_avg:94.84ms +step:1053/1705 train_time:99871ms step_avg:94.84ms +step:1054/1705 train_time:99965ms step_avg:94.84ms +step:1055/1705 train_time:100061ms step_avg:94.84ms +step:1056/1705 train_time:100156ms step_avg:94.84ms +step:1057/1705 train_time:100251ms step_avg:94.84ms +step:1058/1705 train_time:100345ms step_avg:94.84ms +step:1059/1705 train_time:100440ms step_avg:94.84ms +step:1060/1705 train_time:100535ms step_avg:94.84ms +step:1061/1705 train_time:100628ms step_avg:94.84ms +step:1062/1705 train_time:100934ms step_avg:95.04ms +step:1063/1705 train_time:101051ms step_avg:95.06ms +step:1064/1705 train_time:101145ms step_avg:95.06ms +step:1065/1705 train_time:101239ms step_avg:95.06ms +step:1066/1705 train_time:101332ms step_avg:95.06ms +step:1067/1705 train_time:101426ms step_avg:95.06ms +step:1068/1705 train_time:101519ms step_avg:95.06ms +step:1069/1705 train_time:101612ms step_avg:95.05ms +step:1070/1705 train_time:101706ms step_avg:95.05ms +step:1071/1705 train_time:101799ms step_avg:95.05ms +step:1072/1705 train_time:101897ms step_avg:95.05ms +step:1073/1705 train_time:101996ms step_avg:95.06ms +step:1074/1705 train_time:102094ms step_avg:95.06ms +step:1075/1705 train_time:102189ms step_avg:95.06ms +step:1076/1705 train_time:102283ms step_avg:95.06ms +step:1077/1705 train_time:102377ms step_avg:95.06ms +step:1078/1705 train_time:102471ms step_avg:95.06ms +step:1079/1705 train_time:102564ms step_avg:95.05ms +step:1080/1705 train_time:102658ms step_avg:95.05ms +step:1081/1705 train_time:102752ms step_avg:95.05ms +step:1082/1705 train_time:102847ms step_avg:95.05ms +step:1083/1705 train_time:102942ms step_avg:95.05ms +step:1084/1705 train_time:103039ms step_avg:95.05ms +step:1085/1705 train_time:103136ms step_avg:95.06ms +step:1086/1705 train_time:103231ms step_avg:95.06ms +step:1087/1705 train_time:103325ms step_avg:95.06ms +step:1088/1705 train_time:103419ms step_avg:95.05ms +step:1089/1705 train_time:103513ms step_avg:95.05ms +step:1090/1705 train_time:103607ms step_avg:95.05ms +step:1091/1705 train_time:103701ms step_avg:95.05ms +step:1092/1705 train_time:103795ms step_avg:95.05ms +step:1093/1705 train_time:103888ms step_avg:95.05ms +step:1094/1705 train_time:103984ms step_avg:95.05ms +step:1095/1705 train_time:104080ms step_avg:95.05ms +step:1096/1705 train_time:104176ms step_avg:95.05ms +step:1097/1705 train_time:104270ms step_avg:95.05ms +step:1098/1705 train_time:104364ms step_avg:95.05ms +step:1099/1705 train_time:104459ms step_avg:95.05ms +step:1100/1705 train_time:104552ms step_avg:95.05ms +step:1101/1705 train_time:104646ms step_avg:95.05ms +step:1102/1705 train_time:104740ms step_avg:95.05ms +step:1103/1705 train_time:104834ms step_avg:95.04ms +step:1104/1705 train_time:104928ms step_avg:95.04ms +step:1105/1705 train_time:105024ms step_avg:95.04ms +step:1106/1705 train_time:105119ms step_avg:95.04ms +step:1107/1705 train_time:105214ms step_avg:95.04ms +step:1108/1705 train_time:105308ms step_avg:95.04ms +step:1109/1705 
train_time:105403ms step_avg:95.04ms +step:1110/1705 train_time:105497ms step_avg:95.04ms +step:1111/1705 train_time:105591ms step_avg:95.04ms +step:1112/1705 train_time:105685ms step_avg:95.04ms +step:1113/1705 train_time:105780ms step_avg:95.04ms +step:1114/1705 train_time:105874ms step_avg:95.04ms +step:1115/1705 train_time:105968ms step_avg:95.04ms +step:1116/1705 train_time:106064ms step_avg:95.04ms +step:1117/1705 train_time:106159ms step_avg:95.04ms +step:1118/1705 train_time:106254ms step_avg:95.04ms +step:1119/1705 train_time:106348ms step_avg:95.04ms +step:1120/1705 train_time:106442ms step_avg:95.04ms +step:1121/1705 train_time:106537ms step_avg:95.04ms +step:1122/1705 train_time:106632ms step_avg:95.04ms +step:1123/1705 train_time:106726ms step_avg:95.04ms +step:1124/1705 train_time:106821ms step_avg:95.04ms +step:1125/1705 train_time:106915ms step_avg:95.04ms +step:1125/1705 val_loss:3.4376 train_time:107009ms step_avg:95.12ms +step:1126/1705 train_time:107033ms step_avg:95.06ms +step:1127/1705 train_time:107110ms step_avg:95.04ms +step:1128/1705 train_time:107206ms step_avg:95.04ms +step:1129/1705 train_time:107301ms step_avg:95.04ms +step:1130/1705 train_time:107394ms step_avg:95.04ms +step:1131/1705 train_time:107488ms step_avg:95.04ms +step:1132/1705 train_time:107581ms step_avg:95.04ms +step:1133/1705 train_time:107675ms step_avg:95.03ms +step:1134/1705 train_time:107768ms step_avg:95.03ms +step:1135/1705 train_time:107862ms step_avg:95.03ms +step:1136/1705 train_time:107957ms step_avg:95.03ms +step:1137/1705 train_time:108055ms step_avg:95.03ms +step:1138/1705 train_time:108151ms step_avg:95.04ms +step:1139/1705 train_time:108247ms step_avg:95.04ms +step:1140/1705 train_time:108342ms step_avg:95.04ms +step:1141/1705 train_time:108437ms step_avg:95.04ms +step:1142/1705 train_time:108532ms step_avg:95.04ms +step:1143/1705 train_time:108626ms step_avg:95.04ms +step:1144/1705 train_time:108721ms step_avg:95.04ms +step:1145/1705 train_time:108816ms step_avg:95.04ms +step:1146/1705 train_time:108910ms step_avg:95.04ms +step:1147/1705 train_time:109006ms step_avg:95.04ms +step:1148/1705 train_time:109104ms step_avg:95.04ms +step:1149/1705 train_time:109200ms step_avg:95.04ms +step:1150/1705 train_time:109297ms step_avg:95.04ms +step:1151/1705 train_time:109393ms step_avg:95.04ms +step:1152/1705 train_time:109488ms step_avg:95.04ms +step:1153/1705 train_time:109582ms step_avg:95.04ms +step:1154/1705 train_time:109677ms step_avg:95.04ms +step:1155/1705 train_time:109772ms step_avg:95.04ms +step:1156/1705 train_time:109866ms step_avg:95.04ms +step:1157/1705 train_time:109961ms step_avg:95.04ms +step:1158/1705 train_time:110058ms step_avg:95.04ms +step:1159/1705 train_time:110155ms step_avg:95.04ms +step:1160/1705 train_time:110253ms step_avg:95.05ms +step:1161/1705 train_time:110347ms step_avg:95.04ms +step:1162/1705 train_time:110443ms step_avg:95.05ms +step:1163/1705 train_time:110537ms step_avg:95.05ms +step:1164/1705 train_time:110632ms step_avg:95.04ms +step:1165/1705 train_time:110726ms step_avg:95.04ms +step:1166/1705 train_time:110821ms step_avg:95.04ms +step:1167/1705 train_time:110917ms step_avg:95.04ms +step:1168/1705 train_time:111012ms step_avg:95.04ms +step:1169/1705 train_time:111107ms step_avg:95.04ms +step:1170/1705 train_time:111204ms step_avg:95.05ms +step:1171/1705 train_time:111300ms step_avg:95.05ms +step:1172/1705 train_time:111396ms step_avg:95.05ms +step:1173/1705 train_time:111491ms step_avg:95.05ms +step:1174/1705 train_time:111586ms step_avg:95.05ms 
+step:1175/1705 train_time:111681ms step_avg:95.05ms +step:1176/1705 train_time:111776ms step_avg:95.05ms +step:1177/1705 train_time:111871ms step_avg:95.05ms +step:1178/1705 train_time:111965ms step_avg:95.05ms +step:1179/1705 train_time:112061ms step_avg:95.05ms +step:1180/1705 train_time:112157ms step_avg:95.05ms +step:1181/1705 train_time:112253ms step_avg:95.05ms +step:1182/1705 train_time:112349ms step_avg:95.05ms +step:1183/1705 train_time:112444ms step_avg:95.05ms +step:1184/1705 train_time:112540ms step_avg:95.05ms +step:1185/1705 train_time:112636ms step_avg:95.05ms +step:1186/1705 train_time:112730ms step_avg:95.05ms +step:1187/1705 train_time:112825ms step_avg:95.05ms +step:1188/1705 train_time:112920ms step_avg:95.05ms +step:1189/1705 train_time:113015ms step_avg:95.05ms +step:1190/1705 train_time:113111ms step_avg:95.05ms +step:1191/1705 train_time:113206ms step_avg:95.05ms +step:1192/1705 train_time:113302ms step_avg:95.05ms +step:1193/1705 train_time:113399ms step_avg:95.05ms +step:1194/1705 train_time:113494ms step_avg:95.05ms +step:1195/1705 train_time:113589ms step_avg:95.05ms +step:1196/1705 train_time:113684ms step_avg:95.05ms +step:1197/1705 train_time:113779ms step_avg:95.05ms +step:1198/1705 train_time:113874ms step_avg:95.05ms +step:1199/1705 train_time:113969ms step_avg:95.05ms +step:1200/1705 train_time:114064ms step_avg:95.05ms +step:1201/1705 train_time:114161ms step_avg:95.05ms +step:1202/1705 train_time:114258ms step_avg:95.06ms +step:1203/1705 train_time:114353ms step_avg:95.06ms +step:1204/1705 train_time:114448ms step_avg:95.06ms +step:1205/1705 train_time:114544ms step_avg:95.06ms +step:1206/1705 train_time:114640ms step_avg:95.06ms +step:1207/1705 train_time:114735ms step_avg:95.06ms +step:1208/1705 train_time:114831ms step_avg:95.06ms +step:1209/1705 train_time:114925ms step_avg:95.06ms +step:1210/1705 train_time:115021ms step_avg:95.06ms +step:1211/1705 train_time:115116ms step_avg:95.06ms +step:1212/1705 train_time:115212ms step_avg:95.06ms +step:1213/1705 train_time:115306ms step_avg:95.06ms +step:1214/1705 train_time:115403ms step_avg:95.06ms +step:1215/1705 train_time:115498ms step_avg:95.06ms +step:1216/1705 train_time:115594ms step_avg:95.06ms +step:1217/1705 train_time:115689ms step_avg:95.06ms +step:1218/1705 train_time:115784ms step_avg:95.06ms +step:1219/1705 train_time:115880ms step_avg:95.06ms +step:1220/1705 train_time:115974ms step_avg:95.06ms +step:1221/1705 train_time:116071ms step_avg:95.06ms +step:1222/1705 train_time:116165ms step_avg:95.06ms +step:1223/1705 train_time:116260ms step_avg:95.06ms +step:1224/1705 train_time:116357ms step_avg:95.06ms +step:1225/1705 train_time:116452ms step_avg:95.06ms +step:1226/1705 train_time:116547ms step_avg:95.06ms +step:1227/1705 train_time:116642ms step_avg:95.06ms +step:1228/1705 train_time:116738ms step_avg:95.06ms +step:1229/1705 train_time:116832ms step_avg:95.06ms +step:1230/1705 train_time:116927ms step_avg:95.06ms +step:1231/1705 train_time:117023ms step_avg:95.06ms +step:1232/1705 train_time:117119ms step_avg:95.06ms +step:1233/1705 train_time:117215ms step_avg:95.06ms +step:1234/1705 train_time:117310ms step_avg:95.07ms +step:1235/1705 train_time:117406ms step_avg:95.07ms +step:1236/1705 train_time:117501ms step_avg:95.07ms +step:1237/1705 train_time:117596ms step_avg:95.07ms +step:1238/1705 train_time:117691ms step_avg:95.07ms +step:1239/1705 train_time:117785ms step_avg:95.06ms +step:1240/1705 train_time:117881ms step_avg:95.07ms +step:1241/1705 train_time:117975ms step_avg:95.06ms 
+step:1242/1705 train_time:118071ms step_avg:95.07ms +step:1243/1705 train_time:118165ms step_avg:95.06ms +step:1244/1705 train_time:118262ms step_avg:95.07ms +step:1245/1705 train_time:118358ms step_avg:95.07ms +step:1246/1705 train_time:118453ms step_avg:95.07ms +step:1247/1705 train_time:118547ms step_avg:95.07ms +step:1248/1705 train_time:118642ms step_avg:95.07ms +step:1249/1705 train_time:118739ms step_avg:95.07ms +step:1250/1705 train_time:118834ms step_avg:95.07ms +step:1250/1705 val_loss:3.3887 train_time:118929ms step_avg:95.14ms +step:1251/1705 train_time:118952ms step_avg:95.09ms +step:1252/1705 train_time:119033ms step_avg:95.07ms +step:1253/1705 train_time:119131ms step_avg:95.08ms +step:1254/1705 train_time:119224ms step_avg:95.08ms +step:1255/1705 train_time:119319ms step_avg:95.07ms +step:1256/1705 train_time:119413ms step_avg:95.07ms +step:1257/1705 train_time:119507ms step_avg:95.07ms +step:1258/1705 train_time:119601ms step_avg:95.07ms +step:1259/1705 train_time:119695ms step_avg:95.07ms +step:1260/1705 train_time:119788ms step_avg:95.07ms +step:1261/1705 train_time:119885ms step_avg:95.07ms +step:1262/1705 train_time:119984ms step_avg:95.07ms +step:1263/1705 train_time:120081ms step_avg:95.08ms +step:1264/1705 train_time:120177ms step_avg:95.08ms +step:1265/1705 train_time:120273ms step_avg:95.08ms +step:1266/1705 train_time:120367ms step_avg:95.08ms +step:1267/1705 train_time:120461ms step_avg:95.08ms +step:1268/1705 train_time:120555ms step_avg:95.08ms +step:1269/1705 train_time:120650ms step_avg:95.07ms +step:1270/1705 train_time:120743ms step_avg:95.07ms +step:1271/1705 train_time:120839ms step_avg:95.07ms +step:1272/1705 train_time:120936ms step_avg:95.08ms +step:1273/1705 train_time:121033ms step_avg:95.08ms +step:1274/1705 train_time:121405ms step_avg:95.29ms +step:1275/1705 train_time:121488ms step_avg:95.29ms +step:1276/1705 train_time:121582ms step_avg:95.28ms +step:1277/1705 train_time:121676ms step_avg:95.28ms +step:1278/1705 train_time:121770ms step_avg:95.28ms +step:1279/1705 train_time:121864ms step_avg:95.28ms +step:1280/1705 train_time:121959ms step_avg:95.28ms +step:1281/1705 train_time:122053ms step_avg:95.28ms +step:1282/1705 train_time:122147ms step_avg:95.28ms +step:1283/1705 train_time:122240ms step_avg:95.28ms +step:1284/1705 train_time:122343ms step_avg:95.28ms +step:1285/1705 train_time:122442ms step_avg:95.29ms +step:1286/1705 train_time:122538ms step_avg:95.29ms +step:1287/1705 train_time:122633ms step_avg:95.29ms +step:1288/1705 train_time:122727ms step_avg:95.28ms +step:1289/1705 train_time:122821ms step_avg:95.28ms +step:1290/1705 train_time:122916ms step_avg:95.28ms +step:1291/1705 train_time:123010ms step_avg:95.28ms +step:1292/1705 train_time:123104ms step_avg:95.28ms +step:1293/1705 train_time:123198ms step_avg:95.28ms +step:1294/1705 train_time:123295ms step_avg:95.28ms +step:1295/1705 train_time:123394ms step_avg:95.29ms +step:1296/1705 train_time:123491ms step_avg:95.29ms +step:1297/1705 train_time:123586ms step_avg:95.29ms +step:1298/1705 train_time:123681ms step_avg:95.29ms +step:1299/1705 train_time:123776ms step_avg:95.29ms +step:1300/1705 train_time:123871ms step_avg:95.29ms +step:1301/1705 train_time:123966ms step_avg:95.29ms +step:1302/1705 train_time:124060ms step_avg:95.28ms +step:1303/1705 train_time:124154ms step_avg:95.28ms +step:1304/1705 train_time:124250ms step_avg:95.28ms +step:1305/1705 train_time:124347ms step_avg:95.28ms +step:1306/1705 train_time:124442ms step_avg:95.29ms +step:1307/1705 train_time:124538ms 
step_avg:95.29ms +step:1308/1705 train_time:124634ms step_avg:95.29ms +step:1309/1705 train_time:124729ms step_avg:95.29ms +step:1310/1705 train_time:124824ms step_avg:95.29ms +step:1311/1705 train_time:124918ms step_avg:95.28ms +step:1312/1705 train_time:125012ms step_avg:95.28ms +step:1313/1705 train_time:125107ms step_avg:95.28ms +step:1314/1705 train_time:125201ms step_avg:95.28ms +step:1315/1705 train_time:125297ms step_avg:95.28ms +step:1316/1705 train_time:125394ms step_avg:95.28ms +step:1317/1705 train_time:125490ms step_avg:95.28ms +step:1318/1705 train_time:125585ms step_avg:95.28ms +step:1319/1705 train_time:125680ms step_avg:95.28ms +step:1320/1705 train_time:125775ms step_avg:95.28ms +step:1321/1705 train_time:125870ms step_avg:95.28ms +step:1322/1705 train_time:125965ms step_avg:95.28ms +step:1323/1705 train_time:126060ms step_avg:95.28ms +step:1324/1705 train_time:126154ms step_avg:95.28ms +step:1325/1705 train_time:126249ms step_avg:95.28ms +step:1326/1705 train_time:126346ms step_avg:95.28ms +step:1327/1705 train_time:126441ms step_avg:95.28ms +step:1328/1705 train_time:126537ms step_avg:95.28ms +step:1329/1705 train_time:126632ms step_avg:95.28ms +step:1330/1705 train_time:126728ms step_avg:95.28ms +step:1331/1705 train_time:126822ms step_avg:95.28ms +step:1332/1705 train_time:126918ms step_avg:95.28ms +step:1333/1705 train_time:127013ms step_avg:95.28ms +step:1334/1705 train_time:127108ms step_avg:95.28ms +step:1335/1705 train_time:127202ms step_avg:95.28ms +step:1336/1705 train_time:127298ms step_avg:95.28ms +step:1337/1705 train_time:127395ms step_avg:95.28ms +step:1338/1705 train_time:127491ms step_avg:95.28ms +step:1339/1705 train_time:127586ms step_avg:95.28ms +step:1340/1705 train_time:127681ms step_avg:95.28ms +step:1341/1705 train_time:127776ms step_avg:95.28ms +step:1342/1705 train_time:127871ms step_avg:95.28ms +step:1343/1705 train_time:127966ms step_avg:95.28ms +step:1344/1705 train_time:128062ms step_avg:95.28ms +step:1345/1705 train_time:128156ms step_avg:95.28ms +step:1346/1705 train_time:128253ms step_avg:95.28ms +step:1347/1705 train_time:128349ms step_avg:95.29ms +step:1348/1705 train_time:128445ms step_avg:95.29ms +step:1349/1705 train_time:128541ms step_avg:95.29ms +step:1350/1705 train_time:128636ms step_avg:95.29ms +step:1351/1705 train_time:128732ms step_avg:95.29ms +step:1352/1705 train_time:128827ms step_avg:95.29ms +step:1353/1705 train_time:128922ms step_avg:95.29ms +step:1354/1705 train_time:129017ms step_avg:95.29ms +step:1355/1705 train_time:129113ms step_avg:95.29ms +step:1356/1705 train_time:129208ms step_avg:95.29ms +step:1357/1705 train_time:129304ms step_avg:95.29ms +step:1358/1705 train_time:129399ms step_avg:95.29ms +step:1359/1705 train_time:129494ms step_avg:95.29ms +step:1360/1705 train_time:129590ms step_avg:95.29ms +step:1361/1705 train_time:129685ms step_avg:95.29ms +step:1362/1705 train_time:129779ms step_avg:95.29ms +step:1363/1705 train_time:129875ms step_avg:95.29ms +step:1364/1705 train_time:129970ms step_avg:95.29ms +step:1365/1705 train_time:130065ms step_avg:95.29ms +step:1366/1705 train_time:130160ms step_avg:95.29ms +step:1367/1705 train_time:130255ms step_avg:95.29ms +step:1368/1705 train_time:130352ms step_avg:95.29ms +step:1369/1705 train_time:130447ms step_avg:95.29ms +step:1370/1705 train_time:130543ms step_avg:95.29ms +step:1371/1705 train_time:130638ms step_avg:95.29ms +step:1372/1705 train_time:130733ms step_avg:95.29ms +step:1373/1705 train_time:130828ms step_avg:95.29ms +step:1374/1705 train_time:130924ms 
step_avg:95.29ms +step:1375/1705 train_time:131019ms step_avg:95.29ms +step:1375/1705 val_loss:3.3517 train_time:131115ms step_avg:95.36ms +step:1376/1705 train_time:131138ms step_avg:95.30ms +step:1377/1705 train_time:131221ms step_avg:95.29ms +step:1378/1705 train_time:131321ms step_avg:95.30ms +step:1379/1705 train_time:131416ms step_avg:95.30ms +step:1380/1705 train_time:131511ms step_avg:95.30ms +step:1381/1705 train_time:131604ms step_avg:95.30ms +step:1382/1705 train_time:131698ms step_avg:95.30ms +step:1383/1705 train_time:131792ms step_avg:95.29ms +step:1384/1705 train_time:131886ms step_avg:95.29ms +step:1385/1705 train_time:131981ms step_avg:95.29ms +step:1386/1705 train_time:132076ms step_avg:95.29ms +step:1387/1705 train_time:132175ms step_avg:95.30ms +step:1388/1705 train_time:132273ms step_avg:95.30ms +step:1389/1705 train_time:132368ms step_avg:95.30ms +step:1390/1705 train_time:132464ms step_avg:95.30ms +step:1391/1705 train_time:132559ms step_avg:95.30ms +step:1392/1705 train_time:132654ms step_avg:95.30ms +step:1393/1705 train_time:132748ms step_avg:95.30ms +step:1394/1705 train_time:132842ms step_avg:95.30ms +step:1395/1705 train_time:132937ms step_avg:95.30ms +step:1396/1705 train_time:133031ms step_avg:95.29ms +step:1397/1705 train_time:133127ms step_avg:95.30ms +step:1398/1705 train_time:133224ms step_avg:95.30ms +step:1399/1705 train_time:133322ms step_avg:95.30ms +step:1400/1705 train_time:133417ms step_avg:95.30ms +step:1401/1705 train_time:133511ms step_avg:95.30ms +step:1402/1705 train_time:133607ms step_avg:95.30ms +step:1403/1705 train_time:133701ms step_avg:95.30ms +step:1404/1705 train_time:133797ms step_avg:95.30ms +step:1405/1705 train_time:133891ms step_avg:95.30ms +step:1406/1705 train_time:133986ms step_avg:95.30ms +step:1407/1705 train_time:134081ms step_avg:95.30ms +step:1408/1705 train_time:134177ms step_avg:95.30ms +step:1409/1705 train_time:134273ms step_avg:95.30ms +step:1410/1705 train_time:134369ms step_avg:95.30ms +step:1411/1705 train_time:134464ms step_avg:95.30ms +step:1412/1705 train_time:134560ms step_avg:95.30ms +step:1413/1705 train_time:134655ms step_avg:95.30ms +step:1414/1705 train_time:134749ms step_avg:95.30ms +step:1415/1705 train_time:134845ms step_avg:95.30ms +step:1416/1705 train_time:134940ms step_avg:95.30ms +step:1417/1705 train_time:135035ms step_avg:95.30ms +step:1418/1705 train_time:135130ms step_avg:95.30ms +step:1419/1705 train_time:135225ms step_avg:95.30ms +step:1420/1705 train_time:135322ms step_avg:95.30ms +step:1421/1705 train_time:135418ms step_avg:95.30ms +step:1422/1705 train_time:135513ms step_avg:95.30ms +step:1423/1705 train_time:135609ms step_avg:95.30ms +step:1424/1705 train_time:135703ms step_avg:95.30ms +step:1425/1705 train_time:135798ms step_avg:95.30ms +step:1426/1705 train_time:135892ms step_avg:95.30ms +step:1427/1705 train_time:135987ms step_avg:95.30ms +step:1428/1705 train_time:136083ms step_avg:95.30ms +step:1429/1705 train_time:136179ms step_avg:95.30ms +step:1430/1705 train_time:136274ms step_avg:95.30ms +step:1431/1705 train_time:136369ms step_avg:95.30ms +step:1432/1705 train_time:136467ms step_avg:95.30ms +step:1433/1705 train_time:136563ms step_avg:95.30ms +step:1434/1705 train_time:136658ms step_avg:95.30ms +step:1435/1705 train_time:136753ms step_avg:95.30ms +step:1436/1705 train_time:136848ms step_avg:95.30ms +step:1437/1705 train_time:136943ms step_avg:95.30ms +step:1438/1705 train_time:137038ms step_avg:95.30ms +step:1439/1705 train_time:137132ms step_avg:95.30ms +step:1440/1705 
train_time:137228ms step_avg:95.30ms +step:1441/1705 train_time:137323ms step_avg:95.30ms +step:1442/1705 train_time:137420ms step_avg:95.30ms +step:1443/1705 train_time:137515ms step_avg:95.30ms +step:1444/1705 train_time:137610ms step_avg:95.30ms +step:1445/1705 train_time:137706ms step_avg:95.30ms +step:1446/1705 train_time:137802ms step_avg:95.30ms +step:1447/1705 train_time:137898ms step_avg:95.30ms +step:1448/1705 train_time:137992ms step_avg:95.30ms +step:1449/1705 train_time:138087ms step_avg:95.30ms +step:1450/1705 train_time:138183ms step_avg:95.30ms +step:1451/1705 train_time:138279ms step_avg:95.30ms +step:1452/1705 train_time:138375ms step_avg:95.30ms +step:1453/1705 train_time:138469ms step_avg:95.30ms +step:1454/1705 train_time:138565ms step_avg:95.30ms +step:1455/1705 train_time:138661ms step_avg:95.30ms +step:1456/1705 train_time:138757ms step_avg:95.30ms +step:1457/1705 train_time:138852ms step_avg:95.30ms +step:1458/1705 train_time:138947ms step_avg:95.30ms +step:1459/1705 train_time:139043ms step_avg:95.30ms +step:1460/1705 train_time:139138ms step_avg:95.30ms +step:1461/1705 train_time:139233ms step_avg:95.30ms +step:1462/1705 train_time:139328ms step_avg:95.30ms +step:1463/1705 train_time:139424ms step_avg:95.30ms +step:1464/1705 train_time:139519ms step_avg:95.30ms +step:1465/1705 train_time:139617ms step_avg:95.30ms +step:1466/1705 train_time:139709ms step_avg:95.30ms +step:1467/1705 train_time:139805ms step_avg:95.30ms +step:1468/1705 train_time:139901ms step_avg:95.30ms +step:1469/1705 train_time:139997ms step_avg:95.30ms +step:1470/1705 train_time:140092ms step_avg:95.30ms +step:1471/1705 train_time:140186ms step_avg:95.30ms +step:1472/1705 train_time:140283ms step_avg:95.30ms +step:1473/1705 train_time:140378ms step_avg:95.30ms +step:1474/1705 train_time:140474ms step_avg:95.30ms +step:1475/1705 train_time:140569ms step_avg:95.30ms +step:1476/1705 train_time:140664ms step_avg:95.30ms +step:1477/1705 train_time:140760ms step_avg:95.30ms +step:1478/1705 train_time:140855ms step_avg:95.30ms +step:1479/1705 train_time:140950ms step_avg:95.30ms +step:1480/1705 train_time:141047ms step_avg:95.30ms +step:1481/1705 train_time:141142ms step_avg:95.30ms +step:1482/1705 train_time:141238ms step_avg:95.30ms +step:1483/1705 train_time:141333ms step_avg:95.30ms +step:1484/1705 train_time:141428ms step_avg:95.30ms +step:1485/1705 train_time:141667ms step_avg:95.40ms +step:1486/1705 train_time:141898ms step_avg:95.49ms +step:1487/1705 train_time:141992ms step_avg:95.49ms +step:1488/1705 train_time:142086ms step_avg:95.49ms +step:1489/1705 train_time:142181ms step_avg:95.49ms +step:1490/1705 train_time:142275ms step_avg:95.49ms +step:1491/1705 train_time:142370ms step_avg:95.49ms +step:1492/1705 train_time:142464ms step_avg:95.49ms +step:1493/1705 train_time:142559ms step_avg:95.48ms +step:1494/1705 train_time:142653ms step_avg:95.48ms +step:1495/1705 train_time:142752ms step_avg:95.49ms +step:1496/1705 train_time:142851ms step_avg:95.49ms +step:1497/1705 train_time:142948ms step_avg:95.49ms +step:1498/1705 train_time:143043ms step_avg:95.49ms +step:1499/1705 train_time:143138ms step_avg:95.49ms +step:1500/1705 train_time:143232ms step_avg:95.49ms +step:1500/1705 val_loss:3.3196 train_time:143326ms step_avg:95.55ms +step:1501/1705 train_time:143349ms step_avg:95.50ms +step:1502/1705 train_time:143430ms step_avg:95.49ms +step:1503/1705 train_time:143527ms step_avg:95.49ms +step:1504/1705 train_time:143622ms step_avg:95.49ms +step:1505/1705 train_time:143716ms step_avg:95.49ms 
+step:1506/1705 train_time:143810ms step_avg:95.49ms +step:1507/1705 train_time:143905ms step_avg:95.49ms +step:1508/1705 train_time:144000ms step_avg:95.49ms +step:1509/1705 train_time:144094ms step_avg:95.49ms +step:1510/1705 train_time:144188ms step_avg:95.49ms +step:1511/1705 train_time:144284ms step_avg:95.49ms +step:1512/1705 train_time:144382ms step_avg:95.49ms +step:1513/1705 train_time:144480ms step_avg:95.49ms +step:1514/1705 train_time:144578ms step_avg:95.49ms +step:1515/1705 train_time:144674ms step_avg:95.49ms +step:1516/1705 train_time:144768ms step_avg:95.49ms +step:1517/1705 train_time:144862ms step_avg:95.49ms +step:1518/1705 train_time:144956ms step_avg:95.49ms +step:1519/1705 train_time:145051ms step_avg:95.49ms +step:1520/1705 train_time:145146ms step_avg:95.49ms +step:1521/1705 train_time:145240ms step_avg:95.49ms +step:1522/1705 train_time:145337ms step_avg:95.49ms +step:1523/1705 train_time:145433ms step_avg:95.49ms +step:1524/1705 train_time:145529ms step_avg:95.49ms +step:1525/1705 train_time:145625ms step_avg:95.49ms +step:1526/1705 train_time:145720ms step_avg:95.49ms +step:1527/1705 train_time:145816ms step_avg:95.49ms +step:1528/1705 train_time:145911ms step_avg:95.49ms +step:1529/1705 train_time:146005ms step_avg:95.49ms +step:1530/1705 train_time:146099ms step_avg:95.49ms +step:1531/1705 train_time:146195ms step_avg:95.49ms +step:1532/1705 train_time:146290ms step_avg:95.49ms +step:1533/1705 train_time:146386ms step_avg:95.49ms +step:1534/1705 train_time:146482ms step_avg:95.49ms +step:1535/1705 train_time:146579ms step_avg:95.49ms +step:1536/1705 train_time:146675ms step_avg:95.49ms +step:1537/1705 train_time:146770ms step_avg:95.49ms +step:1538/1705 train_time:146865ms step_avg:95.49ms +step:1539/1705 train_time:146959ms step_avg:95.49ms +step:1540/1705 train_time:147055ms step_avg:95.49ms +step:1541/1705 train_time:147150ms step_avg:95.49ms +step:1542/1705 train_time:147246ms step_avg:95.49ms +step:1543/1705 train_time:147341ms step_avg:95.49ms +step:1544/1705 train_time:147437ms step_avg:95.49ms +step:1545/1705 train_time:147533ms step_avg:95.49ms +step:1546/1705 train_time:147630ms step_avg:95.49ms +step:1547/1705 train_time:147725ms step_avg:95.49ms +step:1548/1705 train_time:147820ms step_avg:95.49ms +step:1549/1705 train_time:147915ms step_avg:95.49ms +step:1550/1705 train_time:148010ms step_avg:95.49ms +step:1551/1705 train_time:148105ms step_avg:95.49ms +step:1552/1705 train_time:148200ms step_avg:95.49ms +step:1553/1705 train_time:148296ms step_avg:95.49ms +step:1554/1705 train_time:148391ms step_avg:95.49ms +step:1555/1705 train_time:148486ms step_avg:95.49ms +step:1556/1705 train_time:148582ms step_avg:95.49ms +step:1557/1705 train_time:148678ms step_avg:95.49ms +step:1558/1705 train_time:148774ms step_avg:95.49ms +step:1559/1705 train_time:148868ms step_avg:95.49ms +step:1560/1705 train_time:148963ms step_avg:95.49ms +step:1561/1705 train_time:149058ms step_avg:95.49ms +step:1562/1705 train_time:149154ms step_avg:95.49ms +step:1563/1705 train_time:149249ms step_avg:95.49ms +step:1564/1705 train_time:149344ms step_avg:95.49ms +step:1565/1705 train_time:149439ms step_avg:95.49ms +step:1566/1705 train_time:149535ms step_avg:95.49ms +step:1567/1705 train_time:149631ms step_avg:95.49ms +step:1568/1705 train_time:149727ms step_avg:95.49ms +step:1569/1705 train_time:149821ms step_avg:95.49ms +step:1570/1705 train_time:149917ms step_avg:95.49ms +step:1571/1705 train_time:150013ms step_avg:95.49ms +step:1572/1705 train_time:150108ms step_avg:95.49ms 
+step:1573/1705 train_time:150203ms step_avg:95.49ms +step:1574/1705 train_time:150298ms step_avg:95.49ms +step:1575/1705 train_time:150395ms step_avg:95.49ms +step:1576/1705 train_time:150490ms step_avg:95.49ms +step:1577/1705 train_time:150586ms step_avg:95.49ms +step:1578/1705 train_time:150681ms step_avg:95.49ms +step:1579/1705 train_time:150777ms step_avg:95.49ms +step:1580/1705 train_time:150872ms step_avg:95.49ms +step:1581/1705 train_time:150967ms step_avg:95.49ms +step:1582/1705 train_time:151062ms step_avg:95.49ms +step:1583/1705 train_time:151158ms step_avg:95.49ms +step:1584/1705 train_time:151253ms step_avg:95.49ms +step:1585/1705 train_time:151348ms step_avg:95.49ms +step:1586/1705 train_time:151443ms step_avg:95.49ms +step:1587/1705 train_time:151539ms step_avg:95.49ms +step:1588/1705 train_time:151634ms step_avg:95.49ms +step:1589/1705 train_time:151730ms step_avg:95.49ms +step:1590/1705 train_time:151826ms step_avg:95.49ms +step:1591/1705 train_time:151921ms step_avg:95.49ms +step:1592/1705 train_time:152016ms step_avg:95.49ms +step:1593/1705 train_time:152112ms step_avg:95.49ms +step:1594/1705 train_time:152207ms step_avg:95.49ms +step:1595/1705 train_time:152301ms step_avg:95.49ms +step:1596/1705 train_time:152398ms step_avg:95.49ms +step:1597/1705 train_time:152494ms step_avg:95.49ms +step:1598/1705 train_time:152590ms step_avg:95.49ms +step:1599/1705 train_time:152685ms step_avg:95.49ms +step:1600/1705 train_time:152780ms step_avg:95.49ms +step:1601/1705 train_time:152874ms step_avg:95.49ms +step:1602/1705 train_time:152970ms step_avg:95.49ms +step:1603/1705 train_time:153065ms step_avg:95.49ms +step:1604/1705 train_time:153160ms step_avg:95.49ms +step:1605/1705 train_time:153255ms step_avg:95.49ms +step:1606/1705 train_time:153350ms step_avg:95.49ms +step:1607/1705 train_time:153446ms step_avg:95.49ms +step:1608/1705 train_time:153541ms step_avg:95.49ms +step:1609/1705 train_time:153637ms step_avg:95.49ms +step:1610/1705 train_time:153733ms step_avg:95.49ms +step:1611/1705 train_time:153829ms step_avg:95.49ms +step:1612/1705 train_time:153923ms step_avg:95.49ms +step:1613/1705 train_time:154018ms step_avg:95.49ms +step:1614/1705 train_time:154115ms step_avg:95.49ms +step:1615/1705 train_time:154210ms step_avg:95.49ms +step:1616/1705 train_time:154306ms step_avg:95.49ms +step:1617/1705 train_time:154401ms step_avg:95.49ms +step:1618/1705 train_time:154497ms step_avg:95.49ms +step:1619/1705 train_time:154593ms step_avg:95.49ms +step:1620/1705 train_time:154688ms step_avg:95.49ms +step:1621/1705 train_time:154783ms step_avg:95.49ms +step:1622/1705 train_time:154878ms step_avg:95.49ms +step:1623/1705 train_time:154974ms step_avg:95.49ms +step:1624/1705 train_time:155069ms step_avg:95.49ms +step:1625/1705 train_time:155166ms step_avg:95.49ms +step:1625/1705 val_loss:3.2917 train_time:155260ms step_avg:95.54ms +step:1626/1705 train_time:155285ms step_avg:95.50ms +step:1627/1705 train_time:155363ms step_avg:95.49ms +step:1628/1705 train_time:155462ms step_avg:95.49ms +step:1629/1705 train_time:155558ms step_avg:95.49ms +step:1630/1705 train_time:155652ms step_avg:95.49ms +step:1631/1705 train_time:155746ms step_avg:95.49ms +step:1632/1705 train_time:155841ms step_avg:95.49ms +step:1633/1705 train_time:155936ms step_avg:95.49ms +step:1634/1705 train_time:156029ms step_avg:95.49ms +step:1635/1705 train_time:156123ms step_avg:95.49ms +step:1636/1705 train_time:156219ms step_avg:95.49ms +step:1637/1705 train_time:156316ms step_avg:95.49ms +step:1638/1705 train_time:156412ms 
step_avg:95.49ms +step:1639/1705 train_time:156509ms step_avg:95.49ms +step:1640/1705 train_time:156606ms step_avg:95.49ms +step:1641/1705 train_time:156703ms step_avg:95.49ms +step:1642/1705 train_time:156799ms step_avg:95.49ms +step:1643/1705 train_time:156893ms step_avg:95.49ms +step:1644/1705 train_time:156987ms step_avg:95.49ms +step:1645/1705 train_time:157082ms step_avg:95.49ms +step:1646/1705 train_time:157177ms step_avg:95.49ms +step:1647/1705 train_time:157272ms step_avg:95.49ms +step:1648/1705 train_time:157369ms step_avg:95.49ms +step:1649/1705 train_time:157465ms step_avg:95.49ms +step:1650/1705 train_time:157562ms step_avg:95.49ms +step:1651/1705 train_time:157657ms step_avg:95.49ms +step:1652/1705 train_time:157752ms step_avg:95.49ms +step:1653/1705 train_time:157847ms step_avg:95.49ms +step:1654/1705 train_time:157943ms step_avg:95.49ms +step:1655/1705 train_time:158037ms step_avg:95.49ms +step:1656/1705 train_time:158131ms step_avg:95.49ms +step:1657/1705 train_time:158227ms step_avg:95.49ms +step:1658/1705 train_time:158324ms step_avg:95.49ms +step:1659/1705 train_time:158421ms step_avg:95.49ms +step:1660/1705 train_time:158517ms step_avg:95.49ms +step:1661/1705 train_time:158612ms step_avg:95.49ms +step:1662/1705 train_time:158707ms step_avg:95.49ms +step:1663/1705 train_time:158803ms step_avg:95.49ms +step:1664/1705 train_time:158899ms step_avg:95.49ms +step:1665/1705 train_time:158993ms step_avg:95.49ms +step:1666/1705 train_time:159088ms step_avg:95.49ms +step:1667/1705 train_time:159183ms step_avg:95.49ms +step:1668/1705 train_time:159279ms step_avg:95.49ms +step:1669/1705 train_time:159374ms step_avg:95.49ms +step:1670/1705 train_time:159469ms step_avg:95.49ms +step:1671/1705 train_time:159566ms step_avg:95.49ms +step:1672/1705 train_time:159662ms step_avg:95.49ms +step:1673/1705 train_time:159758ms step_avg:95.49ms +step:1674/1705 train_time:159854ms step_avg:95.49ms +step:1675/1705 train_time:159949ms step_avg:95.49ms +step:1676/1705 train_time:160044ms step_avg:95.49ms +step:1677/1705 train_time:160139ms step_avg:95.49ms +step:1678/1705 train_time:160234ms step_avg:95.49ms +step:1679/1705 train_time:160329ms step_avg:95.49ms +step:1680/1705 train_time:160425ms step_avg:95.49ms +step:1681/1705 train_time:160522ms step_avg:95.49ms +step:1682/1705 train_time:160617ms step_avg:95.49ms +step:1683/1705 train_time:160713ms step_avg:95.49ms +step:1684/1705 train_time:160808ms step_avg:95.49ms +step:1685/1705 train_time:160904ms step_avg:95.49ms +step:1686/1705 train_time:160998ms step_avg:95.49ms +step:1687/1705 train_time:161093ms step_avg:95.49ms +step:1688/1705 train_time:161188ms step_avg:95.49ms +step:1689/1705 train_time:161283ms step_avg:95.49ms +step:1690/1705 train_time:161379ms step_avg:95.49ms +step:1691/1705 train_time:161474ms step_avg:95.49ms +step:1692/1705 train_time:161570ms step_avg:95.49ms +step:1693/1705 train_time:161665ms step_avg:95.49ms +step:1694/1705 train_time:161762ms step_avg:95.49ms +step:1695/1705 train_time:161859ms step_avg:95.49ms +step:1696/1705 train_time:161954ms step_avg:95.49ms +step:1697/1705 train_time:162048ms step_avg:95.49ms +step:1698/1705 train_time:162285ms step_avg:95.57ms +step:1699/1705 train_time:162486ms step_avg:95.64ms +step:1700/1705 train_time:162580ms step_avg:95.64ms +step:1701/1705 train_time:162673ms step_avg:95.63ms +step:1702/1705 train_time:162768ms step_avg:95.63ms +step:1703/1705 train_time:162862ms step_avg:95.63ms +step:1704/1705 train_time:162956ms step_avg:95.63ms +step:1705/1705 train_time:163050ms 
step_avg:95.63ms +step:1705/1705 val_loss:3.2775 train_time:163144ms step_avg:95.69ms +peak memory allocated: 34489 MiB reserved: 49496 MiB diff --git a/records/090525_SkipMLPBlocks/cf8c8a10-ea32-46a0-8276-241330023e83.txt b/records/090525_SkipMLPBlocks/cf8c8a10-ea32-46a0-8276-241330023e83.txt new file mode 100644 index 000000000..4d203736f --- /dev/null +++ b/records/090525_SkipMLPBlocks/cf8c8a10-ea32-46a0-8276-241330023e83.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, 
grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + 
tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, 
mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
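+        # note on the fp8 scales below (annotation, not from the run): float8_e4m3fn
+        # tops out at 448, so x_s=(model_dim**0.5)/448 keeps the rms-normed head input
+        # in range; w_s=2**-9 and grad_s=1/448 are presumably tuned to typical
+        # weight/grad magnitudes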
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
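+ # bos_idx is the sorted array of every BOS position in the shard; next_batch
+ # walks it to hand each rank whole documents (truncated to max_seq_len)
+ # until exactly num_tokens_local + 1 tokens are covered (the +1 feeds the
+ # input/target shift)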
self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS tokens ahead of index {idx}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration.
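+ # (the partially packed batch is dropped: the continue below re-draws a
+ # full batch from the fresh shard)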
+ tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1705 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc.
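+# the global batch is sized for 8 GPUs, so fewer ranks trade parallelism for
+# gradient accumulation: world_size=8 -> grad_accum_steps=1, world_size=4 -> 2,
+# world_size=2 -> 4, world_size=1 -> 8; tokens per optimizer step stay constant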
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
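+# restoring the saved state_dicts above discards the momentum buffers and Adam
+# moments accumulated during warmup, so the timed run starts from scratch with
+# compilation already amortized. for reference, get_ws splits the run into
+# thirds: ws=3 for steps 0-568, ws=7 for 569-1137, ws=11 for 1138-1705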
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:28:29 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 42C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 131W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 66237 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 66238 C /usr/bin/python3 610MiB | +| 0 N/A N/A 66239 C /usr/bin/python3 610MiB | +| 0 N/A N/A 66240 C /usr/bin/python3 610MiB | +| 0 N/A N/A 66241 C /usr/bin/python3 610MiB | +| 0 N/A N/A 66242 C /usr/bin/python3 610MiB | +| 0 N/A N/A 66243 C /usr/bin/python3 610MiB | +| 0 N/A N/A 66244 C /usr/bin/python3 610MiB | +| 1 N/A N/A 66238 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 66239 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 66240 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 66241 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 66242 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 66243 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 66244 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1705 train_time:382ms step_avg:382.22ms +step:2/1705 train_time:404ms step_avg:201.94ms +step:3/1705 train_time:472ms step_avg:157.27ms +step:4/1705 train_time:563ms step_avg:140.71ms +step:5/1705 
train_time:655ms step_avg:130.90ms +step:6/1705 train_time:747ms step_avg:124.47ms +step:7/1705 train_time:839ms step_avg:119.87ms +step:8/1705 train_time:931ms step_avg:116.36ms +step:9/1705 train_time:1023ms step_avg:113.66ms +step:10/1705 train_time:1115ms step_avg:111.53ms +step:11/1705 train_time:1208ms step_avg:109.78ms +step:12/1705 train_time:1303ms step_avg:108.57ms +step:13/1705 train_time:1401ms step_avg:107.80ms +step:14/1705 train_time:1496ms step_avg:106.84ms +step:15/1705 train_time:1588ms step_avg:105.87ms +step:16/1705 train_time:1680ms step_avg:105.03ms +step:17/1705 train_time:1772ms step_avg:104.26ms +step:18/1705 train_time:1865ms step_avg:103.60ms +step:19/1705 train_time:1957ms step_avg:103.00ms +step:20/1705 train_time:2049ms step_avg:102.47ms +step:21/1705 train_time:2142ms step_avg:102.00ms +step:22/1705 train_time:2235ms step_avg:101.57ms +step:23/1705 train_time:2329ms step_avg:101.28ms +step:24/1705 train_time:2426ms step_avg:101.10ms +step:25/1705 train_time:2521ms step_avg:100.84ms +step:26/1705 train_time:2614ms step_avg:100.55ms +step:27/1705 train_time:2707ms step_avg:100.27ms +step:28/1705 train_time:2801ms step_avg:100.03ms +step:29/1705 train_time:2894ms step_avg:99.78ms +step:30/1705 train_time:2986ms step_avg:99.55ms +step:31/1705 train_time:3079ms step_avg:99.32ms +step:32/1705 train_time:3171ms step_avg:99.10ms +step:33/1705 train_time:3266ms step_avg:98.96ms +step:34/1705 train_time:3360ms step_avg:98.82ms +step:35/1705 train_time:3453ms step_avg:98.67ms +step:36/1705 train_time:3548ms step_avg:98.56ms +step:37/1705 train_time:3642ms step_avg:98.42ms +step:38/1705 train_time:3735ms step_avg:98.28ms +step:39/1705 train_time:3827ms step_avg:98.13ms +step:40/1705 train_time:3920ms step_avg:98.00ms +step:41/1705 train_time:4013ms step_avg:97.88ms +step:42/1705 train_time:4106ms step_avg:97.75ms +step:43/1705 train_time:4198ms step_avg:97.63ms +step:44/1705 train_time:4291ms step_avg:97.53ms +step:45/1705 train_time:4385ms step_avg:97.45ms +step:46/1705 train_time:4479ms step_avg:97.38ms +step:47/1705 train_time:4572ms step_avg:97.29ms +step:48/1705 train_time:4666ms step_avg:97.21ms +step:49/1705 train_time:4759ms step_avg:97.13ms +step:50/1705 train_time:4852ms step_avg:97.04ms +step:51/1705 train_time:4945ms step_avg:96.97ms +step:52/1705 train_time:5039ms step_avg:96.90ms +step:53/1705 train_time:5131ms step_avg:96.82ms +step:54/1705 train_time:5225ms step_avg:96.75ms +step:55/1705 train_time:5318ms step_avg:96.69ms +step:56/1705 train_time:5412ms step_avg:96.64ms +step:57/1705 train_time:5505ms step_avg:96.59ms +step:58/1705 train_time:5599ms step_avg:96.53ms +step:59/1705 train_time:5692ms step_avg:96.47ms +step:60/1705 train_time:5785ms step_avg:96.42ms +step:61/1705 train_time:5878ms step_avg:96.37ms +step:62/1705 train_time:5971ms step_avg:96.30ms +step:63/1705 train_time:6063ms step_avg:96.24ms +step:64/1705 train_time:6156ms step_avg:96.19ms +step:65/1705 train_time:6249ms step_avg:96.14ms +step:66/1705 train_time:6343ms step_avg:96.10ms +step:67/1705 train_time:6436ms step_avg:96.06ms +step:68/1705 train_time:6529ms step_avg:96.02ms +step:69/1705 train_time:6623ms step_avg:95.98ms +step:70/1705 train_time:6717ms step_avg:95.96ms +step:71/1705 train_time:6810ms step_avg:95.92ms +step:72/1705 train_time:6904ms step_avg:95.89ms +step:73/1705 train_time:6996ms step_avg:95.84ms +step:74/1705 train_time:7089ms step_avg:95.80ms +step:75/1705 train_time:7183ms step_avg:95.77ms +step:76/1705 train_time:7275ms step_avg:95.73ms +step:77/1705 
train_time:7368ms step_avg:95.69ms +step:78/1705 train_time:7462ms step_avg:95.67ms +step:79/1705 train_time:7555ms step_avg:95.63ms +step:80/1705 train_time:7649ms step_avg:95.61ms +step:81/1705 train_time:7743ms step_avg:95.59ms +step:82/1705 train_time:7836ms step_avg:95.56ms +step:83/1705 train_time:7928ms step_avg:95.52ms +step:84/1705 train_time:8021ms step_avg:95.49ms +step:85/1705 train_time:8114ms step_avg:95.46ms +step:86/1705 train_time:8207ms step_avg:95.43ms +step:87/1705 train_time:8300ms step_avg:95.40ms +step:88/1705 train_time:8392ms step_avg:95.37ms +step:89/1705 train_time:8485ms step_avg:95.34ms +step:90/1705 train_time:8579ms step_avg:95.32ms +step:91/1705 train_time:8672ms step_avg:95.29ms +step:92/1705 train_time:8765ms step_avg:95.28ms +step:93/1705 train_time:8859ms step_avg:95.25ms +step:94/1705 train_time:8951ms step_avg:95.22ms +step:95/1705 train_time:9044ms step_avg:95.20ms +step:96/1705 train_time:9136ms step_avg:95.17ms +step:97/1705 train_time:9229ms step_avg:95.14ms +step:98/1705 train_time:9322ms step_avg:95.13ms +step:99/1705 train_time:9416ms step_avg:95.11ms +step:100/1705 train_time:9509ms step_avg:95.09ms +step:101/1705 train_time:9602ms step_avg:95.07ms +step:102/1705 train_time:9695ms step_avg:95.05ms +step:103/1705 train_time:9788ms step_avg:95.03ms +step:104/1705 train_time:9883ms step_avg:95.03ms +step:105/1705 train_time:9977ms step_avg:95.02ms +step:106/1705 train_time:10070ms step_avg:95.00ms +step:107/1705 train_time:10162ms step_avg:94.98ms +step:108/1705 train_time:10255ms step_avg:94.95ms +step:109/1705 train_time:10348ms step_avg:94.94ms +step:110/1705 train_time:10441ms step_avg:94.92ms +step:111/1705 train_time:10534ms step_avg:94.90ms +step:112/1705 train_time:10627ms step_avg:94.88ms +step:113/1705 train_time:10721ms step_avg:94.88ms +step:114/1705 train_time:10814ms step_avg:94.86ms +step:115/1705 train_time:10908ms step_avg:94.85ms +step:116/1705 train_time:11002ms step_avg:94.84ms +step:117/1705 train_time:11094ms step_avg:94.82ms +step:118/1705 train_time:11188ms step_avg:94.81ms +step:119/1705 train_time:11281ms step_avg:94.80ms +step:120/1705 train_time:11374ms step_avg:94.78ms +step:121/1705 train_time:11467ms step_avg:94.77ms +step:122/1705 train_time:11560ms step_avg:94.75ms +step:123/1705 train_time:11652ms step_avg:94.73ms +step:124/1705 train_time:11746ms step_avg:94.73ms +step:125/1705 train_time:11839ms step_avg:94.71ms +step:125/1705 val_loss:4.3144 train_time:11932ms step_avg:95.45ms +step:126/1705 train_time:11955ms step_avg:94.88ms +step:127/1705 train_time:12029ms step_avg:94.72ms +step:128/1705 train_time:12131ms step_avg:94.77ms +step:129/1705 train_time:12226ms step_avg:94.78ms +step:130/1705 train_time:12320ms step_avg:94.77ms +step:131/1705 train_time:12412ms step_avg:94.75ms +step:132/1705 train_time:12504ms step_avg:94.72ms +step:133/1705 train_time:12596ms step_avg:94.71ms +step:134/1705 train_time:12687ms step_avg:94.68ms +step:135/1705 train_time:12779ms step_avg:94.66ms +step:136/1705 train_time:12872ms step_avg:94.64ms +step:137/1705 train_time:12963ms step_avg:94.62ms +step:138/1705 train_time:13058ms step_avg:94.62ms +step:139/1705 train_time:13153ms step_avg:94.62ms +step:140/1705 train_time:13247ms step_avg:94.62ms +step:141/1705 train_time:13340ms step_avg:94.61ms +step:142/1705 train_time:13433ms step_avg:94.60ms +step:143/1705 train_time:13526ms step_avg:94.59ms +step:144/1705 train_time:13619ms step_avg:94.57ms +step:145/1705 train_time:13711ms step_avg:94.56ms +step:146/1705 train_time:13803ms 
step_avg:94.54ms +step:147/1705 train_time:13895ms step_avg:94.53ms +step:148/1705 train_time:13987ms step_avg:94.51ms +step:149/1705 train_time:14080ms step_avg:94.50ms +step:150/1705 train_time:14174ms step_avg:94.49ms +step:151/1705 train_time:14268ms step_avg:94.49ms +step:152/1705 train_time:14361ms step_avg:94.48ms +step:153/1705 train_time:14455ms step_avg:94.47ms +step:154/1705 train_time:14547ms step_avg:94.46ms +step:155/1705 train_time:14640ms step_avg:94.45ms +step:156/1705 train_time:14732ms step_avg:94.44ms +step:157/1705 train_time:14824ms step_avg:94.42ms +step:158/1705 train_time:14916ms step_avg:94.41ms +step:159/1705 train_time:15009ms step_avg:94.40ms +step:160/1705 train_time:15102ms step_avg:94.39ms +step:161/1705 train_time:15196ms step_avg:94.38ms +step:162/1705 train_time:15289ms step_avg:94.38ms +step:163/1705 train_time:15382ms step_avg:94.37ms +step:164/1705 train_time:15476ms step_avg:94.36ms +step:165/1705 train_time:15569ms step_avg:94.35ms +step:166/1705 train_time:15661ms step_avg:94.34ms +step:167/1705 train_time:15754ms step_avg:94.33ms +step:168/1705 train_time:15846ms step_avg:94.32ms +step:169/1705 train_time:15939ms step_avg:94.31ms +step:170/1705 train_time:16032ms step_avg:94.31ms +step:171/1705 train_time:16125ms step_avg:94.30ms +step:172/1705 train_time:16217ms step_avg:94.29ms +step:173/1705 train_time:16311ms step_avg:94.28ms +step:174/1705 train_time:16404ms step_avg:94.27ms +step:175/1705 train_time:16497ms step_avg:94.27ms +step:176/1705 train_time:16589ms step_avg:94.26ms +step:177/1705 train_time:16682ms step_avg:94.25ms +step:178/1705 train_time:16776ms step_avg:94.24ms +step:179/1705 train_time:16868ms step_avg:94.23ms +step:180/1705 train_time:16961ms step_avg:94.23ms +step:181/1705 train_time:17053ms step_avg:94.22ms +step:182/1705 train_time:17146ms step_avg:94.21ms +step:183/1705 train_time:17239ms step_avg:94.20ms +step:184/1705 train_time:17332ms step_avg:94.20ms +step:185/1705 train_time:17425ms step_avg:94.19ms +step:186/1705 train_time:17518ms step_avg:94.18ms +step:187/1705 train_time:17611ms step_avg:94.18ms +step:188/1705 train_time:17705ms step_avg:94.17ms +step:189/1705 train_time:17797ms step_avg:94.16ms +step:190/1705 train_time:17890ms step_avg:94.16ms +step:191/1705 train_time:17983ms step_avg:94.15ms +step:192/1705 train_time:18076ms step_avg:94.14ms +step:193/1705 train_time:18168ms step_avg:94.14ms +step:194/1705 train_time:18262ms step_avg:94.13ms +step:195/1705 train_time:18355ms step_avg:94.13ms +step:196/1705 train_time:18448ms step_avg:94.12ms +step:197/1705 train_time:18541ms step_avg:94.12ms +step:198/1705 train_time:18634ms step_avg:94.11ms +step:199/1705 train_time:18727ms step_avg:94.11ms +step:200/1705 train_time:18820ms step_avg:94.10ms +step:201/1705 train_time:18913ms step_avg:94.09ms +step:202/1705 train_time:19006ms step_avg:94.09ms +step:203/1705 train_time:19099ms step_avg:94.08ms +step:204/1705 train_time:19192ms step_avg:94.08ms +step:205/1705 train_time:19284ms step_avg:94.07ms +step:206/1705 train_time:19378ms step_avg:94.07ms +step:207/1705 train_time:19471ms step_avg:94.06ms +step:208/1705 train_time:19564ms step_avg:94.06ms +step:209/1705 train_time:19657ms step_avg:94.05ms +step:210/1705 train_time:19750ms step_avg:94.05ms +step:211/1705 train_time:19842ms step_avg:94.04ms +step:212/1705 train_time:19935ms step_avg:94.03ms +step:213/1705 train_time:20186ms step_avg:94.77ms +step:214/1705 train_time:20354ms step_avg:95.11ms +step:215/1705 train_time:20445ms step_avg:95.09ms +step:216/1705 
train_time:20538ms step_avg:95.08ms +step:217/1705 train_time:20630ms step_avg:95.07ms +step:218/1705 train_time:20722ms step_avg:95.06ms +step:219/1705 train_time:20814ms step_avg:95.04ms +step:220/1705 train_time:20906ms step_avg:95.03ms +step:221/1705 train_time:20998ms step_avg:95.01ms +step:222/1705 train_time:21090ms step_avg:95.00ms +step:223/1705 train_time:21182ms step_avg:94.99ms +step:224/1705 train_time:21279ms step_avg:95.00ms +step:225/1705 train_time:21375ms step_avg:95.00ms +step:226/1705 train_time:21468ms step_avg:94.99ms +step:227/1705 train_time:21562ms step_avg:94.99ms +step:228/1705 train_time:21654ms step_avg:94.98ms +step:229/1705 train_time:21747ms step_avg:94.96ms +step:230/1705 train_time:21839ms step_avg:94.95ms +step:231/1705 train_time:21932ms step_avg:94.94ms +step:232/1705 train_time:22023ms step_avg:94.93ms +step:233/1705 train_time:22116ms step_avg:94.92ms +step:234/1705 train_time:22208ms step_avg:94.91ms +step:235/1705 train_time:22304ms step_avg:94.91ms +step:236/1705 train_time:22398ms step_avg:94.91ms +step:237/1705 train_time:22491ms step_avg:94.90ms +step:238/1705 train_time:22583ms step_avg:94.89ms +step:239/1705 train_time:22676ms step_avg:94.88ms +step:240/1705 train_time:22769ms step_avg:94.87ms +step:241/1705 train_time:22862ms step_avg:94.86ms +step:242/1705 train_time:22954ms step_avg:94.85ms +step:243/1705 train_time:23047ms step_avg:94.84ms +step:244/1705 train_time:23139ms step_avg:94.83ms +step:245/1705 train_time:23233ms step_avg:94.83ms +step:246/1705 train_time:23326ms step_avg:94.82ms +step:247/1705 train_time:23420ms step_avg:94.82ms +step:248/1705 train_time:23513ms step_avg:94.81ms +step:249/1705 train_time:23606ms step_avg:94.80ms +step:250/1705 train_time:23699ms step_avg:94.80ms +step:250/1705 val_loss:3.9838 train_time:23793ms step_avg:95.17ms +step:251/1705 train_time:23815ms step_avg:94.88ms +step:252/1705 train_time:23889ms step_avg:94.80ms +step:253/1705 train_time:23990ms step_avg:94.82ms +step:254/1705 train_time:24084ms step_avg:94.82ms +step:255/1705 train_time:24175ms step_avg:94.81ms +step:256/1705 train_time:24267ms step_avg:94.79ms +step:257/1705 train_time:24359ms step_avg:94.78ms +step:258/1705 train_time:24451ms step_avg:94.77ms +step:259/1705 train_time:24542ms step_avg:94.76ms +step:260/1705 train_time:24634ms step_avg:94.75ms +step:261/1705 train_time:24727ms step_avg:94.74ms +step:262/1705 train_time:24820ms step_avg:94.73ms +step:263/1705 train_time:24916ms step_avg:94.74ms +step:264/1705 train_time:25010ms step_avg:94.74ms +step:265/1705 train_time:25104ms step_avg:94.73ms +step:266/1705 train_time:25197ms step_avg:94.73ms +step:267/1705 train_time:25290ms step_avg:94.72ms +step:268/1705 train_time:25382ms step_avg:94.71ms +step:269/1705 train_time:25474ms step_avg:94.70ms +step:270/1705 train_time:25566ms step_avg:94.69ms +step:271/1705 train_time:25658ms step_avg:94.68ms +step:272/1705 train_time:25751ms step_avg:94.67ms +step:273/1705 train_time:25844ms step_avg:94.67ms +step:274/1705 train_time:25937ms step_avg:94.66ms +step:275/1705 train_time:26032ms step_avg:94.66ms +step:276/1705 train_time:26126ms step_avg:94.66ms +step:277/1705 train_time:26218ms step_avg:94.65ms +step:278/1705 train_time:26311ms step_avg:94.64ms +step:279/1705 train_time:26404ms step_avg:94.64ms +step:280/1705 train_time:26497ms step_avg:94.63ms +step:281/1705 train_time:26590ms step_avg:94.62ms +step:282/1705 train_time:26681ms step_avg:94.62ms +step:283/1705 train_time:26775ms step_avg:94.61ms +step:284/1705 train_time:26868ms 
step_avg:94.61ms +step:285/1705 train_time:26961ms step_avg:94.60ms +step:286/1705 train_time:27055ms step_avg:94.60ms +step:287/1705 train_time:27148ms step_avg:94.59ms +step:288/1705 train_time:27240ms step_avg:94.58ms +step:289/1705 train_time:27334ms step_avg:94.58ms +step:290/1705 train_time:27427ms step_avg:94.57ms +step:291/1705 train_time:27518ms step_avg:94.56ms +step:292/1705 train_time:27611ms step_avg:94.56ms +step:293/1705 train_time:27703ms step_avg:94.55ms +step:294/1705 train_time:27797ms step_avg:94.55ms +step:295/1705 train_time:27890ms step_avg:94.54ms +step:296/1705 train_time:27984ms step_avg:94.54ms +step:297/1705 train_time:28076ms step_avg:94.53ms +step:298/1705 train_time:28170ms step_avg:94.53ms +step:299/1705 train_time:28263ms step_avg:94.52ms +step:300/1705 train_time:28356ms step_avg:94.52ms +step:301/1705 train_time:28449ms step_avg:94.51ms +step:302/1705 train_time:28542ms step_avg:94.51ms +step:303/1705 train_time:28635ms step_avg:94.50ms +step:304/1705 train_time:28728ms step_avg:94.50ms +step:305/1705 train_time:28820ms step_avg:94.49ms +step:306/1705 train_time:28913ms step_avg:94.49ms +step:307/1705 train_time:29006ms step_avg:94.48ms +step:308/1705 train_time:29099ms step_avg:94.48ms +step:309/1705 train_time:29192ms step_avg:94.47ms +step:310/1705 train_time:29286ms step_avg:94.47ms +step:311/1705 train_time:29378ms step_avg:94.46ms +step:312/1705 train_time:29471ms step_avg:94.46ms +step:313/1705 train_time:29565ms step_avg:94.46ms +step:314/1705 train_time:29658ms step_avg:94.45ms +step:315/1705 train_time:29750ms step_avg:94.45ms +step:316/1705 train_time:29843ms step_avg:94.44ms +step:317/1705 train_time:29936ms step_avg:94.44ms +step:318/1705 train_time:30030ms step_avg:94.44ms +step:319/1705 train_time:30122ms step_avg:94.43ms +step:320/1705 train_time:30217ms step_avg:94.43ms +step:321/1705 train_time:30309ms step_avg:94.42ms +step:322/1705 train_time:30402ms step_avg:94.41ms +step:323/1705 train_time:30495ms step_avg:94.41ms +step:324/1705 train_time:30588ms step_avg:94.41ms +step:325/1705 train_time:30680ms step_avg:94.40ms +step:326/1705 train_time:30773ms step_avg:94.40ms +step:327/1705 train_time:30867ms step_avg:94.39ms +step:328/1705 train_time:30960ms step_avg:94.39ms +step:329/1705 train_time:31053ms step_avg:94.39ms +step:330/1705 train_time:31145ms step_avg:94.38ms +step:331/1705 train_time:31238ms step_avg:94.37ms +step:332/1705 train_time:31331ms step_avg:94.37ms +step:333/1705 train_time:31424ms step_avg:94.37ms +step:334/1705 train_time:31517ms step_avg:94.36ms +step:335/1705 train_time:31610ms step_avg:94.36ms +step:336/1705 train_time:31702ms step_avg:94.35ms +step:337/1705 train_time:31795ms step_avg:94.35ms +step:338/1705 train_time:31888ms step_avg:94.34ms +step:339/1705 train_time:31981ms step_avg:94.34ms +step:340/1705 train_time:32074ms step_avg:94.34ms +step:341/1705 train_time:32168ms step_avg:94.33ms +step:342/1705 train_time:32260ms step_avg:94.33ms +step:343/1705 train_time:32353ms step_avg:94.32ms +step:344/1705 train_time:32446ms step_avg:94.32ms +step:345/1705 train_time:32538ms step_avg:94.31ms +step:346/1705 train_time:32632ms step_avg:94.31ms +step:347/1705 train_time:32725ms step_avg:94.31ms +step:348/1705 train_time:32818ms step_avg:94.30ms +step:349/1705 train_time:32911ms step_avg:94.30ms +step:350/1705 train_time:33004ms step_avg:94.30ms +step:351/1705 train_time:33097ms step_avg:94.29ms +step:352/1705 train_time:33189ms step_avg:94.29ms +step:353/1705 train_time:33282ms step_avg:94.28ms +step:354/1705 
train_time:33376ms step_avg:94.28ms +step:355/1705 train_time:33469ms step_avg:94.28ms +step:356/1705 train_time:33561ms step_avg:94.27ms +step:357/1705 train_time:33655ms step_avg:94.27ms +step:358/1705 train_time:33748ms step_avg:94.27ms +step:359/1705 train_time:33841ms step_avg:94.26ms +step:360/1705 train_time:33935ms step_avg:94.26ms +step:361/1705 train_time:34028ms step_avg:94.26ms +step:362/1705 train_time:34120ms step_avg:94.26ms +step:363/1705 train_time:34213ms step_avg:94.25ms +step:364/1705 train_time:34306ms step_avg:94.25ms +step:365/1705 train_time:34400ms step_avg:94.25ms +step:366/1705 train_time:34493ms step_avg:94.24ms +step:367/1705 train_time:34586ms step_avg:94.24ms +step:368/1705 train_time:34678ms step_avg:94.23ms +step:369/1705 train_time:34771ms step_avg:94.23ms +step:370/1705 train_time:34864ms step_avg:94.23ms +step:371/1705 train_time:34957ms step_avg:94.22ms +step:372/1705 train_time:35050ms step_avg:94.22ms +step:373/1705 train_time:35143ms step_avg:94.22ms +step:374/1705 train_time:35236ms step_avg:94.21ms +step:375/1705 train_time:35330ms step_avg:94.21ms +step:375/1705 val_loss:3.8258 train_time:35423ms step_avg:94.46ms +step:376/1705 train_time:35445ms step_avg:94.27ms +step:377/1705 train_time:35520ms step_avg:94.22ms +step:378/1705 train_time:35617ms step_avg:94.23ms +step:379/1705 train_time:35712ms step_avg:94.23ms +step:380/1705 train_time:35804ms step_avg:94.22ms +step:381/1705 train_time:35896ms step_avg:94.22ms +step:382/1705 train_time:35989ms step_avg:94.21ms +step:383/1705 train_time:36080ms step_avg:94.20ms +step:384/1705 train_time:36173ms step_avg:94.20ms +step:385/1705 train_time:36264ms step_avg:94.19ms +step:386/1705 train_time:36356ms step_avg:94.19ms +step:387/1705 train_time:36451ms step_avg:94.19ms +step:388/1705 train_time:36547ms step_avg:94.19ms +step:389/1705 train_time:36642ms step_avg:94.19ms +step:390/1705 train_time:36736ms step_avg:94.19ms +step:391/1705 train_time:36828ms step_avg:94.19ms +step:392/1705 train_time:36921ms step_avg:94.19ms +step:393/1705 train_time:37013ms step_avg:94.18ms +step:394/1705 train_time:37106ms step_avg:94.18ms +step:395/1705 train_time:37198ms step_avg:94.17ms +step:396/1705 train_time:37291ms step_avg:94.17ms +step:397/1705 train_time:37384ms step_avg:94.17ms +step:398/1705 train_time:37478ms step_avg:94.16ms +step:399/1705 train_time:37572ms step_avg:94.17ms +step:400/1705 train_time:37667ms step_avg:94.17ms +step:401/1705 train_time:37760ms step_avg:94.16ms +step:402/1705 train_time:37853ms step_avg:94.16ms +step:403/1705 train_time:37945ms step_avg:94.16ms +step:404/1705 train_time:38037ms step_avg:94.15ms +step:405/1705 train_time:38130ms step_avg:94.15ms +step:406/1705 train_time:38222ms step_avg:94.14ms +step:407/1705 train_time:38315ms step_avg:94.14ms +step:408/1705 train_time:38408ms step_avg:94.14ms +step:409/1705 train_time:38501ms step_avg:94.14ms +step:410/1705 train_time:38596ms step_avg:94.14ms +step:411/1705 train_time:38690ms step_avg:94.14ms +step:412/1705 train_time:38785ms step_avg:94.14ms +step:413/1705 train_time:38877ms step_avg:94.13ms +step:414/1705 train_time:38971ms step_avg:94.13ms +step:415/1705 train_time:39063ms step_avg:94.13ms +step:416/1705 train_time:39155ms step_avg:94.12ms +step:417/1705 train_time:39248ms step_avg:94.12ms +step:418/1705 train_time:39340ms step_avg:94.12ms +step:419/1705 train_time:39433ms step_avg:94.11ms +step:420/1705 train_time:39526ms step_avg:94.11ms +step:421/1705 train_time:39620ms step_avg:94.11ms +step:422/1705 train_time:39714ms 
step_avg:94.11ms +step:423/1705 train_time:39808ms step_avg:94.11ms +step:424/1705 train_time:39901ms step_avg:94.11ms +step:425/1705 train_time:40149ms step_avg:94.47ms +step:426/1705 train_time:40259ms step_avg:94.50ms +step:427/1705 train_time:40350ms step_avg:94.50ms +step:428/1705 train_time:40442ms step_avg:94.49ms +step:429/1705 train_time:40534ms step_avg:94.49ms +step:430/1705 train_time:40626ms step_avg:94.48ms +step:431/1705 train_time:40717ms step_avg:94.47ms +step:432/1705 train_time:40809ms step_avg:94.47ms +step:433/1705 train_time:40902ms step_avg:94.46ms +step:434/1705 train_time:40993ms step_avg:94.45ms +step:435/1705 train_time:41090ms step_avg:94.46ms +step:436/1705 train_time:41186ms step_avg:94.46ms +step:437/1705 train_time:41281ms step_avg:94.46ms +step:438/1705 train_time:41374ms step_avg:94.46ms +step:439/1705 train_time:41467ms step_avg:94.46ms +step:440/1705 train_time:41559ms step_avg:94.45ms +step:441/1705 train_time:41651ms step_avg:94.45ms +step:442/1705 train_time:41743ms step_avg:94.44ms +step:443/1705 train_time:41835ms step_avg:94.44ms +step:444/1705 train_time:41927ms step_avg:94.43ms +step:445/1705 train_time:42020ms step_avg:94.43ms +step:446/1705 train_time:42115ms step_avg:94.43ms +step:447/1705 train_time:42210ms step_avg:94.43ms +step:448/1705 train_time:42305ms step_avg:94.43ms +step:449/1705 train_time:42397ms step_avg:94.43ms +step:450/1705 train_time:42490ms step_avg:94.42ms +step:451/1705 train_time:42583ms step_avg:94.42ms +step:452/1705 train_time:42676ms step_avg:94.42ms +step:453/1705 train_time:42767ms step_avg:94.41ms +step:454/1705 train_time:42860ms step_avg:94.41ms +step:455/1705 train_time:42952ms step_avg:94.40ms +step:456/1705 train_time:43046ms step_avg:94.40ms +step:457/1705 train_time:43139ms step_avg:94.40ms +step:458/1705 train_time:43233ms step_avg:94.40ms +step:459/1705 train_time:43327ms step_avg:94.39ms +step:460/1705 train_time:43420ms step_avg:94.39ms +step:461/1705 train_time:43513ms step_avg:94.39ms +step:462/1705 train_time:43606ms step_avg:94.39ms +step:463/1705 train_time:43699ms step_avg:94.38ms +step:464/1705 train_time:43791ms step_avg:94.38ms +step:465/1705 train_time:43884ms step_avg:94.37ms +step:466/1705 train_time:43976ms step_avg:94.37ms +step:467/1705 train_time:44070ms step_avg:94.37ms +step:468/1705 train_time:44163ms step_avg:94.37ms +step:469/1705 train_time:44256ms step_avg:94.36ms +step:470/1705 train_time:44350ms step_avg:94.36ms +step:471/1705 train_time:44443ms step_avg:94.36ms +step:472/1705 train_time:44535ms step_avg:94.35ms +step:473/1705 train_time:44629ms step_avg:94.35ms +step:474/1705 train_time:44721ms step_avg:94.35ms +step:475/1705 train_time:44815ms step_avg:94.35ms +step:476/1705 train_time:44908ms step_avg:94.34ms +step:477/1705 train_time:45001ms step_avg:94.34ms +step:478/1705 train_time:45095ms step_avg:94.34ms +step:479/1705 train_time:45189ms step_avg:94.34ms +step:480/1705 train_time:45282ms step_avg:94.34ms +step:481/1705 train_time:45375ms step_avg:94.33ms +step:482/1705 train_time:45469ms step_avg:94.33ms +step:483/1705 train_time:45562ms step_avg:94.33ms +step:484/1705 train_time:45655ms step_avg:94.33ms +step:485/1705 train_time:45747ms step_avg:94.32ms +step:486/1705 train_time:45840ms step_avg:94.32ms +step:487/1705 train_time:45933ms step_avg:94.32ms +step:488/1705 train_time:46026ms step_avg:94.31ms +step:489/1705 train_time:46118ms step_avg:94.31ms +step:490/1705 train_time:46212ms step_avg:94.31ms +step:491/1705 train_time:46305ms step_avg:94.31ms +step:492/1705 
train_time:46397ms step_avg:94.30ms +step:493/1705 train_time:46491ms step_avg:94.30ms +step:494/1705 train_time:46585ms step_avg:94.30ms +step:495/1705 train_time:46677ms step_avg:94.30ms +step:496/1705 train_time:46770ms step_avg:94.29ms +step:497/1705 train_time:46862ms step_avg:94.29ms +step:498/1705 train_time:46955ms step_avg:94.29ms +step:499/1705 train_time:47048ms step_avg:94.28ms +step:500/1705 train_time:47141ms step_avg:94.28ms +step:500/1705 val_loss:3.7219 train_time:47234ms step_avg:94.47ms +step:501/1705 train_time:47256ms step_avg:94.32ms +step:502/1705 train_time:47332ms step_avg:94.29ms +step:503/1705 train_time:47430ms step_avg:94.30ms +step:504/1705 train_time:47525ms step_avg:94.30ms +step:505/1705 train_time:47617ms step_avg:94.29ms +step:506/1705 train_time:47709ms step_avg:94.29ms +step:507/1705 train_time:47802ms step_avg:94.28ms +step:508/1705 train_time:47894ms step_avg:94.28ms +step:509/1705 train_time:47986ms step_avg:94.27ms +step:510/1705 train_time:48078ms step_avg:94.27ms +step:511/1705 train_time:48170ms step_avg:94.27ms +step:512/1705 train_time:48265ms step_avg:94.27ms +step:513/1705 train_time:48360ms step_avg:94.27ms +step:514/1705 train_time:48454ms step_avg:94.27ms +step:515/1705 train_time:48548ms step_avg:94.27ms +step:516/1705 train_time:48641ms step_avg:94.27ms +step:517/1705 train_time:48734ms step_avg:94.26ms +step:518/1705 train_time:48827ms step_avg:94.26ms +step:519/1705 train_time:48920ms step_avg:94.26ms +step:520/1705 train_time:49012ms step_avg:94.25ms +step:521/1705 train_time:49105ms step_avg:94.25ms +step:522/1705 train_time:49197ms step_avg:94.25ms +step:523/1705 train_time:49290ms step_avg:94.24ms +step:524/1705 train_time:49384ms step_avg:94.24ms +step:525/1705 train_time:49479ms step_avg:94.25ms +step:526/1705 train_time:49571ms step_avg:94.24ms +step:527/1705 train_time:49664ms step_avg:94.24ms +step:528/1705 train_time:49757ms step_avg:94.24ms +step:529/1705 train_time:49850ms step_avg:94.23ms +step:530/1705 train_time:49943ms step_avg:94.23ms +step:531/1705 train_time:50036ms step_avg:94.23ms +step:532/1705 train_time:50128ms step_avg:94.23ms +step:533/1705 train_time:50221ms step_avg:94.22ms +step:534/1705 train_time:50314ms step_avg:94.22ms +step:535/1705 train_time:50408ms step_avg:94.22ms +step:536/1705 train_time:50501ms step_avg:94.22ms +step:537/1705 train_time:50594ms step_avg:94.22ms +step:538/1705 train_time:50687ms step_avg:94.21ms +step:539/1705 train_time:50781ms step_avg:94.21ms +step:540/1705 train_time:50874ms step_avg:94.21ms +step:541/1705 train_time:50966ms step_avg:94.21ms +step:542/1705 train_time:51059ms step_avg:94.20ms +step:543/1705 train_time:51152ms step_avg:94.20ms +step:544/1705 train_time:51245ms step_avg:94.20ms +step:545/1705 train_time:51338ms step_avg:94.20ms +step:546/1705 train_time:51432ms step_avg:94.20ms +step:547/1705 train_time:51526ms step_avg:94.20ms +step:548/1705 train_time:51619ms step_avg:94.19ms +step:549/1705 train_time:51711ms step_avg:94.19ms +step:550/1705 train_time:51805ms step_avg:94.19ms +step:551/1705 train_time:51898ms step_avg:94.19ms +step:552/1705 train_time:51990ms step_avg:94.18ms +step:553/1705 train_time:52083ms step_avg:94.18ms +step:554/1705 train_time:52175ms step_avg:94.18ms +step:555/1705 train_time:52267ms step_avg:94.18ms +step:556/1705 train_time:52361ms step_avg:94.17ms +step:557/1705 train_time:52454ms step_avg:94.17ms +step:558/1705 train_time:52547ms step_avg:94.17ms +step:559/1705 train_time:52640ms step_avg:94.17ms +step:560/1705 train_time:52733ms 
step_avg:94.17ms +step:561/1705 train_time:52827ms step_avg:94.17ms +step:562/1705 train_time:52920ms step_avg:94.16ms +step:563/1705 train_time:53013ms step_avg:94.16ms +step:564/1705 train_time:53105ms step_avg:94.16ms +step:565/1705 train_time:53198ms step_avg:94.16ms +step:566/1705 train_time:53291ms step_avg:94.15ms +step:567/1705 train_time:53384ms step_avg:94.15ms +step:568/1705 train_time:53477ms step_avg:94.15ms +step:569/1705 train_time:53570ms step_avg:94.15ms +step:570/1705 train_time:53663ms step_avg:94.15ms +step:571/1705 train_time:53758ms step_avg:94.15ms +step:572/1705 train_time:53852ms step_avg:94.15ms +step:573/1705 train_time:53946ms step_avg:94.15ms +step:574/1705 train_time:54040ms step_avg:94.15ms +step:575/1705 train_time:54135ms step_avg:94.15ms +step:576/1705 train_time:54229ms step_avg:94.15ms +step:577/1705 train_time:54323ms step_avg:94.15ms +step:578/1705 train_time:54418ms step_avg:94.15ms +step:579/1705 train_time:54512ms step_avg:94.15ms +step:580/1705 train_time:54606ms step_avg:94.15ms +step:581/1705 train_time:54701ms step_avg:94.15ms +step:582/1705 train_time:54796ms step_avg:94.15ms +step:583/1705 train_time:54889ms step_avg:94.15ms +step:584/1705 train_time:54984ms step_avg:94.15ms +step:585/1705 train_time:55078ms step_avg:94.15ms +step:586/1705 train_time:55171ms step_avg:94.15ms +step:587/1705 train_time:55266ms step_avg:94.15ms +step:588/1705 train_time:55361ms step_avg:94.15ms +step:589/1705 train_time:55456ms step_avg:94.15ms +step:590/1705 train_time:55550ms step_avg:94.15ms +step:591/1705 train_time:55645ms step_avg:94.15ms +step:592/1705 train_time:55740ms step_avg:94.16ms +step:593/1705 train_time:55834ms step_avg:94.15ms +step:594/1705 train_time:55928ms step_avg:94.15ms +step:595/1705 train_time:56023ms step_avg:94.16ms +step:596/1705 train_time:56117ms step_avg:94.16ms +step:597/1705 train_time:56211ms step_avg:94.16ms +step:598/1705 train_time:56305ms step_avg:94.16ms +step:599/1705 train_time:56400ms step_avg:94.16ms +step:600/1705 train_time:56495ms step_avg:94.16ms +step:601/1705 train_time:56589ms step_avg:94.16ms +step:602/1705 train_time:56683ms step_avg:94.16ms +step:603/1705 train_time:56777ms step_avg:94.16ms +step:604/1705 train_time:56871ms step_avg:94.16ms +step:605/1705 train_time:56966ms step_avg:94.16ms +step:606/1705 train_time:57060ms step_avg:94.16ms +step:607/1705 train_time:57153ms step_avg:94.16ms +step:608/1705 train_time:57247ms step_avg:94.16ms +step:609/1705 train_time:57342ms step_avg:94.16ms +step:610/1705 train_time:57436ms step_avg:94.16ms +step:611/1705 train_time:57530ms step_avg:94.16ms +step:612/1705 train_time:57625ms step_avg:94.16ms +step:613/1705 train_time:57719ms step_avg:94.16ms +step:614/1705 train_time:57814ms step_avg:94.16ms +step:615/1705 train_time:57908ms step_avg:94.16ms +step:616/1705 train_time:58003ms step_avg:94.16ms +step:617/1705 train_time:58097ms step_avg:94.16ms +step:618/1705 train_time:58191ms step_avg:94.16ms +step:619/1705 train_time:58285ms step_avg:94.16ms +step:620/1705 train_time:58380ms step_avg:94.16ms +step:621/1705 train_time:58475ms step_avg:94.16ms +step:622/1705 train_time:58569ms step_avg:94.16ms +step:623/1705 train_time:58664ms step_avg:94.16ms +step:624/1705 train_time:58759ms step_avg:94.17ms +step:625/1705 train_time:58854ms step_avg:94.17ms +step:625/1705 val_loss:3.6232 train_time:58948ms step_avg:94.32ms +step:626/1705 train_time:58970ms step_avg:94.20ms +step:627/1705 train_time:59057ms step_avg:94.19ms +step:628/1705 train_time:59153ms step_avg:94.19ms 
+step:629/1705 train_time:59248ms step_avg:94.19ms +step:630/1705 train_time:59342ms step_avg:94.19ms +step:631/1705 train_time:59434ms step_avg:94.19ms +step:632/1705 train_time:59528ms step_avg:94.19ms +step:633/1705 train_time:59621ms step_avg:94.19ms +step:634/1705 train_time:59713ms step_avg:94.18ms +step:635/1705 train_time:59807ms step_avg:94.18ms +step:636/1705 train_time:59901ms step_avg:94.18ms +step:637/1705 train_time:59998ms step_avg:94.19ms +step:638/1705 train_time:60095ms step_avg:94.19ms +step:639/1705 train_time:60456ms step_avg:94.61ms +step:640/1705 train_time:60543ms step_avg:94.60ms +step:641/1705 train_time:60635ms step_avg:94.59ms +step:642/1705 train_time:60728ms step_avg:94.59ms +step:643/1705 train_time:60822ms step_avg:94.59ms +step:644/1705 train_time:60915ms step_avg:94.59ms +step:645/1705 train_time:61008ms step_avg:94.59ms +step:646/1705 train_time:61102ms step_avg:94.58ms +step:647/1705 train_time:61194ms step_avg:94.58ms +step:648/1705 train_time:61288ms step_avg:94.58ms +step:649/1705 train_time:61386ms step_avg:94.59ms +step:650/1705 train_time:61484ms step_avg:94.59ms +step:651/1705 train_time:61578ms step_avg:94.59ms +step:652/1705 train_time:61671ms step_avg:94.59ms +step:653/1705 train_time:61766ms step_avg:94.59ms +step:654/1705 train_time:61860ms step_avg:94.59ms +step:655/1705 train_time:61953ms step_avg:94.58ms +step:656/1705 train_time:62047ms step_avg:94.58ms +step:657/1705 train_time:62142ms step_avg:94.58ms +step:658/1705 train_time:62235ms step_avg:94.58ms +step:659/1705 train_time:62330ms step_avg:94.58ms +step:660/1705 train_time:62427ms step_avg:94.59ms +step:661/1705 train_time:62522ms step_avg:94.59ms +step:662/1705 train_time:62616ms step_avg:94.59ms +step:663/1705 train_time:62711ms step_avg:94.59ms +step:664/1705 train_time:62805ms step_avg:94.59ms +step:665/1705 train_time:62899ms step_avg:94.58ms +step:666/1705 train_time:62992ms step_avg:94.58ms +step:667/1705 train_time:63086ms step_avg:94.58ms +step:668/1705 train_time:63180ms step_avg:94.58ms +step:669/1705 train_time:63274ms step_avg:94.58ms +step:670/1705 train_time:63369ms step_avg:94.58ms +step:671/1705 train_time:63465ms step_avg:94.58ms +step:672/1705 train_time:63560ms step_avg:94.58ms +step:673/1705 train_time:63655ms step_avg:94.58ms +step:674/1705 train_time:63749ms step_avg:94.58ms +step:675/1705 train_time:63843ms step_avg:94.58ms +step:676/1705 train_time:63937ms step_avg:94.58ms +step:677/1705 train_time:64030ms step_avg:94.58ms +step:678/1705 train_time:64124ms step_avg:94.58ms +step:679/1705 train_time:64218ms step_avg:94.58ms +step:680/1705 train_time:64312ms step_avg:94.58ms +step:681/1705 train_time:64407ms step_avg:94.58ms +step:682/1705 train_time:64503ms step_avg:94.58ms +step:683/1705 train_time:64597ms step_avg:94.58ms +step:684/1705 train_time:64691ms step_avg:94.58ms +step:685/1705 train_time:64787ms step_avg:94.58ms +step:686/1705 train_time:64881ms step_avg:94.58ms +step:687/1705 train_time:64974ms step_avg:94.58ms +step:688/1705 train_time:65068ms step_avg:94.58ms +step:689/1705 train_time:65162ms step_avg:94.57ms +step:690/1705 train_time:65256ms step_avg:94.57ms +step:691/1705 train_time:65350ms step_avg:94.57ms +step:692/1705 train_time:65445ms step_avg:94.57ms +step:693/1705 train_time:65540ms step_avg:94.57ms +step:694/1705 train_time:65634ms step_avg:94.57ms +step:695/1705 train_time:65728ms step_avg:94.57ms +step:696/1705 train_time:65823ms step_avg:94.57ms +step:697/1705 train_time:65916ms step_avg:94.57ms +step:698/1705 train_time:66010ms 
step_avg:94.57ms +step:699/1705 train_time:66105ms step_avg:94.57ms +step:700/1705 train_time:66199ms step_avg:94.57ms +step:701/1705 train_time:66294ms step_avg:94.57ms +step:702/1705 train_time:66388ms step_avg:94.57ms +step:703/1705 train_time:66483ms step_avg:94.57ms +step:704/1705 train_time:66577ms step_avg:94.57ms +step:705/1705 train_time:66672ms step_avg:94.57ms +step:706/1705 train_time:66767ms step_avg:94.57ms +step:707/1705 train_time:66861ms step_avg:94.57ms +step:708/1705 train_time:66955ms step_avg:94.57ms +step:709/1705 train_time:67049ms step_avg:94.57ms +step:710/1705 train_time:67143ms step_avg:94.57ms +step:711/1705 train_time:67237ms step_avg:94.57ms +step:712/1705 train_time:67331ms step_avg:94.57ms +step:713/1705 train_time:67425ms step_avg:94.57ms +step:714/1705 train_time:67520ms step_avg:94.57ms +step:715/1705 train_time:67615ms step_avg:94.57ms +step:716/1705 train_time:67710ms step_avg:94.57ms +step:717/1705 train_time:67805ms step_avg:94.57ms +step:718/1705 train_time:67899ms step_avg:94.57ms +step:719/1705 train_time:67993ms step_avg:94.57ms +step:720/1705 train_time:68087ms step_avg:94.57ms +step:721/1705 train_time:68181ms step_avg:94.56ms +step:722/1705 train_time:68275ms step_avg:94.56ms +step:723/1705 train_time:68369ms step_avg:94.56ms +step:724/1705 train_time:68464ms step_avg:94.56ms +step:725/1705 train_time:68558ms step_avg:94.56ms +step:726/1705 train_time:68652ms step_avg:94.56ms +step:727/1705 train_time:68748ms step_avg:94.56ms +step:728/1705 train_time:68843ms step_avg:94.56ms +step:729/1705 train_time:68936ms step_avg:94.56ms +step:730/1705 train_time:69030ms step_avg:94.56ms +step:731/1705 train_time:69125ms step_avg:94.56ms +step:732/1705 train_time:69219ms step_avg:94.56ms +step:733/1705 train_time:69313ms step_avg:94.56ms +step:734/1705 train_time:69409ms step_avg:94.56ms +step:735/1705 train_time:69504ms step_avg:94.56ms +step:736/1705 train_time:69598ms step_avg:94.56ms +step:737/1705 train_time:69692ms step_avg:94.56ms +step:738/1705 train_time:69788ms step_avg:94.56ms +step:739/1705 train_time:69882ms step_avg:94.56ms +step:740/1705 train_time:69976ms step_avg:94.56ms +step:741/1705 train_time:70069ms step_avg:94.56ms +step:742/1705 train_time:70164ms step_avg:94.56ms +step:743/1705 train_time:70258ms step_avg:94.56ms +step:744/1705 train_time:70352ms step_avg:94.56ms +step:745/1705 train_time:70448ms step_avg:94.56ms +step:746/1705 train_time:70542ms step_avg:94.56ms +step:747/1705 train_time:70636ms step_avg:94.56ms +step:748/1705 train_time:70730ms step_avg:94.56ms +step:749/1705 train_time:70825ms step_avg:94.56ms +step:750/1705 train_time:70919ms step_avg:94.56ms +step:750/1705 val_loss:3.5678 train_time:71013ms step_avg:94.68ms +step:751/1705 train_time:71037ms step_avg:94.59ms +step:752/1705 train_time:71115ms step_avg:94.57ms +step:753/1705 train_time:71215ms step_avg:94.57ms +step:754/1705 train_time:71310ms step_avg:94.58ms +step:755/1705 train_time:71404ms step_avg:94.58ms +step:756/1705 train_time:71497ms step_avg:94.57ms +step:757/1705 train_time:71591ms step_avg:94.57ms +step:758/1705 train_time:71684ms step_avg:94.57ms +step:759/1705 train_time:71778ms step_avg:94.57ms +step:760/1705 train_time:71871ms step_avg:94.57ms +step:761/1705 train_time:71967ms step_avg:94.57ms +step:762/1705 train_time:72063ms step_avg:94.57ms +step:763/1705 train_time:72158ms step_avg:94.57ms +step:764/1705 train_time:72254ms step_avg:94.57ms +step:765/1705 train_time:72351ms step_avg:94.58ms +step:766/1705 train_time:72444ms step_avg:94.57ms 
+step:767/1705 train_time:72537ms step_avg:94.57ms +step:768/1705 train_time:72631ms step_avg:94.57ms +step:769/1705 train_time:72724ms step_avg:94.57ms +step:770/1705 train_time:72818ms step_avg:94.57ms +step:771/1705 train_time:72911ms step_avg:94.57ms +step:772/1705 train_time:73006ms step_avg:94.57ms +step:773/1705 train_time:73101ms step_avg:94.57ms +step:774/1705 train_time:73196ms step_avg:94.57ms +step:775/1705 train_time:73291ms step_avg:94.57ms +step:776/1705 train_time:73386ms step_avg:94.57ms +step:777/1705 train_time:73480ms step_avg:94.57ms +step:778/1705 train_time:73574ms step_avg:94.57ms +step:779/1705 train_time:73668ms step_avg:94.57ms +step:780/1705 train_time:73761ms step_avg:94.57ms +step:781/1705 train_time:73856ms step_avg:94.57ms +step:782/1705 train_time:73950ms step_avg:94.56ms +step:783/1705 train_time:74045ms step_avg:94.57ms +step:784/1705 train_time:74140ms step_avg:94.57ms +step:785/1705 train_time:74234ms step_avg:94.57ms +step:786/1705 train_time:74330ms step_avg:94.57ms +step:787/1705 train_time:74425ms step_avg:94.57ms +step:788/1705 train_time:74520ms step_avg:94.57ms +step:789/1705 train_time:74614ms step_avg:94.57ms +step:790/1705 train_time:74709ms step_avg:94.57ms +step:791/1705 train_time:74803ms step_avg:94.57ms +step:792/1705 train_time:74896ms step_avg:94.57ms +step:793/1705 train_time:74991ms step_avg:94.57ms +step:794/1705 train_time:75085ms step_avg:94.57ms +step:795/1705 train_time:75180ms step_avg:94.57ms +step:796/1705 train_time:75275ms step_avg:94.57ms +step:797/1705 train_time:75370ms step_avg:94.57ms +step:798/1705 train_time:75465ms step_avg:94.57ms +step:799/1705 train_time:75559ms step_avg:94.57ms +step:800/1705 train_time:75653ms step_avg:94.57ms +step:801/1705 train_time:75748ms step_avg:94.57ms +step:802/1705 train_time:75842ms step_avg:94.57ms +step:803/1705 train_time:75935ms step_avg:94.56ms +step:804/1705 train_time:76030ms step_avg:94.56ms +step:805/1705 train_time:76125ms step_avg:94.57ms +step:806/1705 train_time:76219ms step_avg:94.56ms +step:807/1705 train_time:76313ms step_avg:94.56ms +step:808/1705 train_time:76408ms step_avg:94.56ms +step:809/1705 train_time:76503ms step_avg:94.57ms +step:810/1705 train_time:76597ms step_avg:94.56ms +step:811/1705 train_time:76691ms step_avg:94.56ms +step:812/1705 train_time:76786ms step_avg:94.56ms +step:813/1705 train_time:76879ms step_avg:94.56ms +step:814/1705 train_time:76973ms step_avg:94.56ms +step:815/1705 train_time:77068ms step_avg:94.56ms +step:816/1705 train_time:77163ms step_avg:94.56ms +step:817/1705 train_time:77257ms step_avg:94.56ms +step:818/1705 train_time:77352ms step_avg:94.56ms +step:819/1705 train_time:77447ms step_avg:94.56ms +step:820/1705 train_time:77541ms step_avg:94.56ms +step:821/1705 train_time:77635ms step_avg:94.56ms +step:822/1705 train_time:77729ms step_avg:94.56ms +step:823/1705 train_time:77824ms step_avg:94.56ms +step:824/1705 train_time:77918ms step_avg:94.56ms +step:825/1705 train_time:78012ms step_avg:94.56ms +step:826/1705 train_time:78107ms step_avg:94.56ms +step:827/1705 train_time:78201ms step_avg:94.56ms +step:828/1705 train_time:78295ms step_avg:94.56ms +step:829/1705 train_time:78389ms step_avg:94.56ms +step:830/1705 train_time:78484ms step_avg:94.56ms +step:831/1705 train_time:78577ms step_avg:94.56ms +step:832/1705 train_time:78672ms step_avg:94.56ms +step:833/1705 train_time:78768ms step_avg:94.56ms +step:834/1705 train_time:78861ms step_avg:94.56ms +step:835/1705 train_time:78955ms step_avg:94.56ms +step:836/1705 train_time:79050ms 
step_avg:94.56ms +step:837/1705 train_time:79145ms step_avg:94.56ms +step:838/1705 train_time:79239ms step_avg:94.56ms +step:839/1705 train_time:79333ms step_avg:94.56ms +step:840/1705 train_time:79429ms step_avg:94.56ms +step:841/1705 train_time:79525ms step_avg:94.56ms +step:842/1705 train_time:79619ms step_avg:94.56ms +step:843/1705 train_time:79713ms step_avg:94.56ms +step:844/1705 train_time:79808ms step_avg:94.56ms +step:845/1705 train_time:79902ms step_avg:94.56ms +step:846/1705 train_time:79997ms step_avg:94.56ms +step:847/1705 train_time:80092ms step_avg:94.56ms +step:848/1705 train_time:80187ms step_avg:94.56ms +step:849/1705 train_time:80281ms step_avg:94.56ms +step:850/1705 train_time:80375ms step_avg:94.56ms +step:851/1705 train_time:80637ms step_avg:94.76ms +step:852/1705 train_time:80715ms step_avg:94.74ms +step:853/1705 train_time:80808ms step_avg:94.73ms +step:854/1705 train_time:80901ms step_avg:94.73ms +step:855/1705 train_time:80994ms step_avg:94.73ms +step:856/1705 train_time:81088ms step_avg:94.73ms +step:857/1705 train_time:81181ms step_avg:94.73ms +step:858/1705 train_time:81275ms step_avg:94.73ms +step:859/1705 train_time:81368ms step_avg:94.72ms +step:860/1705 train_time:81461ms step_avg:94.72ms +step:861/1705 train_time:81561ms step_avg:94.73ms +step:862/1705 train_time:81658ms step_avg:94.73ms +step:863/1705 train_time:81755ms step_avg:94.73ms +step:864/1705 train_time:81850ms step_avg:94.73ms +step:865/1705 train_time:81944ms step_avg:94.73ms +step:866/1705 train_time:82038ms step_avg:94.73ms +step:867/1705 train_time:82131ms step_avg:94.73ms +step:868/1705 train_time:82225ms step_avg:94.73ms +step:869/1705 train_time:82318ms step_avg:94.73ms +step:870/1705 train_time:82412ms step_avg:94.73ms +step:871/1705 train_time:82508ms step_avg:94.73ms +step:872/1705 train_time:82604ms step_avg:94.73ms +step:873/1705 train_time:82699ms step_avg:94.73ms +step:874/1705 train_time:82794ms step_avg:94.73ms +step:875/1705 train_time:82889ms step_avg:94.73ms +step:875/1705 val_loss:3.5253 train_time:82984ms step_avg:94.84ms +step:876/1705 train_time:83006ms step_avg:94.76ms +step:877/1705 train_time:83083ms step_avg:94.74ms +step:878/1705 train_time:83183ms step_avg:94.74ms +step:879/1705 train_time:83277ms step_avg:94.74ms +step:880/1705 train_time:83371ms step_avg:94.74ms +step:881/1705 train_time:83465ms step_avg:94.74ms +step:882/1705 train_time:83558ms step_avg:94.74ms +step:883/1705 train_time:83652ms step_avg:94.74ms +step:884/1705 train_time:83746ms step_avg:94.74ms +step:885/1705 train_time:83841ms step_avg:94.74ms +step:886/1705 train_time:83937ms step_avg:94.74ms +step:887/1705 train_time:84032ms step_avg:94.74ms +step:888/1705 train_time:84129ms step_avg:94.74ms +step:889/1705 train_time:84224ms step_avg:94.74ms +step:890/1705 train_time:84319ms step_avg:94.74ms +step:891/1705 train_time:84412ms step_avg:94.74ms +step:892/1705 train_time:84507ms step_avg:94.74ms +step:893/1705 train_time:84601ms step_avg:94.74ms +step:894/1705 train_time:84695ms step_avg:94.74ms +step:895/1705 train_time:84789ms step_avg:94.74ms +step:896/1705 train_time:84884ms step_avg:94.74ms +step:897/1705 train_time:84979ms step_avg:94.74ms +step:898/1705 train_time:85074ms step_avg:94.74ms +step:899/1705 train_time:85170ms step_avg:94.74ms +step:900/1705 train_time:85265ms step_avg:94.74ms +step:901/1705 train_time:85361ms step_avg:94.74ms +step:902/1705 train_time:85454ms step_avg:94.74ms +step:903/1705 train_time:85548ms step_avg:94.74ms +step:904/1705 train_time:85642ms step_avg:94.74ms 
+step:905/1705 train_time:85736ms step_avg:94.74ms +step:906/1705 train_time:85829ms step_avg:94.73ms +step:907/1705 train_time:85924ms step_avg:94.73ms +step:908/1705 train_time:86019ms step_avg:94.73ms +step:909/1705 train_time:86114ms step_avg:94.73ms +step:910/1705 train_time:86209ms step_avg:94.73ms +step:911/1705 train_time:86304ms step_avg:94.74ms +step:912/1705 train_time:86399ms step_avg:94.74ms +step:913/1705 train_time:86493ms step_avg:94.74ms +step:914/1705 train_time:86588ms step_avg:94.73ms +step:915/1705 train_time:86682ms step_avg:94.73ms +step:916/1705 train_time:86776ms step_avg:94.73ms +step:917/1705 train_time:86870ms step_avg:94.73ms +step:918/1705 train_time:86965ms step_avg:94.73ms +step:919/1705 train_time:87060ms step_avg:94.73ms +step:920/1705 train_time:87155ms step_avg:94.73ms +step:921/1705 train_time:87250ms step_avg:94.73ms +step:922/1705 train_time:87345ms step_avg:94.73ms +step:923/1705 train_time:87439ms step_avg:94.73ms +step:924/1705 train_time:87532ms step_avg:94.73ms +step:925/1705 train_time:87627ms step_avg:94.73ms +step:926/1705 train_time:87721ms step_avg:94.73ms +step:927/1705 train_time:87815ms step_avg:94.73ms +step:928/1705 train_time:87910ms step_avg:94.73ms +step:929/1705 train_time:88005ms step_avg:94.73ms +step:930/1705 train_time:88099ms step_avg:94.73ms +step:931/1705 train_time:88193ms step_avg:94.73ms +step:932/1705 train_time:88288ms step_avg:94.73ms +step:933/1705 train_time:88382ms step_avg:94.73ms +step:934/1705 train_time:88476ms step_avg:94.73ms +step:935/1705 train_time:88570ms step_avg:94.73ms +step:936/1705 train_time:88665ms step_avg:94.73ms +step:937/1705 train_time:88759ms step_avg:94.73ms +step:938/1705 train_time:88853ms step_avg:94.73ms +step:939/1705 train_time:88948ms step_avg:94.73ms +step:940/1705 train_time:89044ms step_avg:94.73ms +step:941/1705 train_time:89139ms step_avg:94.73ms +step:942/1705 train_time:89233ms step_avg:94.73ms +step:943/1705 train_time:89328ms step_avg:94.73ms +step:944/1705 train_time:89423ms step_avg:94.73ms +step:945/1705 train_time:89516ms step_avg:94.73ms +step:946/1705 train_time:89611ms step_avg:94.73ms +step:947/1705 train_time:89706ms step_avg:94.73ms +step:948/1705 train_time:89800ms step_avg:94.73ms +step:949/1705 train_time:89894ms step_avg:94.73ms +step:950/1705 train_time:89989ms step_avg:94.72ms +step:951/1705 train_time:90083ms step_avg:94.72ms +step:952/1705 train_time:90178ms step_avg:94.72ms +step:953/1705 train_time:90272ms step_avg:94.72ms +step:954/1705 train_time:90367ms step_avg:94.72ms +step:955/1705 train_time:90462ms step_avg:94.72ms +step:956/1705 train_time:90556ms step_avg:94.72ms +step:957/1705 train_time:90650ms step_avg:94.72ms +step:958/1705 train_time:90745ms step_avg:94.72ms +step:959/1705 train_time:90839ms step_avg:94.72ms +step:960/1705 train_time:90933ms step_avg:94.72ms +step:961/1705 train_time:91027ms step_avg:94.72ms +step:962/1705 train_time:91122ms step_avg:94.72ms +step:963/1705 train_time:91217ms step_avg:94.72ms +step:964/1705 train_time:91311ms step_avg:94.72ms +step:965/1705 train_time:91407ms step_avg:94.72ms +step:966/1705 train_time:91501ms step_avg:94.72ms +step:967/1705 train_time:91595ms step_avg:94.72ms +step:968/1705 train_time:91689ms step_avg:94.72ms +step:969/1705 train_time:91783ms step_avg:94.72ms +step:970/1705 train_time:91878ms step_avg:94.72ms +step:971/1705 train_time:91971ms step_avg:94.72ms +step:972/1705 train_time:92067ms step_avg:94.72ms +step:973/1705 train_time:92162ms step_avg:94.72ms +step:974/1705 train_time:92257ms 
step_avg:94.72ms +step:975/1705 train_time:92351ms step_avg:94.72ms +step:976/1705 train_time:92446ms step_avg:94.72ms +step:977/1705 train_time:92540ms step_avg:94.72ms +step:978/1705 train_time:92634ms step_avg:94.72ms +step:979/1705 train_time:92729ms step_avg:94.72ms +step:980/1705 train_time:92824ms step_avg:94.72ms +step:981/1705 train_time:92918ms step_avg:94.72ms +step:982/1705 train_time:93012ms step_avg:94.72ms +step:983/1705 train_time:93109ms step_avg:94.72ms +step:984/1705 train_time:93204ms step_avg:94.72ms +step:985/1705 train_time:93298ms step_avg:94.72ms +step:986/1705 train_time:93392ms step_avg:94.72ms +step:987/1705 train_time:93487ms step_avg:94.72ms +step:988/1705 train_time:93582ms step_avg:94.72ms +step:989/1705 train_time:93676ms step_avg:94.72ms +step:990/1705 train_time:93771ms step_avg:94.72ms +step:991/1705 train_time:93866ms step_avg:94.72ms +step:992/1705 train_time:93960ms step_avg:94.72ms +step:993/1705 train_time:94054ms step_avg:94.72ms +step:994/1705 train_time:94148ms step_avg:94.72ms +step:995/1705 train_time:94244ms step_avg:94.72ms +step:996/1705 train_time:94338ms step_avg:94.72ms +step:997/1705 train_time:94432ms step_avg:94.72ms +step:998/1705 train_time:94526ms step_avg:94.72ms +step:999/1705 train_time:94621ms step_avg:94.72ms +step:1000/1705 train_time:94716ms step_avg:94.72ms +step:1000/1705 val_loss:3.4858 train_time:94810ms step_avg:94.81ms +step:1001/1705 train_time:94832ms step_avg:94.74ms +step:1002/1705 train_time:94912ms step_avg:94.72ms +step:1003/1705 train_time:95012ms step_avg:94.73ms +step:1004/1705 train_time:95108ms step_avg:94.73ms +step:1005/1705 train_time:95201ms step_avg:94.73ms +step:1006/1705 train_time:95295ms step_avg:94.73ms +step:1007/1705 train_time:95389ms step_avg:94.73ms +step:1008/1705 train_time:95481ms step_avg:94.72ms +step:1009/1705 train_time:95574ms step_avg:94.72ms +step:1010/1705 train_time:95667ms step_avg:94.72ms +step:1011/1705 train_time:95762ms step_avg:94.72ms +step:1012/1705 train_time:95858ms step_avg:94.72ms +step:1013/1705 train_time:95954ms step_avg:94.72ms +step:1014/1705 train_time:96051ms step_avg:94.72ms +step:1015/1705 train_time:96147ms step_avg:94.73ms +step:1016/1705 train_time:96241ms step_avg:94.73ms +step:1017/1705 train_time:96334ms step_avg:94.72ms +step:1018/1705 train_time:96428ms step_avg:94.72ms +step:1019/1705 train_time:96522ms step_avg:94.72ms +step:1020/1705 train_time:96615ms step_avg:94.72ms +step:1021/1705 train_time:96709ms step_avg:94.72ms +step:1022/1705 train_time:96803ms step_avg:94.72ms +step:1023/1705 train_time:96899ms step_avg:94.72ms +step:1024/1705 train_time:96995ms step_avg:94.72ms +step:1025/1705 train_time:97090ms step_avg:94.72ms +step:1026/1705 train_time:97186ms step_avg:94.72ms +step:1027/1705 train_time:97280ms step_avg:94.72ms +step:1028/1705 train_time:97374ms step_avg:94.72ms +step:1029/1705 train_time:97468ms step_avg:94.72ms +step:1030/1705 train_time:97561ms step_avg:94.72ms +step:1031/1705 train_time:97654ms step_avg:94.72ms +step:1032/1705 train_time:97749ms step_avg:94.72ms +step:1033/1705 train_time:97844ms step_avg:94.72ms +step:1034/1705 train_time:97939ms step_avg:94.72ms +step:1035/1705 train_time:98033ms step_avg:94.72ms +step:1036/1705 train_time:98130ms step_avg:94.72ms +step:1037/1705 train_time:98226ms step_avg:94.72ms +step:1038/1705 train_time:98321ms step_avg:94.72ms +step:1039/1705 train_time:98414ms step_avg:94.72ms +step:1040/1705 train_time:98509ms step_avg:94.72ms +step:1041/1705 train_time:98603ms step_avg:94.72ms 
+step:1042/1705 train_time:98698ms step_avg:94.72ms +step:1043/1705 train_time:98791ms step_avg:94.72ms +step:1044/1705 train_time:98888ms step_avg:94.72ms +step:1045/1705 train_time:98982ms step_avg:94.72ms +step:1046/1705 train_time:99076ms step_avg:94.72ms +step:1047/1705 train_time:99171ms step_avg:94.72ms +step:1048/1705 train_time:99266ms step_avg:94.72ms +step:1049/1705 train_time:99361ms step_avg:94.72ms +step:1050/1705 train_time:99455ms step_avg:94.72ms +step:1051/1705 train_time:99549ms step_avg:94.72ms +step:1052/1705 train_time:99644ms step_avg:94.72ms +step:1053/1705 train_time:99738ms step_avg:94.72ms +step:1054/1705 train_time:99832ms step_avg:94.72ms +step:1055/1705 train_time:99927ms step_avg:94.72ms +step:1056/1705 train_time:100022ms step_avg:94.72ms +step:1057/1705 train_time:100117ms step_avg:94.72ms +step:1058/1705 train_time:100211ms step_avg:94.72ms +step:1059/1705 train_time:100306ms step_avg:94.72ms +step:1060/1705 train_time:100401ms step_avg:94.72ms +step:1061/1705 train_time:100494ms step_avg:94.72ms +step:1062/1705 train_time:100743ms step_avg:94.86ms +step:1063/1705 train_time:100846ms step_avg:94.87ms +step:1064/1705 train_time:100938ms step_avg:94.87ms +step:1065/1705 train_time:101032ms step_avg:94.87ms +step:1066/1705 train_time:101126ms step_avg:94.87ms +step:1067/1705 train_time:101219ms step_avg:94.86ms +step:1068/1705 train_time:101313ms step_avg:94.86ms +step:1069/1705 train_time:101407ms step_avg:94.86ms +step:1070/1705 train_time:101500ms step_avg:94.86ms +step:1071/1705 train_time:101593ms step_avg:94.86ms +step:1072/1705 train_time:101693ms step_avg:94.86ms +step:1073/1705 train_time:101791ms step_avg:94.87ms +step:1074/1705 train_time:101888ms step_avg:94.87ms +step:1075/1705 train_time:101982ms step_avg:94.87ms +step:1076/1705 train_time:102076ms step_avg:94.87ms +step:1077/1705 train_time:102170ms step_avg:94.87ms +step:1078/1705 train_time:102264ms step_avg:94.86ms +step:1079/1705 train_time:102357ms step_avg:94.86ms +step:1080/1705 train_time:102451ms step_avg:94.86ms +step:1081/1705 train_time:102544ms step_avg:94.86ms +step:1082/1705 train_time:102639ms step_avg:94.86ms +step:1083/1705 train_time:102734ms step_avg:94.86ms +step:1084/1705 train_time:102831ms step_avg:94.86ms +step:1085/1705 train_time:102928ms step_avg:94.86ms +step:1086/1705 train_time:103023ms step_avg:94.86ms +step:1087/1705 train_time:103117ms step_avg:94.86ms +step:1088/1705 train_time:103211ms step_avg:94.86ms +step:1089/1705 train_time:103305ms step_avg:94.86ms +step:1090/1705 train_time:103399ms step_avg:94.86ms +step:1091/1705 train_time:103493ms step_avg:94.86ms +step:1092/1705 train_time:103587ms step_avg:94.86ms +step:1093/1705 train_time:103682ms step_avg:94.86ms +step:1094/1705 train_time:103778ms step_avg:94.86ms +step:1095/1705 train_time:103873ms step_avg:94.86ms +step:1096/1705 train_time:103967ms step_avg:94.86ms +step:1097/1705 train_time:104062ms step_avg:94.86ms +step:1098/1705 train_time:104156ms step_avg:94.86ms +step:1099/1705 train_time:104250ms step_avg:94.86ms +step:1100/1705 train_time:104345ms step_avg:94.86ms +step:1101/1705 train_time:104439ms step_avg:94.86ms +step:1102/1705 train_time:104533ms step_avg:94.86ms +step:1103/1705 train_time:104628ms step_avg:94.86ms +step:1104/1705 train_time:104723ms step_avg:94.86ms +step:1105/1705 train_time:104818ms step_avg:94.86ms +step:1106/1705 train_time:104912ms step_avg:94.86ms +step:1107/1705 train_time:105007ms step_avg:94.86ms +step:1108/1705 train_time:105102ms step_avg:94.86ms +step:1109/1705 
train_time:105196ms step_avg:94.86ms +step:1110/1705 train_time:105290ms step_avg:94.86ms +step:1111/1705 train_time:105385ms step_avg:94.86ms +step:1112/1705 train_time:105480ms step_avg:94.86ms +step:1113/1705 train_time:105573ms step_avg:94.85ms +step:1114/1705 train_time:105668ms step_avg:94.85ms +step:1115/1705 train_time:105763ms step_avg:94.86ms +step:1116/1705 train_time:105858ms step_avg:94.85ms +step:1117/1705 train_time:105953ms step_avg:94.85ms +step:1118/1705 train_time:106047ms step_avg:94.85ms +step:1119/1705 train_time:106142ms step_avg:94.85ms +step:1120/1705 train_time:106237ms step_avg:94.85ms +step:1121/1705 train_time:106331ms step_avg:94.85ms +step:1122/1705 train_time:106427ms step_avg:94.85ms +step:1123/1705 train_time:106521ms step_avg:94.85ms +step:1124/1705 train_time:106615ms step_avg:94.85ms +step:1125/1705 train_time:106710ms step_avg:94.85ms +step:1125/1705 val_loss:3.4384 train_time:106806ms step_avg:94.94ms +step:1126/1705 train_time:106828ms step_avg:94.87ms +step:1127/1705 train_time:106907ms step_avg:94.86ms +step:1128/1705 train_time:107005ms step_avg:94.86ms +step:1129/1705 train_time:107099ms step_avg:94.86ms +step:1130/1705 train_time:107193ms step_avg:94.86ms +step:1131/1705 train_time:107287ms step_avg:94.86ms +step:1132/1705 train_time:107380ms step_avg:94.86ms +step:1133/1705 train_time:107474ms step_avg:94.86ms +step:1134/1705 train_time:107568ms step_avg:94.86ms +step:1135/1705 train_time:107662ms step_avg:94.86ms +step:1136/1705 train_time:107757ms step_avg:94.86ms +step:1137/1705 train_time:107854ms step_avg:94.86ms +step:1138/1705 train_time:107950ms step_avg:94.86ms +step:1139/1705 train_time:108047ms step_avg:94.86ms +step:1140/1705 train_time:108142ms step_avg:94.86ms +step:1141/1705 train_time:108236ms step_avg:94.86ms +step:1142/1705 train_time:108330ms step_avg:94.86ms +step:1143/1705 train_time:108425ms step_avg:94.86ms +step:1144/1705 train_time:108520ms step_avg:94.86ms +step:1145/1705 train_time:108614ms step_avg:94.86ms +step:1146/1705 train_time:108709ms step_avg:94.86ms +step:1147/1705 train_time:108805ms step_avg:94.86ms +step:1148/1705 train_time:108901ms step_avg:94.86ms +step:1149/1705 train_time:108997ms step_avg:94.86ms +step:1150/1705 train_time:109093ms step_avg:94.86ms +step:1151/1705 train_time:109188ms step_avg:94.86ms +step:1152/1705 train_time:109284ms step_avg:94.86ms +step:1153/1705 train_time:109378ms step_avg:94.86ms +step:1154/1705 train_time:109473ms step_avg:94.86ms +step:1155/1705 train_time:109569ms step_avg:94.87ms +step:1156/1705 train_time:109664ms step_avg:94.86ms +step:1157/1705 train_time:109759ms step_avg:94.86ms +step:1158/1705 train_time:109855ms step_avg:94.87ms +step:1159/1705 train_time:109951ms step_avg:94.87ms +step:1160/1705 train_time:110047ms step_avg:94.87ms +step:1161/1705 train_time:110142ms step_avg:94.87ms +step:1162/1705 train_time:110236ms step_avg:94.87ms +step:1163/1705 train_time:110331ms step_avg:94.87ms +step:1164/1705 train_time:110427ms step_avg:94.87ms +step:1165/1705 train_time:110522ms step_avg:94.87ms +step:1166/1705 train_time:110617ms step_avg:94.87ms +step:1167/1705 train_time:110713ms step_avg:94.87ms +step:1168/1705 train_time:110809ms step_avg:94.87ms +step:1169/1705 train_time:110905ms step_avg:94.87ms +step:1170/1705 train_time:111000ms step_avg:94.87ms +step:1171/1705 train_time:111097ms step_avg:94.87ms +step:1172/1705 train_time:111193ms step_avg:94.87ms +step:1173/1705 train_time:111289ms step_avg:94.88ms +step:1174/1705 train_time:111384ms step_avg:94.88ms 
+step:1175/1705 train_time:111479ms step_avg:94.88ms +step:1176/1705 train_time:111574ms step_avg:94.88ms +step:1177/1705 train_time:111669ms step_avg:94.88ms +step:1178/1705 train_time:111764ms step_avg:94.88ms +step:1179/1705 train_time:111861ms step_avg:94.88ms +step:1180/1705 train_time:111955ms step_avg:94.88ms +step:1181/1705 train_time:112050ms step_avg:94.88ms +step:1182/1705 train_time:112146ms step_avg:94.88ms +step:1183/1705 train_time:112241ms step_avg:94.88ms +step:1184/1705 train_time:112336ms step_avg:94.88ms +step:1185/1705 train_time:112432ms step_avg:94.88ms +step:1186/1705 train_time:112527ms step_avg:94.88ms +step:1187/1705 train_time:112622ms step_avg:94.88ms +step:1188/1705 train_time:112717ms step_avg:94.88ms +step:1189/1705 train_time:112813ms step_avg:94.88ms +step:1190/1705 train_time:112909ms step_avg:94.88ms +step:1191/1705 train_time:113005ms step_avg:94.88ms +step:1192/1705 train_time:113100ms step_avg:94.88ms +step:1193/1705 train_time:113196ms step_avg:94.88ms +step:1194/1705 train_time:113291ms step_avg:94.88ms +step:1195/1705 train_time:113386ms step_avg:94.88ms +step:1196/1705 train_time:113482ms step_avg:94.88ms +step:1197/1705 train_time:113576ms step_avg:94.88ms +step:1198/1705 train_time:113673ms step_avg:94.89ms +step:1199/1705 train_time:113769ms step_avg:94.89ms +step:1200/1705 train_time:113864ms step_avg:94.89ms +step:1201/1705 train_time:113959ms step_avg:94.89ms +step:1202/1705 train_time:114054ms step_avg:94.89ms +step:1203/1705 train_time:114150ms step_avg:94.89ms +step:1204/1705 train_time:114246ms step_avg:94.89ms +step:1205/1705 train_time:114341ms step_avg:94.89ms +step:1206/1705 train_time:114436ms step_avg:94.89ms +step:1207/1705 train_time:114532ms step_avg:94.89ms +step:1208/1705 train_time:114628ms step_avg:94.89ms +step:1209/1705 train_time:114723ms step_avg:94.89ms +step:1210/1705 train_time:114818ms step_avg:94.89ms +step:1211/1705 train_time:114914ms step_avg:94.89ms +step:1212/1705 train_time:115010ms step_avg:94.89ms +step:1213/1705 train_time:115105ms step_avg:94.89ms +step:1214/1705 train_time:115200ms step_avg:94.89ms +step:1215/1705 train_time:115295ms step_avg:94.89ms +step:1216/1705 train_time:115391ms step_avg:94.89ms +step:1217/1705 train_time:115486ms step_avg:94.89ms +step:1218/1705 train_time:115582ms step_avg:94.90ms +step:1219/1705 train_time:115677ms step_avg:94.89ms +step:1220/1705 train_time:115772ms step_avg:94.90ms +step:1221/1705 train_time:115868ms step_avg:94.90ms +step:1222/1705 train_time:115963ms step_avg:94.90ms +step:1223/1705 train_time:116058ms step_avg:94.90ms +step:1224/1705 train_time:116154ms step_avg:94.90ms +step:1225/1705 train_time:116250ms step_avg:94.90ms +step:1226/1705 train_time:116347ms step_avg:94.90ms +step:1227/1705 train_time:116442ms step_avg:94.90ms +step:1228/1705 train_time:116536ms step_avg:94.90ms +step:1229/1705 train_time:116631ms step_avg:94.90ms +step:1230/1705 train_time:116726ms step_avg:94.90ms +step:1231/1705 train_time:116822ms step_avg:94.90ms +step:1232/1705 train_time:116916ms step_avg:94.90ms +step:1233/1705 train_time:117011ms step_avg:94.90ms +step:1234/1705 train_time:117107ms step_avg:94.90ms +step:1235/1705 train_time:117202ms step_avg:94.90ms +step:1236/1705 train_time:117297ms step_avg:94.90ms +step:1237/1705 train_time:117393ms step_avg:94.90ms +step:1238/1705 train_time:117489ms step_avg:94.90ms +step:1239/1705 train_time:117585ms step_avg:94.90ms +step:1240/1705 train_time:117680ms step_avg:94.90ms +step:1241/1705 train_time:117775ms step_avg:94.90ms 
+step:1242/1705 train_time:117870ms step_avg:94.90ms +step:1243/1705 train_time:117966ms step_avg:94.90ms +step:1244/1705 train_time:118062ms step_avg:94.90ms +step:1245/1705 train_time:118156ms step_avg:94.90ms +step:1246/1705 train_time:118252ms step_avg:94.91ms +step:1247/1705 train_time:118347ms step_avg:94.91ms +step:1248/1705 train_time:118443ms step_avg:94.91ms +step:1249/1705 train_time:118537ms step_avg:94.91ms +step:1250/1705 train_time:118633ms step_avg:94.91ms +step:1250/1705 val_loss:3.3898 train_time:118729ms step_avg:94.98ms +step:1251/1705 train_time:118752ms step_avg:94.93ms +step:1252/1705 train_time:118837ms step_avg:94.92ms +step:1253/1705 train_time:118932ms step_avg:94.92ms +step:1254/1705 train_time:119026ms step_avg:94.92ms +step:1255/1705 train_time:119120ms step_avg:94.92ms +step:1256/1705 train_time:119214ms step_avg:94.92ms +step:1257/1705 train_time:119309ms step_avg:94.92ms +step:1258/1705 train_time:119402ms step_avg:94.91ms +step:1259/1705 train_time:119496ms step_avg:94.91ms +step:1260/1705 train_time:119590ms step_avg:94.91ms +step:1261/1705 train_time:119689ms step_avg:94.92ms +step:1262/1705 train_time:119788ms step_avg:94.92ms +step:1263/1705 train_time:119887ms step_avg:94.92ms +step:1264/1705 train_time:119983ms step_avg:94.92ms +step:1265/1705 train_time:120078ms step_avg:94.92ms +step:1266/1705 train_time:120172ms step_avg:94.92ms +step:1267/1705 train_time:120268ms step_avg:94.92ms +step:1268/1705 train_time:120362ms step_avg:94.92ms +step:1269/1705 train_time:120456ms step_avg:94.92ms +step:1270/1705 train_time:120551ms step_avg:94.92ms +step:1271/1705 train_time:120646ms step_avg:94.92ms +step:1272/1705 train_time:120743ms step_avg:94.92ms +step:1273/1705 train_time:120840ms step_avg:94.93ms +step:1274/1705 train_time:121238ms step_avg:95.16ms +step:1275/1705 train_time:121309ms step_avg:95.14ms +step:1276/1705 train_time:121402ms step_avg:95.14ms +step:1277/1705 train_time:121496ms step_avg:95.14ms +step:1278/1705 train_time:121589ms step_avg:95.14ms +step:1279/1705 train_time:121683ms step_avg:95.14ms +step:1280/1705 train_time:121777ms step_avg:95.14ms +step:1281/1705 train_time:121870ms step_avg:95.14ms +step:1282/1705 train_time:121964ms step_avg:95.14ms +step:1283/1705 train_time:122058ms step_avg:95.13ms +step:1284/1705 train_time:122159ms step_avg:95.14ms +step:1285/1705 train_time:122257ms step_avg:95.14ms +step:1286/1705 train_time:122353ms step_avg:95.14ms +step:1287/1705 train_time:122448ms step_avg:95.14ms +step:1288/1705 train_time:122545ms step_avg:95.14ms +step:1289/1705 train_time:122639ms step_avg:95.14ms +step:1290/1705 train_time:122733ms step_avg:95.14ms +step:1291/1705 train_time:122827ms step_avg:95.14ms +step:1292/1705 train_time:122922ms step_avg:95.14ms +step:1293/1705 train_time:123016ms step_avg:95.14ms +step:1294/1705 train_time:123112ms step_avg:95.14ms +step:1295/1705 train_time:123209ms step_avg:95.14ms +step:1296/1705 train_time:123307ms step_avg:95.14ms +step:1297/1705 train_time:123403ms step_avg:95.14ms +step:1298/1705 train_time:123497ms step_avg:95.14ms +step:1299/1705 train_time:123593ms step_avg:95.14ms +step:1300/1705 train_time:123687ms step_avg:95.14ms +step:1301/1705 train_time:123783ms step_avg:95.14ms +step:1302/1705 train_time:123876ms step_avg:95.14ms +step:1303/1705 train_time:123970ms step_avg:95.14ms +step:1304/1705 train_time:124065ms step_avg:95.14ms +step:1305/1705 train_time:124161ms step_avg:95.14ms +step:1306/1705 train_time:124258ms step_avg:95.14ms +step:1307/1705 train_time:124354ms 
step_avg:95.14ms +step:1308/1705 train_time:124451ms step_avg:95.15ms +step:1309/1705 train_time:124546ms step_avg:95.15ms +step:1310/1705 train_time:124643ms step_avg:95.15ms +step:1311/1705 train_time:124739ms step_avg:95.15ms +step:1312/1705 train_time:124832ms step_avg:95.15ms +step:1313/1705 train_time:124927ms step_avg:95.15ms +step:1314/1705 train_time:125022ms step_avg:95.15ms +step:1315/1705 train_time:125116ms step_avg:95.15ms +step:1316/1705 train_time:125212ms step_avg:95.15ms +step:1317/1705 train_time:125308ms step_avg:95.15ms +step:1318/1705 train_time:125404ms step_avg:95.15ms +step:1319/1705 train_time:125500ms step_avg:95.15ms +step:1320/1705 train_time:125596ms step_avg:95.15ms +step:1321/1705 train_time:125692ms step_avg:95.15ms +step:1322/1705 train_time:125788ms step_avg:95.15ms +step:1323/1705 train_time:125884ms step_avg:95.15ms +step:1324/1705 train_time:125977ms step_avg:95.15ms +step:1325/1705 train_time:126072ms step_avg:95.15ms +step:1326/1705 train_time:126167ms step_avg:95.15ms +step:1327/1705 train_time:126263ms step_avg:95.15ms +step:1328/1705 train_time:126358ms step_avg:95.15ms +step:1329/1705 train_time:126454ms step_avg:95.15ms +step:1330/1705 train_time:126550ms step_avg:95.15ms +step:1331/1705 train_time:126646ms step_avg:95.15ms +step:1332/1705 train_time:126741ms step_avg:95.15ms +step:1333/1705 train_time:126835ms step_avg:95.15ms +step:1334/1705 train_time:126930ms step_avg:95.15ms +step:1335/1705 train_time:127025ms step_avg:95.15ms +step:1336/1705 train_time:127120ms step_avg:95.15ms +step:1337/1705 train_time:127215ms step_avg:95.15ms +step:1338/1705 train_time:127310ms step_avg:95.15ms +step:1339/1705 train_time:127406ms step_avg:95.15ms +step:1340/1705 train_time:127502ms step_avg:95.15ms +step:1341/1705 train_time:127596ms step_avg:95.15ms +step:1342/1705 train_time:127691ms step_avg:95.15ms +step:1343/1705 train_time:127788ms step_avg:95.15ms +step:1344/1705 train_time:127882ms step_avg:95.15ms +step:1345/1705 train_time:127977ms step_avg:95.15ms +step:1346/1705 train_time:128072ms step_avg:95.15ms +step:1347/1705 train_time:128167ms step_avg:95.15ms +step:1348/1705 train_time:128263ms step_avg:95.15ms +step:1349/1705 train_time:128359ms step_avg:95.15ms +step:1350/1705 train_time:128454ms step_avg:95.15ms +step:1351/1705 train_time:128551ms step_avg:95.15ms +step:1352/1705 train_time:128647ms step_avg:95.15ms +step:1353/1705 train_time:128743ms step_avg:95.15ms +step:1354/1705 train_time:128838ms step_avg:95.15ms +step:1355/1705 train_time:128933ms step_avg:95.15ms +step:1356/1705 train_time:129028ms step_avg:95.15ms +step:1357/1705 train_time:129123ms step_avg:95.15ms +step:1358/1705 train_time:129218ms step_avg:95.15ms +step:1359/1705 train_time:129313ms step_avg:95.15ms +step:1360/1705 train_time:129408ms step_avg:95.15ms +step:1361/1705 train_time:129505ms step_avg:95.15ms +step:1362/1705 train_time:129601ms step_avg:95.16ms +step:1363/1705 train_time:129696ms step_avg:95.15ms +step:1364/1705 train_time:129791ms step_avg:95.15ms +step:1365/1705 train_time:129886ms step_avg:95.15ms +step:1366/1705 train_time:129980ms step_avg:95.15ms +step:1367/1705 train_time:130075ms step_avg:95.15ms +step:1368/1705 train_time:130171ms step_avg:95.15ms +step:1369/1705 train_time:130266ms step_avg:95.15ms +step:1370/1705 train_time:130362ms step_avg:95.15ms +step:1371/1705 train_time:130457ms step_avg:95.15ms +step:1372/1705 train_time:130553ms step_avg:95.15ms +step:1373/1705 train_time:130649ms step_avg:95.16ms +step:1374/1705 train_time:130745ms 
step_avg:95.16ms +step:1375/1705 train_time:130840ms step_avg:95.16ms +step:1375/1705 val_loss:3.3524 train_time:130935ms step_avg:95.23ms +step:1376/1705 train_time:130957ms step_avg:95.17ms +step:1377/1705 train_time:131036ms step_avg:95.16ms +step:1378/1705 train_time:131134ms step_avg:95.16ms +step:1379/1705 train_time:131229ms step_avg:95.16ms +step:1380/1705 train_time:131324ms step_avg:95.16ms +step:1381/1705 train_time:131419ms step_avg:95.16ms +step:1382/1705 train_time:131513ms step_avg:95.16ms +step:1383/1705 train_time:131607ms step_avg:95.16ms +step:1384/1705 train_time:131703ms step_avg:95.16ms +step:1385/1705 train_time:131797ms step_avg:95.16ms +step:1386/1705 train_time:131893ms step_avg:95.16ms +step:1387/1705 train_time:131991ms step_avg:95.16ms +step:1388/1705 train_time:132089ms step_avg:95.16ms +step:1389/1705 train_time:132184ms step_avg:95.16ms +step:1390/1705 train_time:132279ms step_avg:95.16ms +step:1391/1705 train_time:132374ms step_avg:95.16ms +step:1392/1705 train_time:132468ms step_avg:95.16ms +step:1393/1705 train_time:132562ms step_avg:95.16ms +step:1394/1705 train_time:132657ms step_avg:95.16ms +step:1395/1705 train_time:132752ms step_avg:95.16ms +step:1396/1705 train_time:132847ms step_avg:95.16ms +step:1397/1705 train_time:132942ms step_avg:95.16ms +step:1398/1705 train_time:133039ms step_avg:95.16ms +step:1399/1705 train_time:133134ms step_avg:95.16ms +step:1400/1705 train_time:133230ms step_avg:95.16ms +step:1401/1705 train_time:133325ms step_avg:95.16ms +step:1402/1705 train_time:133420ms step_avg:95.16ms +step:1403/1705 train_time:133515ms step_avg:95.16ms +step:1404/1705 train_time:133611ms step_avg:95.16ms +step:1405/1705 train_time:133705ms step_avg:95.16ms +step:1406/1705 train_time:133801ms step_avg:95.16ms +step:1407/1705 train_time:133897ms step_avg:95.16ms +step:1408/1705 train_time:133993ms step_avg:95.17ms +step:1409/1705 train_time:134089ms step_avg:95.17ms +step:1410/1705 train_time:134184ms step_avg:95.17ms +step:1411/1705 train_time:134280ms step_avg:95.17ms +step:1412/1705 train_time:134375ms step_avg:95.17ms +step:1413/1705 train_time:134470ms step_avg:95.17ms +step:1414/1705 train_time:134565ms step_avg:95.17ms +step:1415/1705 train_time:134660ms step_avg:95.17ms +step:1416/1705 train_time:134755ms step_avg:95.17ms +step:1417/1705 train_time:134850ms step_avg:95.17ms +step:1418/1705 train_time:134944ms step_avg:95.17ms +step:1419/1705 train_time:135040ms step_avg:95.17ms +step:1420/1705 train_time:135136ms step_avg:95.17ms +step:1421/1705 train_time:135231ms step_avg:95.17ms +step:1422/1705 train_time:135326ms step_avg:95.17ms +step:1423/1705 train_time:135422ms step_avg:95.17ms +step:1424/1705 train_time:135517ms step_avg:95.17ms +step:1425/1705 train_time:135613ms step_avg:95.17ms +step:1426/1705 train_time:135707ms step_avg:95.17ms +step:1427/1705 train_time:135802ms step_avg:95.17ms +step:1428/1705 train_time:135898ms step_avg:95.17ms +step:1429/1705 train_time:135993ms step_avg:95.17ms +step:1430/1705 train_time:136089ms step_avg:95.17ms +step:1431/1705 train_time:136183ms step_avg:95.17ms +step:1432/1705 train_time:136279ms step_avg:95.17ms +step:1433/1705 train_time:136375ms step_avg:95.17ms +step:1434/1705 train_time:136470ms step_avg:95.17ms +step:1435/1705 train_time:136565ms step_avg:95.17ms +step:1436/1705 train_time:136660ms step_avg:95.17ms +step:1437/1705 train_time:136756ms step_avg:95.17ms +step:1438/1705 train_time:136850ms step_avg:95.17ms +step:1439/1705 train_time:136945ms step_avg:95.17ms +step:1440/1705 
train_time:137041ms step_avg:95.17ms +step:1441/1705 train_time:137137ms step_avg:95.17ms +step:1442/1705 train_time:137232ms step_avg:95.17ms +step:1443/1705 train_time:137328ms step_avg:95.17ms +step:1444/1705 train_time:137423ms step_avg:95.17ms +step:1445/1705 train_time:137519ms step_avg:95.17ms +step:1446/1705 train_time:137614ms step_avg:95.17ms +step:1447/1705 train_time:137709ms step_avg:95.17ms +step:1448/1705 train_time:137804ms step_avg:95.17ms +step:1449/1705 train_time:137901ms step_avg:95.17ms +step:1450/1705 train_time:137996ms step_avg:95.17ms +step:1451/1705 train_time:138091ms step_avg:95.17ms +step:1452/1705 train_time:138186ms step_avg:95.17ms +step:1453/1705 train_time:138280ms step_avg:95.17ms +step:1454/1705 train_time:138377ms step_avg:95.17ms +step:1455/1705 train_time:138474ms step_avg:95.17ms +step:1456/1705 train_time:138570ms step_avg:95.17ms +step:1457/1705 train_time:138664ms step_avg:95.17ms +step:1458/1705 train_time:138759ms step_avg:95.17ms +step:1459/1705 train_time:138855ms step_avg:95.17ms +step:1460/1705 train_time:138950ms step_avg:95.17ms +step:1461/1705 train_time:139045ms step_avg:95.17ms +step:1462/1705 train_time:139141ms step_avg:95.17ms +step:1463/1705 train_time:139237ms step_avg:95.17ms +step:1464/1705 train_time:139332ms step_avg:95.17ms +step:1465/1705 train_time:139426ms step_avg:95.17ms +step:1466/1705 train_time:139522ms step_avg:95.17ms +step:1467/1705 train_time:139618ms step_avg:95.17ms +step:1468/1705 train_time:139714ms step_avg:95.17ms +step:1469/1705 train_time:139808ms step_avg:95.17ms +step:1470/1705 train_time:139902ms step_avg:95.17ms +step:1471/1705 train_time:139998ms step_avg:95.17ms +step:1472/1705 train_time:140094ms step_avg:95.17ms +step:1473/1705 train_time:140189ms step_avg:95.17ms +step:1474/1705 train_time:140284ms step_avg:95.17ms +step:1475/1705 train_time:140381ms step_avg:95.17ms +step:1476/1705 train_time:140476ms step_avg:95.17ms +step:1477/1705 train_time:140571ms step_avg:95.17ms +step:1478/1705 train_time:140666ms step_avg:95.17ms +step:1479/1705 train_time:140761ms step_avg:95.17ms +step:1480/1705 train_time:140857ms step_avg:95.17ms +step:1481/1705 train_time:140953ms step_avg:95.17ms +step:1482/1705 train_time:141050ms step_avg:95.18ms +step:1483/1705 train_time:141145ms step_avg:95.18ms +step:1484/1705 train_time:141240ms step_avg:95.17ms +step:1485/1705 train_time:141597ms step_avg:95.35ms +step:1486/1705 train_time:141697ms step_avg:95.35ms +step:1487/1705 train_time:141790ms step_avg:95.35ms +step:1488/1705 train_time:141884ms step_avg:95.35ms +step:1489/1705 train_time:141978ms step_avg:95.35ms +step:1490/1705 train_time:142072ms step_avg:95.35ms +step:1491/1705 train_time:142166ms step_avg:95.35ms +step:1492/1705 train_time:142260ms step_avg:95.35ms +step:1493/1705 train_time:142354ms step_avg:95.35ms +step:1494/1705 train_time:142449ms step_avg:95.35ms +step:1495/1705 train_time:142546ms step_avg:95.35ms +step:1496/1705 train_time:142645ms step_avg:95.35ms +step:1497/1705 train_time:142741ms step_avg:95.35ms +step:1498/1705 train_time:142836ms step_avg:95.35ms +step:1499/1705 train_time:142931ms step_avg:95.35ms +step:1500/1705 train_time:143026ms step_avg:95.35ms +step:1500/1705 val_loss:3.3200 train_time:143120ms step_avg:95.41ms +step:1501/1705 train_time:143143ms step_avg:95.37ms +step:1502/1705 train_time:143221ms step_avg:95.35ms +step:1503/1705 train_time:143318ms step_avg:95.35ms +step:1504/1705 train_time:143414ms step_avg:95.35ms +step:1505/1705 train_time:143508ms step_avg:95.35ms 
+step:1506/1705 train_time:143602ms step_avg:95.35ms +step:1507/1705 train_time:143696ms step_avg:95.35ms +step:1508/1705 train_time:143791ms step_avg:95.35ms +step:1509/1705 train_time:143885ms step_avg:95.35ms +step:1510/1705 train_time:143979ms step_avg:95.35ms +step:1511/1705 train_time:144075ms step_avg:95.35ms +step:1512/1705 train_time:144176ms step_avg:95.35ms +step:1513/1705 train_time:144273ms step_avg:95.36ms +step:1514/1705 train_time:144370ms step_avg:95.36ms +step:1515/1705 train_time:144466ms step_avg:95.36ms +step:1516/1705 train_time:144560ms step_avg:95.36ms +step:1517/1705 train_time:144654ms step_avg:95.36ms +step:1518/1705 train_time:144748ms step_avg:95.35ms +step:1519/1705 train_time:144843ms step_avg:95.35ms +step:1520/1705 train_time:144937ms step_avg:95.35ms +step:1521/1705 train_time:145032ms step_avg:95.35ms +step:1522/1705 train_time:145129ms step_avg:95.35ms +step:1523/1705 train_time:145227ms step_avg:95.36ms +step:1524/1705 train_time:145323ms step_avg:95.36ms +step:1525/1705 train_time:145419ms step_avg:95.36ms +step:1526/1705 train_time:145514ms step_avg:95.36ms +step:1527/1705 train_time:145609ms step_avg:95.36ms +step:1528/1705 train_time:145704ms step_avg:95.36ms +step:1529/1705 train_time:145798ms step_avg:95.36ms +step:1530/1705 train_time:145892ms step_avg:95.35ms +step:1531/1705 train_time:145986ms step_avg:95.35ms +step:1532/1705 train_time:146081ms step_avg:95.35ms +step:1533/1705 train_time:146178ms step_avg:95.35ms +step:1534/1705 train_time:146275ms step_avg:95.35ms +step:1535/1705 train_time:146371ms step_avg:95.36ms +step:1536/1705 train_time:146467ms step_avg:95.36ms +step:1537/1705 train_time:146562ms step_avg:95.36ms +step:1538/1705 train_time:146657ms step_avg:95.36ms +step:1539/1705 train_time:146751ms step_avg:95.36ms +step:1540/1705 train_time:146847ms step_avg:95.36ms +step:1541/1705 train_time:146941ms step_avg:95.35ms +step:1542/1705 train_time:147036ms step_avg:95.35ms +step:1543/1705 train_time:147132ms step_avg:95.35ms +step:1544/1705 train_time:147228ms step_avg:95.36ms +step:1545/1705 train_time:147324ms step_avg:95.36ms +step:1546/1705 train_time:147419ms step_avg:95.36ms +step:1547/1705 train_time:147514ms step_avg:95.36ms +step:1548/1705 train_time:147610ms step_avg:95.36ms +step:1549/1705 train_time:147705ms step_avg:95.36ms +step:1550/1705 train_time:147800ms step_avg:95.36ms +step:1551/1705 train_time:147895ms step_avg:95.35ms +step:1552/1705 train_time:147990ms step_avg:95.35ms +step:1553/1705 train_time:148085ms step_avg:95.35ms +step:1554/1705 train_time:148181ms step_avg:95.35ms +step:1555/1705 train_time:148277ms step_avg:95.35ms +step:1556/1705 train_time:148372ms step_avg:95.36ms +step:1557/1705 train_time:148468ms step_avg:95.36ms +step:1558/1705 train_time:148563ms step_avg:95.36ms +step:1559/1705 train_time:148658ms step_avg:95.35ms +step:1560/1705 train_time:148754ms step_avg:95.35ms +step:1561/1705 train_time:148850ms step_avg:95.36ms +step:1562/1705 train_time:148944ms step_avg:95.35ms +step:1563/1705 train_time:149039ms step_avg:95.35ms +step:1564/1705 train_time:149135ms step_avg:95.35ms +step:1565/1705 train_time:149230ms step_avg:95.35ms +step:1566/1705 train_time:149327ms step_avg:95.36ms +step:1567/1705 train_time:149424ms step_avg:95.36ms +step:1568/1705 train_time:149518ms step_avg:95.36ms +step:1569/1705 train_time:149613ms step_avg:95.36ms +step:1570/1705 train_time:149709ms step_avg:95.36ms +step:1571/1705 train_time:149804ms step_avg:95.36ms +step:1572/1705 train_time:149899ms step_avg:95.36ms 
+step:1573/1705 train_time:149994ms step_avg:95.36ms +step:1574/1705 train_time:150089ms step_avg:95.36ms +step:1575/1705 train_time:150185ms step_avg:95.36ms +step:1576/1705 train_time:150280ms step_avg:95.36ms +step:1577/1705 train_time:150375ms step_avg:95.35ms +step:1578/1705 train_time:150471ms step_avg:95.36ms +step:1579/1705 train_time:150567ms step_avg:95.36ms +step:1580/1705 train_time:150662ms step_avg:95.36ms +step:1581/1705 train_time:150757ms step_avg:95.36ms +step:1582/1705 train_time:150852ms step_avg:95.36ms +step:1583/1705 train_time:150949ms step_avg:95.36ms +step:1584/1705 train_time:151045ms step_avg:95.36ms +step:1585/1705 train_time:151140ms step_avg:95.36ms +step:1586/1705 train_time:151234ms step_avg:95.36ms +step:1587/1705 train_time:151330ms step_avg:95.36ms +step:1588/1705 train_time:151426ms step_avg:95.36ms +step:1589/1705 train_time:151521ms step_avg:95.36ms +step:1590/1705 train_time:151616ms step_avg:95.36ms +step:1591/1705 train_time:151712ms step_avg:95.36ms +step:1592/1705 train_time:151809ms step_avg:95.36ms +step:1593/1705 train_time:151905ms step_avg:95.36ms +step:1594/1705 train_time:152001ms step_avg:95.36ms +step:1595/1705 train_time:152095ms step_avg:95.36ms +step:1596/1705 train_time:152190ms step_avg:95.36ms +step:1597/1705 train_time:152286ms step_avg:95.36ms +step:1598/1705 train_time:152381ms step_avg:95.36ms +step:1599/1705 train_time:152475ms step_avg:95.36ms +step:1600/1705 train_time:152571ms step_avg:95.36ms +step:1601/1705 train_time:152666ms step_avg:95.36ms +step:1602/1705 train_time:152762ms step_avg:95.36ms +step:1603/1705 train_time:152858ms step_avg:95.36ms +step:1604/1705 train_time:152954ms step_avg:95.36ms +step:1605/1705 train_time:153050ms step_avg:95.36ms +step:1606/1705 train_time:153146ms step_avg:95.36ms +step:1607/1705 train_time:153242ms step_avg:95.36ms +step:1608/1705 train_time:153337ms step_avg:95.36ms +step:1609/1705 train_time:153432ms step_avg:95.36ms +step:1610/1705 train_time:153527ms step_avg:95.36ms +step:1611/1705 train_time:153623ms step_avg:95.36ms +step:1612/1705 train_time:153717ms step_avg:95.36ms +step:1613/1705 train_time:153813ms step_avg:95.36ms +step:1614/1705 train_time:153909ms step_avg:95.36ms +step:1615/1705 train_time:154004ms step_avg:95.36ms +step:1616/1705 train_time:154099ms step_avg:95.36ms +step:1617/1705 train_time:154194ms step_avg:95.36ms +step:1618/1705 train_time:154289ms step_avg:95.36ms +step:1619/1705 train_time:154384ms step_avg:95.36ms +step:1620/1705 train_time:154479ms step_avg:95.36ms +step:1621/1705 train_time:154574ms step_avg:95.36ms +step:1622/1705 train_time:154670ms step_avg:95.36ms +step:1623/1705 train_time:154766ms step_avg:95.36ms +step:1624/1705 train_time:154863ms step_avg:95.36ms +step:1625/1705 train_time:154959ms step_avg:95.36ms +step:1625/1705 val_loss:3.2920 train_time:155055ms step_avg:95.42ms +step:1626/1705 train_time:155078ms step_avg:95.37ms +step:1627/1705 train_time:155158ms step_avg:95.36ms +step:1628/1705 train_time:155257ms step_avg:95.37ms +step:1629/1705 train_time:155351ms step_avg:95.37ms +step:1630/1705 train_time:155446ms step_avg:95.37ms +step:1631/1705 train_time:155540ms step_avg:95.36ms +step:1632/1705 train_time:155634ms step_avg:95.36ms +step:1633/1705 train_time:155728ms step_avg:95.36ms +step:1634/1705 train_time:155823ms step_avg:95.36ms +step:1635/1705 train_time:155917ms step_avg:95.36ms +step:1636/1705 train_time:156012ms step_avg:95.36ms +step:1637/1705 train_time:156111ms step_avg:95.36ms +step:1638/1705 train_time:156210ms 
step_avg:95.37ms +step:1639/1705 train_time:156307ms step_avg:95.37ms +step:1640/1705 train_time:156402ms step_avg:95.37ms +step:1641/1705 train_time:156497ms step_avg:95.37ms +step:1642/1705 train_time:156592ms step_avg:95.37ms +step:1643/1705 train_time:156686ms step_avg:95.37ms +step:1644/1705 train_time:156781ms step_avg:95.37ms +step:1645/1705 train_time:156875ms step_avg:95.36ms +step:1646/1705 train_time:156970ms step_avg:95.36ms +step:1647/1705 train_time:157067ms step_avg:95.37ms +step:1648/1705 train_time:157165ms step_avg:95.37ms +step:1649/1705 train_time:157262ms step_avg:95.37ms +step:1650/1705 train_time:157357ms step_avg:95.37ms +step:1651/1705 train_time:157452ms step_avg:95.37ms +step:1652/1705 train_time:157546ms step_avg:95.37ms +step:1653/1705 train_time:157640ms step_avg:95.37ms +step:1654/1705 train_time:157734ms step_avg:95.37ms +step:1655/1705 train_time:157829ms step_avg:95.37ms +step:1656/1705 train_time:157925ms step_avg:95.37ms +step:1657/1705 train_time:158021ms step_avg:95.37ms +step:1658/1705 train_time:158118ms step_avg:95.37ms +step:1659/1705 train_time:158213ms step_avg:95.37ms +step:1660/1705 train_time:158309ms step_avg:95.37ms +step:1661/1705 train_time:158406ms step_avg:95.37ms +step:1662/1705 train_time:158502ms step_avg:95.37ms +step:1663/1705 train_time:158598ms step_avg:95.37ms +step:1664/1705 train_time:158692ms step_avg:95.37ms +step:1665/1705 train_time:158786ms step_avg:95.37ms +step:1666/1705 train_time:158881ms step_avg:95.37ms +step:1667/1705 train_time:158976ms step_avg:95.37ms +step:1668/1705 train_time:159072ms step_avg:95.37ms +step:1669/1705 train_time:159168ms step_avg:95.37ms +step:1670/1705 train_time:159264ms step_avg:95.37ms +step:1671/1705 train_time:159360ms step_avg:95.37ms +step:1672/1705 train_time:159455ms step_avg:95.37ms +step:1673/1705 train_time:159551ms step_avg:95.37ms +step:1674/1705 train_time:159646ms step_avg:95.37ms +step:1675/1705 train_time:159741ms step_avg:95.37ms +step:1676/1705 train_time:159836ms step_avg:95.37ms +step:1677/1705 train_time:159930ms step_avg:95.37ms +step:1678/1705 train_time:160026ms step_avg:95.37ms +step:1679/1705 train_time:160122ms step_avg:95.37ms +step:1680/1705 train_time:160217ms step_avg:95.37ms +step:1681/1705 train_time:160312ms step_avg:95.37ms +step:1682/1705 train_time:160408ms step_avg:95.37ms +step:1683/1705 train_time:160504ms step_avg:95.37ms +step:1684/1705 train_time:160599ms step_avg:95.37ms +step:1685/1705 train_time:160693ms step_avg:95.37ms +step:1686/1705 train_time:160788ms step_avg:95.37ms +step:1687/1705 train_time:160883ms step_avg:95.37ms +step:1688/1705 train_time:160979ms step_avg:95.37ms +step:1689/1705 train_time:161074ms step_avg:95.37ms +step:1690/1705 train_time:161170ms step_avg:95.37ms +step:1691/1705 train_time:161266ms step_avg:95.37ms +step:1692/1705 train_time:161361ms step_avg:95.37ms +step:1693/1705 train_time:161457ms step_avg:95.37ms +step:1694/1705 train_time:161552ms step_avg:95.37ms +step:1695/1705 train_time:161648ms step_avg:95.37ms +step:1696/1705 train_time:161744ms step_avg:95.37ms +step:1697/1705 train_time:161839ms step_avg:95.37ms +step:1698/1705 train_time:162087ms step_avg:95.46ms +step:1699/1705 train_time:162268ms step_avg:95.51ms +step:1700/1705 train_time:162361ms step_avg:95.51ms +step:1701/1705 train_time:162455ms step_avg:95.51ms +step:1702/1705 train_time:162549ms step_avg:95.50ms +step:1703/1705 train_time:162643ms step_avg:95.50ms +step:1704/1705 train_time:162738ms step_avg:95.50ms +step:1705/1705 train_time:162832ms 
step_avg:95.50ms +step:1705/1705 val_loss:3.2782 train_time:162926ms step_avg:95.56ms +peak memory allocated: 33992 MiB reserved: 49496 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_0f6c8eac-db39-49ce-bef8-08a34044625f.txt b/records/090525_SkipMLPBlocks/comparison_0f6c8eac-db39-49ce-bef8-08a34044625f.txt new file mode 100644 index 000000000..3762cf210 --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_0f6c8eac-db39-49ce-bef8-08a34044625f.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 
= ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, 
BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add 
= tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. 
flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. 
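+        # worked example (hand-checked): next_multiple_of_n(50257, n=128) == 50304
+        # (= 128 * 393), the smallest multiple of 128 that is >= 50257, so lm_head
+        # below gets 50304 output rows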
+ use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + 
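+        # illustrative sketch: for tokens == [BOS, t1, t2, BOS, t3, ...] the
+        # precomputed bos_idx is [0, 3, ...]; next_batch() walks it to hand each
+        # rank document-aligned (start, end) spans whose lengths sum to
+        # num_tokens_local + 1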
self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS ahead of bos_idx[{idx}]; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+        assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a BOS token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # last document was too long to account for _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * new_grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens // new_grad_accum_steps # mirror the per-accum-step division at generator start
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
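+# illustrative arithmetic: with the canonical 8xH100 launch, world_size == 8 and
+# grad_accum_steps == 8 // 8 == 1 (one backward pass per optimizer step); a
+# hypothetical 2-GPU launch would instead get grad_accum_steps == 4, i.e. four
+# sequential backward passes per step, keeping the effective batch size fixed.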
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) 
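+# schedule sketch (illustrative doctest-style values, hand-checked against the
+# get_lr/get_ws definitions above with num_iterations == 1670):
+# >>> get_lr(0), get_ws(0)       # stable-lr phase, shortest attention window
+# (1.0, 3)
+# >>> get_ws(600), get_ws(1200)  # window widens near 1/3 and 2/3 of training
+# (7, 11)
+# >>> round(get_lr(1336), 3)     # inside the linear cooldown (final 45% of steps)
+# 0.5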
+del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:39:10 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| 
GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 128W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 45C P0 128W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 44C P0 132W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 94903 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 94904 C /usr/bin/python3 610MiB | +| 0 N/A N/A 94905 C /usr/bin/python3 610MiB | +| 0 N/A N/A 94906 C /usr/bin/python3 610MiB | +| 0 N/A N/A 94907 C /usr/bin/python3 610MiB | +| 0 N/A N/A 94908 C /usr/bin/python3 610MiB | +| 0 N/A N/A 94909 C /usr/bin/python3 610MiB | +| 0 N/A N/A 94910 C /usr/bin/python3 610MiB | +| 1 N/A N/A 94904 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 94905 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 94906 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 94907 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 94908 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 94909 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 94910 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:358ms step_avg:358.46ms +step:2/1670 train_time:378ms step_avg:189.16ms +step:3/1670 train_time:452ms step_avg:150.71ms +step:4/1670 train_time:546ms step_avg:136.40ms +step:5/1670 
train_time:640ms step_avg:128.05ms +step:6/1670 train_time:735ms step_avg:122.48ms +step:7/1670 train_time:829ms step_avg:118.46ms +step:8/1670 train_time:924ms step_avg:115.53ms +step:9/1670 train_time:1020ms step_avg:113.32ms +step:10/1670 train_time:1115ms step_avg:111.52ms +step:11/1670 train_time:1210ms step_avg:109.99ms +step:12/1670 train_time:1308ms step_avg:109.01ms +step:13/1670 train_time:1407ms step_avg:108.21ms +step:14/1670 train_time:1504ms step_avg:107.45ms +step:15/1670 train_time:1601ms step_avg:106.71ms +step:16/1670 train_time:1697ms step_avg:106.05ms +step:17/1670 train_time:1792ms step_avg:105.42ms +step:18/1670 train_time:1887ms step_avg:104.84ms +step:19/1670 train_time:1982ms step_avg:104.34ms +step:20/1670 train_time:2078ms step_avg:103.89ms +step:21/1670 train_time:2174ms step_avg:103.52ms +step:22/1670 train_time:2270ms step_avg:103.20ms +step:23/1670 train_time:2366ms step_avg:102.88ms +step:24/1670 train_time:2463ms step_avg:102.62ms +step:25/1670 train_time:2560ms step_avg:102.40ms +step:26/1670 train_time:2656ms step_avg:102.16ms +step:27/1670 train_time:2752ms step_avg:101.93ms +step:28/1670 train_time:2847ms step_avg:101.69ms +step:29/1670 train_time:2943ms step_avg:101.47ms +step:30/1670 train_time:3038ms step_avg:101.27ms +step:31/1670 train_time:3133ms step_avg:101.07ms +step:32/1670 train_time:3229ms step_avg:100.92ms +step:33/1670 train_time:3326ms step_avg:100.78ms +step:34/1670 train_time:3422ms step_avg:100.65ms +step:35/1670 train_time:3519ms step_avg:100.55ms +step:36/1670 train_time:3615ms step_avg:100.42ms +step:37/1670 train_time:3712ms step_avg:100.31ms +step:38/1670 train_time:3807ms step_avg:100.18ms +step:39/1670 train_time:3903ms step_avg:100.07ms +step:40/1670 train_time:3999ms step_avg:99.97ms +step:41/1670 train_time:4095ms step_avg:99.87ms +step:42/1670 train_time:4191ms step_avg:99.77ms +step:43/1670 train_time:4286ms step_avg:99.68ms +step:44/1670 train_time:4382ms step_avg:99.58ms +step:45/1670 train_time:4479ms step_avg:99.54ms +step:46/1670 train_time:4575ms step_avg:99.46ms +step:47/1670 train_time:4673ms step_avg:99.43ms +step:48/1670 train_time:4768ms step_avg:99.33ms +step:49/1670 train_time:4863ms step_avg:99.24ms +step:50/1670 train_time:4959ms step_avg:99.19ms +step:51/1670 train_time:5055ms step_avg:99.12ms +step:52/1670 train_time:5151ms step_avg:99.05ms +step:53/1670 train_time:5246ms step_avg:98.99ms +step:54/1670 train_time:5342ms step_avg:98.93ms +step:55/1670 train_time:5438ms step_avg:98.87ms +step:56/1670 train_time:5534ms step_avg:98.83ms +step:57/1670 train_time:5630ms step_avg:98.77ms +step:58/1670 train_time:5726ms step_avg:98.72ms +step:59/1670 train_time:5821ms step_avg:98.66ms +step:60/1670 train_time:5917ms step_avg:98.62ms +step:61/1670 train_time:6014ms step_avg:98.59ms +step:62/1670 train_time:6109ms step_avg:98.54ms +step:63/1670 train_time:6205ms step_avg:98.49ms +step:64/1670 train_time:6301ms step_avg:98.45ms +step:65/1670 train_time:6396ms step_avg:98.41ms +step:66/1670 train_time:6493ms step_avg:98.37ms +step:67/1670 train_time:6588ms step_avg:98.33ms +step:68/1670 train_time:6684ms step_avg:98.29ms +step:69/1670 train_time:6780ms step_avg:98.27ms +step:70/1670 train_time:6876ms step_avg:98.23ms +step:71/1670 train_time:6972ms step_avg:98.19ms +step:72/1670 train_time:7067ms step_avg:98.15ms +step:73/1670 train_time:7163ms step_avg:98.12ms +step:74/1670 train_time:7259ms step_avg:98.09ms +step:75/1670 train_time:7354ms step_avg:98.06ms +step:76/1670 train_time:7450ms step_avg:98.02ms +step:77/1670 
train_time:7545ms step_avg:97.99ms +step:78/1670 train_time:7641ms step_avg:97.97ms +step:79/1670 train_time:7737ms step_avg:97.94ms +step:80/1670 train_time:7834ms step_avg:97.92ms +step:81/1670 train_time:7930ms step_avg:97.90ms +step:82/1670 train_time:8026ms step_avg:97.88ms +step:83/1670 train_time:8122ms step_avg:97.86ms +step:84/1670 train_time:8219ms step_avg:97.85ms +step:85/1670 train_time:8314ms step_avg:97.82ms +step:86/1670 train_time:8410ms step_avg:97.79ms +step:87/1670 train_time:8505ms step_avg:97.76ms +step:88/1670 train_time:8601ms step_avg:97.74ms +step:89/1670 train_time:8696ms step_avg:97.71ms +step:90/1670 train_time:8793ms step_avg:97.70ms +step:91/1670 train_time:8888ms step_avg:97.67ms +step:92/1670 train_time:8984ms step_avg:97.66ms +step:93/1670 train_time:9081ms step_avg:97.64ms +step:94/1670 train_time:9177ms step_avg:97.62ms +step:95/1670 train_time:9272ms step_avg:97.60ms +step:96/1670 train_time:9367ms step_avg:97.57ms +step:97/1670 train_time:9463ms step_avg:97.56ms +step:98/1670 train_time:9560ms step_avg:97.55ms +step:99/1670 train_time:9656ms step_avg:97.53ms +step:100/1670 train_time:9751ms step_avg:97.51ms +step:101/1670 train_time:9847ms step_avg:97.49ms +step:102/1670 train_time:9943ms step_avg:97.48ms +step:103/1670 train_time:10040ms step_avg:97.47ms +step:104/1670 train_time:10136ms step_avg:97.46ms +step:105/1670 train_time:10232ms step_avg:97.45ms +step:106/1670 train_time:10328ms step_avg:97.43ms +step:107/1670 train_time:10423ms step_avg:97.41ms +step:108/1670 train_time:10519ms step_avg:97.40ms +step:109/1670 train_time:10615ms step_avg:97.39ms +step:110/1670 train_time:10711ms step_avg:97.37ms +step:111/1670 train_time:10806ms step_avg:97.35ms +step:112/1670 train_time:10902ms step_avg:97.34ms +step:113/1670 train_time:10998ms step_avg:97.33ms +step:114/1670 train_time:11094ms step_avg:97.32ms +step:115/1670 train_time:11191ms step_avg:97.31ms +step:116/1670 train_time:11286ms step_avg:97.29ms +step:117/1670 train_time:11381ms step_avg:97.28ms +step:118/1670 train_time:11477ms step_avg:97.26ms +step:119/1670 train_time:11574ms step_avg:97.26ms +step:120/1670 train_time:11670ms step_avg:97.25ms +step:121/1670 train_time:11766ms step_avg:97.24ms +step:122/1670 train_time:11861ms step_avg:97.22ms +step:123/1670 train_time:11957ms step_avg:97.21ms +step:124/1670 train_time:12053ms step_avg:97.20ms +step:125/1670 train_time:12150ms step_avg:97.20ms +step:125/1670 val_loss:4.3007 train_time:12245ms step_avg:97.96ms +step:126/1670 train_time:12266ms step_avg:97.35ms +step:127/1670 train_time:12342ms step_avg:97.18ms +step:128/1670 train_time:12441ms step_avg:97.20ms +step:129/1670 train_time:12549ms step_avg:97.28ms +step:130/1670 train_time:12646ms step_avg:97.28ms +step:131/1670 train_time:12740ms step_avg:97.25ms +step:132/1670 train_time:12834ms step_avg:97.23ms +step:133/1670 train_time:12930ms step_avg:97.21ms +step:134/1670 train_time:13024ms step_avg:97.19ms +step:135/1670 train_time:13118ms step_avg:97.17ms +step:136/1670 train_time:13215ms step_avg:97.17ms +step:137/1670 train_time:13312ms step_avg:97.17ms +step:138/1670 train_time:13408ms step_avg:97.16ms +step:139/1670 train_time:13505ms step_avg:97.16ms +step:140/1670 train_time:13602ms step_avg:97.16ms +step:141/1670 train_time:13697ms step_avg:97.14ms +step:142/1670 train_time:13793ms step_avg:97.14ms +step:143/1670 train_time:13887ms step_avg:97.11ms +step:144/1670 train_time:13982ms step_avg:97.10ms +step:145/1670 train_time:14077ms step_avg:97.08ms +step:146/1670 
train_time:14172ms step_avg:97.07ms +step:147/1670 train_time:14268ms step_avg:97.06ms +step:148/1670 train_time:14364ms step_avg:97.05ms +step:149/1670 train_time:14460ms step_avg:97.05ms +step:150/1670 train_time:14556ms step_avg:97.04ms +step:151/1670 train_time:14652ms step_avg:97.04ms +step:152/1670 train_time:14749ms step_avg:97.03ms +step:153/1670 train_time:14844ms step_avg:97.02ms +step:154/1670 train_time:14939ms step_avg:97.01ms +step:155/1670 train_time:15034ms step_avg:96.99ms +step:156/1670 train_time:15130ms step_avg:96.99ms +step:157/1670 train_time:15225ms step_avg:96.98ms +step:158/1670 train_time:15321ms step_avg:96.97ms +step:159/1670 train_time:15416ms step_avg:96.96ms +step:160/1670 train_time:15512ms step_avg:96.95ms +step:161/1670 train_time:15608ms step_avg:96.94ms +step:162/1670 train_time:15704ms step_avg:96.94ms +step:163/1670 train_time:15799ms step_avg:96.93ms +step:164/1670 train_time:15894ms step_avg:96.91ms +step:165/1670 train_time:15990ms step_avg:96.91ms +step:166/1670 train_time:16085ms step_avg:96.90ms +step:167/1670 train_time:16179ms step_avg:96.88ms +step:168/1670 train_time:16275ms step_avg:96.88ms +step:169/1670 train_time:16372ms step_avg:96.88ms +step:170/1670 train_time:16468ms step_avg:96.87ms +step:171/1670 train_time:16563ms step_avg:96.86ms +step:172/1670 train_time:16659ms step_avg:96.85ms +step:173/1670 train_time:16755ms step_avg:96.85ms +step:174/1670 train_time:16850ms step_avg:96.84ms +step:175/1670 train_time:16946ms step_avg:96.83ms +step:176/1670 train_time:17040ms step_avg:96.82ms +step:177/1670 train_time:17136ms step_avg:96.81ms +step:178/1670 train_time:17231ms step_avg:96.80ms +step:179/1670 train_time:17326ms step_avg:96.80ms +step:180/1670 train_time:17421ms step_avg:96.78ms +step:181/1670 train_time:17517ms step_avg:96.78ms +step:182/1670 train_time:17613ms step_avg:96.78ms +step:183/1670 train_time:17709ms step_avg:96.77ms +step:184/1670 train_time:17805ms step_avg:96.77ms +step:185/1670 train_time:17900ms step_avg:96.75ms +step:186/1670 train_time:17995ms step_avg:96.75ms +step:187/1670 train_time:18091ms step_avg:96.74ms +step:188/1670 train_time:18187ms step_avg:96.74ms +step:189/1670 train_time:18282ms step_avg:96.73ms +step:190/1670 train_time:18377ms step_avg:96.72ms +step:191/1670 train_time:18472ms step_avg:96.71ms +step:192/1670 train_time:18569ms step_avg:96.71ms +step:193/1670 train_time:18664ms step_avg:96.71ms +step:194/1670 train_time:18760ms step_avg:96.70ms +step:195/1670 train_time:18854ms step_avg:96.69ms +step:196/1670 train_time:18951ms step_avg:96.69ms +step:197/1670 train_time:19047ms step_avg:96.68ms +step:198/1670 train_time:19142ms step_avg:96.68ms +step:199/1670 train_time:19237ms step_avg:96.67ms +step:200/1670 train_time:19333ms step_avg:96.66ms +step:201/1670 train_time:19429ms step_avg:96.66ms +step:202/1670 train_time:19524ms step_avg:96.65ms +step:203/1670 train_time:19619ms step_avg:96.65ms +step:204/1670 train_time:19716ms step_avg:96.65ms +step:205/1670 train_time:19811ms step_avg:96.64ms +step:206/1670 train_time:19907ms step_avg:96.64ms +step:207/1670 train_time:20003ms step_avg:96.63ms +step:208/1670 train_time:20098ms step_avg:96.62ms +step:209/1670 train_time:20193ms step_avg:96.62ms +step:210/1670 train_time:20289ms step_avg:96.62ms +step:211/1670 train_time:20384ms step_avg:96.61ms +step:212/1670 train_time:20479ms step_avg:96.60ms +step:213/1670 train_time:20775ms step_avg:97.54ms +step:214/1670 train_time:20905ms step_avg:97.69ms +step:215/1670 train_time:20999ms step_avg:97.67ms 
+step:216/1670 train_time:21093ms step_avg:97.65ms +step:217/1670 train_time:21188ms step_avg:97.64ms +step:218/1670 train_time:21282ms step_avg:97.62ms +step:219/1670 train_time:21377ms step_avg:97.61ms +step:220/1670 train_time:21472ms step_avg:97.60ms +step:221/1670 train_time:21566ms step_avg:97.58ms +step:222/1670 train_time:21660ms step_avg:97.57ms +step:223/1670 train_time:21759ms step_avg:97.57ms +step:224/1670 train_time:21856ms step_avg:97.57ms +step:225/1670 train_time:21954ms step_avg:97.57ms +step:226/1670 train_time:22051ms step_avg:97.57ms +step:227/1670 train_time:22147ms step_avg:97.56ms +step:228/1670 train_time:22242ms step_avg:97.55ms +step:229/1670 train_time:22337ms step_avg:97.54ms +step:230/1670 train_time:22432ms step_avg:97.53ms +step:231/1670 train_time:22527ms step_avg:97.52ms +step:232/1670 train_time:22622ms step_avg:97.51ms +step:233/1670 train_time:22717ms step_avg:97.50ms +step:234/1670 train_time:22813ms step_avg:97.49ms +step:235/1670 train_time:22911ms step_avg:97.49ms +step:236/1670 train_time:23008ms step_avg:97.49ms +step:237/1670 train_time:23104ms step_avg:97.48ms +step:238/1670 train_time:23199ms step_avg:97.47ms +step:239/1670 train_time:23294ms step_avg:97.47ms +step:240/1670 train_time:23390ms step_avg:97.46ms +step:241/1670 train_time:23486ms step_avg:97.45ms +step:242/1670 train_time:23580ms step_avg:97.44ms +step:243/1670 train_time:23675ms step_avg:97.43ms +step:244/1670 train_time:23771ms step_avg:97.42ms +step:245/1670 train_time:23867ms step_avg:97.42ms +step:246/1670 train_time:23964ms step_avg:97.41ms +step:247/1670 train_time:24060ms step_avg:97.41ms +step:248/1670 train_time:24155ms step_avg:97.40ms +step:249/1670 train_time:24251ms step_avg:97.39ms +step:250/1670 train_time:24346ms step_avg:97.39ms +step:250/1670 val_loss:3.9722 train_time:24441ms step_avg:97.76ms +step:251/1670 train_time:24464ms step_avg:97.46ms +step:252/1670 train_time:24544ms step_avg:97.40ms +step:253/1670 train_time:24643ms step_avg:97.40ms +step:254/1670 train_time:24739ms step_avg:97.40ms +step:255/1670 train_time:24834ms step_avg:97.39ms +step:256/1670 train_time:24928ms step_avg:97.37ms +step:257/1670 train_time:25023ms step_avg:97.36ms +step:258/1670 train_time:25118ms step_avg:97.36ms +step:259/1670 train_time:25213ms step_avg:97.35ms +step:260/1670 train_time:25308ms step_avg:97.34ms +step:261/1670 train_time:25403ms step_avg:97.33ms +step:262/1670 train_time:25501ms step_avg:97.33ms +step:263/1670 train_time:25599ms step_avg:97.34ms +step:264/1670 train_time:25695ms step_avg:97.33ms +step:265/1670 train_time:25791ms step_avg:97.32ms +step:266/1670 train_time:25885ms step_avg:97.31ms +step:267/1670 train_time:25981ms step_avg:97.31ms +step:268/1670 train_time:26075ms step_avg:97.30ms +step:269/1670 train_time:26170ms step_avg:97.29ms +step:270/1670 train_time:26265ms step_avg:97.28ms +step:271/1670 train_time:26361ms step_avg:97.27ms +step:272/1670 train_time:26458ms step_avg:97.27ms +step:273/1670 train_time:26555ms step_avg:97.27ms +step:274/1670 train_time:26651ms step_avg:97.27ms +step:275/1670 train_time:26747ms step_avg:97.26ms +step:276/1670 train_time:26842ms step_avg:97.25ms +step:277/1670 train_time:26938ms step_avg:97.25ms +step:278/1670 train_time:27033ms step_avg:97.24ms +step:279/1670 train_time:27128ms step_avg:97.23ms +step:280/1670 train_time:27223ms step_avg:97.23ms +step:281/1670 train_time:27319ms step_avg:97.22ms +step:282/1670 train_time:27415ms step_avg:97.22ms +step:283/1670 train_time:27511ms step_avg:97.21ms +step:284/1670 
train_time:27608ms step_avg:97.21ms +step:285/1670 train_time:27704ms step_avg:97.21ms +step:286/1670 train_time:27800ms step_avg:97.20ms +step:287/1670 train_time:27895ms step_avg:97.20ms +step:288/1670 train_time:27991ms step_avg:97.19ms +step:289/1670 train_time:28086ms step_avg:97.18ms +step:290/1670 train_time:28180ms step_avg:97.17ms +step:291/1670 train_time:28276ms step_avg:97.17ms +step:292/1670 train_time:28371ms step_avg:97.16ms +step:293/1670 train_time:28466ms step_avg:97.15ms +step:294/1670 train_time:28562ms step_avg:97.15ms +step:295/1670 train_time:28659ms step_avg:97.15ms +step:296/1670 train_time:28755ms step_avg:97.15ms +step:297/1670 train_time:28851ms step_avg:97.14ms +step:298/1670 train_time:28946ms step_avg:97.14ms +step:299/1670 train_time:29042ms step_avg:97.13ms +step:300/1670 train_time:29138ms step_avg:97.13ms +step:301/1670 train_time:29233ms step_avg:97.12ms +step:302/1670 train_time:29328ms step_avg:97.11ms +step:303/1670 train_time:29423ms step_avg:97.11ms +step:304/1670 train_time:29519ms step_avg:97.10ms +step:305/1670 train_time:29614ms step_avg:97.10ms +step:306/1670 train_time:29710ms step_avg:97.09ms +step:307/1670 train_time:29806ms step_avg:97.09ms +step:308/1670 train_time:29902ms step_avg:97.08ms +step:309/1670 train_time:29998ms step_avg:97.08ms +step:310/1670 train_time:30094ms step_avg:97.08ms +step:311/1670 train_time:30189ms step_avg:97.07ms +step:312/1670 train_time:30284ms step_avg:97.07ms +step:313/1670 train_time:30380ms step_avg:97.06ms +step:314/1670 train_time:30476ms step_avg:97.06ms +step:315/1670 train_time:30572ms step_avg:97.05ms +step:316/1670 train_time:30667ms step_avg:97.05ms +step:317/1670 train_time:30763ms step_avg:97.04ms +step:318/1670 train_time:30859ms step_avg:97.04ms +step:319/1670 train_time:30954ms step_avg:97.04ms +step:320/1670 train_time:31050ms step_avg:97.03ms +step:321/1670 train_time:31146ms step_avg:97.03ms +step:322/1670 train_time:31241ms step_avg:97.02ms +step:323/1670 train_time:31336ms step_avg:97.02ms +step:324/1670 train_time:31432ms step_avg:97.01ms +step:325/1670 train_time:31527ms step_avg:97.01ms +step:326/1670 train_time:31624ms step_avg:97.00ms +step:327/1670 train_time:31719ms step_avg:97.00ms +step:328/1670 train_time:31815ms step_avg:97.00ms +step:329/1670 train_time:31910ms step_avg:96.99ms +step:330/1670 train_time:32005ms step_avg:96.98ms +step:331/1670 train_time:32101ms step_avg:96.98ms +step:332/1670 train_time:32196ms step_avg:96.98ms +step:333/1670 train_time:32292ms step_avg:96.97ms +step:334/1670 train_time:32387ms step_avg:96.97ms +step:335/1670 train_time:32482ms step_avg:96.96ms +step:336/1670 train_time:32578ms step_avg:96.96ms +step:337/1670 train_time:32674ms step_avg:96.96ms +step:338/1670 train_time:32770ms step_avg:96.95ms +step:339/1670 train_time:32865ms step_avg:96.95ms +step:340/1670 train_time:32961ms step_avg:96.94ms +step:341/1670 train_time:33057ms step_avg:96.94ms +step:342/1670 train_time:33152ms step_avg:96.94ms +step:343/1670 train_time:33248ms step_avg:96.93ms +step:344/1670 train_time:33343ms step_avg:96.93ms +step:345/1670 train_time:33439ms step_avg:96.92ms +step:346/1670 train_time:33534ms step_avg:96.92ms +step:347/1670 train_time:33630ms step_avg:96.92ms +step:348/1670 train_time:33725ms step_avg:96.91ms +step:349/1670 train_time:33821ms step_avg:96.91ms +step:350/1670 train_time:33918ms step_avg:96.91ms +step:351/1670 train_time:34014ms step_avg:96.91ms +step:352/1670 train_time:34110ms step_avg:96.90ms +step:353/1670 train_time:34206ms step_avg:96.90ms 
+step:354/1670 train_time:34302ms step_avg:96.90ms +step:355/1670 train_time:34398ms step_avg:96.89ms +step:356/1670 train_time:34493ms step_avg:96.89ms +step:357/1670 train_time:34589ms step_avg:96.89ms +step:358/1670 train_time:34684ms step_avg:96.88ms +step:359/1670 train_time:34779ms step_avg:96.88ms +step:360/1670 train_time:34875ms step_avg:96.88ms +step:361/1670 train_time:34971ms step_avg:96.87ms +step:362/1670 train_time:35066ms step_avg:96.87ms +step:363/1670 train_time:35162ms step_avg:96.87ms +step:364/1670 train_time:35258ms step_avg:96.86ms +step:365/1670 train_time:35353ms step_avg:96.86ms +step:366/1670 train_time:35449ms step_avg:96.85ms +step:367/1670 train_time:35544ms step_avg:96.85ms +step:368/1670 train_time:35640ms step_avg:96.85ms +step:369/1670 train_time:35736ms step_avg:96.85ms +step:370/1670 train_time:35831ms step_avg:96.84ms +step:371/1670 train_time:35927ms step_avg:96.84ms +step:372/1670 train_time:36022ms step_avg:96.83ms +step:373/1670 train_time:36119ms step_avg:96.83ms +step:374/1670 train_time:36214ms step_avg:96.83ms +step:375/1670 train_time:36310ms step_avg:96.83ms +step:375/1670 val_loss:3.8222 train_time:36405ms step_avg:97.08ms +step:376/1670 train_time:36428ms step_avg:96.88ms +step:377/1670 train_time:36510ms step_avg:96.84ms +step:378/1670 train_time:36609ms step_avg:96.85ms +step:379/1670 train_time:36704ms step_avg:96.85ms +step:380/1670 train_time:36800ms step_avg:96.84ms +step:381/1670 train_time:36894ms step_avg:96.84ms +step:382/1670 train_time:36989ms step_avg:96.83ms +step:383/1670 train_time:37084ms step_avg:96.82ms +step:384/1670 train_time:37179ms step_avg:96.82ms +step:385/1670 train_time:37273ms step_avg:96.81ms +step:386/1670 train_time:37369ms step_avg:96.81ms +step:387/1670 train_time:37467ms step_avg:96.81ms +step:388/1670 train_time:37564ms step_avg:96.82ms +step:389/1670 train_time:37660ms step_avg:96.81ms +step:390/1670 train_time:37756ms step_avg:96.81ms +step:391/1670 train_time:37851ms step_avg:96.81ms +step:392/1670 train_time:37947ms step_avg:96.80ms +step:393/1670 train_time:38041ms step_avg:96.80ms +step:394/1670 train_time:38136ms step_avg:96.79ms +step:395/1670 train_time:38231ms step_avg:96.79ms +step:396/1670 train_time:38327ms step_avg:96.79ms +step:397/1670 train_time:38423ms step_avg:96.78ms +step:398/1670 train_time:38520ms step_avg:96.78ms +step:399/1670 train_time:38616ms step_avg:96.78ms +step:400/1670 train_time:38712ms step_avg:96.78ms +step:401/1670 train_time:38808ms step_avg:96.78ms +step:402/1670 train_time:38905ms step_avg:96.78ms +step:403/1670 train_time:39001ms step_avg:96.78ms +step:404/1670 train_time:39095ms step_avg:96.77ms +step:405/1670 train_time:39190ms step_avg:96.77ms +step:406/1670 train_time:39285ms step_avg:96.76ms +step:407/1670 train_time:39381ms step_avg:96.76ms +step:408/1670 train_time:39477ms step_avg:96.76ms +step:409/1670 train_time:39573ms step_avg:96.75ms +step:410/1670 train_time:39670ms step_avg:96.75ms +step:411/1670 train_time:39766ms step_avg:96.75ms +step:412/1670 train_time:39861ms step_avg:96.75ms +step:413/1670 train_time:39956ms step_avg:96.75ms +step:414/1670 train_time:40052ms step_avg:96.74ms +step:415/1670 train_time:40147ms step_avg:96.74ms +step:416/1670 train_time:40242ms step_avg:96.74ms +step:417/1670 train_time:40338ms step_avg:96.73ms +step:418/1670 train_time:40434ms step_avg:96.73ms +step:419/1670 train_time:40530ms step_avg:96.73ms +step:420/1670 train_time:40627ms step_avg:96.73ms +step:421/1670 train_time:40723ms step_avg:96.73ms +step:422/1670 
train_time:40819ms step_avg:96.73ms +step:423/1670 train_time:40914ms step_avg:96.72ms +step:424/1670 train_time:41009ms step_avg:96.72ms +step:425/1670 train_time:41307ms step_avg:97.19ms +step:426/1670 train_time:41454ms step_avg:97.31ms +step:427/1670 train_time:41548ms step_avg:97.30ms +step:428/1670 train_time:41643ms step_avg:97.30ms +step:429/1670 train_time:41737ms step_avg:97.29ms +step:430/1670 train_time:41832ms step_avg:97.28ms +step:431/1670 train_time:41927ms step_avg:97.28ms +step:432/1670 train_time:42022ms step_avg:97.27ms +step:433/1670 train_time:42117ms step_avg:97.27ms +step:434/1670 train_time:42211ms step_avg:97.26ms +step:435/1670 train_time:42307ms step_avg:97.26ms +step:436/1670 train_time:42407ms step_avg:97.26ms +step:437/1670 train_time:42506ms step_avg:97.27ms +step:438/1670 train_time:42602ms step_avg:97.26ms +step:439/1670 train_time:42697ms step_avg:97.26ms +step:440/1670 train_time:42792ms step_avg:97.25ms +step:441/1670 train_time:42888ms step_avg:97.25ms +step:442/1670 train_time:42983ms step_avg:97.25ms +step:443/1670 train_time:43078ms step_avg:97.24ms +step:444/1670 train_time:43172ms step_avg:97.24ms +step:445/1670 train_time:43268ms step_avg:97.23ms +step:446/1670 train_time:43365ms step_avg:97.23ms +step:447/1670 train_time:43463ms step_avg:97.23ms +step:448/1670 train_time:43559ms step_avg:97.23ms +step:449/1670 train_time:43655ms step_avg:97.23ms +step:450/1670 train_time:43750ms step_avg:97.22ms +step:451/1670 train_time:43847ms step_avg:97.22ms +step:452/1670 train_time:43942ms step_avg:97.22ms +step:453/1670 train_time:44037ms step_avg:97.21ms +step:454/1670 train_time:44132ms step_avg:97.21ms +step:455/1670 train_time:44228ms step_avg:97.20ms +step:456/1670 train_time:44323ms step_avg:97.20ms +step:457/1670 train_time:44419ms step_avg:97.20ms +step:458/1670 train_time:44516ms step_avg:97.20ms +step:459/1670 train_time:44612ms step_avg:97.19ms +step:460/1670 train_time:44709ms step_avg:97.19ms +step:461/1670 train_time:44805ms step_avg:97.19ms +step:462/1670 train_time:44900ms step_avg:97.19ms +step:463/1670 train_time:44996ms step_avg:97.18ms +step:464/1670 train_time:45090ms step_avg:97.18ms +step:465/1670 train_time:45186ms step_avg:97.17ms +step:466/1670 train_time:45281ms step_avg:97.17ms +step:467/1670 train_time:45376ms step_avg:97.17ms +step:468/1670 train_time:45473ms step_avg:97.16ms +step:469/1670 train_time:45569ms step_avg:97.16ms +step:470/1670 train_time:45665ms step_avg:97.16ms +step:471/1670 train_time:45761ms step_avg:97.16ms +step:472/1670 train_time:45856ms step_avg:97.15ms +step:473/1670 train_time:45952ms step_avg:97.15ms +step:474/1670 train_time:46048ms step_avg:97.15ms +step:475/1670 train_time:46143ms step_avg:97.14ms +step:476/1670 train_time:46239ms step_avg:97.14ms +step:477/1670 train_time:46334ms step_avg:97.14ms +step:478/1670 train_time:46430ms step_avg:97.13ms +step:479/1670 train_time:46526ms step_avg:97.13ms +step:480/1670 train_time:46622ms step_avg:97.13ms +step:481/1670 train_time:46717ms step_avg:97.13ms +step:482/1670 train_time:46814ms step_avg:97.12ms +step:483/1670 train_time:46910ms step_avg:97.12ms +step:484/1670 train_time:47005ms step_avg:97.12ms +step:485/1670 train_time:47102ms step_avg:97.12ms +step:486/1670 train_time:47196ms step_avg:97.11ms +step:487/1670 train_time:47293ms step_avg:97.11ms +step:488/1670 train_time:47388ms step_avg:97.11ms +step:489/1670 train_time:47484ms step_avg:97.11ms +step:490/1670 train_time:47580ms step_avg:97.10ms +step:491/1670 train_time:47676ms step_avg:97.10ms 
+step:492/1670 train_time:47771ms step_avg:97.10ms +step:493/1670 train_time:47868ms step_avg:97.10ms +step:494/1670 train_time:47964ms step_avg:97.09ms +step:495/1670 train_time:48060ms step_avg:97.09ms +step:496/1670 train_time:48155ms step_avg:97.09ms +step:497/1670 train_time:48250ms step_avg:97.08ms +step:498/1670 train_time:48347ms step_avg:97.08ms +step:499/1670 train_time:48443ms step_avg:97.08ms +step:500/1670 train_time:48540ms step_avg:97.08ms +step:500/1670 val_loss:3.7170 train_time:48634ms step_avg:97.27ms +step:501/1670 train_time:48655ms step_avg:97.12ms +step:502/1670 train_time:48738ms step_avg:97.09ms +step:503/1670 train_time:48838ms step_avg:97.09ms +step:504/1670 train_time:48935ms step_avg:97.09ms +step:505/1670 train_time:49030ms step_avg:97.09ms +step:506/1670 train_time:49125ms step_avg:97.09ms +step:507/1670 train_time:49220ms step_avg:97.08ms +step:508/1670 train_time:49315ms step_avg:97.08ms +step:509/1670 train_time:49410ms step_avg:97.07ms +step:510/1670 train_time:49504ms step_avg:97.07ms +step:511/1670 train_time:49600ms step_avg:97.06ms +step:512/1670 train_time:49697ms step_avg:97.06ms +step:513/1670 train_time:49795ms step_avg:97.07ms +step:514/1670 train_time:49893ms step_avg:97.07ms +step:515/1670 train_time:49989ms step_avg:97.07ms +step:516/1670 train_time:50085ms step_avg:97.06ms +step:517/1670 train_time:50180ms step_avg:97.06ms +step:518/1670 train_time:50275ms step_avg:97.06ms +step:519/1670 train_time:50370ms step_avg:97.05ms +step:520/1670 train_time:50465ms step_avg:97.05ms +step:521/1670 train_time:50560ms step_avg:97.05ms +step:522/1670 train_time:50657ms step_avg:97.04ms +step:523/1670 train_time:50754ms step_avg:97.04ms +step:524/1670 train_time:50852ms step_avg:97.05ms +step:525/1670 train_time:50948ms step_avg:97.04ms +step:526/1670 train_time:51044ms step_avg:97.04ms +step:527/1670 train_time:51139ms step_avg:97.04ms +step:528/1670 train_time:51234ms step_avg:97.03ms +step:529/1670 train_time:51330ms step_avg:97.03ms +step:530/1670 train_time:51426ms step_avg:97.03ms +step:531/1670 train_time:51521ms step_avg:97.03ms +step:532/1670 train_time:51616ms step_avg:97.02ms +step:533/1670 train_time:51713ms step_avg:97.02ms +step:534/1670 train_time:51810ms step_avg:97.02ms +step:535/1670 train_time:51906ms step_avg:97.02ms +step:536/1670 train_time:52002ms step_avg:97.02ms +step:537/1670 train_time:52097ms step_avg:97.02ms +step:538/1670 train_time:52193ms step_avg:97.01ms +step:539/1670 train_time:52289ms step_avg:97.01ms +step:540/1670 train_time:52385ms step_avg:97.01ms +step:541/1670 train_time:52480ms step_avg:97.00ms +step:542/1670 train_time:52575ms step_avg:97.00ms +step:543/1670 train_time:52671ms step_avg:97.00ms +step:544/1670 train_time:52767ms step_avg:97.00ms +step:545/1670 train_time:52863ms step_avg:97.00ms +step:546/1670 train_time:52959ms step_avg:96.99ms +step:547/1670 train_time:53055ms step_avg:96.99ms +step:548/1670 train_time:53151ms step_avg:96.99ms +step:549/1670 train_time:53247ms step_avg:96.99ms +step:550/1670 train_time:53342ms step_avg:96.98ms +step:551/1670 train_time:53437ms step_avg:96.98ms +step:552/1670 train_time:53533ms step_avg:96.98ms +step:553/1670 train_time:53628ms step_avg:96.98ms +step:554/1670 train_time:53724ms step_avg:96.98ms +step:555/1670 train_time:53820ms step_avg:96.97ms +step:556/1670 train_time:53916ms step_avg:96.97ms +step:557/1670 train_time:54013ms step_avg:96.97ms +step:558/1670 train_time:54109ms step_avg:96.97ms +step:559/1670 train_time:54206ms step_avg:96.97ms +step:560/1670 
train_time:54304ms step_avg:96.97ms +step:561/1670 train_time:54400ms step_avg:96.97ms +step:562/1670 train_time:54496ms step_avg:96.97ms +step:563/1670 train_time:54593ms step_avg:96.97ms +step:564/1670 train_time:54691ms step_avg:96.97ms +step:565/1670 train_time:54790ms step_avg:96.97ms +step:566/1670 train_time:54888ms step_avg:96.97ms +step:567/1670 train_time:54986ms step_avg:96.98ms +step:568/1670 train_time:55082ms step_avg:96.98ms +step:569/1670 train_time:55180ms step_avg:96.98ms +step:570/1670 train_time:55277ms step_avg:96.98ms +step:571/1670 train_time:55374ms step_avg:96.98ms +step:572/1670 train_time:55471ms step_avg:96.98ms +step:573/1670 train_time:55568ms step_avg:96.98ms +step:574/1670 train_time:55666ms step_avg:96.98ms +step:575/1670 train_time:55763ms step_avg:96.98ms +step:576/1670 train_time:55860ms step_avg:96.98ms +step:577/1670 train_time:55958ms step_avg:96.98ms +step:578/1670 train_time:56056ms step_avg:96.98ms +step:579/1670 train_time:56154ms step_avg:96.98ms +step:580/1670 train_time:56254ms step_avg:96.99ms +step:581/1670 train_time:56352ms step_avg:96.99ms +step:582/1670 train_time:56448ms step_avg:96.99ms +step:583/1670 train_time:56545ms step_avg:96.99ms +step:584/1670 train_time:56642ms step_avg:96.99ms +step:585/1670 train_time:56738ms step_avg:96.99ms +step:586/1670 train_time:56836ms step_avg:96.99ms +step:587/1670 train_time:56933ms step_avg:96.99ms +step:588/1670 train_time:57031ms step_avg:96.99ms +step:589/1670 train_time:57129ms step_avg:96.99ms +step:590/1670 train_time:57226ms step_avg:96.99ms +step:591/1670 train_time:57323ms step_avg:96.99ms +step:592/1670 train_time:57419ms step_avg:96.99ms +step:593/1670 train_time:57517ms step_avg:96.99ms +step:594/1670 train_time:57615ms step_avg:96.99ms +step:595/1670 train_time:57713ms step_avg:97.00ms +step:596/1670 train_time:57811ms step_avg:97.00ms +step:597/1670 train_time:57908ms step_avg:97.00ms +step:598/1670 train_time:58005ms step_avg:97.00ms +step:599/1670 train_time:58101ms step_avg:97.00ms +step:600/1670 train_time:58199ms step_avg:97.00ms +step:601/1670 train_time:58296ms step_avg:97.00ms +step:602/1670 train_time:58394ms step_avg:97.00ms +step:603/1670 train_time:58492ms step_avg:97.00ms +step:604/1670 train_time:58589ms step_avg:97.00ms +step:605/1670 train_time:58685ms step_avg:97.00ms +step:606/1670 train_time:58781ms step_avg:97.00ms +step:607/1670 train_time:58879ms step_avg:97.00ms +step:608/1670 train_time:58976ms step_avg:97.00ms +step:609/1670 train_time:59074ms step_avg:97.00ms +step:610/1670 train_time:59172ms step_avg:97.00ms +step:611/1670 train_time:59270ms step_avg:97.01ms +step:612/1670 train_time:59368ms step_avg:97.01ms +step:613/1670 train_time:59464ms step_avg:97.01ms +step:614/1670 train_time:59561ms step_avg:97.00ms +step:615/1670 train_time:59657ms step_avg:97.00ms +step:616/1670 train_time:59755ms step_avg:97.00ms +step:617/1670 train_time:59853ms step_avg:97.01ms +step:618/1670 train_time:59951ms step_avg:97.01ms +step:619/1670 train_time:60048ms step_avg:97.01ms +step:620/1670 train_time:60147ms step_avg:97.01ms +step:621/1670 train_time:60243ms step_avg:97.01ms +step:622/1670 train_time:60340ms step_avg:97.01ms +step:623/1670 train_time:60437ms step_avg:97.01ms +step:624/1670 train_time:60535ms step_avg:97.01ms +step:625/1670 train_time:60631ms step_avg:97.01ms +step:625/1670 val_loss:3.6163 train_time:60728ms step_avg:97.16ms +step:626/1670 train_time:60750ms step_avg:97.04ms +step:627/1670 train_time:60838ms step_avg:97.03ms +step:628/1670 train_time:60936ms 
step_avg:97.03ms +step:629/1670 train_time:61034ms step_avg:97.03ms +step:630/1670 train_time:61130ms step_avg:97.03ms +step:631/1670 train_time:61226ms step_avg:97.03ms +step:632/1670 train_time:61323ms step_avg:97.03ms +step:633/1670 train_time:61418ms step_avg:97.03ms +step:634/1670 train_time:61514ms step_avg:97.03ms +step:635/1670 train_time:61611ms step_avg:97.02ms +step:636/1670 train_time:61710ms step_avg:97.03ms +step:637/1670 train_time:61810ms step_avg:97.03ms +step:638/1670 train_time:61912ms step_avg:97.04ms +step:639/1670 train_time:62157ms step_avg:97.27ms +step:640/1670 train_time:62360ms step_avg:97.44ms +step:641/1670 train_time:62456ms step_avg:97.44ms +step:642/1670 train_time:62551ms step_avg:97.43ms +step:643/1670 train_time:62647ms step_avg:97.43ms +step:644/1670 train_time:62743ms step_avg:97.43ms +step:645/1670 train_time:62839ms step_avg:97.42ms +step:646/1670 train_time:62934ms step_avg:97.42ms +step:647/1670 train_time:63030ms step_avg:97.42ms +step:648/1670 train_time:63127ms step_avg:97.42ms +step:649/1670 train_time:63229ms step_avg:97.43ms +step:650/1670 train_time:63330ms step_avg:97.43ms +step:651/1670 train_time:63429ms step_avg:97.43ms +step:652/1670 train_time:63527ms step_avg:97.43ms +step:653/1670 train_time:63623ms step_avg:97.43ms +step:654/1670 train_time:63719ms step_avg:97.43ms +step:655/1670 train_time:63815ms step_avg:97.43ms +step:656/1670 train_time:63911ms step_avg:97.42ms +step:657/1670 train_time:64007ms step_avg:97.42ms +step:658/1670 train_time:64105ms step_avg:97.42ms +step:659/1670 train_time:64203ms step_avg:97.43ms +step:660/1670 train_time:64301ms step_avg:97.43ms +step:661/1670 train_time:64399ms step_avg:97.43ms +step:662/1670 train_time:64497ms step_avg:97.43ms +step:663/1670 train_time:64594ms step_avg:97.43ms +step:664/1670 train_time:64691ms step_avg:97.43ms +step:665/1670 train_time:64788ms step_avg:97.43ms +step:666/1670 train_time:64884ms step_avg:97.42ms +step:667/1670 train_time:64980ms step_avg:97.42ms +step:668/1670 train_time:65077ms step_avg:97.42ms +step:669/1670 train_time:65175ms step_avg:97.42ms +step:670/1670 train_time:65273ms step_avg:97.42ms +step:671/1670 train_time:65372ms step_avg:97.42ms +step:672/1670 train_time:65471ms step_avg:97.43ms +step:673/1670 train_time:65568ms step_avg:97.43ms +step:674/1670 train_time:65665ms step_avg:97.43ms +step:675/1670 train_time:65761ms step_avg:97.42ms +step:676/1670 train_time:65857ms step_avg:97.42ms +step:677/1670 train_time:65954ms step_avg:97.42ms +step:678/1670 train_time:66051ms step_avg:97.42ms +step:679/1670 train_time:66148ms step_avg:97.42ms +step:680/1670 train_time:66246ms step_avg:97.42ms +step:681/1670 train_time:66345ms step_avg:97.42ms +step:682/1670 train_time:66443ms step_avg:97.42ms +step:683/1670 train_time:66540ms step_avg:97.42ms +step:684/1670 train_time:66637ms step_avg:97.42ms +step:685/1670 train_time:66735ms step_avg:97.42ms +step:686/1670 train_time:66831ms step_avg:97.42ms +step:687/1670 train_time:66928ms step_avg:97.42ms +step:688/1670 train_time:67025ms step_avg:97.42ms +step:689/1670 train_time:67121ms step_avg:97.42ms +step:690/1670 train_time:67218ms step_avg:97.42ms +step:691/1670 train_time:67316ms step_avg:97.42ms +step:692/1670 train_time:67415ms step_avg:97.42ms +step:693/1670 train_time:67512ms step_avg:97.42ms +step:694/1670 train_time:67611ms step_avg:97.42ms +step:695/1670 train_time:67709ms step_avg:97.42ms +step:696/1670 train_time:67806ms step_avg:97.42ms +step:697/1670 train_time:67903ms step_avg:97.42ms +step:698/1670 
train_time:67999ms step_avg:97.42ms +step:699/1670 train_time:68096ms step_avg:97.42ms +step:700/1670 train_time:68194ms step_avg:97.42ms +step:701/1670 train_time:68291ms step_avg:97.42ms +step:702/1670 train_time:68390ms step_avg:97.42ms +step:703/1670 train_time:68488ms step_avg:97.42ms +step:704/1670 train_time:68585ms step_avg:97.42ms +step:705/1670 train_time:68682ms step_avg:97.42ms +step:706/1670 train_time:68778ms step_avg:97.42ms +step:707/1670 train_time:68876ms step_avg:97.42ms +step:708/1670 train_time:68974ms step_avg:97.42ms +step:709/1670 train_time:69071ms step_avg:97.42ms +step:710/1670 train_time:69168ms step_avg:97.42ms +step:711/1670 train_time:69266ms step_avg:97.42ms +step:712/1670 train_time:69364ms step_avg:97.42ms +step:713/1670 train_time:69460ms step_avg:97.42ms +step:714/1670 train_time:69557ms step_avg:97.42ms +step:715/1670 train_time:69656ms step_avg:97.42ms +step:716/1670 train_time:69753ms step_avg:97.42ms +step:717/1670 train_time:69850ms step_avg:97.42ms +step:718/1670 train_time:69948ms step_avg:97.42ms +step:719/1670 train_time:70045ms step_avg:97.42ms +step:720/1670 train_time:70143ms step_avg:97.42ms +step:721/1670 train_time:70239ms step_avg:97.42ms +step:722/1670 train_time:70336ms step_avg:97.42ms +step:723/1670 train_time:70433ms step_avg:97.42ms +step:724/1670 train_time:70532ms step_avg:97.42ms +step:725/1670 train_time:70630ms step_avg:97.42ms +step:726/1670 train_time:70727ms step_avg:97.42ms +step:727/1670 train_time:70824ms step_avg:97.42ms +step:728/1670 train_time:70920ms step_avg:97.42ms +step:729/1670 train_time:71017ms step_avg:97.42ms +step:730/1670 train_time:71114ms step_avg:97.42ms +step:731/1670 train_time:71212ms step_avg:97.42ms +step:732/1670 train_time:71310ms step_avg:97.42ms +step:733/1670 train_time:71409ms step_avg:97.42ms +step:734/1670 train_time:71507ms step_avg:97.42ms +step:735/1670 train_time:71604ms step_avg:97.42ms +step:736/1670 train_time:71701ms step_avg:97.42ms +step:737/1670 train_time:71798ms step_avg:97.42ms +step:738/1670 train_time:71895ms step_avg:97.42ms +step:739/1670 train_time:71992ms step_avg:97.42ms +step:740/1670 train_time:72089ms step_avg:97.42ms +step:741/1670 train_time:72187ms step_avg:97.42ms +step:742/1670 train_time:72284ms step_avg:97.42ms +step:743/1670 train_time:72381ms step_avg:97.42ms +step:744/1670 train_time:72478ms step_avg:97.42ms +step:745/1670 train_time:72575ms step_avg:97.42ms +step:746/1670 train_time:72673ms step_avg:97.42ms +step:747/1670 train_time:72772ms step_avg:97.42ms +step:748/1670 train_time:72869ms step_avg:97.42ms +step:749/1670 train_time:72966ms step_avg:97.42ms +step:750/1670 train_time:73063ms step_avg:97.42ms +step:750/1670 val_loss:3.5631 train_time:73158ms step_avg:97.54ms +step:751/1670 train_time:73183ms step_avg:97.45ms +step:752/1670 train_time:73263ms step_avg:97.42ms +step:753/1670 train_time:73364ms step_avg:97.43ms +step:754/1670 train_time:73461ms step_avg:97.43ms +step:755/1670 train_time:73557ms step_avg:97.43ms +step:756/1670 train_time:73652ms step_avg:97.42ms +step:757/1670 train_time:73749ms step_avg:97.42ms +step:758/1670 train_time:73845ms step_avg:97.42ms +step:759/1670 train_time:73942ms step_avg:97.42ms +step:760/1670 train_time:74037ms step_avg:97.42ms +step:761/1670 train_time:74135ms step_avg:97.42ms +step:762/1670 train_time:74235ms step_avg:97.42ms +step:763/1670 train_time:74334ms step_avg:97.42ms +step:764/1670 train_time:74431ms step_avg:97.42ms +step:765/1670 train_time:74529ms step_avg:97.42ms +step:766/1670 train_time:74627ms 
step_avg:97.42ms +step:767/1670 train_time:74723ms step_avg:97.42ms +step:768/1670 train_time:74820ms step_avg:97.42ms +step:769/1670 train_time:74916ms step_avg:97.42ms +step:770/1670 train_time:75013ms step_avg:97.42ms +step:771/1670 train_time:75111ms step_avg:97.42ms +step:772/1670 train_time:75209ms step_avg:97.42ms +step:773/1670 train_time:75308ms step_avg:97.42ms +step:774/1670 train_time:75408ms step_avg:97.43ms +step:775/1670 train_time:75506ms step_avg:97.43ms +step:776/1670 train_time:75603ms step_avg:97.43ms +step:777/1670 train_time:75699ms step_avg:97.43ms +step:778/1670 train_time:75795ms step_avg:97.42ms +step:779/1670 train_time:75892ms step_avg:97.42ms +step:780/1670 train_time:75989ms step_avg:97.42ms +step:781/1670 train_time:76086ms step_avg:97.42ms +step:782/1670 train_time:76185ms step_avg:97.42ms +step:783/1670 train_time:76283ms step_avg:97.42ms +step:784/1670 train_time:76381ms step_avg:97.43ms +step:785/1670 train_time:76479ms step_avg:97.43ms +step:786/1670 train_time:76576ms step_avg:97.42ms +step:787/1670 train_time:76673ms step_avg:97.42ms +step:788/1670 train_time:76770ms step_avg:97.42ms +step:789/1670 train_time:76867ms step_avg:97.42ms +step:790/1670 train_time:76964ms step_avg:97.42ms +step:791/1670 train_time:77063ms step_avg:97.42ms +step:792/1670 train_time:77160ms step_avg:97.42ms +step:793/1670 train_time:77258ms step_avg:97.42ms +step:794/1670 train_time:77355ms step_avg:97.42ms +step:795/1670 train_time:77452ms step_avg:97.42ms +step:796/1670 train_time:77549ms step_avg:97.42ms +step:797/1670 train_time:77648ms step_avg:97.42ms +step:798/1670 train_time:77745ms step_avg:97.43ms +step:799/1670 train_time:77842ms step_avg:97.42ms +step:800/1670 train_time:77939ms step_avg:97.42ms +step:801/1670 train_time:78036ms step_avg:97.42ms +step:802/1670 train_time:78133ms step_avg:97.42ms +step:803/1670 train_time:78231ms step_avg:97.42ms +step:804/1670 train_time:78329ms step_avg:97.42ms +step:805/1670 train_time:78428ms step_avg:97.43ms +step:806/1670 train_time:78526ms step_avg:97.43ms +step:807/1670 train_time:78623ms step_avg:97.43ms +step:808/1670 train_time:78720ms step_avg:97.43ms +step:809/1670 train_time:78816ms step_avg:97.42ms +step:810/1670 train_time:78914ms step_avg:97.42ms +step:811/1670 train_time:79011ms step_avg:97.42ms +step:812/1670 train_time:79109ms step_avg:97.42ms +step:813/1670 train_time:79206ms step_avg:97.42ms +step:814/1670 train_time:79304ms step_avg:97.42ms +step:815/1670 train_time:79402ms step_avg:97.43ms +step:816/1670 train_time:79499ms step_avg:97.42ms +step:817/1670 train_time:79596ms step_avg:97.42ms +step:818/1670 train_time:79692ms step_avg:97.42ms +step:819/1670 train_time:79789ms step_avg:97.42ms +step:820/1670 train_time:79886ms step_avg:97.42ms +step:821/1670 train_time:79985ms step_avg:97.42ms +step:822/1670 train_time:80083ms step_avg:97.42ms +step:823/1670 train_time:80181ms step_avg:97.42ms +step:824/1670 train_time:80279ms step_avg:97.43ms +step:825/1670 train_time:80376ms step_avg:97.43ms +step:826/1670 train_time:80472ms step_avg:97.42ms +step:827/1670 train_time:80570ms step_avg:97.42ms +step:828/1670 train_time:80667ms step_avg:97.42ms +step:829/1670 train_time:80764ms step_avg:97.42ms +step:830/1670 train_time:80862ms step_avg:97.42ms +step:831/1670 train_time:80960ms step_avg:97.42ms +step:832/1670 train_time:81056ms step_avg:97.42ms +step:833/1670 train_time:81153ms step_avg:97.42ms +step:834/1670 train_time:81251ms step_avg:97.42ms +step:835/1670 train_time:81348ms step_avg:97.42ms +step:836/1670 
train_time:81446ms step_avg:97.42ms +step:837/1670 train_time:81544ms step_avg:97.42ms +step:838/1670 train_time:81642ms step_avg:97.42ms +step:839/1670 train_time:81738ms step_avg:97.42ms +step:840/1670 train_time:81835ms step_avg:97.42ms +step:841/1670 train_time:81932ms step_avg:97.42ms +step:842/1670 train_time:82030ms step_avg:97.42ms +step:843/1670 train_time:82127ms step_avg:97.42ms +step:844/1670 train_time:82226ms step_avg:97.42ms +step:845/1670 train_time:82323ms step_avg:97.42ms +step:846/1670 train_time:82420ms step_avg:97.42ms +step:847/1670 train_time:82518ms step_avg:97.42ms +step:848/1670 train_time:82614ms step_avg:97.42ms +step:849/1670 train_time:82712ms step_avg:97.42ms +step:850/1670 train_time:82809ms step_avg:97.42ms +step:851/1670 train_time:83081ms step_avg:97.63ms +step:852/1670 train_time:83264ms step_avg:97.73ms +step:853/1670 train_time:83359ms step_avg:97.72ms +step:854/1670 train_time:83455ms step_avg:97.72ms +step:855/1670 train_time:83551ms step_avg:97.72ms +step:856/1670 train_time:83647ms step_avg:97.72ms +step:857/1670 train_time:83743ms step_avg:97.72ms +step:858/1670 train_time:83840ms step_avg:97.72ms +step:859/1670 train_time:83936ms step_avg:97.71ms +step:860/1670 train_time:84032ms step_avg:97.71ms +step:861/1670 train_time:84137ms step_avg:97.72ms +step:862/1670 train_time:84236ms step_avg:97.72ms +step:863/1670 train_time:84335ms step_avg:97.72ms +step:864/1670 train_time:84432ms step_avg:97.72ms +step:865/1670 train_time:84528ms step_avg:97.72ms +step:866/1670 train_time:84626ms step_avg:97.72ms +step:867/1670 train_time:84722ms step_avg:97.72ms +step:868/1670 train_time:84818ms step_avg:97.72ms +step:869/1670 train_time:84914ms step_avg:97.71ms +step:870/1670 train_time:85010ms step_avg:97.71ms +step:871/1670 train_time:85110ms step_avg:97.72ms +step:872/1670 train_time:85210ms step_avg:97.72ms +step:873/1670 train_time:85309ms step_avg:97.72ms +step:874/1670 train_time:85407ms step_avg:97.72ms +step:875/1670 train_time:85505ms step_avg:97.72ms +step:875/1670 val_loss:3.5229 train_time:85602ms step_avg:97.83ms +step:876/1670 train_time:85623ms step_avg:97.74ms +step:877/1670 train_time:85707ms step_avg:97.73ms +step:878/1670 train_time:85807ms step_avg:97.73ms +step:879/1670 train_time:85905ms step_avg:97.73ms +step:880/1670 train_time:86002ms step_avg:97.73ms +step:881/1670 train_time:86098ms step_avg:97.73ms +step:882/1670 train_time:86194ms step_avg:97.73ms +step:883/1670 train_time:86290ms step_avg:97.72ms +step:884/1670 train_time:86386ms step_avg:97.72ms +step:885/1670 train_time:86483ms step_avg:97.72ms +step:886/1670 train_time:86582ms step_avg:97.72ms +step:887/1670 train_time:86684ms step_avg:97.73ms +step:888/1670 train_time:86784ms step_avg:97.73ms +step:889/1670 train_time:86883ms step_avg:97.73ms +step:890/1670 train_time:86980ms step_avg:97.73ms +step:891/1670 train_time:87077ms step_avg:97.73ms +step:892/1670 train_time:87173ms step_avg:97.73ms +step:893/1670 train_time:87271ms step_avg:97.73ms +step:894/1670 train_time:87366ms step_avg:97.72ms +step:895/1670 train_time:87463ms step_avg:97.72ms +step:896/1670 train_time:87562ms step_avg:97.73ms +step:897/1670 train_time:87661ms step_avg:97.73ms +step:898/1670 train_time:87762ms step_avg:97.73ms +step:899/1670 train_time:87861ms step_avg:97.73ms +step:900/1670 train_time:87959ms step_avg:97.73ms +step:901/1670 train_time:88056ms step_avg:97.73ms +step:902/1670 train_time:88151ms step_avg:97.73ms +step:903/1670 train_time:88248ms step_avg:97.73ms +step:904/1670 train_time:88344ms 
step_avg:97.73ms +step:905/1670 train_time:88441ms step_avg:97.72ms +step:906/1670 train_time:88538ms step_avg:97.72ms +step:907/1670 train_time:88637ms step_avg:97.73ms +step:908/1670 train_time:88735ms step_avg:97.73ms +step:909/1670 train_time:88834ms step_avg:97.73ms +step:910/1670 train_time:88931ms step_avg:97.73ms +step:911/1670 train_time:89027ms step_avg:97.72ms +step:912/1670 train_time:89124ms step_avg:97.72ms +step:913/1670 train_time:89221ms step_avg:97.72ms +step:914/1670 train_time:89318ms step_avg:97.72ms +step:915/1670 train_time:89414ms step_avg:97.72ms +step:916/1670 train_time:89511ms step_avg:97.72ms +step:917/1670 train_time:89607ms step_avg:97.72ms +step:918/1670 train_time:89706ms step_avg:97.72ms +step:919/1670 train_time:89805ms step_avg:97.72ms +step:920/1670 train_time:89903ms step_avg:97.72ms +step:921/1670 train_time:90001ms step_avg:97.72ms +step:922/1670 train_time:90099ms step_avg:97.72ms +step:923/1670 train_time:90196ms step_avg:97.72ms +step:924/1670 train_time:90294ms step_avg:97.72ms +step:925/1670 train_time:90391ms step_avg:97.72ms +step:926/1670 train_time:90487ms step_avg:97.72ms +step:927/1670 train_time:90584ms step_avg:97.72ms +step:928/1670 train_time:90682ms step_avg:97.72ms +step:929/1670 train_time:90780ms step_avg:97.72ms +step:930/1670 train_time:90878ms step_avg:97.72ms +step:931/1670 train_time:90976ms step_avg:97.72ms +step:932/1670 train_time:91073ms step_avg:97.72ms +step:933/1670 train_time:91170ms step_avg:97.72ms +step:934/1670 train_time:91267ms step_avg:97.72ms +step:935/1670 train_time:91365ms step_avg:97.72ms +step:936/1670 train_time:91463ms step_avg:97.72ms +step:937/1670 train_time:91560ms step_avg:97.72ms +step:938/1670 train_time:91658ms step_avg:97.72ms +step:939/1670 train_time:91756ms step_avg:97.72ms +step:940/1670 train_time:91853ms step_avg:97.72ms +step:941/1670 train_time:91950ms step_avg:97.72ms +step:942/1670 train_time:92047ms step_avg:97.71ms +step:943/1670 train_time:92144ms step_avg:97.71ms +step:944/1670 train_time:92242ms step_avg:97.71ms +step:945/1670 train_time:92340ms step_avg:97.71ms +step:946/1670 train_time:92437ms step_avg:97.71ms +step:947/1670 train_time:92534ms step_avg:97.71ms +step:948/1670 train_time:92631ms step_avg:97.71ms +step:949/1670 train_time:92728ms step_avg:97.71ms +step:950/1670 train_time:92827ms step_avg:97.71ms +step:951/1670 train_time:92924ms step_avg:97.71ms +step:952/1670 train_time:93022ms step_avg:97.71ms +step:953/1670 train_time:93120ms step_avg:97.71ms +step:954/1670 train_time:93218ms step_avg:97.71ms +step:955/1670 train_time:93315ms step_avg:97.71ms +step:956/1670 train_time:93411ms step_avg:97.71ms +step:957/1670 train_time:93508ms step_avg:97.71ms +step:958/1670 train_time:93605ms step_avg:97.71ms +step:959/1670 train_time:93702ms step_avg:97.71ms +step:960/1670 train_time:93800ms step_avg:97.71ms +step:961/1670 train_time:93898ms step_avg:97.71ms +step:962/1670 train_time:93996ms step_avg:97.71ms +step:963/1670 train_time:94093ms step_avg:97.71ms +step:964/1670 train_time:94190ms step_avg:97.71ms +step:965/1670 train_time:94287ms step_avg:97.71ms +step:966/1670 train_time:94384ms step_avg:97.71ms +step:967/1670 train_time:94481ms step_avg:97.71ms +step:968/1670 train_time:94578ms step_avg:97.70ms +step:969/1670 train_time:94676ms step_avg:97.71ms +step:970/1670 train_time:94774ms step_avg:97.71ms +step:971/1670 train_time:94871ms step_avg:97.70ms +step:972/1670 train_time:94968ms step_avg:97.70ms +step:973/1670 train_time:95065ms step_avg:97.70ms +step:974/1670 
train_time:95163ms step_avg:97.70ms +step:975/1670 train_time:95261ms step_avg:97.70ms +step:976/1670 train_time:95358ms step_avg:97.70ms +step:977/1670 train_time:95457ms step_avg:97.70ms +step:978/1670 train_time:95554ms step_avg:97.70ms +step:979/1670 train_time:95651ms step_avg:97.70ms +step:980/1670 train_time:95748ms step_avg:97.70ms +step:981/1670 train_time:95846ms step_avg:97.70ms +step:982/1670 train_time:95944ms step_avg:97.70ms +step:983/1670 train_time:96041ms step_avg:97.70ms +step:984/1670 train_time:96139ms step_avg:97.70ms +step:985/1670 train_time:96236ms step_avg:97.70ms +step:986/1670 train_time:96333ms step_avg:97.70ms +step:987/1670 train_time:96429ms step_avg:97.70ms +step:988/1670 train_time:96526ms step_avg:97.70ms +step:989/1670 train_time:96624ms step_avg:97.70ms +step:990/1670 train_time:96722ms step_avg:97.70ms +step:991/1670 train_time:96820ms step_avg:97.70ms +step:992/1670 train_time:96917ms step_avg:97.70ms +step:993/1670 train_time:97015ms step_avg:97.70ms +step:994/1670 train_time:97112ms step_avg:97.70ms +step:995/1670 train_time:97208ms step_avg:97.70ms +step:996/1670 train_time:97306ms step_avg:97.70ms +step:997/1670 train_time:97403ms step_avg:97.70ms +step:998/1670 train_time:97501ms step_avg:97.70ms +step:999/1670 train_time:97598ms step_avg:97.70ms +step:1000/1670 train_time:97696ms step_avg:97.70ms +step:1000/1670 val_loss:3.4804 train_time:97792ms step_avg:97.79ms +step:1001/1670 train_time:97814ms step_avg:97.72ms +step:1002/1670 train_time:97899ms step_avg:97.70ms +step:1003/1670 train_time:97998ms step_avg:97.71ms +step:1004/1670 train_time:98097ms step_avg:97.71ms +step:1005/1670 train_time:98194ms step_avg:97.71ms +step:1006/1670 train_time:98291ms step_avg:97.70ms +step:1007/1670 train_time:98387ms step_avg:97.70ms +step:1008/1670 train_time:98483ms step_avg:97.70ms +step:1009/1670 train_time:98579ms step_avg:97.70ms +step:1010/1670 train_time:98675ms step_avg:97.70ms +step:1011/1670 train_time:98774ms step_avg:97.70ms +step:1012/1670 train_time:98873ms step_avg:97.70ms +step:1013/1670 train_time:98974ms step_avg:97.70ms +step:1014/1670 train_time:99073ms step_avg:97.71ms +step:1015/1670 train_time:99171ms step_avg:97.71ms +step:1016/1670 train_time:99268ms step_avg:97.70ms +step:1017/1670 train_time:99365ms step_avg:97.70ms +step:1018/1670 train_time:99461ms step_avg:97.70ms +step:1019/1670 train_time:99557ms step_avg:97.70ms +step:1020/1670 train_time:99654ms step_avg:97.70ms +step:1021/1670 train_time:99752ms step_avg:97.70ms +step:1022/1670 train_time:99851ms step_avg:97.70ms +step:1023/1670 train_time:99951ms step_avg:97.70ms +step:1024/1670 train_time:100050ms step_avg:97.71ms +step:1025/1670 train_time:100148ms step_avg:97.71ms +step:1026/1670 train_time:100245ms step_avg:97.70ms +step:1027/1670 train_time:100341ms step_avg:97.70ms +step:1028/1670 train_time:100439ms step_avg:97.70ms +step:1029/1670 train_time:100536ms step_avg:97.70ms +step:1030/1670 train_time:100632ms step_avg:97.70ms +step:1031/1670 train_time:100729ms step_avg:97.70ms +step:1032/1670 train_time:100826ms step_avg:97.70ms +step:1033/1670 train_time:100924ms step_avg:97.70ms +step:1034/1670 train_time:101022ms step_avg:97.70ms +step:1035/1670 train_time:101120ms step_avg:97.70ms +step:1036/1670 train_time:101217ms step_avg:97.70ms +step:1037/1670 train_time:101315ms step_avg:97.70ms +step:1038/1670 train_time:101414ms step_avg:97.70ms +step:1039/1670 train_time:101511ms step_avg:97.70ms +step:1040/1670 train_time:101608ms step_avg:97.70ms +step:1041/1670 
train_time:101705ms step_avg:97.70ms +step:1042/1670 train_time:101801ms step_avg:97.70ms +step:1043/1670 train_time:101899ms step_avg:97.70ms +step:1044/1670 train_time:101997ms step_avg:97.70ms +step:1045/1670 train_time:102095ms step_avg:97.70ms +step:1046/1670 train_time:102193ms step_avg:97.70ms +step:1047/1670 train_time:102291ms step_avg:97.70ms +step:1048/1670 train_time:102389ms step_avg:97.70ms +step:1049/1670 train_time:102486ms step_avg:97.70ms +step:1050/1670 train_time:102583ms step_avg:97.70ms +step:1051/1670 train_time:102679ms step_avg:97.70ms +step:1052/1670 train_time:102776ms step_avg:97.70ms +step:1053/1670 train_time:102873ms step_avg:97.70ms +step:1054/1670 train_time:102971ms step_avg:97.70ms +step:1055/1670 train_time:103069ms step_avg:97.70ms +step:1056/1670 train_time:103167ms step_avg:97.70ms +step:1057/1670 train_time:103265ms step_avg:97.70ms +step:1058/1670 train_time:103362ms step_avg:97.70ms +step:1059/1670 train_time:103459ms step_avg:97.70ms +step:1060/1670 train_time:103557ms step_avg:97.69ms +step:1061/1670 train_time:103654ms step_avg:97.70ms +step:1062/1670 train_time:103922ms step_avg:97.86ms +step:1063/1670 train_time:104096ms step_avg:97.93ms +step:1064/1670 train_time:104191ms step_avg:97.92ms +step:1065/1670 train_time:104287ms step_avg:97.92ms +step:1066/1670 train_time:104383ms step_avg:97.92ms +step:1067/1670 train_time:104479ms step_avg:97.92ms +step:1068/1670 train_time:104575ms step_avg:97.92ms +step:1069/1670 train_time:104671ms step_avg:97.92ms +step:1070/1670 train_time:104768ms step_avg:97.91ms +step:1071/1670 train_time:104864ms step_avg:97.91ms +step:1072/1670 train_time:104969ms step_avg:97.92ms +step:1073/1670 train_time:105071ms step_avg:97.92ms +step:1074/1670 train_time:105169ms step_avg:97.92ms +step:1075/1670 train_time:105266ms step_avg:97.92ms +step:1076/1670 train_time:105363ms step_avg:97.92ms +step:1077/1670 train_time:105459ms step_avg:97.92ms +step:1078/1670 train_time:105555ms step_avg:97.92ms +step:1079/1670 train_time:105651ms step_avg:97.92ms +step:1080/1670 train_time:105748ms step_avg:97.92ms +step:1081/1670 train_time:105845ms step_avg:97.91ms +step:1082/1670 train_time:105943ms step_avg:97.91ms +step:1083/1670 train_time:106043ms step_avg:97.92ms +step:1084/1670 train_time:106140ms step_avg:97.92ms +step:1085/1670 train_time:106240ms step_avg:97.92ms +step:1086/1670 train_time:106337ms step_avg:97.92ms +step:1087/1670 train_time:106434ms step_avg:97.92ms +step:1088/1670 train_time:106531ms step_avg:97.91ms +step:1089/1670 train_time:106628ms step_avg:97.91ms +step:1090/1670 train_time:106725ms step_avg:97.91ms +step:1091/1670 train_time:106821ms step_avg:97.91ms +step:1092/1670 train_time:106919ms step_avg:97.91ms +step:1093/1670 train_time:107017ms step_avg:97.91ms +step:1094/1670 train_time:107115ms step_avg:97.91ms +step:1095/1670 train_time:107215ms step_avg:97.91ms +step:1096/1670 train_time:107312ms step_avg:97.91ms +step:1097/1670 train_time:107409ms step_avg:97.91ms +step:1098/1670 train_time:107506ms step_avg:97.91ms +step:1099/1670 train_time:107602ms step_avg:97.91ms +step:1100/1670 train_time:107699ms step_avg:97.91ms +step:1101/1670 train_time:107796ms step_avg:97.91ms +step:1102/1670 train_time:107894ms step_avg:97.91ms +step:1103/1670 train_time:107992ms step_avg:97.91ms +step:1104/1670 train_time:108091ms step_avg:97.91ms +step:1105/1670 train_time:108190ms step_avg:97.91ms +step:1106/1670 train_time:108289ms step_avg:97.91ms +step:1107/1670 train_time:108386ms step_avg:97.91ms +step:1108/1670 
train_time:108483ms step_avg:97.91ms +step:1109/1670 train_time:108580ms step_avg:97.91ms +step:1110/1670 train_time:108677ms step_avg:97.91ms +step:1111/1670 train_time:108774ms step_avg:97.91ms +step:1112/1670 train_time:108872ms step_avg:97.91ms +step:1113/1670 train_time:108970ms step_avg:97.91ms +step:1114/1670 train_time:109069ms step_avg:97.91ms +step:1115/1670 train_time:109167ms step_avg:97.91ms +step:1116/1670 train_time:109265ms step_avg:97.91ms +step:1117/1670 train_time:109363ms step_avg:97.91ms +step:1118/1670 train_time:109460ms step_avg:97.91ms +step:1119/1670 train_time:109557ms step_avg:97.91ms +step:1120/1670 train_time:109655ms step_avg:97.91ms +step:1121/1670 train_time:109753ms step_avg:97.91ms +step:1122/1670 train_time:109852ms step_avg:97.91ms +step:1123/1670 train_time:109951ms step_avg:97.91ms +step:1124/1670 train_time:110050ms step_avg:97.91ms +step:1125/1670 train_time:110150ms step_avg:97.91ms +step:1125/1670 val_loss:3.4262 train_time:110248ms step_avg:98.00ms +step:1126/1670 train_time:110269ms step_avg:97.93ms +step:1127/1670 train_time:110356ms step_avg:97.92ms +step:1128/1670 train_time:110456ms step_avg:97.92ms +step:1129/1670 train_time:110553ms step_avg:97.92ms +step:1130/1670 train_time:110649ms step_avg:97.92ms +step:1131/1670 train_time:110746ms step_avg:97.92ms +step:1132/1670 train_time:110843ms step_avg:97.92ms +step:1133/1670 train_time:110940ms step_avg:97.92ms +step:1134/1670 train_time:111037ms step_avg:97.92ms +step:1135/1670 train_time:111134ms step_avg:97.92ms +step:1136/1670 train_time:111235ms step_avg:97.92ms +step:1137/1670 train_time:111335ms step_avg:97.92ms +step:1138/1670 train_time:111434ms step_avg:97.92ms +step:1139/1670 train_time:111532ms step_avg:97.92ms +step:1140/1670 train_time:111630ms step_avg:97.92ms +step:1141/1670 train_time:111728ms step_avg:97.92ms +step:1142/1670 train_time:111825ms step_avg:97.92ms +step:1143/1670 train_time:111922ms step_avg:97.92ms +step:1144/1670 train_time:112020ms step_avg:97.92ms +step:1145/1670 train_time:112116ms step_avg:97.92ms +step:1146/1670 train_time:112215ms step_avg:97.92ms +step:1147/1670 train_time:112314ms step_avg:97.92ms +step:1148/1670 train_time:112414ms step_avg:97.92ms +step:1149/1670 train_time:112512ms step_avg:97.92ms +step:1150/1670 train_time:112610ms step_avg:97.92ms +step:1151/1670 train_time:112707ms step_avg:97.92ms +step:1152/1670 train_time:112804ms step_avg:97.92ms +step:1153/1670 train_time:112902ms step_avg:97.92ms +step:1154/1670 train_time:112999ms step_avg:97.92ms +step:1155/1670 train_time:113096ms step_avg:97.92ms +step:1156/1670 train_time:113195ms step_avg:97.92ms +step:1157/1670 train_time:113292ms step_avg:97.92ms +step:1158/1670 train_time:113392ms step_avg:97.92ms +step:1159/1670 train_time:113490ms step_avg:97.92ms +step:1160/1670 train_time:113588ms step_avg:97.92ms +step:1161/1670 train_time:113687ms step_avg:97.92ms +step:1162/1670 train_time:113783ms step_avg:97.92ms +step:1163/1670 train_time:113881ms step_avg:97.92ms +step:1164/1670 train_time:113980ms step_avg:97.92ms +step:1165/1670 train_time:114078ms step_avg:97.92ms +step:1166/1670 train_time:114176ms step_avg:97.92ms +step:1167/1670 train_time:114273ms step_avg:97.92ms +step:1168/1670 train_time:114371ms step_avg:97.92ms +step:1169/1670 train_time:114469ms step_avg:97.92ms +step:1170/1670 train_time:114568ms step_avg:97.92ms +step:1171/1670 train_time:114666ms step_avg:97.92ms +step:1172/1670 train_time:114765ms step_avg:97.92ms +step:1173/1670 train_time:114862ms step_avg:97.92ms 
+step:1174/1670 train_time:114960ms step_avg:97.92ms +step:1175/1670 train_time:115058ms step_avg:97.92ms +step:1176/1670 train_time:115156ms step_avg:97.92ms +step:1177/1670 train_time:115254ms step_avg:97.92ms +step:1178/1670 train_time:115352ms step_avg:97.92ms +step:1179/1670 train_time:115450ms step_avg:97.92ms +step:1180/1670 train_time:115548ms step_avg:97.92ms +step:1181/1670 train_time:115647ms step_avg:97.92ms +step:1182/1670 train_time:115745ms step_avg:97.92ms +step:1183/1670 train_time:115843ms step_avg:97.92ms +step:1184/1670 train_time:115941ms step_avg:97.92ms +step:1185/1670 train_time:116040ms step_avg:97.92ms +step:1186/1670 train_time:116137ms step_avg:97.92ms +step:1187/1670 train_time:116235ms step_avg:97.92ms +step:1188/1670 train_time:116332ms step_avg:97.92ms +step:1189/1670 train_time:116430ms step_avg:97.92ms +step:1190/1670 train_time:116528ms step_avg:97.92ms +step:1191/1670 train_time:116626ms step_avg:97.92ms +step:1192/1670 train_time:116724ms step_avg:97.92ms +step:1193/1670 train_time:116822ms step_avg:97.92ms +step:1194/1670 train_time:116920ms step_avg:97.92ms +step:1195/1670 train_time:117018ms step_avg:97.92ms +step:1196/1670 train_time:117116ms step_avg:97.92ms +step:1197/1670 train_time:117214ms step_avg:97.92ms +step:1198/1670 train_time:117311ms step_avg:97.92ms +step:1199/1670 train_time:117409ms step_avg:97.92ms +step:1200/1670 train_time:117507ms step_avg:97.92ms +step:1201/1670 train_time:117605ms step_avg:97.92ms +step:1202/1670 train_time:117703ms step_avg:97.92ms +step:1203/1670 train_time:117802ms step_avg:97.92ms +step:1204/1670 train_time:117900ms step_avg:97.92ms +step:1205/1670 train_time:117998ms step_avg:97.92ms +step:1206/1670 train_time:118095ms step_avg:97.92ms +step:1207/1670 train_time:118194ms step_avg:97.92ms +step:1208/1670 train_time:118292ms step_avg:97.92ms +step:1209/1670 train_time:118390ms step_avg:97.92ms +step:1210/1670 train_time:118488ms step_avg:97.92ms +step:1211/1670 train_time:118586ms step_avg:97.92ms +step:1212/1670 train_time:118684ms step_avg:97.92ms +step:1213/1670 train_time:118781ms step_avg:97.92ms +step:1214/1670 train_time:118879ms step_avg:97.92ms +step:1215/1670 train_time:118977ms step_avg:97.92ms +step:1216/1670 train_time:119075ms step_avg:97.92ms +step:1217/1670 train_time:119173ms step_avg:97.92ms +step:1218/1670 train_time:119271ms step_avg:97.92ms +step:1219/1670 train_time:119369ms step_avg:97.92ms +step:1220/1670 train_time:119468ms step_avg:97.92ms +step:1221/1670 train_time:119567ms step_avg:97.93ms +step:1222/1670 train_time:119665ms step_avg:97.93ms +step:1223/1670 train_time:119762ms step_avg:97.92ms +step:1224/1670 train_time:119862ms step_avg:97.93ms +step:1225/1670 train_time:119958ms step_avg:97.92ms +step:1226/1670 train_time:120056ms step_avg:97.92ms +step:1227/1670 train_time:120155ms step_avg:97.93ms +step:1228/1670 train_time:120253ms step_avg:97.93ms +step:1229/1670 train_time:120350ms step_avg:97.93ms +step:1230/1670 train_time:120448ms step_avg:97.93ms +step:1231/1670 train_time:120547ms step_avg:97.93ms +step:1232/1670 train_time:120645ms step_avg:97.93ms +step:1233/1670 train_time:120744ms step_avg:97.93ms +step:1234/1670 train_time:120843ms step_avg:97.93ms +step:1235/1670 train_time:120940ms step_avg:97.93ms +step:1236/1670 train_time:121039ms step_avg:97.93ms +step:1237/1670 train_time:121137ms step_avg:97.93ms +step:1238/1670 train_time:121234ms step_avg:97.93ms +step:1239/1670 train_time:121332ms step_avg:97.93ms +step:1240/1670 train_time:121430ms step_avg:97.93ms 
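[Editorial check] step_avg in these rows is simply cumulative train_time divided by the step index, e.g. for the last row above:

assert round(121430 / 1240, 2) == 97.93  # step:1240/1670 train_time:121430ms step_avg:97.93ms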
+step:1241/1670 train_time:121528ms step_avg:97.93ms +step:1242/1670 train_time:121625ms step_avg:97.93ms +step:1243/1670 train_time:121723ms step_avg:97.93ms +step:1244/1670 train_time:121822ms step_avg:97.93ms +step:1245/1670 train_time:121920ms step_avg:97.93ms +step:1246/1670 train_time:122019ms step_avg:97.93ms +step:1247/1670 train_time:122118ms step_avg:97.93ms +step:1248/1670 train_time:122219ms step_avg:97.93ms +step:1249/1670 train_time:122318ms step_avg:97.93ms +step:1250/1670 train_time:122417ms step_avg:97.93ms +step:1250/1670 val_loss:3.3827 train_time:122514ms step_avg:98.01ms +step:1251/1670 train_time:122536ms step_avg:97.95ms +step:1252/1670 train_time:122619ms step_avg:97.94ms +step:1253/1670 train_time:122718ms step_avg:97.94ms +step:1254/1670 train_time:122816ms step_avg:97.94ms +step:1255/1670 train_time:122913ms step_avg:97.94ms +step:1256/1670 train_time:123010ms step_avg:97.94ms +step:1257/1670 train_time:123107ms step_avg:97.94ms +step:1258/1670 train_time:123204ms step_avg:97.94ms +step:1259/1670 train_time:123302ms step_avg:97.94ms +step:1260/1670 train_time:123397ms step_avg:97.93ms +step:1261/1670 train_time:123497ms step_avg:97.94ms +step:1262/1670 train_time:123597ms step_avg:97.94ms +step:1263/1670 train_time:123696ms step_avg:97.94ms +step:1264/1670 train_time:123795ms step_avg:97.94ms +step:1265/1670 train_time:123893ms step_avg:97.94ms +step:1266/1670 train_time:123990ms step_avg:97.94ms +step:1267/1670 train_time:124088ms step_avg:97.94ms +step:1268/1670 train_time:124185ms step_avg:97.94ms +step:1269/1670 train_time:124283ms step_avg:97.94ms +step:1270/1670 train_time:124380ms step_avg:97.94ms +step:1271/1670 train_time:124478ms step_avg:97.94ms +step:1272/1670 train_time:124576ms step_avg:97.94ms +step:1273/1670 train_time:124676ms step_avg:97.94ms +step:1274/1670 train_time:125042ms step_avg:98.15ms +step:1275/1670 train_time:125134ms step_avg:98.14ms +step:1276/1670 train_time:125231ms step_avg:98.14ms +step:1277/1670 train_time:125328ms step_avg:98.14ms +step:1278/1670 train_time:125426ms step_avg:98.14ms +step:1279/1670 train_time:125523ms step_avg:98.14ms +step:1280/1670 train_time:125620ms step_avg:98.14ms +step:1281/1670 train_time:125716ms step_avg:98.14ms +step:1282/1670 train_time:125813ms step_avg:98.14ms +step:1283/1670 train_time:125911ms step_avg:98.14ms +step:1284/1670 train_time:126015ms step_avg:98.14ms +step:1285/1670 train_time:126117ms step_avg:98.15ms +step:1286/1670 train_time:126216ms step_avg:98.15ms +step:1287/1670 train_time:126314ms step_avg:98.15ms +step:1288/1670 train_time:126412ms step_avg:98.15ms +step:1289/1670 train_time:126509ms step_avg:98.15ms +step:1290/1670 train_time:126608ms step_avg:98.15ms +step:1291/1670 train_time:126706ms step_avg:98.15ms +step:1292/1670 train_time:126803ms step_avg:98.15ms +step:1293/1670 train_time:126900ms step_avg:98.14ms +step:1294/1670 train_time:127000ms step_avg:98.15ms +step:1295/1670 train_time:127100ms step_avg:98.15ms +step:1296/1670 train_time:127199ms step_avg:98.15ms +step:1297/1670 train_time:127298ms step_avg:98.15ms +step:1298/1670 train_time:127395ms step_avg:98.15ms +step:1299/1670 train_time:127493ms step_avg:98.15ms +step:1300/1670 train_time:127590ms step_avg:98.15ms +step:1301/1670 train_time:127688ms step_avg:98.15ms +step:1302/1670 train_time:127786ms step_avg:98.15ms +step:1303/1670 train_time:127883ms step_avg:98.15ms +step:1304/1670 train_time:127982ms step_avg:98.15ms +step:1305/1670 train_time:128083ms step_avg:98.15ms +step:1306/1670 train_time:128181ms 
step_avg:98.15ms +step:1307/1670 train_time:128280ms step_avg:98.15ms +step:1308/1670 train_time:128379ms step_avg:98.15ms +step:1309/1670 train_time:128477ms step_avg:98.15ms +step:1310/1670 train_time:128574ms step_avg:98.15ms +step:1311/1670 train_time:128672ms step_avg:98.15ms +step:1312/1670 train_time:128769ms step_avg:98.15ms +step:1313/1670 train_time:128868ms step_avg:98.15ms +step:1314/1670 train_time:128966ms step_avg:98.15ms +step:1315/1670 train_time:129065ms step_avg:98.15ms +step:1316/1670 train_time:129165ms step_avg:98.15ms +step:1317/1670 train_time:129265ms step_avg:98.15ms +step:1318/1670 train_time:129364ms step_avg:98.15ms +step:1319/1670 train_time:129463ms step_avg:98.15ms +step:1320/1670 train_time:129561ms step_avg:98.15ms +step:1321/1670 train_time:129658ms step_avg:98.15ms +step:1322/1670 train_time:129756ms step_avg:98.15ms +step:1323/1670 train_time:129853ms step_avg:98.15ms +step:1324/1670 train_time:129951ms step_avg:98.15ms +step:1325/1670 train_time:130049ms step_avg:98.15ms +step:1326/1670 train_time:130148ms step_avg:98.15ms +step:1327/1670 train_time:130247ms step_avg:98.15ms +step:1328/1670 train_time:130347ms step_avg:98.15ms +step:1329/1670 train_time:130447ms step_avg:98.15ms +step:1330/1670 train_time:130545ms step_avg:98.15ms +step:1331/1670 train_time:130644ms step_avg:98.15ms +step:1332/1670 train_time:130742ms step_avg:98.15ms +step:1333/1670 train_time:130840ms step_avg:98.15ms +step:1334/1670 train_time:130937ms step_avg:98.15ms +step:1335/1670 train_time:131034ms step_avg:98.15ms +step:1336/1670 train_time:131132ms step_avg:98.15ms +step:1337/1670 train_time:131232ms step_avg:98.15ms +step:1338/1670 train_time:131332ms step_avg:98.16ms +step:1339/1670 train_time:131431ms step_avg:98.16ms +step:1340/1670 train_time:131529ms step_avg:98.16ms +step:1341/1670 train_time:131628ms step_avg:98.16ms +step:1342/1670 train_time:131727ms step_avg:98.16ms +step:1343/1670 train_time:131826ms step_avg:98.16ms +step:1344/1670 train_time:131924ms step_avg:98.16ms +step:1345/1670 train_time:132022ms step_avg:98.16ms +step:1346/1670 train_time:132120ms step_avg:98.16ms +step:1347/1670 train_time:132218ms step_avg:98.16ms +step:1348/1670 train_time:132315ms step_avg:98.16ms +step:1349/1670 train_time:132414ms step_avg:98.16ms +step:1350/1670 train_time:132512ms step_avg:98.16ms +step:1351/1670 train_time:132610ms step_avg:98.16ms +step:1352/1670 train_time:132709ms step_avg:98.16ms +step:1353/1670 train_time:132810ms step_avg:98.16ms +step:1354/1670 train_time:132909ms step_avg:98.16ms +step:1355/1670 train_time:133007ms step_avg:98.16ms +step:1356/1670 train_time:133106ms step_avg:98.16ms +step:1357/1670 train_time:133205ms step_avg:98.16ms +step:1358/1670 train_time:133303ms step_avg:98.16ms +step:1359/1670 train_time:133401ms step_avg:98.16ms +step:1360/1670 train_time:133500ms step_avg:98.16ms +step:1361/1670 train_time:133596ms step_avg:98.16ms +step:1362/1670 train_time:133695ms step_avg:98.16ms +step:1363/1670 train_time:133794ms step_avg:98.16ms +step:1364/1670 train_time:133892ms step_avg:98.16ms +step:1365/1670 train_time:133990ms step_avg:98.16ms +step:1366/1670 train_time:134089ms step_avg:98.16ms +step:1367/1670 train_time:134187ms step_avg:98.16ms +step:1368/1670 train_time:134286ms step_avg:98.16ms +step:1369/1670 train_time:134385ms step_avg:98.16ms +step:1370/1670 train_time:134484ms step_avg:98.16ms +step:1371/1670 train_time:134582ms step_avg:98.16ms +step:1372/1670 train_time:134681ms step_avg:98.16ms +step:1373/1670 train_time:134781ms 
step_avg:98.17ms +step:1374/1670 train_time:134879ms step_avg:98.16ms +step:1375/1670 train_time:134976ms step_avg:98.16ms +step:1375/1670 val_loss:3.3459 train_time:135073ms step_avg:98.23ms +step:1376/1670 train_time:135095ms step_avg:98.18ms +step:1377/1670 train_time:135179ms step_avg:98.17ms +step:1378/1670 train_time:135277ms step_avg:98.17ms +step:1379/1670 train_time:135375ms step_avg:98.17ms +step:1380/1670 train_time:135473ms step_avg:98.17ms +step:1381/1670 train_time:135570ms step_avg:98.17ms +step:1382/1670 train_time:135666ms step_avg:98.17ms +step:1383/1670 train_time:135764ms step_avg:98.17ms +step:1384/1670 train_time:135861ms step_avg:98.17ms +step:1385/1670 train_time:135958ms step_avg:98.16ms +step:1386/1670 train_time:136058ms step_avg:98.17ms +step:1387/1670 train_time:136159ms step_avg:98.17ms +step:1388/1670 train_time:136258ms step_avg:98.17ms +step:1389/1670 train_time:136357ms step_avg:98.17ms +step:1390/1670 train_time:136455ms step_avg:98.17ms +step:1391/1670 train_time:136552ms step_avg:98.17ms +step:1392/1670 train_time:136650ms step_avg:98.17ms +step:1393/1670 train_time:136747ms step_avg:98.17ms +step:1394/1670 train_time:136844ms step_avg:98.17ms +step:1395/1670 train_time:136941ms step_avg:98.17ms +step:1396/1670 train_time:137039ms step_avg:98.17ms +step:1397/1670 train_time:137138ms step_avg:98.17ms +step:1398/1670 train_time:137237ms step_avg:98.17ms +step:1399/1670 train_time:137335ms step_avg:98.17ms +step:1400/1670 train_time:137434ms step_avg:98.17ms +step:1401/1670 train_time:137532ms step_avg:98.17ms +step:1402/1670 train_time:137630ms step_avg:98.17ms +step:1403/1670 train_time:137728ms step_avg:98.17ms +step:1404/1670 train_time:137826ms step_avg:98.17ms +step:1405/1670 train_time:137923ms step_avg:98.17ms +step:1406/1670 train_time:138022ms step_avg:98.17ms +step:1407/1670 train_time:138120ms step_avg:98.17ms +step:1408/1670 train_time:138222ms step_avg:98.17ms +step:1409/1670 train_time:138321ms step_avg:98.17ms +step:1410/1670 train_time:138419ms step_avg:98.17ms +step:1411/1670 train_time:138517ms step_avg:98.17ms +step:1412/1670 train_time:138615ms step_avg:98.17ms +step:1413/1670 train_time:138712ms step_avg:98.17ms +step:1414/1670 train_time:138810ms step_avg:98.17ms +step:1415/1670 train_time:138909ms step_avg:98.17ms +step:1416/1670 train_time:139009ms step_avg:98.17ms +step:1417/1670 train_time:139110ms step_avg:98.17ms +step:1418/1670 train_time:139209ms step_avg:98.17ms +step:1419/1670 train_time:139310ms step_avg:98.17ms +step:1420/1670 train_time:139409ms step_avg:98.18ms +step:1421/1670 train_time:139508ms step_avg:98.18ms +step:1422/1670 train_time:139606ms step_avg:98.18ms +step:1423/1670 train_time:139703ms step_avg:98.17ms +step:1424/1670 train_time:139800ms step_avg:98.17ms +step:1425/1670 train_time:139897ms step_avg:98.17ms +step:1426/1670 train_time:139996ms step_avg:98.17ms +step:1427/1670 train_time:140096ms step_avg:98.18ms +step:1428/1670 train_time:140197ms step_avg:98.18ms +step:1429/1670 train_time:140296ms step_avg:98.18ms +step:1430/1670 train_time:140394ms step_avg:98.18ms +step:1431/1670 train_time:140493ms step_avg:98.18ms +step:1432/1670 train_time:140591ms step_avg:98.18ms +step:1433/1670 train_time:140689ms step_avg:98.18ms +step:1434/1670 train_time:140787ms step_avg:98.18ms +step:1435/1670 train_time:140885ms step_avg:98.18ms +step:1436/1670 train_time:140983ms step_avg:98.18ms +step:1437/1670 train_time:141082ms step_avg:98.18ms +step:1438/1670 train_time:141179ms step_avg:98.18ms +step:1439/1670 
train_time:141277ms step_avg:98.18ms +step:1440/1670 train_time:141375ms step_avg:98.18ms +step:1441/1670 train_time:141473ms step_avg:98.18ms +step:1442/1670 train_time:141572ms step_avg:98.18ms +step:1443/1670 train_time:141671ms step_avg:98.18ms +step:1444/1670 train_time:141770ms step_avg:98.18ms +step:1445/1670 train_time:141868ms step_avg:98.18ms +step:1446/1670 train_time:141966ms step_avg:98.18ms +step:1447/1670 train_time:142066ms step_avg:98.18ms +step:1448/1670 train_time:142167ms step_avg:98.18ms +step:1449/1670 train_time:142266ms step_avg:98.18ms +step:1450/1670 train_time:142366ms step_avg:98.18ms +step:1451/1670 train_time:142464ms step_avg:98.18ms +step:1452/1670 train_time:142561ms step_avg:98.18ms +step:1453/1670 train_time:142658ms step_avg:98.18ms +step:1454/1670 train_time:142756ms step_avg:98.18ms +step:1455/1670 train_time:142854ms step_avg:98.18ms +step:1456/1670 train_time:142953ms step_avg:98.18ms +step:1457/1670 train_time:143051ms step_avg:98.18ms +step:1458/1670 train_time:143150ms step_avg:98.18ms +step:1459/1670 train_time:143251ms step_avg:98.18ms +step:1460/1670 train_time:143351ms step_avg:98.19ms +step:1461/1670 train_time:143450ms step_avg:98.19ms +step:1462/1670 train_time:143548ms step_avg:98.19ms +step:1463/1670 train_time:143646ms step_avg:98.19ms +step:1464/1670 train_time:143745ms step_avg:98.19ms +step:1465/1670 train_time:143843ms step_avg:98.19ms +step:1466/1670 train_time:143941ms step_avg:98.19ms +step:1467/1670 train_time:144038ms step_avg:98.19ms +step:1468/1670 train_time:144136ms step_avg:98.19ms +step:1469/1670 train_time:144236ms step_avg:98.19ms +step:1470/1670 train_time:144335ms step_avg:98.19ms +step:1471/1670 train_time:144433ms step_avg:98.19ms +step:1472/1670 train_time:144533ms step_avg:98.19ms +step:1473/1670 train_time:144631ms step_avg:98.19ms +step:1474/1670 train_time:144730ms step_avg:98.19ms +step:1475/1670 train_time:144828ms step_avg:98.19ms +step:1476/1670 train_time:144927ms step_avg:98.19ms +step:1477/1670 train_time:145027ms step_avg:98.19ms +step:1478/1670 train_time:145125ms step_avg:98.19ms +step:1479/1670 train_time:145226ms step_avg:98.19ms +step:1480/1670 train_time:145325ms step_avg:98.19ms +step:1481/1670 train_time:145424ms step_avg:98.19ms +step:1482/1670 train_time:145522ms step_avg:98.19ms +step:1483/1670 train_time:145620ms step_avg:98.19ms +step:1484/1670 train_time:145717ms step_avg:98.19ms +step:1485/1670 train_time:146077ms step_avg:98.37ms +step:1486/1670 train_time:146178ms step_avg:98.37ms +step:1487/1670 train_time:146274ms step_avg:98.37ms +step:1488/1670 train_time:146370ms step_avg:98.37ms +step:1489/1670 train_time:146467ms step_avg:98.37ms +step:1490/1670 train_time:146565ms step_avg:98.37ms +step:1491/1670 train_time:146662ms step_avg:98.36ms +step:1492/1670 train_time:146759ms step_avg:98.36ms +step:1493/1670 train_time:146856ms step_avg:98.36ms +step:1494/1670 train_time:146954ms step_avg:98.36ms +step:1495/1670 train_time:147060ms step_avg:98.37ms +step:1496/1670 train_time:147160ms step_avg:98.37ms +step:1497/1670 train_time:147258ms step_avg:98.37ms +step:1498/1670 train_time:147356ms step_avg:98.37ms +step:1499/1670 train_time:147453ms step_avg:98.37ms +step:1500/1670 train_time:147551ms step_avg:98.37ms +step:1500/1670 val_loss:3.3130 train_time:147648ms step_avg:98.43ms +step:1501/1670 train_time:147670ms step_avg:98.38ms +step:1502/1670 train_time:147753ms step_avg:98.37ms +step:1503/1670 train_time:147853ms step_avg:98.37ms +step:1504/1670 train_time:147952ms step_avg:98.37ms 
+step:1505/1670 train_time:148049ms step_avg:98.37ms +step:1506/1670 train_time:148146ms step_avg:98.37ms +step:1507/1670 train_time:148244ms step_avg:98.37ms +step:1508/1670 train_time:148340ms step_avg:98.37ms +step:1509/1670 train_time:148438ms step_avg:98.37ms +step:1510/1670 train_time:148535ms step_avg:98.37ms +step:1511/1670 train_time:148635ms step_avg:98.37ms +step:1512/1670 train_time:148736ms step_avg:98.37ms +step:1513/1670 train_time:148836ms step_avg:98.37ms +step:1514/1670 train_time:148936ms step_avg:98.37ms +step:1515/1670 train_time:149036ms step_avg:98.37ms +step:1516/1670 train_time:149135ms step_avg:98.37ms +step:1517/1670 train_time:149233ms step_avg:98.37ms +step:1518/1670 train_time:149331ms step_avg:98.37ms +step:1519/1670 train_time:149428ms step_avg:98.37ms +step:1520/1670 train_time:149526ms step_avg:98.37ms +step:1521/1670 train_time:149625ms step_avg:98.37ms +step:1522/1670 train_time:149723ms step_avg:98.37ms +step:1523/1670 train_time:149823ms step_avg:98.37ms +step:1524/1670 train_time:149922ms step_avg:98.37ms +step:1525/1670 train_time:150021ms step_avg:98.37ms +step:1526/1670 train_time:150119ms step_avg:98.37ms +step:1527/1670 train_time:150218ms step_avg:98.37ms +step:1528/1670 train_time:150316ms step_avg:98.37ms +step:1529/1670 train_time:150414ms step_avg:98.37ms +step:1530/1670 train_time:150513ms step_avg:98.37ms +step:1531/1670 train_time:150614ms step_avg:98.38ms +step:1532/1670 train_time:150713ms step_avg:98.38ms +step:1533/1670 train_time:150813ms step_avg:98.38ms +step:1534/1670 train_time:150913ms step_avg:98.38ms +step:1535/1670 train_time:151013ms step_avg:98.38ms +step:1536/1670 train_time:151112ms step_avg:98.38ms +step:1537/1670 train_time:151211ms step_avg:98.38ms +step:1538/1670 train_time:151310ms step_avg:98.38ms +step:1539/1670 train_time:151407ms step_avg:98.38ms +step:1540/1670 train_time:151504ms step_avg:98.38ms +step:1541/1670 train_time:151602ms step_avg:98.38ms +step:1542/1670 train_time:151701ms step_avg:98.38ms +step:1543/1670 train_time:151801ms step_avg:98.38ms +step:1544/1670 train_time:151900ms step_avg:98.38ms +step:1545/1670 train_time:152000ms step_avg:98.38ms +step:1546/1670 train_time:152098ms step_avg:98.38ms +step:1547/1670 train_time:152198ms step_avg:98.38ms +step:1548/1670 train_time:152298ms step_avg:98.38ms +step:1549/1670 train_time:152397ms step_avg:98.38ms +step:1550/1670 train_time:152495ms step_avg:98.38ms +step:1551/1670 train_time:152594ms step_avg:98.38ms +step:1552/1670 train_time:152694ms step_avg:98.39ms +step:1553/1670 train_time:152793ms step_avg:98.39ms +step:1554/1670 train_time:152892ms step_avg:98.39ms +step:1555/1670 train_time:152991ms step_avg:98.39ms +step:1556/1670 train_time:153089ms step_avg:98.39ms +step:1557/1670 train_time:153187ms step_avg:98.39ms +step:1558/1670 train_time:153285ms step_avg:98.39ms +step:1559/1670 train_time:153382ms step_avg:98.38ms +step:1560/1670 train_time:153480ms step_avg:98.38ms +step:1561/1670 train_time:153579ms step_avg:98.38ms +step:1562/1670 train_time:153677ms step_avg:98.38ms +step:1563/1670 train_time:153776ms step_avg:98.39ms +step:1564/1670 train_time:153876ms step_avg:98.39ms +step:1565/1670 train_time:153974ms step_avg:98.39ms +step:1566/1670 train_time:154073ms step_avg:98.39ms +step:1567/1670 train_time:154171ms step_avg:98.39ms +step:1568/1670 train_time:154269ms step_avg:98.39ms +step:1569/1670 train_time:154369ms step_avg:98.39ms +step:1570/1670 train_time:154466ms step_avg:98.39ms +step:1571/1670 train_time:154563ms step_avg:98.39ms 
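[Editorial aside] For post-processing these records, the plain training rows follow a fixed "step:<i>/<n> train_time:<t>ms step_avg:<a>ms" shape (val_loss rows interpose an extra field and are not matched). A small hypothetical parser, not part of the training script:

import re

STEP_RE = re.compile(r"step:(\d+)/(\d+) train_time:(\d+)ms step_avg:([\d.]+)ms")

def parse_steps(blob: str):
    # Yield (step, total_steps, train_time_ms, step_avg_ms) from a record blob.
    for m in STEP_RE.finditer(blob):
        yield int(m[1]), int(m[2]), int(m[3]), float(m[4])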
+step:1572/1670 train_time:154661ms step_avg:98.38ms +step:1573/1670 train_time:154759ms step_avg:98.38ms +step:1574/1670 train_time:154857ms step_avg:98.38ms +step:1575/1670 train_time:154956ms step_avg:98.38ms +step:1576/1670 train_time:155054ms step_avg:98.38ms +step:1577/1670 train_time:155152ms step_avg:98.38ms +step:1578/1670 train_time:155250ms step_avg:98.38ms +step:1579/1670 train_time:155350ms step_avg:98.39ms +step:1580/1670 train_time:155448ms step_avg:98.39ms +step:1581/1670 train_time:155546ms step_avg:98.38ms +step:1582/1670 train_time:155644ms step_avg:98.38ms +step:1583/1670 train_time:155742ms step_avg:98.38ms +step:1584/1670 train_time:155840ms step_avg:98.38ms +step:1585/1670 train_time:155938ms step_avg:98.38ms +step:1586/1670 train_time:156037ms step_avg:98.38ms +step:1587/1670 train_time:156136ms step_avg:98.38ms +step:1588/1670 train_time:156235ms step_avg:98.38ms +step:1589/1670 train_time:156333ms step_avg:98.38ms +step:1590/1670 train_time:156433ms step_avg:98.39ms +step:1591/1670 train_time:156532ms step_avg:98.39ms +step:1592/1670 train_time:156631ms step_avg:98.39ms +step:1593/1670 train_time:156730ms step_avg:98.39ms +step:1594/1670 train_time:156828ms step_avg:98.39ms +step:1595/1670 train_time:156927ms step_avg:98.39ms +step:1596/1670 train_time:157026ms step_avg:98.39ms +step:1597/1670 train_time:157123ms step_avg:98.39ms +step:1598/1670 train_time:157221ms step_avg:98.39ms +step:1599/1670 train_time:157319ms step_avg:98.39ms +step:1600/1670 train_time:157418ms step_avg:98.39ms +step:1601/1670 train_time:157517ms step_avg:98.39ms +step:1602/1670 train_time:157617ms step_avg:98.39ms +step:1603/1670 train_time:157717ms step_avg:98.39ms +step:1604/1670 train_time:157815ms step_avg:98.39ms +step:1605/1670 train_time:157913ms step_avg:98.39ms +step:1606/1670 train_time:158012ms step_avg:98.39ms +step:1607/1670 train_time:158112ms step_avg:98.39ms +step:1608/1670 train_time:158210ms step_avg:98.39ms +step:1609/1670 train_time:158308ms step_avg:98.39ms +step:1610/1670 train_time:158405ms step_avg:98.39ms +step:1611/1670 train_time:158503ms step_avg:98.39ms +step:1612/1670 train_time:158602ms step_avg:98.39ms +step:1613/1670 train_time:158701ms step_avg:98.39ms +step:1614/1670 train_time:158799ms step_avg:98.39ms +step:1615/1670 train_time:158899ms step_avg:98.39ms +step:1616/1670 train_time:158998ms step_avg:98.39ms +step:1617/1670 train_time:159097ms step_avg:98.39ms +step:1618/1670 train_time:159197ms step_avg:98.39ms +step:1619/1670 train_time:159297ms step_avg:98.39ms +step:1620/1670 train_time:159397ms step_avg:98.39ms +step:1621/1670 train_time:159496ms step_avg:98.39ms +step:1622/1670 train_time:159596ms step_avg:98.39ms +step:1623/1670 train_time:159695ms step_avg:98.40ms +step:1624/1670 train_time:159794ms step_avg:98.40ms +step:1625/1670 train_time:159893ms step_avg:98.40ms +step:1625/1670 val_loss:3.2865 train_time:159990ms step_avg:98.46ms +step:1626/1670 train_time:160012ms step_avg:98.41ms +step:1627/1670 train_time:160095ms step_avg:98.40ms +step:1628/1670 train_time:160195ms step_avg:98.40ms +step:1629/1670 train_time:160293ms step_avg:98.40ms +step:1630/1670 train_time:160389ms step_avg:98.40ms +step:1631/1670 train_time:160487ms step_avg:98.40ms +step:1632/1670 train_time:160585ms step_avg:98.40ms +step:1633/1670 train_time:160682ms step_avg:98.40ms +step:1634/1670 train_time:160781ms step_avg:98.40ms +step:1635/1670 train_time:160879ms step_avg:98.40ms +step:1636/1670 train_time:160979ms step_avg:98.40ms +step:1637/1670 train_time:161080ms 
step_avg:98.40ms +step:1638/1670 train_time:161181ms step_avg:98.40ms +step:1639/1670 train_time:161281ms step_avg:98.40ms +step:1640/1670 train_time:161380ms step_avg:98.40ms +step:1641/1670 train_time:161478ms step_avg:98.40ms +step:1642/1670 train_time:161575ms step_avg:98.40ms +step:1643/1670 train_time:161673ms step_avg:98.40ms +step:1644/1670 train_time:161770ms step_avg:98.40ms +step:1645/1670 train_time:161868ms step_avg:98.40ms +step:1646/1670 train_time:161967ms step_avg:98.40ms +step:1647/1670 train_time:162069ms step_avg:98.40ms +step:1648/1670 train_time:162170ms step_avg:98.40ms +step:1649/1670 train_time:162270ms step_avg:98.41ms +step:1650/1670 train_time:162369ms step_avg:98.41ms +step:1651/1670 train_time:162468ms step_avg:98.41ms +step:1652/1670 train_time:162567ms step_avg:98.41ms +step:1653/1670 train_time:162665ms step_avg:98.41ms +step:1654/1670 train_time:162763ms step_avg:98.41ms +step:1655/1670 train_time:162861ms step_avg:98.41ms +step:1656/1670 train_time:162959ms step_avg:98.41ms +step:1657/1670 train_time:163059ms step_avg:98.41ms +step:1658/1670 train_time:163160ms step_avg:98.41ms +step:1659/1670 train_time:163261ms step_avg:98.41ms +step:1660/1670 train_time:163361ms step_avg:98.41ms +step:1661/1670 train_time:163460ms step_avg:98.41ms +step:1662/1670 train_time:163559ms step_avg:98.41ms +step:1663/1670 train_time:163657ms step_avg:98.41ms +step:1664/1670 train_time:163754ms step_avg:98.41ms +step:1665/1670 train_time:163851ms step_avg:98.41ms +step:1666/1670 train_time:163949ms step_avg:98.41ms +step:1667/1670 train_time:164048ms step_avg:98.41ms +step:1668/1670 train_time:164146ms step_avg:98.41ms +step:1669/1670 train_time:164245ms step_avg:98.41ms +step:1670/1670 train_time:164344ms step_avg:98.41ms +step:1670/1670 val_loss:3.2784 train_time:164443ms step_avg:98.47ms +peak memory allocated: 34613 MiB reserved: 49116 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_1b9374fc-2a63-47a1-b144-2fc8ad635792.txt b/records/090525_SkipMLPBlocks/comparison_1b9374fc-2a63-47a1-b144-2fc8ad635792.txt new file mode 100644 index 000000000..5d5d3441a --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_1b9374fc-2a63-47a1-b144-2fc8ad635792.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + 
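# [editorial note, not in the original record] float8_e4m3fn only represents + # magnitudes up to 448, so x (and w on the next line) is divided by its scale + # before the cast; torch._scaled_mm re-applies x_s and w_s via scale_a / scale_b, + # so the bf16 output approximates the unscaled x @ w.T. +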
w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", 
"a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated 
from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): 
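+ # [editorial note] the loop below implements the quintic Newton-Schulz iteration + # X <- a*X + (b*A + c*A @ A) @ X with A = X @ X.mT; its iterates push the singular + # values of X toward 1, i.e. orthogonalize the update, per the Muon docstring below.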
+ a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
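+ # [editorial sketch of the flow below] every param in a group has the same shape, + # so params are dealt round-robin across ranks: rank r owns params[base_i + r], + # receives the world-averaged gradient via reduce_scatter, applies momentum and + # Newton-Schulz locally, then the updated weights are re-replicated via all_gather.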
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
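# [editorial note] exp_avg / exp_avg_sq are Adam's first- and second-moment EMAs; + # each rank allocates only the 1/world_size row-slice of state that it updates. +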
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of position {cur}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each 
+    # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # trim the last document by one so lengths match the one-token _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * new_grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens // new_grad_accum_steps
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
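+
+# Illustrative sketch (exposition only; never called): the generator yields
+# (inputs, targets, cum_lengths), where cum_lengths is a prefix-sum of document
+# lengths padded out to max_num_docs with the total token count, matching the
+# cu_seqlens convention consumed by the varlen attention here. The toy numbers
+# below are hypothetical.
+def _cum_lengths_demo():
+    doc_lengths = torch.tensor([5, 3, 4])  # three documents, 12 tokens total
+    num_tokens_local, max_num_docs = 12, 8
+    cum = torch.full((max_num_docs,), num_tokens_local)
+    cum[0] = 0
+    cum[1:len(doc_lengths) + 1] = doc_lengths.cumsum(0)
+    return cum  # tensor([ 0,  5,  8, 12, 12, 12, 12, 12]); padding acts as empty docs
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on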
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
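+
+# Illustrative sanity check (exposition only; never called): the four groups
+# above should partition model.parameters() exactly, since Muon takes the >=2D
+# hidden matrices and Adam takes the embeddings, scalars and the head.
+def _param_groups_check():
+    groups = [hidden_matrix_params, embed_params, scalar_params, head_params]
+    ids = [id(p) for group in groups for p in group]
+    assert len(ids) == len(set(ids)), "a parameter landed in two groups"
+    assert set(ids) == {id(p) for p in model.parameters()}, "a parameter was missed"
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng.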
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if 
master_process and args.save_checkpoint:
+            log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
+            os.makedirs(f"logs/{run_id}", exist_ok=True)
+            torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt")
+        # the last step only has the validation loop, so break to avoid training
+        break
+
+    # --------------- TRAINING SECTION -----------------
+    for _ in range(grad_accum_steps):
+        inputs, targets, cum_seqlens = next(train_loader)
+        model(inputs, targets, cum_seqlens, ws).backward()
+    # set optimization hyperparameters
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * get_lr(step)
+    for group in optimizer2.param_groups:
+        frac = min(step / 300, 1) # momentum warmup for muon
+        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+    # step the optimizers
+    for opt in optimizers:
+        opt.step()
+    # null the gradients
+    model.zero_grad(set_to_none=True)
+    # logging
+    approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0)
+    print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True)
+
+print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+dist.destroy_process_group()
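+
+# Illustrative sketch (exposition only; never called): sample the two schedules
+# defined above, with num_iterations=1670, cooldown_frac=0.45, ws_schedule=(3, 7, 11).
+def _schedule_demo():
+    # get_lr holds the multiplier at 1.0 until x >= 1 - cooldown_frac (around
+    # step 919), then decays linearly toward 0.1; get_ws steps through
+    # 3 -> 7 -> 11 at roughly one third and two thirds of training.
+    return {step: (round(get_lr(step), 3), get_ws(step)) for step in (0, 800, 1000, 1669)}
+
+====================================================================================================
+Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]
+Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6
+Running Triton version 3.4.0
+Fri Sep  5 16:05:54 2025
++-----------------------------------------------------------------------------------------+
+| NVIDIA-SMI 550.144.03             Driver Version: 550.144.03     CUDA Version: 12.4     |
+|-----------------------------------------+------------------------+----------------------+
+| GPU  Name                 Persistence-M | Bus-Id          Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp   Perf          Pwr:Usage/Cap |           Memory-Usage | GPU-Util  Compute M. |
+|                                         |                        |               MIG M.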
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 129W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 80638 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 80639 C /usr/bin/python3 610MiB | +| 0 N/A N/A 80640 C /usr/bin/python3 610MiB | +| 0 N/A N/A 80641 C /usr/bin/python3 610MiB | +| 0 N/A N/A 80642 C /usr/bin/python3 610MiB | +| 0 N/A N/A 80643 C /usr/bin/python3 610MiB | +| 0 N/A N/A 80644 C /usr/bin/python3 610MiB | +| 0 N/A N/A 80645 C /usr/bin/python3 610MiB | +| 1 N/A N/A 80639 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 80640 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 80641 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 80642 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 80643 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 80644 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 80645 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:428ms step_avg:428.15ms +step:2/1670 train_time:448ms step_avg:224.23ms +step:3/1670 train_time:522ms step_avg:174.06ms +step:4/1670 train_time:616ms step_avg:154.12ms +step:5/1670 train_time:711ms step_avg:142.12ms +step:6/1670 train_time:805ms step_avg:134.20ms +step:7/1670 train_time:901ms step_avg:128.72ms +step:8/1670 
train_time:996ms step_avg:124.49ms +step:9/1670 train_time:1090ms step_avg:121.14ms +step:10/1670 train_time:1186ms step_avg:118.56ms +step:11/1670 train_time:1281ms step_avg:116.45ms +step:12/1670 train_time:1380ms step_avg:114.98ms +step:13/1670 train_time:1479ms step_avg:113.78ms +step:14/1670 train_time:1577ms step_avg:112.64ms +step:15/1670 train_time:1673ms step_avg:111.52ms +step:16/1670 train_time:1768ms step_avg:110.48ms +step:17/1670 train_time:1864ms step_avg:109.62ms +step:18/1670 train_time:1959ms step_avg:108.84ms +step:19/1670 train_time:2055ms step_avg:108.17ms +step:20/1670 train_time:2150ms step_avg:107.51ms +step:21/1670 train_time:2245ms step_avg:106.93ms +step:22/1670 train_time:2342ms step_avg:106.46ms +step:23/1670 train_time:2439ms step_avg:106.05ms +step:24/1670 train_time:2536ms step_avg:105.68ms +step:25/1670 train_time:2633ms step_avg:105.31ms +step:26/1670 train_time:2729ms step_avg:104.95ms +step:27/1670 train_time:2825ms step_avg:104.62ms +step:28/1670 train_time:2921ms step_avg:104.31ms +step:29/1670 train_time:3016ms step_avg:104.00ms +step:30/1670 train_time:3111ms step_avg:103.71ms +step:31/1670 train_time:3206ms step_avg:103.43ms +step:32/1670 train_time:3302ms step_avg:103.20ms +step:33/1670 train_time:3399ms step_avg:102.99ms +step:34/1670 train_time:3496ms step_avg:102.82ms +step:35/1670 train_time:3593ms step_avg:102.65ms +step:36/1670 train_time:3689ms step_avg:102.46ms +step:37/1670 train_time:3784ms step_avg:102.27ms +step:38/1670 train_time:3881ms step_avg:102.14ms +step:39/1670 train_time:3976ms step_avg:101.96ms +step:40/1670 train_time:4072ms step_avg:101.81ms +step:41/1670 train_time:4168ms step_avg:101.65ms +step:42/1670 train_time:4263ms step_avg:101.51ms +step:43/1670 train_time:4359ms step_avg:101.37ms +step:44/1670 train_time:4455ms step_avg:101.24ms +step:45/1670 train_time:4550ms step_avg:101.12ms +step:46/1670 train_time:4647ms step_avg:101.02ms +step:47/1670 train_time:4744ms step_avg:100.93ms +step:48/1670 train_time:4840ms step_avg:100.84ms +step:49/1670 train_time:4937ms step_avg:100.76ms +step:50/1670 train_time:5033ms step_avg:100.66ms +step:51/1670 train_time:5128ms step_avg:100.55ms +step:52/1670 train_time:5224ms step_avg:100.47ms +step:53/1670 train_time:5320ms step_avg:100.38ms +step:54/1670 train_time:5417ms step_avg:100.31ms +step:55/1670 train_time:5511ms step_avg:100.21ms +step:56/1670 train_time:5608ms step_avg:100.14ms +step:57/1670 train_time:5704ms step_avg:100.08ms +step:58/1670 train_time:5801ms step_avg:100.01ms +step:59/1670 train_time:5897ms step_avg:99.95ms +step:60/1670 train_time:5993ms step_avg:99.89ms +step:61/1670 train_time:6088ms step_avg:99.81ms +step:62/1670 train_time:6184ms step_avg:99.74ms +step:63/1670 train_time:6280ms step_avg:99.69ms +step:64/1670 train_time:6376ms step_avg:99.63ms +step:65/1670 train_time:6473ms step_avg:99.58ms +step:66/1670 train_time:6568ms step_avg:99.52ms +step:67/1670 train_time:6663ms step_avg:99.45ms +step:68/1670 train_time:6760ms step_avg:99.40ms +step:69/1670 train_time:6856ms step_avg:99.36ms +step:70/1670 train_time:6951ms step_avg:99.30ms +step:71/1670 train_time:7046ms step_avg:99.24ms +step:72/1670 train_time:7142ms step_avg:99.19ms +step:73/1670 train_time:7238ms step_avg:99.14ms +step:74/1670 train_time:7333ms step_avg:99.10ms +step:75/1670 train_time:7430ms step_avg:99.06ms +step:76/1670 train_time:7526ms step_avg:99.02ms +step:77/1670 train_time:7622ms step_avg:98.99ms +step:78/1670 train_time:7718ms step_avg:98.95ms +step:79/1670 train_time:7814ms 
step_avg:98.91ms +step:80/1670 train_time:7909ms step_avg:98.87ms +step:81/1670 train_time:8005ms step_avg:98.83ms +step:82/1670 train_time:8102ms step_avg:98.80ms +step:83/1670 train_time:8198ms step_avg:98.78ms +step:84/1670 train_time:8294ms step_avg:98.73ms +step:85/1670 train_time:8388ms step_avg:98.69ms +step:86/1670 train_time:8484ms step_avg:98.65ms +step:87/1670 train_time:8580ms step_avg:98.62ms +step:88/1670 train_time:8676ms step_avg:98.59ms +step:89/1670 train_time:8771ms step_avg:98.55ms +step:90/1670 train_time:8867ms step_avg:98.52ms +step:91/1670 train_time:8963ms step_avg:98.49ms +step:92/1670 train_time:9060ms step_avg:98.48ms +step:93/1670 train_time:9156ms step_avg:98.45ms +step:94/1670 train_time:9252ms step_avg:98.42ms +step:95/1670 train_time:9346ms step_avg:98.38ms +step:96/1670 train_time:9442ms step_avg:98.36ms +step:97/1670 train_time:9539ms step_avg:98.34ms +step:98/1670 train_time:9635ms step_avg:98.32ms +step:99/1670 train_time:9731ms step_avg:98.29ms +step:100/1670 train_time:9826ms step_avg:98.26ms +step:101/1670 train_time:9922ms step_avg:98.24ms +step:102/1670 train_time:10017ms step_avg:98.21ms +step:103/1670 train_time:10114ms step_avg:98.19ms +step:104/1670 train_time:10209ms step_avg:98.17ms +step:105/1670 train_time:10305ms step_avg:98.14ms +step:106/1670 train_time:10402ms step_avg:98.13ms +step:107/1670 train_time:10500ms step_avg:98.13ms +step:108/1670 train_time:10595ms step_avg:98.10ms +step:109/1670 train_time:10691ms step_avg:98.08ms +step:110/1670 train_time:10787ms step_avg:98.06ms +step:111/1670 train_time:10883ms step_avg:98.04ms +step:112/1670 train_time:10979ms step_avg:98.03ms +step:113/1670 train_time:11075ms step_avg:98.01ms +step:114/1670 train_time:11171ms step_avg:97.99ms +step:115/1670 train_time:11266ms step_avg:97.97ms +step:116/1670 train_time:11362ms step_avg:97.95ms +step:117/1670 train_time:11458ms step_avg:97.93ms +step:118/1670 train_time:11554ms step_avg:97.91ms +step:119/1670 train_time:11649ms step_avg:97.89ms +step:120/1670 train_time:11744ms step_avg:97.87ms +step:121/1670 train_time:11840ms step_avg:97.85ms +step:122/1670 train_time:11937ms step_avg:97.84ms +step:123/1670 train_time:12033ms step_avg:97.83ms +step:124/1670 train_time:12130ms step_avg:97.82ms +step:125/1670 train_time:12225ms step_avg:97.80ms +step:125/1670 val_loss:4.3028 train_time:12321ms step_avg:98.57ms +step:126/1670 train_time:12342ms step_avg:97.95ms +step:127/1670 train_time:12429ms step_avg:97.86ms +step:128/1670 train_time:12529ms step_avg:97.89ms +step:129/1670 train_time:12625ms step_avg:97.87ms +step:130/1670 train_time:12720ms step_avg:97.85ms +step:131/1670 train_time:12816ms step_avg:97.83ms +step:132/1670 train_time:12911ms step_avg:97.81ms +step:133/1670 train_time:13005ms step_avg:97.79ms +step:134/1670 train_time:13100ms step_avg:97.76ms +step:135/1670 train_time:13196ms step_avg:97.75ms +step:136/1670 train_time:13290ms step_avg:97.72ms +step:137/1670 train_time:13389ms step_avg:97.73ms +step:138/1670 train_time:13486ms step_avg:97.72ms +step:139/1670 train_time:13582ms step_avg:97.71ms +step:140/1670 train_time:13679ms step_avg:97.71ms +step:141/1670 train_time:13775ms step_avg:97.69ms +step:142/1670 train_time:13870ms step_avg:97.68ms +step:143/1670 train_time:13964ms step_avg:97.65ms +step:144/1670 train_time:14059ms step_avg:97.63ms +step:145/1670 train_time:14155ms step_avg:97.62ms +step:146/1670 train_time:14250ms step_avg:97.60ms +step:147/1670 train_time:14346ms step_avg:97.59ms +step:148/1670 train_time:14442ms 
step_avg:97.58ms +step:149/1670 train_time:14539ms step_avg:97.58ms +step:150/1670 train_time:14637ms step_avg:97.58ms +step:151/1670 train_time:14733ms step_avg:97.57ms +step:152/1670 train_time:14830ms step_avg:97.56ms +step:153/1670 train_time:14925ms step_avg:97.55ms +step:154/1670 train_time:15019ms step_avg:97.53ms +step:155/1670 train_time:15116ms step_avg:97.52ms +step:156/1670 train_time:15211ms step_avg:97.51ms +step:157/1670 train_time:15307ms step_avg:97.49ms +step:158/1670 train_time:15403ms step_avg:97.48ms +step:159/1670 train_time:15498ms step_avg:97.47ms +step:160/1670 train_time:15595ms step_avg:97.47ms +step:161/1670 train_time:15691ms step_avg:97.46ms +step:162/1670 train_time:15788ms step_avg:97.46ms +step:163/1670 train_time:15884ms step_avg:97.45ms +step:164/1670 train_time:15979ms step_avg:97.44ms +step:165/1670 train_time:16074ms step_avg:97.42ms +step:166/1670 train_time:16170ms step_avg:97.41ms +step:167/1670 train_time:16265ms step_avg:97.40ms +step:168/1670 train_time:16360ms step_avg:97.38ms +step:169/1670 train_time:16457ms step_avg:97.38ms +step:170/1670 train_time:16553ms step_avg:97.37ms +step:171/1670 train_time:16650ms step_avg:97.37ms +step:172/1670 train_time:16746ms step_avg:97.36ms +step:173/1670 train_time:16841ms step_avg:97.35ms +step:174/1670 train_time:16937ms step_avg:97.34ms +step:175/1670 train_time:17033ms step_avg:97.33ms +step:176/1670 train_time:17128ms step_avg:97.32ms +step:177/1670 train_time:17223ms step_avg:97.30ms +step:178/1670 train_time:17318ms step_avg:97.29ms +step:179/1670 train_time:17415ms step_avg:97.29ms +step:180/1670 train_time:17511ms step_avg:97.29ms +step:181/1670 train_time:17607ms step_avg:97.28ms +step:182/1670 train_time:17703ms step_avg:97.27ms +step:183/1670 train_time:17798ms step_avg:97.26ms +step:184/1670 train_time:17895ms step_avg:97.25ms +step:185/1670 train_time:17991ms step_avg:97.25ms +step:186/1670 train_time:18087ms step_avg:97.24ms +step:187/1670 train_time:18182ms step_avg:97.23ms +step:188/1670 train_time:18278ms step_avg:97.22ms +step:189/1670 train_time:18373ms step_avg:97.21ms +step:190/1670 train_time:18469ms step_avg:97.21ms +step:191/1670 train_time:18565ms step_avg:97.20ms +step:192/1670 train_time:18661ms step_avg:97.19ms +step:193/1670 train_time:18757ms step_avg:97.18ms +step:194/1670 train_time:18853ms step_avg:97.18ms +step:195/1670 train_time:18949ms step_avg:97.17ms +step:196/1670 train_time:19044ms step_avg:97.16ms +step:197/1670 train_time:19140ms step_avg:97.16ms +step:198/1670 train_time:19236ms step_avg:97.15ms +step:199/1670 train_time:19332ms step_avg:97.14ms +step:200/1670 train_time:19428ms step_avg:97.14ms +step:201/1670 train_time:19523ms step_avg:97.13ms +step:202/1670 train_time:19619ms step_avg:97.12ms +step:203/1670 train_time:19715ms step_avg:97.12ms +step:204/1670 train_time:19810ms step_avg:97.11ms +step:205/1670 train_time:19906ms step_avg:97.10ms +step:206/1670 train_time:20001ms step_avg:97.09ms +step:207/1670 train_time:20097ms step_avg:97.09ms +step:208/1670 train_time:20193ms step_avg:97.08ms +step:209/1670 train_time:20288ms step_avg:97.07ms +step:210/1670 train_time:20383ms step_avg:97.06ms +step:211/1670 train_time:20479ms step_avg:97.06ms +step:212/1670 train_time:20575ms step_avg:97.05ms +step:213/1670 train_time:20938ms step_avg:98.30ms +step:214/1670 train_time:21010ms step_avg:98.18ms +step:215/1670 train_time:21104ms step_avg:98.16ms +step:216/1670 train_time:21199ms step_avg:98.14ms +step:217/1670 train_time:21294ms step_avg:98.13ms +step:218/1670 
train_time:21388ms step_avg:98.11ms +step:219/1670 train_time:21482ms step_avg:98.09ms +step:220/1670 train_time:21577ms step_avg:98.08ms +step:221/1670 train_time:21671ms step_avg:98.06ms +step:222/1670 train_time:21766ms step_avg:98.04ms +step:223/1670 train_time:21862ms step_avg:98.04ms +step:224/1670 train_time:21961ms step_avg:98.04ms +step:225/1670 train_time:22060ms step_avg:98.04ms +step:226/1670 train_time:22156ms step_avg:98.03ms +step:227/1670 train_time:22251ms step_avg:98.02ms +step:228/1670 train_time:22347ms step_avg:98.01ms +step:229/1670 train_time:22441ms step_avg:98.00ms +step:230/1670 train_time:22536ms step_avg:97.98ms +step:231/1670 train_time:22631ms step_avg:97.97ms +step:232/1670 train_time:22725ms step_avg:97.95ms +step:233/1670 train_time:22822ms step_avg:97.95ms +step:234/1670 train_time:22919ms step_avg:97.95ms +step:235/1670 train_time:23016ms step_avg:97.94ms +step:236/1670 train_time:23113ms step_avg:97.94ms +step:237/1670 train_time:23208ms step_avg:97.93ms +step:238/1670 train_time:23303ms step_avg:97.91ms +step:239/1670 train_time:23399ms step_avg:97.90ms +step:240/1670 train_time:23494ms step_avg:97.89ms +step:241/1670 train_time:23589ms step_avg:97.88ms +step:242/1670 train_time:23684ms step_avg:97.87ms +step:243/1670 train_time:23779ms step_avg:97.86ms +step:244/1670 train_time:23875ms step_avg:97.85ms +step:245/1670 train_time:23971ms step_avg:97.84ms +step:246/1670 train_time:24066ms step_avg:97.83ms +step:247/1670 train_time:24162ms step_avg:97.82ms +step:248/1670 train_time:24259ms step_avg:97.82ms +step:249/1670 train_time:24354ms step_avg:97.81ms +step:250/1670 train_time:24450ms step_avg:97.80ms +step:250/1670 val_loss:3.9672 train_time:24544ms step_avg:98.17ms +step:251/1670 train_time:24565ms step_avg:97.87ms +step:252/1670 train_time:24646ms step_avg:97.80ms +step:253/1670 train_time:24746ms step_avg:97.81ms +step:254/1670 train_time:24845ms step_avg:97.81ms +step:255/1670 train_time:24942ms step_avg:97.81ms +step:256/1670 train_time:25036ms step_avg:97.80ms +step:257/1670 train_time:25131ms step_avg:97.79ms +step:258/1670 train_time:25226ms step_avg:97.78ms +step:259/1670 train_time:25321ms step_avg:97.76ms +step:260/1670 train_time:25416ms step_avg:97.75ms +step:261/1670 train_time:25511ms step_avg:97.74ms +step:262/1670 train_time:25607ms step_avg:97.74ms +step:263/1670 train_time:25705ms step_avg:97.74ms +step:264/1670 train_time:25803ms step_avg:97.74ms +step:265/1670 train_time:25899ms step_avg:97.73ms +step:266/1670 train_time:25995ms step_avg:97.73ms +step:267/1670 train_time:26089ms step_avg:97.71ms +step:268/1670 train_time:26185ms step_avg:97.71ms +step:269/1670 train_time:26280ms step_avg:97.70ms +step:270/1670 train_time:26376ms step_avg:97.69ms +step:271/1670 train_time:26470ms step_avg:97.68ms +step:272/1670 train_time:26565ms step_avg:97.67ms +step:273/1670 train_time:26662ms step_avg:97.66ms +step:274/1670 train_time:26758ms step_avg:97.66ms +step:275/1670 train_time:26854ms step_avg:97.65ms +step:276/1670 train_time:26950ms step_avg:97.65ms +step:277/1670 train_time:27047ms step_avg:97.64ms +step:278/1670 train_time:27144ms step_avg:97.64ms +step:279/1670 train_time:27238ms step_avg:97.63ms +step:280/1670 train_time:27334ms step_avg:97.62ms +step:281/1670 train_time:27429ms step_avg:97.61ms +step:282/1670 train_time:27524ms step_avg:97.60ms +step:283/1670 train_time:27619ms step_avg:97.59ms +step:284/1670 train_time:27715ms step_avg:97.59ms +step:285/1670 train_time:27810ms step_avg:97.58ms +step:286/1670 train_time:27906ms 
step_avg:97.57ms +step:287/1670 train_time:28003ms step_avg:97.57ms +step:288/1670 train_time:28099ms step_avg:97.57ms +step:289/1670 train_time:28194ms step_avg:97.56ms +step:290/1670 train_time:28289ms step_avg:97.55ms +step:291/1670 train_time:28384ms step_avg:97.54ms +step:292/1670 train_time:28480ms step_avg:97.53ms +step:293/1670 train_time:28574ms step_avg:97.52ms +step:294/1670 train_time:28669ms step_avg:97.52ms +step:295/1670 train_time:28766ms step_avg:97.51ms +step:296/1670 train_time:28862ms step_avg:97.51ms +step:297/1670 train_time:28958ms step_avg:97.50ms +step:298/1670 train_time:29054ms step_avg:97.50ms +step:299/1670 train_time:29149ms step_avg:97.49ms +step:300/1670 train_time:29245ms step_avg:97.48ms +step:301/1670 train_time:29341ms step_avg:97.48ms +step:302/1670 train_time:29437ms step_avg:97.47ms +step:303/1670 train_time:29532ms step_avg:97.47ms +step:304/1670 train_time:29628ms step_avg:97.46ms +step:305/1670 train_time:29724ms step_avg:97.46ms +step:306/1670 train_time:29820ms step_avg:97.45ms +step:307/1670 train_time:29917ms step_avg:97.45ms +step:308/1670 train_time:30012ms step_avg:97.44ms +step:309/1670 train_time:30108ms step_avg:97.44ms +step:310/1670 train_time:30203ms step_avg:97.43ms +step:311/1670 train_time:30299ms step_avg:97.42ms +step:312/1670 train_time:30393ms step_avg:97.41ms +step:313/1670 train_time:30488ms step_avg:97.41ms +step:314/1670 train_time:30584ms step_avg:97.40ms +step:315/1670 train_time:30679ms step_avg:97.40ms +step:316/1670 train_time:30775ms step_avg:97.39ms +step:317/1670 train_time:30871ms step_avg:97.38ms +step:318/1670 train_time:30967ms step_avg:97.38ms +step:319/1670 train_time:31063ms step_avg:97.38ms +step:320/1670 train_time:31159ms step_avg:97.37ms +step:321/1670 train_time:31254ms step_avg:97.36ms +step:322/1670 train_time:31350ms step_avg:97.36ms +step:323/1670 train_time:31446ms step_avg:97.36ms +step:324/1670 train_time:31541ms step_avg:97.35ms +step:325/1670 train_time:31636ms step_avg:97.34ms +step:326/1670 train_time:31732ms step_avg:97.34ms +step:327/1670 train_time:31827ms step_avg:97.33ms +step:328/1670 train_time:31925ms step_avg:97.33ms +step:329/1670 train_time:32021ms step_avg:97.33ms +step:330/1670 train_time:32117ms step_avg:97.33ms +step:331/1670 train_time:32213ms step_avg:97.32ms +step:332/1670 train_time:32308ms step_avg:97.31ms +step:333/1670 train_time:32404ms step_avg:97.31ms +step:334/1670 train_time:32500ms step_avg:97.31ms +step:335/1670 train_time:32595ms step_avg:97.30ms +step:336/1670 train_time:32691ms step_avg:97.29ms +step:337/1670 train_time:32786ms step_avg:97.29ms +step:338/1670 train_time:32882ms step_avg:97.28ms +step:339/1670 train_time:32978ms step_avg:97.28ms +step:340/1670 train_time:33073ms step_avg:97.27ms +step:341/1670 train_time:33169ms step_avg:97.27ms +step:342/1670 train_time:33265ms step_avg:97.27ms +step:343/1670 train_time:33361ms step_avg:97.26ms +step:344/1670 train_time:33456ms step_avg:97.25ms +step:345/1670 train_time:33552ms step_avg:97.25ms +step:346/1670 train_time:33647ms step_avg:97.24ms +step:347/1670 train_time:33743ms step_avg:97.24ms +step:348/1670 train_time:33839ms step_avg:97.24ms +step:349/1670 train_time:33934ms step_avg:97.23ms +step:350/1670 train_time:34030ms step_avg:97.23ms +step:351/1670 train_time:34125ms step_avg:97.22ms +step:352/1670 train_time:34221ms step_avg:97.22ms +step:353/1670 train_time:34317ms step_avg:97.21ms +step:354/1670 train_time:34413ms step_avg:97.21ms +step:355/1670 train_time:34509ms step_avg:97.21ms +step:356/1670 
train_time:34604ms step_avg:97.20ms +step:357/1670 train_time:34700ms step_avg:97.20ms +step:358/1670 train_time:34796ms step_avg:97.20ms +step:359/1670 train_time:34891ms step_avg:97.19ms +step:360/1670 train_time:34987ms step_avg:97.19ms +step:361/1670 train_time:35083ms step_avg:97.18ms +step:362/1670 train_time:35179ms step_avg:97.18ms +step:363/1670 train_time:35275ms step_avg:97.18ms +step:364/1670 train_time:35371ms step_avg:97.17ms +step:365/1670 train_time:35466ms step_avg:97.17ms +step:366/1670 train_time:35562ms step_avg:97.17ms +step:367/1670 train_time:35658ms step_avg:97.16ms +step:368/1670 train_time:35754ms step_avg:97.16ms +step:369/1670 train_time:35850ms step_avg:97.15ms +step:370/1670 train_time:35946ms step_avg:97.15ms +step:371/1670 train_time:36042ms step_avg:97.15ms +step:372/1670 train_time:36138ms step_avg:97.14ms +step:373/1670 train_time:36233ms step_avg:97.14ms +step:374/1670 train_time:36329ms step_avg:97.14ms +step:375/1670 train_time:36425ms step_avg:97.13ms +step:375/1670 val_loss:3.8186 train_time:36520ms step_avg:97.39ms +step:376/1670 train_time:36542ms step_avg:97.19ms +step:377/1670 train_time:36624ms step_avg:97.15ms +step:378/1670 train_time:36725ms step_avg:97.16ms +step:379/1670 train_time:36820ms step_avg:97.15ms +step:380/1670 train_time:36915ms step_avg:97.14ms +step:381/1670 train_time:37009ms step_avg:97.14ms +step:382/1670 train_time:37104ms step_avg:97.13ms +step:383/1670 train_time:37198ms step_avg:97.12ms +step:384/1670 train_time:37293ms step_avg:97.12ms +step:385/1670 train_time:37388ms step_avg:97.11ms +step:386/1670 train_time:37485ms step_avg:97.11ms +step:387/1670 train_time:37582ms step_avg:97.11ms +step:388/1670 train_time:37680ms step_avg:97.11ms +step:389/1670 train_time:37776ms step_avg:97.11ms +step:390/1670 train_time:37872ms step_avg:97.11ms +step:391/1670 train_time:37968ms step_avg:97.11ms +step:392/1670 train_time:38063ms step_avg:97.10ms +step:393/1670 train_time:38158ms step_avg:97.09ms +step:394/1670 train_time:38253ms step_avg:97.09ms +step:395/1670 train_time:38349ms step_avg:97.09ms +step:396/1670 train_time:38444ms step_avg:97.08ms +step:397/1670 train_time:38540ms step_avg:97.08ms +step:398/1670 train_time:38636ms step_avg:97.08ms +step:399/1670 train_time:38733ms step_avg:97.08ms +step:400/1670 train_time:38830ms step_avg:97.07ms +step:401/1670 train_time:38926ms step_avg:97.07ms +step:402/1670 train_time:39021ms step_avg:97.07ms +step:403/1670 train_time:39116ms step_avg:97.06ms +step:404/1670 train_time:39211ms step_avg:97.06ms +step:405/1670 train_time:39307ms step_avg:97.05ms +step:406/1670 train_time:39402ms step_avg:97.05ms +step:407/1670 train_time:39498ms step_avg:97.05ms +step:408/1670 train_time:39594ms step_avg:97.04ms +step:409/1670 train_time:39690ms step_avg:97.04ms +step:410/1670 train_time:39787ms step_avg:97.04ms +step:411/1670 train_time:39882ms step_avg:97.04ms +step:412/1670 train_time:39977ms step_avg:97.03ms +step:413/1670 train_time:40073ms step_avg:97.03ms +step:414/1670 train_time:40168ms step_avg:97.03ms +step:415/1670 train_time:40264ms step_avg:97.02ms +step:416/1670 train_time:40359ms step_avg:97.02ms +step:417/1670 train_time:40455ms step_avg:97.01ms +step:418/1670 train_time:40552ms step_avg:97.02ms +step:419/1670 train_time:40649ms step_avg:97.02ms +step:420/1670 train_time:40746ms step_avg:97.02ms +step:421/1670 train_time:40843ms step_avg:97.01ms +step:422/1670 train_time:40938ms step_avg:97.01ms +step:423/1670 train_time:41033ms step_avg:97.01ms +step:424/1670 train_time:41129ms 
step_avg:97.00ms +step:425/1670 train_time:41421ms step_avg:97.46ms +step:426/1670 train_time:41530ms step_avg:97.49ms +step:427/1670 train_time:41624ms step_avg:97.48ms +step:428/1670 train_time:41718ms step_avg:97.47ms +step:429/1670 train_time:41813ms step_avg:97.47ms +step:430/1670 train_time:41908ms step_avg:97.46ms +step:431/1670 train_time:42003ms step_avg:97.45ms +step:432/1670 train_time:42097ms step_avg:97.45ms +step:433/1670 train_time:42192ms step_avg:97.44ms +step:434/1670 train_time:42287ms step_avg:97.44ms +step:435/1670 train_time:42384ms step_avg:97.43ms +step:436/1670 train_time:42484ms step_avg:97.44ms +step:437/1670 train_time:42581ms step_avg:97.44ms +step:438/1670 train_time:42676ms step_avg:97.43ms +step:439/1670 train_time:42772ms step_avg:97.43ms +step:440/1670 train_time:42868ms step_avg:97.43ms +step:441/1670 train_time:42963ms step_avg:97.42ms +step:442/1670 train_time:43058ms step_avg:97.42ms +step:443/1670 train_time:43152ms step_avg:97.41ms +step:444/1670 train_time:43249ms step_avg:97.41ms +step:445/1670 train_time:43344ms step_avg:97.40ms +step:446/1670 train_time:43439ms step_avg:97.40ms +step:447/1670 train_time:43536ms step_avg:97.40ms +step:448/1670 train_time:43633ms step_avg:97.40ms +step:449/1670 train_time:43729ms step_avg:97.39ms +step:450/1670 train_time:43825ms step_avg:97.39ms +step:451/1670 train_time:43920ms step_avg:97.38ms +step:452/1670 train_time:44015ms step_avg:97.38ms +step:453/1670 train_time:44110ms step_avg:97.37ms +step:454/1670 train_time:44205ms step_avg:97.37ms +step:455/1670 train_time:44300ms step_avg:97.36ms +step:456/1670 train_time:44396ms step_avg:97.36ms +step:457/1670 train_time:44492ms step_avg:97.36ms +step:458/1670 train_time:44589ms step_avg:97.36ms +step:459/1670 train_time:44685ms step_avg:97.35ms +step:460/1670 train_time:44781ms step_avg:97.35ms +step:461/1670 train_time:44876ms step_avg:97.35ms +step:462/1670 train_time:44972ms step_avg:97.34ms +step:463/1670 train_time:45067ms step_avg:97.34ms +step:464/1670 train_time:45163ms step_avg:97.33ms +step:465/1670 train_time:45257ms step_avg:97.33ms +step:466/1670 train_time:45353ms step_avg:97.32ms +step:467/1670 train_time:45450ms step_avg:97.32ms +step:468/1670 train_time:45547ms step_avg:97.32ms +step:469/1670 train_time:45643ms step_avg:97.32ms +step:470/1670 train_time:45738ms step_avg:97.31ms +step:471/1670 train_time:45834ms step_avg:97.31ms +step:472/1670 train_time:45929ms step_avg:97.31ms +step:473/1670 train_time:46025ms step_avg:97.30ms +step:474/1670 train_time:46120ms step_avg:97.30ms +step:475/1670 train_time:46215ms step_avg:97.30ms +step:476/1670 train_time:46310ms step_avg:97.29ms +step:477/1670 train_time:46406ms step_avg:97.29ms +step:478/1670 train_time:46501ms step_avg:97.28ms +step:479/1670 train_time:46597ms step_avg:97.28ms +step:480/1670 train_time:46693ms step_avg:97.28ms +step:481/1670 train_time:46790ms step_avg:97.28ms +step:482/1670 train_time:46886ms step_avg:97.27ms +step:483/1670 train_time:46981ms step_avg:97.27ms +step:484/1670 train_time:47077ms step_avg:97.27ms +step:485/1670 train_time:47173ms step_avg:97.26ms +step:486/1670 train_time:47268ms step_avg:97.26ms +step:487/1670 train_time:47364ms step_avg:97.26ms +step:488/1670 train_time:47459ms step_avg:97.25ms +step:489/1670 train_time:47555ms step_avg:97.25ms +step:490/1670 train_time:47652ms step_avg:97.25ms +step:491/1670 train_time:47748ms step_avg:97.25ms +step:492/1670 train_time:47844ms step_avg:97.24ms +step:493/1670 train_time:47940ms step_avg:97.24ms +step:494/1670 
train_time:48035ms step_avg:97.24ms +step:495/1670 train_time:48131ms step_avg:97.24ms +step:496/1670 train_time:48227ms step_avg:97.23ms +step:497/1670 train_time:48323ms step_avg:97.23ms +step:498/1670 train_time:48419ms step_avg:97.23ms +step:499/1670 train_time:48514ms step_avg:97.22ms +step:500/1670 train_time:48609ms step_avg:97.22ms +step:500/1670 val_loss:3.7198 train_time:48705ms step_avg:97.41ms +step:501/1670 train_time:48726ms step_avg:97.26ms +step:502/1670 train_time:48808ms step_avg:97.23ms +step:503/1670 train_time:48906ms step_avg:97.23ms +step:504/1670 train_time:49002ms step_avg:97.23ms +step:505/1670 train_time:49098ms step_avg:97.22ms +step:506/1670 train_time:49193ms step_avg:97.22ms +step:507/1670 train_time:49288ms step_avg:97.21ms +step:508/1670 train_time:49383ms step_avg:97.21ms +step:509/1670 train_time:49478ms step_avg:97.21ms +step:510/1670 train_time:49573ms step_avg:97.20ms +step:511/1670 train_time:49669ms step_avg:97.20ms +step:512/1670 train_time:49766ms step_avg:97.20ms +step:513/1670 train_time:49863ms step_avg:97.20ms +step:514/1670 train_time:49960ms step_avg:97.20ms +step:515/1670 train_time:50056ms step_avg:97.20ms +step:516/1670 train_time:50152ms step_avg:97.19ms +step:517/1670 train_time:50247ms step_avg:97.19ms +step:518/1670 train_time:50342ms step_avg:97.19ms +step:519/1670 train_time:50438ms step_avg:97.18ms +step:520/1670 train_time:50534ms step_avg:97.18ms +step:521/1670 train_time:50630ms step_avg:97.18ms +step:522/1670 train_time:50726ms step_avg:97.18ms +step:523/1670 train_time:50823ms step_avg:97.18ms +step:524/1670 train_time:50919ms step_avg:97.17ms +step:525/1670 train_time:51015ms step_avg:97.17ms +step:526/1670 train_time:51112ms step_avg:97.17ms +step:527/1670 train_time:51206ms step_avg:97.17ms +step:528/1670 train_time:51301ms step_avg:97.16ms +step:529/1670 train_time:51397ms step_avg:97.16ms +step:530/1670 train_time:51493ms step_avg:97.16ms +step:531/1670 train_time:51589ms step_avg:97.15ms +step:532/1670 train_time:51684ms step_avg:97.15ms +step:533/1670 train_time:51781ms step_avg:97.15ms +step:534/1670 train_time:51877ms step_avg:97.15ms +step:535/1670 train_time:51973ms step_avg:97.15ms +step:536/1670 train_time:52069ms step_avg:97.14ms +step:537/1670 train_time:52165ms step_avg:97.14ms +step:538/1670 train_time:52260ms step_avg:97.14ms +step:539/1670 train_time:52355ms step_avg:97.13ms +step:540/1670 train_time:52450ms step_avg:97.13ms +step:541/1670 train_time:52546ms step_avg:97.13ms +step:542/1670 train_time:52641ms step_avg:97.12ms +step:543/1670 train_time:52737ms step_avg:97.12ms +step:544/1670 train_time:52833ms step_avg:97.12ms +step:545/1670 train_time:52930ms step_avg:97.12ms +step:546/1670 train_time:53025ms step_avg:97.12ms +step:547/1670 train_time:53121ms step_avg:97.11ms +step:548/1670 train_time:53218ms step_avg:97.11ms +step:549/1670 train_time:53313ms step_avg:97.11ms +step:550/1670 train_time:53408ms step_avg:97.11ms +step:551/1670 train_time:53504ms step_avg:97.10ms +step:552/1670 train_time:53600ms step_avg:97.10ms +step:553/1670 train_time:53697ms step_avg:97.10ms +step:554/1670 train_time:53793ms step_avg:97.10ms +step:555/1670 train_time:53888ms step_avg:97.10ms +step:556/1670 train_time:53984ms step_avg:97.09ms +step:557/1670 train_time:54080ms step_avg:97.09ms +step:558/1670 train_time:54176ms step_avg:97.09ms +step:559/1670 train_time:54273ms step_avg:97.09ms +step:560/1670 train_time:54370ms step_avg:97.09ms +step:561/1670 train_time:54466ms step_avg:97.09ms +step:562/1670 train_time:54563ms 
step_avg:97.09ms +step:563/1670 train_time:54660ms step_avg:97.09ms +step:564/1670 train_time:54757ms step_avg:97.09ms +step:565/1670 train_time:54856ms step_avg:97.09ms +step:566/1670 train_time:54953ms step_avg:97.09ms +step:567/1670 train_time:55050ms step_avg:97.09ms +step:568/1670 train_time:55146ms step_avg:97.09ms +step:569/1670 train_time:55243ms step_avg:97.09ms +step:570/1670 train_time:55340ms step_avg:97.09ms +step:571/1670 train_time:55438ms step_avg:97.09ms +step:572/1670 train_time:55535ms step_avg:97.09ms +step:573/1670 train_time:55632ms step_avg:97.09ms +step:574/1670 train_time:55729ms step_avg:97.09ms +step:575/1670 train_time:55827ms step_avg:97.09ms +step:576/1670 train_time:55926ms step_avg:97.09ms +step:577/1670 train_time:56024ms step_avg:97.10ms +step:578/1670 train_time:56121ms step_avg:97.09ms +step:579/1670 train_time:56217ms step_avg:97.09ms +step:580/1670 train_time:56314ms step_avg:97.09ms +step:581/1670 train_time:56411ms step_avg:97.09ms +step:582/1670 train_time:56507ms step_avg:97.09ms +step:583/1670 train_time:56604ms step_avg:97.09ms +step:584/1670 train_time:56702ms step_avg:97.09ms +step:585/1670 train_time:56801ms step_avg:97.10ms +step:586/1670 train_time:56898ms step_avg:97.10ms +step:587/1670 train_time:56996ms step_avg:97.10ms +step:588/1670 train_time:57092ms step_avg:97.10ms +step:589/1670 train_time:57188ms step_avg:97.09ms +step:590/1670 train_time:57285ms step_avg:97.09ms +step:591/1670 train_time:57383ms step_avg:97.09ms +step:592/1670 train_time:57479ms step_avg:97.09ms +step:593/1670 train_time:57578ms step_avg:97.10ms +step:594/1670 train_time:57676ms step_avg:97.10ms +step:595/1670 train_time:57773ms step_avg:97.10ms +step:596/1670 train_time:57870ms step_avg:97.10ms +step:597/1670 train_time:57966ms step_avg:97.10ms +step:598/1670 train_time:58064ms step_avg:97.10ms +step:599/1670 train_time:58161ms step_avg:97.10ms +step:600/1670 train_time:58258ms step_avg:97.10ms +step:601/1670 train_time:58356ms step_avg:97.10ms +step:602/1670 train_time:58452ms step_avg:97.10ms +step:603/1670 train_time:58549ms step_avg:97.10ms +step:604/1670 train_time:58646ms step_avg:97.10ms +step:605/1670 train_time:58744ms step_avg:97.10ms +step:606/1670 train_time:58841ms step_avg:97.10ms +step:607/1670 train_time:58940ms step_avg:97.10ms +step:608/1670 train_time:59038ms step_avg:97.10ms +step:609/1670 train_time:59135ms step_avg:97.10ms +step:610/1670 train_time:59231ms step_avg:97.10ms +step:611/1670 train_time:59328ms step_avg:97.10ms +step:612/1670 train_time:59426ms step_avg:97.10ms +step:613/1670 train_time:59523ms step_avg:97.10ms +step:614/1670 train_time:59621ms step_avg:97.10ms +step:615/1670 train_time:59718ms step_avg:97.10ms +step:616/1670 train_time:59816ms step_avg:97.10ms +step:617/1670 train_time:59914ms step_avg:97.10ms +step:618/1670 train_time:60010ms step_avg:97.10ms +step:619/1670 train_time:60107ms step_avg:97.10ms +step:620/1670 train_time:60205ms step_avg:97.10ms +step:621/1670 train_time:60302ms step_avg:97.10ms +step:622/1670 train_time:60401ms step_avg:97.11ms +step:623/1670 train_time:60498ms step_avg:97.11ms +step:624/1670 train_time:60595ms step_avg:97.11ms +step:625/1670 train_time:60692ms step_avg:97.11ms +step:625/1670 val_loss:3.6177 train_time:60788ms step_avg:97.26ms +step:626/1670 train_time:60810ms step_avg:97.14ms +step:627/1670 train_time:60894ms step_avg:97.12ms +step:628/1670 train_time:60990ms step_avg:97.12ms +step:629/1670 train_time:61086ms step_avg:97.12ms +step:630/1670 train_time:61182ms step_avg:97.12ms 
+step:631/1670 train_time:61279ms step_avg:97.11ms +step:632/1670 train_time:61375ms step_avg:97.11ms +step:633/1670 train_time:61471ms step_avg:97.11ms +step:634/1670 train_time:61567ms step_avg:97.11ms +step:635/1670 train_time:61663ms step_avg:97.11ms +step:636/1670 train_time:61765ms step_avg:97.11ms +step:637/1670 train_time:61866ms step_avg:97.12ms +step:638/1670 train_time:61963ms step_avg:97.12ms +step:639/1670 train_time:62351ms step_avg:97.58ms +step:640/1670 train_time:62426ms step_avg:97.54ms +step:641/1670 train_time:62522ms step_avg:97.54ms +step:642/1670 train_time:62618ms step_avg:97.54ms +step:643/1670 train_time:62714ms step_avg:97.53ms +step:644/1670 train_time:62810ms step_avg:97.53ms +step:645/1670 train_time:62906ms step_avg:97.53ms +step:646/1670 train_time:63001ms step_avg:97.53ms +step:647/1670 train_time:63098ms step_avg:97.52ms +step:648/1670 train_time:63195ms step_avg:97.52ms +step:649/1670 train_time:63299ms step_avg:97.53ms +step:650/1670 train_time:63401ms step_avg:97.54ms +step:651/1670 train_time:63500ms step_avg:97.54ms +step:652/1670 train_time:63597ms step_avg:97.54ms +step:653/1670 train_time:63694ms step_avg:97.54ms +step:654/1670 train_time:63790ms step_avg:97.54ms +step:655/1670 train_time:63885ms step_avg:97.53ms +step:656/1670 train_time:63981ms step_avg:97.53ms +step:657/1670 train_time:64078ms step_avg:97.53ms +step:658/1670 train_time:64174ms step_avg:97.53ms +step:659/1670 train_time:64272ms step_avg:97.53ms +step:660/1670 train_time:64372ms step_avg:97.53ms +step:661/1670 train_time:64469ms step_avg:97.53ms +step:662/1670 train_time:64566ms step_avg:97.53ms +step:663/1670 train_time:64664ms step_avg:97.53ms +step:664/1670 train_time:64762ms step_avg:97.53ms +step:665/1670 train_time:64859ms step_avg:97.53ms +step:666/1670 train_time:64955ms step_avg:97.53ms +step:667/1670 train_time:65051ms step_avg:97.53ms +step:668/1670 train_time:65147ms step_avg:97.53ms +step:669/1670 train_time:65244ms step_avg:97.53ms +step:670/1670 train_time:65342ms step_avg:97.53ms +step:671/1670 train_time:65441ms step_avg:97.53ms +step:672/1670 train_time:65540ms step_avg:97.53ms +step:673/1670 train_time:65639ms step_avg:97.53ms +step:674/1670 train_time:65737ms step_avg:97.53ms +step:675/1670 train_time:65835ms step_avg:97.53ms +step:676/1670 train_time:65931ms step_avg:97.53ms +step:677/1670 train_time:66027ms step_avg:97.53ms +step:678/1670 train_time:66124ms step_avg:97.53ms +step:679/1670 train_time:66221ms step_avg:97.53ms +step:680/1670 train_time:66318ms step_avg:97.53ms +step:681/1670 train_time:66416ms step_avg:97.53ms +step:682/1670 train_time:66513ms step_avg:97.53ms +step:683/1670 train_time:66610ms step_avg:97.53ms +step:684/1670 train_time:66707ms step_avg:97.52ms +step:685/1670 train_time:66805ms step_avg:97.53ms +step:686/1670 train_time:66902ms step_avg:97.52ms +step:687/1670 train_time:66999ms step_avg:97.52ms +step:688/1670 train_time:67097ms step_avg:97.52ms +step:689/1670 train_time:67194ms step_avg:97.52ms +step:690/1670 train_time:67290ms step_avg:97.52ms +step:691/1670 train_time:67387ms step_avg:97.52ms +step:692/1670 train_time:67485ms step_avg:97.52ms +step:693/1670 train_time:67583ms step_avg:97.52ms +step:694/1670 train_time:67681ms step_avg:97.52ms +step:695/1670 train_time:67779ms step_avg:97.52ms +step:696/1670 train_time:67877ms step_avg:97.52ms +step:697/1670 train_time:67974ms step_avg:97.52ms +step:698/1670 train_time:68071ms step_avg:97.52ms +step:699/1670 train_time:68167ms step_avg:97.52ms +step:700/1670 train_time:68264ms 
step_avg:97.52ms +step:701/1670 train_time:68361ms step_avg:97.52ms +step:702/1670 train_time:68459ms step_avg:97.52ms +step:703/1670 train_time:68557ms step_avg:97.52ms +step:704/1670 train_time:68654ms step_avg:97.52ms +step:705/1670 train_time:68750ms step_avg:97.52ms +step:706/1670 train_time:68846ms step_avg:97.52ms +step:707/1670 train_time:68944ms step_avg:97.52ms +step:708/1670 train_time:69042ms step_avg:97.52ms +step:709/1670 train_time:69141ms step_avg:97.52ms +step:710/1670 train_time:69238ms step_avg:97.52ms +step:711/1670 train_time:69336ms step_avg:97.52ms +step:712/1670 train_time:69434ms step_avg:97.52ms +step:713/1670 train_time:69531ms step_avg:97.52ms +step:714/1670 train_time:69628ms step_avg:97.52ms +step:715/1670 train_time:69725ms step_avg:97.52ms +step:716/1670 train_time:69821ms step_avg:97.52ms +step:717/1670 train_time:69918ms step_avg:97.52ms +step:718/1670 train_time:70016ms step_avg:97.51ms +step:719/1670 train_time:70112ms step_avg:97.51ms +step:720/1670 train_time:70208ms step_avg:97.51ms +step:721/1670 train_time:70305ms step_avg:97.51ms +step:722/1670 train_time:70402ms step_avg:97.51ms +step:723/1670 train_time:70500ms step_avg:97.51ms +step:724/1670 train_time:70598ms step_avg:97.51ms +step:725/1670 train_time:70697ms step_avg:97.51ms +step:726/1670 train_time:70794ms step_avg:97.51ms +step:727/1670 train_time:70891ms step_avg:97.51ms +step:728/1670 train_time:70987ms step_avg:97.51ms +step:729/1670 train_time:71085ms step_avg:97.51ms +step:730/1670 train_time:71181ms step_avg:97.51ms +step:731/1670 train_time:71279ms step_avg:97.51ms +step:732/1670 train_time:71376ms step_avg:97.51ms +step:733/1670 train_time:71474ms step_avg:97.51ms +step:734/1670 train_time:71572ms step_avg:97.51ms +step:735/1670 train_time:71669ms step_avg:97.51ms +step:736/1670 train_time:71765ms step_avg:97.51ms +step:737/1670 train_time:71863ms step_avg:97.51ms +step:738/1670 train_time:71960ms step_avg:97.51ms +step:739/1670 train_time:72058ms step_avg:97.51ms +step:740/1670 train_time:72155ms step_avg:97.51ms +step:741/1670 train_time:72251ms step_avg:97.50ms +step:742/1670 train_time:72347ms step_avg:97.50ms +step:743/1670 train_time:72444ms step_avg:97.50ms +step:744/1670 train_time:72542ms step_avg:97.50ms +step:745/1670 train_time:72641ms step_avg:97.50ms +step:746/1670 train_time:72740ms step_avg:97.51ms +step:747/1670 train_time:72838ms step_avg:97.51ms +step:748/1670 train_time:72935ms step_avg:97.51ms +step:749/1670 train_time:73033ms step_avg:97.51ms +step:750/1670 train_time:73129ms step_avg:97.50ms +step:750/1670 val_loss:3.5642 train_time:73224ms step_avg:97.63ms +step:751/1670 train_time:73245ms step_avg:97.53ms +step:752/1670 train_time:73328ms step_avg:97.51ms +step:753/1670 train_time:73427ms step_avg:97.51ms +step:754/1670 train_time:73524ms step_avg:97.51ms +step:755/1670 train_time:73620ms step_avg:97.51ms +step:756/1670 train_time:73717ms step_avg:97.51ms +step:757/1670 train_time:73813ms step_avg:97.51ms +step:758/1670 train_time:73910ms step_avg:97.51ms +step:759/1670 train_time:74005ms step_avg:97.50ms +step:760/1670 train_time:74102ms step_avg:97.50ms +step:761/1670 train_time:74200ms step_avg:97.50ms +step:762/1670 train_time:74301ms step_avg:97.51ms +step:763/1670 train_time:74402ms step_avg:97.51ms +step:764/1670 train_time:74501ms step_avg:97.51ms +step:765/1670 train_time:74598ms step_avg:97.51ms +step:766/1670 train_time:74696ms step_avg:97.51ms +step:767/1670 train_time:74792ms step_avg:97.51ms +step:768/1670 train_time:74889ms step_avg:97.51ms 
+step:769/1670 train_time:74986ms step_avg:97.51ms +step:770/1670 train_time:75083ms step_avg:97.51ms +step:771/1670 train_time:75180ms step_avg:97.51ms +step:772/1670 train_time:75280ms step_avg:97.51ms +step:773/1670 train_time:75380ms step_avg:97.52ms +step:774/1670 train_time:75479ms step_avg:97.52ms +step:775/1670 train_time:75577ms step_avg:97.52ms +step:776/1670 train_time:75674ms step_avg:97.52ms +step:777/1670 train_time:75770ms step_avg:97.52ms +step:778/1670 train_time:75866ms step_avg:97.51ms +step:779/1670 train_time:75963ms step_avg:97.51ms +step:780/1670 train_time:76060ms step_avg:97.51ms +step:781/1670 train_time:76156ms step_avg:97.51ms +step:782/1670 train_time:76254ms step_avg:97.51ms +step:783/1670 train_time:76353ms step_avg:97.51ms +step:784/1670 train_time:76452ms step_avg:97.51ms +step:785/1670 train_time:76549ms step_avg:97.51ms +step:786/1670 train_time:76645ms step_avg:97.51ms +step:787/1670 train_time:76743ms step_avg:97.51ms +step:788/1670 train_time:76840ms step_avg:97.51ms +step:789/1670 train_time:76937ms step_avg:97.51ms +step:790/1670 train_time:77034ms step_avg:97.51ms +step:791/1670 train_time:77131ms step_avg:97.51ms +step:792/1670 train_time:77227ms step_avg:97.51ms +step:793/1670 train_time:77324ms step_avg:97.51ms +step:794/1670 train_time:77422ms step_avg:97.51ms +step:795/1670 train_time:77521ms step_avg:97.51ms +step:796/1670 train_time:77619ms step_avg:97.51ms +step:797/1670 train_time:77717ms step_avg:97.51ms +step:798/1670 train_time:77816ms step_avg:97.51ms +step:799/1670 train_time:77913ms step_avg:97.51ms +step:800/1670 train_time:78010ms step_avg:97.51ms +step:801/1670 train_time:78106ms step_avg:97.51ms +step:802/1670 train_time:78203ms step_avg:97.51ms +step:803/1670 train_time:78300ms step_avg:97.51ms +step:804/1670 train_time:78398ms step_avg:97.51ms +step:805/1670 train_time:78497ms step_avg:97.51ms +step:806/1670 train_time:78595ms step_avg:97.51ms +step:807/1670 train_time:78693ms step_avg:97.51ms +step:808/1670 train_time:78790ms step_avg:97.51ms +step:809/1670 train_time:78886ms step_avg:97.51ms +step:810/1670 train_time:78983ms step_avg:97.51ms +step:811/1670 train_time:79080ms step_avg:97.51ms +step:812/1670 train_time:79177ms step_avg:97.51ms +step:813/1670 train_time:79274ms step_avg:97.51ms +step:814/1670 train_time:79372ms step_avg:97.51ms +step:815/1670 train_time:79469ms step_avg:97.51ms +step:816/1670 train_time:79566ms step_avg:97.51ms +step:817/1670 train_time:79663ms step_avg:97.51ms +step:818/1670 train_time:79762ms step_avg:97.51ms +step:819/1670 train_time:79860ms step_avg:97.51ms +step:820/1670 train_time:79958ms step_avg:97.51ms +step:821/1670 train_time:80055ms step_avg:97.51ms +step:822/1670 train_time:80151ms step_avg:97.51ms +step:823/1670 train_time:80248ms step_avg:97.51ms +step:824/1670 train_time:80345ms step_avg:97.51ms +step:825/1670 train_time:80443ms step_avg:97.51ms +step:826/1670 train_time:80540ms step_avg:97.51ms +step:827/1670 train_time:80637ms step_avg:97.51ms +step:828/1670 train_time:80735ms step_avg:97.51ms +step:829/1670 train_time:80832ms step_avg:97.51ms +step:830/1670 train_time:80929ms step_avg:97.50ms +step:831/1670 train_time:81025ms step_avg:97.50ms +step:832/1670 train_time:81122ms step_avg:97.50ms +step:833/1670 train_time:81219ms step_avg:97.50ms +step:834/1670 train_time:81318ms step_avg:97.50ms +step:835/1670 train_time:81416ms step_avg:97.50ms +step:836/1670 train_time:81513ms step_avg:97.50ms +step:837/1670 train_time:81612ms step_avg:97.51ms +step:838/1670 train_time:81710ms 
step_avg:97.51ms +step:839/1670 train_time:81807ms step_avg:97.51ms +step:840/1670 train_time:81903ms step_avg:97.50ms +step:841/1670 train_time:82001ms step_avg:97.50ms +step:842/1670 train_time:82098ms step_avg:97.50ms +step:843/1670 train_time:82196ms step_avg:97.50ms +step:844/1670 train_time:82295ms step_avg:97.51ms +step:845/1670 train_time:82393ms step_avg:97.51ms +step:846/1670 train_time:82490ms step_avg:97.51ms +step:847/1670 train_time:82587ms step_avg:97.51ms +step:848/1670 train_time:82684ms step_avg:97.50ms +step:849/1670 train_time:82781ms step_avg:97.50ms +step:850/1670 train_time:82879ms step_avg:97.50ms +step:851/1670 train_time:83243ms step_avg:97.82ms +step:852/1670 train_time:83317ms step_avg:97.79ms +step:853/1670 train_time:83412ms step_avg:97.79ms +step:854/1670 train_time:83508ms step_avg:97.78ms +step:855/1670 train_time:83603ms step_avg:97.78ms +step:856/1670 train_time:83700ms step_avg:97.78ms +step:857/1670 train_time:83797ms step_avg:97.78ms +step:858/1670 train_time:83893ms step_avg:97.78ms +step:859/1670 train_time:83990ms step_avg:97.78ms +step:860/1670 train_time:84086ms step_avg:97.77ms +step:861/1670 train_time:84186ms step_avg:97.78ms +step:862/1670 train_time:84286ms step_avg:97.78ms +step:863/1670 train_time:84384ms step_avg:97.78ms +step:864/1670 train_time:84481ms step_avg:97.78ms +step:865/1670 train_time:84579ms step_avg:97.78ms +step:866/1670 train_time:84676ms step_avg:97.78ms +step:867/1670 train_time:84773ms step_avg:97.78ms +step:868/1670 train_time:84869ms step_avg:97.78ms +step:869/1670 train_time:84964ms step_avg:97.77ms +step:870/1670 train_time:85061ms step_avg:97.77ms +step:871/1670 train_time:85159ms step_avg:97.77ms +step:872/1670 train_time:85259ms step_avg:97.77ms +step:873/1670 train_time:85358ms step_avg:97.78ms +step:874/1670 train_time:85457ms step_avg:97.78ms +step:875/1670 train_time:85555ms step_avg:97.78ms +step:875/1670 val_loss:3.5231 train_time:85652ms step_avg:97.89ms +step:876/1670 train_time:85674ms step_avg:97.80ms +step:877/1670 train_time:85756ms step_avg:97.78ms +step:878/1670 train_time:85857ms step_avg:97.79ms +step:879/1670 train_time:85955ms step_avg:97.79ms +step:880/1670 train_time:86051ms step_avg:97.79ms +step:881/1670 train_time:86149ms step_avg:97.79ms +step:882/1670 train_time:86244ms step_avg:97.78ms +step:883/1670 train_time:86340ms step_avg:97.78ms +step:884/1670 train_time:86436ms step_avg:97.78ms +step:885/1670 train_time:86533ms step_avg:97.78ms +step:886/1670 train_time:86631ms step_avg:97.78ms +step:887/1670 train_time:86730ms step_avg:97.78ms +step:888/1670 train_time:86829ms step_avg:97.78ms +step:889/1670 train_time:86927ms step_avg:97.78ms +step:890/1670 train_time:87023ms step_avg:97.78ms +step:891/1670 train_time:87120ms step_avg:97.78ms +step:892/1670 train_time:87217ms step_avg:97.78ms +step:893/1670 train_time:87314ms step_avg:97.78ms +step:894/1670 train_time:87410ms step_avg:97.77ms +step:895/1670 train_time:87507ms step_avg:97.77ms +step:896/1670 train_time:87604ms step_avg:97.77ms +step:897/1670 train_time:87701ms step_avg:97.77ms +step:898/1670 train_time:87799ms step_avg:97.77ms +step:899/1670 train_time:87897ms step_avg:97.77ms +step:900/1670 train_time:87997ms step_avg:97.77ms +step:901/1670 train_time:88095ms step_avg:97.77ms +step:902/1670 train_time:88192ms step_avg:97.77ms +step:903/1670 train_time:88289ms step_avg:97.77ms +step:904/1670 train_time:88385ms step_avg:97.77ms +step:905/1670 train_time:88481ms step_avg:97.77ms +step:906/1670 train_time:88578ms step_avg:97.77ms 
+step:907/1670 train_time:88676ms step_avg:97.77ms +step:908/1670 train_time:88775ms step_avg:97.77ms +step:909/1670 train_time:88873ms step_avg:97.77ms +step:910/1670 train_time:88971ms step_avg:97.77ms +step:911/1670 train_time:89068ms step_avg:97.77ms +step:912/1670 train_time:89164ms step_avg:97.77ms +step:913/1670 train_time:89262ms step_avg:97.77ms +step:914/1670 train_time:89359ms step_avg:97.77ms +step:915/1670 train_time:89456ms step_avg:97.77ms +step:916/1670 train_time:89553ms step_avg:97.77ms +step:917/1670 train_time:89650ms step_avg:97.76ms +step:918/1670 train_time:89747ms step_avg:97.76ms +step:919/1670 train_time:89844ms step_avg:97.76ms +step:920/1670 train_time:89942ms step_avg:97.76ms +step:921/1670 train_time:90040ms step_avg:97.76ms +step:922/1670 train_time:90138ms step_avg:97.76ms +step:923/1670 train_time:90236ms step_avg:97.76ms +step:924/1670 train_time:90334ms step_avg:97.76ms +step:925/1670 train_time:90431ms step_avg:97.76ms +step:926/1670 train_time:90528ms step_avg:97.76ms +step:927/1670 train_time:90624ms step_avg:97.76ms +step:928/1670 train_time:90721ms step_avg:97.76ms +step:929/1670 train_time:90819ms step_avg:97.76ms +step:930/1670 train_time:90917ms step_avg:97.76ms +step:931/1670 train_time:91015ms step_avg:97.76ms +step:932/1670 train_time:91114ms step_avg:97.76ms +step:933/1670 train_time:91212ms step_avg:97.76ms +step:934/1670 train_time:91310ms step_avg:97.76ms +step:935/1670 train_time:91406ms step_avg:97.76ms +step:936/1670 train_time:91502ms step_avg:97.76ms +step:937/1670 train_time:91599ms step_avg:97.76ms +step:938/1670 train_time:91696ms step_avg:97.76ms +step:939/1670 train_time:91794ms step_avg:97.76ms +step:940/1670 train_time:91892ms step_avg:97.76ms +step:941/1670 train_time:91989ms step_avg:97.76ms +step:942/1670 train_time:92086ms step_avg:97.76ms +step:943/1670 train_time:92183ms step_avg:97.76ms +step:944/1670 train_time:92280ms step_avg:97.75ms +step:945/1670 train_time:92379ms step_avg:97.76ms +step:946/1670 train_time:92476ms step_avg:97.76ms +step:947/1670 train_time:92574ms step_avg:97.75ms +step:948/1670 train_time:92671ms step_avg:97.75ms +step:949/1670 train_time:92769ms step_avg:97.75ms +step:950/1670 train_time:92867ms step_avg:97.75ms +step:951/1670 train_time:92963ms step_avg:97.75ms +step:952/1670 train_time:93061ms step_avg:97.75ms +step:953/1670 train_time:93159ms step_avg:97.75ms +step:954/1670 train_time:93257ms step_avg:97.75ms +step:955/1670 train_time:93355ms step_avg:97.75ms +step:956/1670 train_time:93451ms step_avg:97.75ms +step:957/1670 train_time:93548ms step_avg:97.75ms +step:958/1670 train_time:93644ms step_avg:97.75ms +step:959/1670 train_time:93742ms step_avg:97.75ms +step:960/1670 train_time:93840ms step_avg:97.75ms +step:961/1670 train_time:93937ms step_avg:97.75ms +step:962/1670 train_time:94035ms step_avg:97.75ms +step:963/1670 train_time:94133ms step_avg:97.75ms +step:964/1670 train_time:94232ms step_avg:97.75ms +step:965/1670 train_time:94328ms step_avg:97.75ms +step:966/1670 train_time:94425ms step_avg:97.75ms +step:967/1670 train_time:94521ms step_avg:97.75ms +step:968/1670 train_time:94619ms step_avg:97.75ms +step:969/1670 train_time:94716ms step_avg:97.75ms +step:970/1670 train_time:94815ms step_avg:97.75ms +step:971/1670 train_time:94913ms step_avg:97.75ms +step:972/1670 train_time:95011ms step_avg:97.75ms +step:973/1670 train_time:95108ms step_avg:97.75ms +step:974/1670 train_time:95206ms step_avg:97.75ms +step:975/1670 train_time:95303ms step_avg:97.75ms +step:976/1670 train_time:95400ms 
step_avg:97.75ms +step:977/1670 train_time:95497ms step_avg:97.74ms +step:978/1670 train_time:95594ms step_avg:97.74ms +step:979/1670 train_time:95691ms step_avg:97.74ms +step:980/1670 train_time:95789ms step_avg:97.74ms +step:981/1670 train_time:95886ms step_avg:97.74ms +step:982/1670 train_time:95982ms step_avg:97.74ms +step:983/1670 train_time:96080ms step_avg:97.74ms +step:984/1670 train_time:96178ms step_avg:97.74ms +step:985/1670 train_time:96277ms step_avg:97.74ms +step:986/1670 train_time:96375ms step_avg:97.74ms +step:987/1670 train_time:96472ms step_avg:97.74ms +step:988/1670 train_time:96569ms step_avg:97.74ms +step:989/1670 train_time:96666ms step_avg:97.74ms +step:990/1670 train_time:96763ms step_avg:97.74ms +step:991/1670 train_time:96861ms step_avg:97.74ms +step:992/1670 train_time:96959ms step_avg:97.74ms +step:993/1670 train_time:97056ms step_avg:97.74ms +step:994/1670 train_time:97154ms step_avg:97.74ms +step:995/1670 train_time:97252ms step_avg:97.74ms +step:996/1670 train_time:97349ms step_avg:97.74ms +step:997/1670 train_time:97445ms step_avg:97.74ms +step:998/1670 train_time:97542ms step_avg:97.74ms +step:999/1670 train_time:97639ms step_avg:97.74ms +step:1000/1670 train_time:97737ms step_avg:97.74ms +step:1000/1670 val_loss:3.4783 train_time:97834ms step_avg:97.83ms +step:1001/1670 train_time:97856ms step_avg:97.76ms +step:1002/1670 train_time:97940ms step_avg:97.74ms +step:1003/1670 train_time:98039ms step_avg:97.75ms +step:1004/1670 train_time:98135ms step_avg:97.74ms +step:1005/1670 train_time:98231ms step_avg:97.74ms +step:1006/1670 train_time:98328ms step_avg:97.74ms +step:1007/1670 train_time:98424ms step_avg:97.74ms +step:1008/1670 train_time:98520ms step_avg:97.74ms +step:1009/1670 train_time:98617ms step_avg:97.74ms +step:1010/1670 train_time:98713ms step_avg:97.74ms +step:1011/1670 train_time:98812ms step_avg:97.74ms +step:1012/1670 train_time:98910ms step_avg:97.74ms +step:1013/1670 train_time:99010ms step_avg:97.74ms +step:1014/1670 train_time:99109ms step_avg:97.74ms +step:1015/1670 train_time:99207ms step_avg:97.74ms +step:1016/1670 train_time:99304ms step_avg:97.74ms +step:1017/1670 train_time:99401ms step_avg:97.74ms +step:1018/1670 train_time:99497ms step_avg:97.74ms +step:1019/1670 train_time:99593ms step_avg:97.74ms +step:1020/1670 train_time:99690ms step_avg:97.74ms +step:1021/1670 train_time:99788ms step_avg:97.74ms +step:1022/1670 train_time:99886ms step_avg:97.74ms +step:1023/1670 train_time:99986ms step_avg:97.74ms +step:1024/1670 train_time:100084ms step_avg:97.74ms +step:1025/1670 train_time:100182ms step_avg:97.74ms +step:1026/1670 train_time:100280ms step_avg:97.74ms +step:1027/1670 train_time:100376ms step_avg:97.74ms +step:1028/1670 train_time:100472ms step_avg:97.74ms +step:1029/1670 train_time:100569ms step_avg:97.73ms +step:1030/1670 train_time:100666ms step_avg:97.73ms +step:1031/1670 train_time:100763ms step_avg:97.73ms +step:1032/1670 train_time:100861ms step_avg:97.73ms +step:1033/1670 train_time:100959ms step_avg:97.73ms +step:1034/1670 train_time:101058ms step_avg:97.73ms +step:1035/1670 train_time:101155ms step_avg:97.73ms +step:1036/1670 train_time:101252ms step_avg:97.73ms +step:1037/1670 train_time:101350ms step_avg:97.73ms +step:1038/1670 train_time:101447ms step_avg:97.73ms +step:1039/1670 train_time:101545ms step_avg:97.73ms +step:1040/1670 train_time:101642ms step_avg:97.73ms +step:1041/1670 train_time:101739ms step_avg:97.73ms +step:1042/1670 train_time:101835ms step_avg:97.73ms +step:1043/1670 train_time:101933ms 
step_avg:97.73ms +step:1044/1670 train_time:102030ms step_avg:97.73ms +step:1045/1670 train_time:102129ms step_avg:97.73ms +step:1046/1670 train_time:102227ms step_avg:97.73ms +step:1047/1670 train_time:102325ms step_avg:97.73ms +step:1048/1670 train_time:102422ms step_avg:97.73ms +step:1049/1670 train_time:102520ms step_avg:97.73ms +step:1050/1670 train_time:102617ms step_avg:97.73ms +step:1051/1670 train_time:102714ms step_avg:97.73ms +step:1052/1670 train_time:102811ms step_avg:97.73ms +step:1053/1670 train_time:102908ms step_avg:97.73ms +step:1054/1670 train_time:103006ms step_avg:97.73ms +step:1055/1670 train_time:103105ms step_avg:97.73ms +step:1056/1670 train_time:103203ms step_avg:97.73ms +step:1057/1670 train_time:103301ms step_avg:97.73ms +step:1058/1670 train_time:103398ms step_avg:97.73ms +step:1059/1670 train_time:103494ms step_avg:97.73ms +step:1060/1670 train_time:103591ms step_avg:97.73ms +step:1061/1670 train_time:103688ms step_avg:97.73ms +step:1062/1670 train_time:103939ms step_avg:97.87ms +step:1063/1670 train_time:104148ms step_avg:97.98ms +step:1064/1670 train_time:104243ms step_avg:97.97ms +step:1065/1670 train_time:104339ms step_avg:97.97ms +step:1066/1670 train_time:104434ms step_avg:97.97ms +step:1067/1670 train_time:104530ms step_avg:97.97ms +step:1068/1670 train_time:104626ms step_avg:97.96ms +step:1069/1670 train_time:104723ms step_avg:97.96ms +step:1070/1670 train_time:104819ms step_avg:97.96ms +step:1071/1670 train_time:104915ms step_avg:97.96ms +step:1072/1670 train_time:105015ms step_avg:97.96ms +step:1073/1670 train_time:105115ms step_avg:97.96ms +step:1074/1670 train_time:105213ms step_avg:97.96ms +step:1075/1670 train_time:105311ms step_avg:97.96ms +step:1076/1670 train_time:105409ms step_avg:97.96ms +step:1077/1670 train_time:105506ms step_avg:97.96ms +step:1078/1670 train_time:105603ms step_avg:97.96ms +step:1079/1670 train_time:105700ms step_avg:97.96ms +step:1080/1670 train_time:105796ms step_avg:97.96ms +step:1081/1670 train_time:105892ms step_avg:97.96ms +step:1082/1670 train_time:105990ms step_avg:97.96ms +step:1083/1670 train_time:106089ms step_avg:97.96ms +step:1084/1670 train_time:106187ms step_avg:97.96ms +step:1085/1670 train_time:106286ms step_avg:97.96ms +step:1086/1670 train_time:106383ms step_avg:97.96ms +step:1087/1670 train_time:106482ms step_avg:97.96ms +step:1088/1670 train_time:106578ms step_avg:97.96ms +step:1089/1670 train_time:106674ms step_avg:97.96ms +step:1090/1670 train_time:106771ms step_avg:97.95ms +step:1091/1670 train_time:106868ms step_avg:97.95ms +step:1092/1670 train_time:106966ms step_avg:97.95ms +step:1093/1670 train_time:107065ms step_avg:97.96ms +step:1094/1670 train_time:107164ms step_avg:97.96ms +step:1095/1670 train_time:107263ms step_avg:97.96ms +step:1096/1670 train_time:107360ms step_avg:97.96ms +step:1097/1670 train_time:107457ms step_avg:97.96ms +step:1098/1670 train_time:107553ms step_avg:97.95ms +step:1099/1670 train_time:107649ms step_avg:97.95ms +step:1100/1670 train_time:107747ms step_avg:97.95ms +step:1101/1670 train_time:107845ms step_avg:97.95ms +step:1102/1670 train_time:107942ms step_avg:97.95ms +step:1103/1670 train_time:108040ms step_avg:97.95ms +step:1104/1670 train_time:108137ms step_avg:97.95ms +step:1105/1670 train_time:108234ms step_avg:97.95ms +step:1106/1670 train_time:108331ms step_avg:97.95ms +step:1107/1670 train_time:108429ms step_avg:97.95ms +step:1108/1670 train_time:108527ms step_avg:97.95ms +step:1109/1670 train_time:108625ms step_avg:97.95ms +step:1110/1670 train_time:108722ms 
step_avg:97.95ms +step:1111/1670 train_time:108819ms step_avg:97.95ms +step:1112/1670 train_time:108916ms step_avg:97.95ms +step:1113/1670 train_time:109013ms step_avg:97.95ms +step:1114/1670 train_time:109110ms step_avg:97.94ms +step:1115/1670 train_time:109209ms step_avg:97.95ms +step:1116/1670 train_time:109308ms step_avg:97.95ms +step:1117/1670 train_time:109406ms step_avg:97.95ms +step:1118/1670 train_time:109504ms step_avg:97.95ms +step:1119/1670 train_time:109601ms step_avg:97.95ms +step:1120/1670 train_time:109699ms step_avg:97.95ms +step:1121/1670 train_time:109797ms step_avg:97.95ms +step:1122/1670 train_time:109895ms step_avg:97.95ms +step:1123/1670 train_time:109992ms step_avg:97.94ms +step:1124/1670 train_time:110089ms step_avg:97.94ms +step:1125/1670 train_time:110187ms step_avg:97.94ms +step:1125/1670 val_loss:3.4256 train_time:110285ms step_avg:98.03ms +step:1126/1670 train_time:110307ms step_avg:97.96ms +step:1127/1670 train_time:110391ms step_avg:97.95ms +step:1128/1670 train_time:110489ms step_avg:97.95ms +step:1129/1670 train_time:110585ms step_avg:97.95ms +step:1130/1670 train_time:110682ms step_avg:97.95ms +step:1131/1670 train_time:110778ms step_avg:97.95ms +step:1132/1670 train_time:110875ms step_avg:97.95ms +step:1133/1670 train_time:110971ms step_avg:97.94ms +step:1134/1670 train_time:111068ms step_avg:97.94ms +step:1135/1670 train_time:111165ms step_avg:97.94ms +step:1136/1670 train_time:111266ms step_avg:97.95ms +step:1137/1670 train_time:111367ms step_avg:97.95ms +step:1138/1670 train_time:111466ms step_avg:97.95ms +step:1139/1670 train_time:111565ms step_avg:97.95ms +step:1140/1670 train_time:111663ms step_avg:97.95ms +step:1141/1670 train_time:111759ms step_avg:97.95ms +step:1142/1670 train_time:111856ms step_avg:97.95ms +step:1143/1670 train_time:111953ms step_avg:97.95ms +step:1144/1670 train_time:112050ms step_avg:97.95ms +step:1145/1670 train_time:112147ms step_avg:97.95ms +step:1146/1670 train_time:112245ms step_avg:97.95ms +step:1147/1670 train_time:112346ms step_avg:97.95ms +step:1148/1670 train_time:112445ms step_avg:97.95ms +step:1149/1670 train_time:112544ms step_avg:97.95ms +step:1150/1670 train_time:112642ms step_avg:97.95ms +step:1151/1670 train_time:112739ms step_avg:97.95ms +step:1152/1670 train_time:112836ms step_avg:97.95ms +step:1153/1670 train_time:112933ms step_avg:97.95ms +step:1154/1670 train_time:113030ms step_avg:97.95ms +step:1155/1670 train_time:113128ms step_avg:97.95ms +step:1156/1670 train_time:113226ms step_avg:97.95ms +step:1157/1670 train_time:113325ms step_avg:97.95ms +step:1158/1670 train_time:113425ms step_avg:97.95ms +step:1159/1670 train_time:113524ms step_avg:97.95ms +step:1160/1670 train_time:113621ms step_avg:97.95ms +step:1161/1670 train_time:113720ms step_avg:97.95ms +step:1162/1670 train_time:113817ms step_avg:97.95ms +step:1163/1670 train_time:113915ms step_avg:97.95ms +step:1164/1670 train_time:114012ms step_avg:97.95ms +step:1165/1670 train_time:114110ms step_avg:97.95ms +step:1166/1670 train_time:114208ms step_avg:97.95ms +step:1167/1670 train_time:114307ms step_avg:97.95ms +step:1168/1670 train_time:114405ms step_avg:97.95ms +step:1169/1670 train_time:114503ms step_avg:97.95ms +step:1170/1670 train_time:114602ms step_avg:97.95ms +step:1171/1670 train_time:114700ms step_avg:97.95ms +step:1172/1670 train_time:114798ms step_avg:97.95ms +step:1173/1670 train_time:114897ms step_avg:97.95ms +step:1174/1670 train_time:114994ms step_avg:97.95ms +step:1175/1670 train_time:115093ms step_avg:97.95ms +step:1176/1670 
train_time:115192ms step_avg:97.95ms +step:1177/1670 train_time:115289ms step_avg:97.95ms +step:1178/1670 train_time:115387ms step_avg:97.95ms +step:1179/1670 train_time:115484ms step_avg:97.95ms +step:1180/1670 train_time:115582ms step_avg:97.95ms +step:1181/1670 train_time:115681ms step_avg:97.95ms +step:1182/1670 train_time:115778ms step_avg:97.95ms +step:1183/1670 train_time:115875ms step_avg:97.95ms +step:1184/1670 train_time:115973ms step_avg:97.95ms +step:1185/1670 train_time:116072ms step_avg:97.95ms +step:1186/1670 train_time:116170ms step_avg:97.95ms +step:1187/1670 train_time:116267ms step_avg:97.95ms +step:1188/1670 train_time:116365ms step_avg:97.95ms +step:1189/1670 train_time:116463ms step_avg:97.95ms +step:1190/1670 train_time:116561ms step_avg:97.95ms +step:1191/1670 train_time:116660ms step_avg:97.95ms +step:1192/1670 train_time:116757ms step_avg:97.95ms +step:1193/1670 train_time:116855ms step_avg:97.95ms +step:1194/1670 train_time:116953ms step_avg:97.95ms +step:1195/1670 train_time:117050ms step_avg:97.95ms +step:1196/1670 train_time:117148ms step_avg:97.95ms +step:1197/1670 train_time:117246ms step_avg:97.95ms +step:1198/1670 train_time:117344ms step_avg:97.95ms +step:1199/1670 train_time:117441ms step_avg:97.95ms +step:1200/1670 train_time:117539ms step_avg:97.95ms +step:1201/1670 train_time:117638ms step_avg:97.95ms +step:1202/1670 train_time:117736ms step_avg:97.95ms +step:1203/1670 train_time:117833ms step_avg:97.95ms +step:1204/1670 train_time:117930ms step_avg:97.95ms +step:1205/1670 train_time:118027ms step_avg:97.95ms +step:1206/1670 train_time:118125ms step_avg:97.95ms +step:1207/1670 train_time:118224ms step_avg:97.95ms +step:1208/1670 train_time:118322ms step_avg:97.95ms +step:1209/1670 train_time:118421ms step_avg:97.95ms +step:1210/1670 train_time:118520ms step_avg:97.95ms +step:1211/1670 train_time:118618ms step_avg:97.95ms +step:1212/1670 train_time:118716ms step_avg:97.95ms +step:1213/1670 train_time:118815ms step_avg:97.95ms +step:1214/1670 train_time:118913ms step_avg:97.95ms +step:1215/1670 train_time:119012ms step_avg:97.95ms +step:1216/1670 train_time:119110ms step_avg:97.95ms +step:1217/1670 train_time:119207ms step_avg:97.95ms +step:1218/1670 train_time:119305ms step_avg:97.95ms +step:1219/1670 train_time:119404ms step_avg:97.95ms +step:1220/1670 train_time:119502ms step_avg:97.95ms +step:1221/1670 train_time:119600ms step_avg:97.95ms +step:1222/1670 train_time:119698ms step_avg:97.95ms +step:1223/1670 train_time:119796ms step_avg:97.95ms +step:1224/1670 train_time:119895ms step_avg:97.95ms +step:1225/1670 train_time:119993ms step_avg:97.95ms +step:1226/1670 train_time:120092ms step_avg:97.95ms +step:1227/1670 train_time:120191ms step_avg:97.95ms +step:1228/1670 train_time:120289ms step_avg:97.95ms +step:1229/1670 train_time:120386ms step_avg:97.95ms +step:1230/1670 train_time:120484ms step_avg:97.95ms +step:1231/1670 train_time:120582ms step_avg:97.95ms +step:1232/1670 train_time:120680ms step_avg:97.95ms +step:1233/1670 train_time:120778ms step_avg:97.95ms +step:1234/1670 train_time:120877ms step_avg:97.96ms +step:1235/1670 train_time:120975ms step_avg:97.96ms +step:1236/1670 train_time:121074ms step_avg:97.96ms +step:1237/1670 train_time:121173ms step_avg:97.96ms +step:1238/1670 train_time:121272ms step_avg:97.96ms +step:1239/1670 train_time:121369ms step_avg:97.96ms +step:1240/1670 train_time:121467ms step_avg:97.96ms +step:1241/1670 train_time:121564ms step_avg:97.96ms +step:1242/1670 train_time:121662ms step_avg:97.96ms +step:1243/1670 
train_time:121760ms step_avg:97.96ms +step:1244/1670 train_time:121858ms step_avg:97.96ms +step:1245/1670 train_time:121957ms step_avg:97.96ms +step:1246/1670 train_time:122054ms step_avg:97.96ms +step:1247/1670 train_time:122153ms step_avg:97.96ms +step:1248/1670 train_time:122252ms step_avg:97.96ms +step:1249/1670 train_time:122350ms step_avg:97.96ms +step:1250/1670 train_time:122447ms step_avg:97.96ms +step:1250/1670 val_loss:3.3820 train_time:122544ms step_avg:98.04ms +step:1251/1670 train_time:122565ms step_avg:97.97ms +step:1252/1670 train_time:122654ms step_avg:97.97ms +step:1253/1670 train_time:122754ms step_avg:97.97ms +step:1254/1670 train_time:122852ms step_avg:97.97ms +step:1255/1670 train_time:122949ms step_avg:97.97ms +step:1256/1670 train_time:123047ms step_avg:97.97ms +step:1257/1670 train_time:123143ms step_avg:97.97ms +step:1258/1670 train_time:123240ms step_avg:97.97ms +step:1259/1670 train_time:123337ms step_avg:97.96ms +step:1260/1670 train_time:123434ms step_avg:97.96ms +step:1261/1670 train_time:123535ms step_avg:97.97ms +step:1262/1670 train_time:123636ms step_avg:97.97ms +step:1263/1670 train_time:123736ms step_avg:97.97ms +step:1264/1670 train_time:123834ms step_avg:97.97ms +step:1265/1670 train_time:123931ms step_avg:97.97ms +step:1266/1670 train_time:124030ms step_avg:97.97ms +step:1267/1670 train_time:124127ms step_avg:97.97ms +step:1268/1670 train_time:124223ms step_avg:97.97ms +step:1269/1670 train_time:124320ms step_avg:97.97ms +step:1270/1670 train_time:124417ms step_avg:97.97ms +step:1271/1670 train_time:124515ms step_avg:97.97ms +step:1272/1670 train_time:124615ms step_avg:97.97ms +step:1273/1670 train_time:124714ms step_avg:97.97ms +step:1274/1670 train_time:125098ms step_avg:98.19ms +step:1275/1670 train_time:125175ms step_avg:98.18ms +step:1276/1670 train_time:125272ms step_avg:98.18ms +step:1277/1670 train_time:125368ms step_avg:98.17ms +step:1278/1670 train_time:125464ms step_avg:98.17ms +step:1279/1670 train_time:125561ms step_avg:98.17ms +step:1280/1670 train_time:125657ms step_avg:98.17ms +step:1281/1670 train_time:125754ms step_avg:98.17ms +step:1282/1670 train_time:125850ms step_avg:98.17ms +step:1283/1670 train_time:125948ms step_avg:98.17ms +step:1284/1670 train_time:126053ms step_avg:98.17ms +step:1285/1670 train_time:126156ms step_avg:98.18ms +step:1286/1670 train_time:126254ms step_avg:98.18ms +step:1287/1670 train_time:126352ms step_avg:98.18ms +step:1288/1670 train_time:126450ms step_avg:98.18ms +step:1289/1670 train_time:126548ms step_avg:98.18ms +step:1290/1670 train_time:126646ms step_avg:98.17ms +step:1291/1670 train_time:126743ms step_avg:98.17ms +step:1292/1670 train_time:126840ms step_avg:98.17ms +step:1293/1670 train_time:126938ms step_avg:98.17ms +step:1294/1670 train_time:127036ms step_avg:98.17ms +step:1295/1670 train_time:127136ms step_avg:98.17ms +step:1296/1670 train_time:127235ms step_avg:98.18ms +step:1297/1670 train_time:127333ms step_avg:98.18ms +step:1298/1670 train_time:127431ms step_avg:98.18ms +step:1299/1670 train_time:127529ms step_avg:98.17ms +step:1300/1670 train_time:127627ms step_avg:98.17ms +step:1301/1670 train_time:127725ms step_avg:98.17ms +step:1302/1670 train_time:127822ms step_avg:98.17ms +step:1303/1670 train_time:127919ms step_avg:98.17ms +step:1304/1670 train_time:128018ms step_avg:98.17ms +step:1305/1670 train_time:128118ms step_avg:98.17ms +step:1306/1670 train_time:128216ms step_avg:98.17ms +step:1307/1670 train_time:128313ms step_avg:98.17ms +step:1308/1670 train_time:128411ms step_avg:98.17ms 
+step:1309/1670 train_time:128509ms step_avg:98.17ms +step:1310/1670 train_time:128608ms step_avg:98.17ms +step:1311/1670 train_time:128708ms step_avg:98.18ms +step:1312/1670 train_time:128805ms step_avg:98.17ms +step:1313/1670 train_time:128903ms step_avg:98.17ms +step:1314/1670 train_time:129002ms step_avg:98.17ms +step:1315/1670 train_time:129102ms step_avg:98.18ms +step:1316/1670 train_time:129201ms step_avg:98.18ms +step:1317/1670 train_time:129298ms step_avg:98.18ms +step:1318/1670 train_time:129395ms step_avg:98.18ms +step:1319/1670 train_time:129492ms step_avg:98.17ms +step:1320/1670 train_time:129589ms step_avg:98.17ms +step:1321/1670 train_time:129688ms step_avg:98.17ms +step:1322/1670 train_time:129786ms step_avg:98.17ms +step:1323/1670 train_time:129884ms step_avg:98.17ms +step:1324/1670 train_time:129983ms step_avg:98.17ms +step:1325/1670 train_time:130083ms step_avg:98.18ms +step:1326/1670 train_time:130183ms step_avg:98.18ms +step:1327/1670 train_time:130281ms step_avg:98.18ms +step:1328/1670 train_time:130378ms step_avg:98.18ms +step:1329/1670 train_time:130476ms step_avg:98.18ms +step:1330/1670 train_time:130572ms step_avg:98.17ms +step:1331/1670 train_time:130672ms step_avg:98.18ms +step:1332/1670 train_time:130772ms step_avg:98.18ms +step:1333/1670 train_time:130871ms step_avg:98.18ms +step:1334/1670 train_time:130971ms step_avg:98.18ms +step:1335/1670 train_time:131071ms step_avg:98.18ms +step:1336/1670 train_time:131171ms step_avg:98.18ms +step:1337/1670 train_time:131271ms step_avg:98.18ms +step:1338/1670 train_time:131371ms step_avg:98.18ms +step:1339/1670 train_time:131469ms step_avg:98.18ms +step:1340/1670 train_time:131567ms step_avg:98.18ms +step:1341/1670 train_time:131664ms step_avg:98.18ms +step:1342/1670 train_time:131762ms step_avg:98.18ms +step:1343/1670 train_time:131859ms step_avg:98.18ms +step:1344/1670 train_time:131956ms step_avg:98.18ms +step:1345/1670 train_time:132054ms step_avg:98.18ms +step:1346/1670 train_time:132153ms step_avg:98.18ms +step:1347/1670 train_time:132251ms step_avg:98.18ms +step:1348/1670 train_time:132351ms step_avg:98.18ms +step:1349/1670 train_time:132449ms step_avg:98.18ms +step:1350/1670 train_time:132548ms step_avg:98.18ms +step:1351/1670 train_time:132647ms step_avg:98.18ms +step:1352/1670 train_time:132746ms step_avg:98.19ms +step:1353/1670 train_time:132844ms step_avg:98.18ms +step:1354/1670 train_time:132942ms step_avg:98.18ms +step:1355/1670 train_time:133039ms step_avg:98.18ms +step:1356/1670 train_time:133137ms step_avg:98.18ms +step:1357/1670 train_time:133235ms step_avg:98.18ms +step:1358/1670 train_time:133333ms step_avg:98.18ms +step:1359/1670 train_time:133431ms step_avg:98.18ms +step:1360/1670 train_time:133529ms step_avg:98.18ms +step:1361/1670 train_time:133627ms step_avg:98.18ms +step:1362/1670 train_time:133726ms step_avg:98.18ms +step:1363/1670 train_time:133824ms step_avg:98.18ms +step:1364/1670 train_time:133922ms step_avg:98.18ms +step:1365/1670 train_time:134020ms step_avg:98.18ms +step:1366/1670 train_time:134118ms step_avg:98.18ms +step:1367/1670 train_time:134215ms step_avg:98.18ms +step:1368/1670 train_time:134314ms step_avg:98.18ms +step:1369/1670 train_time:134411ms step_avg:98.18ms +step:1370/1670 train_time:134510ms step_avg:98.18ms +step:1371/1670 train_time:134608ms step_avg:98.18ms +step:1372/1670 train_time:134706ms step_avg:98.18ms +step:1373/1670 train_time:134805ms step_avg:98.18ms +step:1374/1670 train_time:134902ms step_avg:98.18ms +step:1375/1670 train_time:135001ms step_avg:98.18ms 
+step:1375/1670 val_loss:3.3446 train_time:135098ms step_avg:98.25ms +step:1376/1670 train_time:135120ms step_avg:98.20ms +step:1377/1670 train_time:135206ms step_avg:98.19ms +step:1378/1670 train_time:135308ms step_avg:98.19ms +step:1379/1670 train_time:135406ms step_avg:98.19ms +step:1380/1670 train_time:135503ms step_avg:98.19ms +step:1381/1670 train_time:135600ms step_avg:98.19ms +step:1382/1670 train_time:135697ms step_avg:98.19ms +step:1383/1670 train_time:135794ms step_avg:98.19ms +step:1384/1670 train_time:135892ms step_avg:98.19ms +step:1385/1670 train_time:135989ms step_avg:98.19ms +step:1386/1670 train_time:136087ms step_avg:98.19ms +step:1387/1670 train_time:136188ms step_avg:98.19ms +step:1388/1670 train_time:136288ms step_avg:98.19ms +step:1389/1670 train_time:136385ms step_avg:98.19ms +step:1390/1670 train_time:136483ms step_avg:98.19ms +step:1391/1670 train_time:136581ms step_avg:98.19ms +step:1392/1670 train_time:136678ms step_avg:98.19ms +step:1393/1670 train_time:136776ms step_avg:98.19ms +step:1394/1670 train_time:136872ms step_avg:98.19ms +step:1395/1670 train_time:136969ms step_avg:98.19ms +step:1396/1670 train_time:137067ms step_avg:98.19ms +step:1397/1670 train_time:137167ms step_avg:98.19ms +step:1398/1670 train_time:137266ms step_avg:98.19ms +step:1399/1670 train_time:137364ms step_avg:98.19ms +step:1400/1670 train_time:137463ms step_avg:98.19ms +step:1401/1670 train_time:137561ms step_avg:98.19ms +step:1402/1670 train_time:137659ms step_avg:98.19ms +step:1403/1670 train_time:137757ms step_avg:98.19ms +step:1404/1670 train_time:137856ms step_avg:98.19ms +step:1405/1670 train_time:137953ms step_avg:98.19ms +step:1406/1670 train_time:138051ms step_avg:98.19ms +step:1407/1670 train_time:138150ms step_avg:98.19ms +step:1408/1670 train_time:138248ms step_avg:98.19ms +step:1409/1670 train_time:138346ms step_avg:98.19ms +step:1410/1670 train_time:138444ms step_avg:98.19ms +step:1411/1670 train_time:138542ms step_avg:98.19ms +step:1412/1670 train_time:138640ms step_avg:98.19ms +step:1413/1670 train_time:138738ms step_avg:98.19ms +step:1414/1670 train_time:138836ms step_avg:98.19ms +step:1415/1670 train_time:138934ms step_avg:98.19ms +step:1416/1670 train_time:139032ms step_avg:98.19ms +step:1417/1670 train_time:139131ms step_avg:98.19ms +step:1418/1670 train_time:139230ms step_avg:98.19ms +step:1419/1670 train_time:139328ms step_avg:98.19ms +step:1420/1670 train_time:139426ms step_avg:98.19ms +step:1421/1670 train_time:139524ms step_avg:98.19ms +step:1422/1670 train_time:139622ms step_avg:98.19ms +step:1423/1670 train_time:139721ms step_avg:98.19ms +step:1424/1670 train_time:139819ms step_avg:98.19ms +step:1425/1670 train_time:139917ms step_avg:98.19ms +step:1426/1670 train_time:140015ms step_avg:98.19ms +step:1427/1670 train_time:140115ms step_avg:98.19ms +step:1428/1670 train_time:140216ms step_avg:98.19ms +step:1429/1670 train_time:140315ms step_avg:98.19ms +step:1430/1670 train_time:140414ms step_avg:98.19ms +step:1431/1670 train_time:140511ms step_avg:98.19ms +step:1432/1670 train_time:140609ms step_avg:98.19ms +step:1433/1670 train_time:140705ms step_avg:98.19ms +step:1434/1670 train_time:140802ms step_avg:98.19ms +step:1435/1670 train_time:140901ms step_avg:98.19ms +step:1436/1670 train_time:140999ms step_avg:98.19ms +step:1437/1670 train_time:141098ms step_avg:98.19ms +step:1438/1670 train_time:141197ms step_avg:98.19ms +step:1439/1670 train_time:141298ms step_avg:98.19ms +step:1440/1670 train_time:141398ms step_avg:98.19ms +step:1441/1670 train_time:141497ms 
step_avg:98.19ms +step:1442/1670 train_time:141597ms step_avg:98.19ms +step:1443/1670 train_time:141696ms step_avg:98.20ms +step:1444/1670 train_time:141794ms step_avg:98.20ms +step:1445/1670 train_time:141891ms step_avg:98.19ms +step:1446/1670 train_time:141988ms step_avg:98.19ms +step:1447/1670 train_time:142086ms step_avg:98.19ms +step:1448/1670 train_time:142184ms step_avg:98.19ms +step:1449/1670 train_time:142283ms step_avg:98.19ms +step:1450/1670 train_time:142382ms step_avg:98.19ms +step:1451/1670 train_time:142481ms step_avg:98.20ms +step:1452/1670 train_time:142580ms step_avg:98.20ms +step:1453/1670 train_time:142679ms step_avg:98.20ms +step:1454/1670 train_time:142777ms step_avg:98.20ms +step:1455/1670 train_time:142876ms step_avg:98.20ms +step:1456/1670 train_time:142973ms step_avg:98.20ms +step:1457/1670 train_time:143071ms step_avg:98.20ms +step:1458/1670 train_time:143168ms step_avg:98.19ms +step:1459/1670 train_time:143265ms step_avg:98.19ms +step:1460/1670 train_time:143363ms step_avg:98.19ms +step:1461/1670 train_time:143463ms step_avg:98.19ms +step:1462/1670 train_time:143561ms step_avg:98.20ms +step:1463/1670 train_time:143660ms step_avg:98.20ms +step:1464/1670 train_time:143759ms step_avg:98.20ms +step:1465/1670 train_time:143858ms step_avg:98.20ms +step:1466/1670 train_time:143957ms step_avg:98.20ms +step:1467/1670 train_time:144056ms step_avg:98.20ms +step:1468/1670 train_time:144156ms step_avg:98.20ms +step:1469/1670 train_time:144253ms step_avg:98.20ms +step:1470/1670 train_time:144351ms step_avg:98.20ms +step:1471/1670 train_time:144449ms step_avg:98.20ms +step:1472/1670 train_time:144546ms step_avg:98.20ms +step:1473/1670 train_time:144644ms step_avg:98.20ms +step:1474/1670 train_time:144742ms step_avg:98.20ms +step:1475/1670 train_time:144841ms step_avg:98.20ms +step:1476/1670 train_time:144940ms step_avg:98.20ms +step:1477/1670 train_time:145039ms step_avg:98.20ms +step:1478/1670 train_time:145138ms step_avg:98.20ms +step:1479/1670 train_time:145238ms step_avg:98.20ms +step:1480/1670 train_time:145337ms step_avg:98.20ms +step:1481/1670 train_time:145438ms step_avg:98.20ms +step:1482/1670 train_time:145538ms step_avg:98.20ms +step:1483/1670 train_time:145637ms step_avg:98.20ms +step:1484/1670 train_time:145734ms step_avg:98.20ms +step:1485/1670 train_time:146009ms step_avg:98.32ms +step:1486/1670 train_time:146190ms step_avg:98.38ms +step:1487/1670 train_time:146285ms step_avg:98.38ms +step:1488/1670 train_time:146382ms step_avg:98.37ms +step:1489/1670 train_time:146479ms step_avg:98.37ms +step:1490/1670 train_time:146577ms step_avg:98.37ms +step:1491/1670 train_time:146673ms step_avg:98.37ms +step:1492/1670 train_time:146770ms step_avg:98.37ms +step:1493/1670 train_time:146866ms step_avg:98.37ms +step:1494/1670 train_time:146964ms step_avg:98.37ms +step:1495/1670 train_time:147069ms step_avg:98.37ms +step:1496/1670 train_time:147171ms step_avg:98.38ms +step:1497/1670 train_time:147269ms step_avg:98.38ms +step:1498/1670 train_time:147366ms step_avg:98.37ms +step:1499/1670 train_time:147463ms step_avg:98.37ms +step:1500/1670 train_time:147561ms step_avg:98.37ms +step:1500/1670 val_loss:3.3128 train_time:147658ms step_avg:98.44ms +step:1501/1670 train_time:147679ms step_avg:98.39ms +step:1502/1670 train_time:147764ms step_avg:98.38ms +step:1503/1670 train_time:147864ms step_avg:98.38ms +step:1504/1670 train_time:147962ms step_avg:98.38ms +step:1505/1670 train_time:148059ms step_avg:98.38ms +step:1506/1670 train_time:148157ms step_avg:98.38ms +step:1507/1670 
train_time:148255ms step_avg:98.38ms +step:1508/1670 train_time:148352ms step_avg:98.38ms +step:1509/1670 train_time:148449ms step_avg:98.38ms +step:1510/1670 train_time:148546ms step_avg:98.37ms +step:1511/1670 train_time:148645ms step_avg:98.38ms +step:1512/1670 train_time:148745ms step_avg:98.38ms +step:1513/1670 train_time:148844ms step_avg:98.38ms +step:1514/1670 train_time:148943ms step_avg:98.38ms +step:1515/1670 train_time:149040ms step_avg:98.38ms +step:1516/1670 train_time:149138ms step_avg:98.38ms +step:1517/1670 train_time:149236ms step_avg:98.38ms +step:1518/1670 train_time:149334ms step_avg:98.38ms +step:1519/1670 train_time:149432ms step_avg:98.38ms +step:1520/1670 train_time:149530ms step_avg:98.37ms +step:1521/1670 train_time:149628ms step_avg:98.37ms +step:1522/1670 train_time:149727ms step_avg:98.38ms +step:1523/1670 train_time:149826ms step_avg:98.38ms +step:1524/1670 train_time:149925ms step_avg:98.38ms +step:1525/1670 train_time:150022ms step_avg:98.38ms +step:1526/1670 train_time:150120ms step_avg:98.37ms +step:1527/1670 train_time:150218ms step_avg:98.37ms +step:1528/1670 train_time:150317ms step_avg:98.37ms +step:1529/1670 train_time:150415ms step_avg:98.37ms +step:1530/1670 train_time:150514ms step_avg:98.37ms +step:1531/1670 train_time:150613ms step_avg:98.38ms +step:1532/1670 train_time:150712ms step_avg:98.38ms +step:1533/1670 train_time:150810ms step_avg:98.38ms +step:1534/1670 train_time:150910ms step_avg:98.38ms +step:1535/1670 train_time:151008ms step_avg:98.38ms +step:1536/1670 train_time:151106ms step_avg:98.38ms +step:1537/1670 train_time:151202ms step_avg:98.38ms +step:1538/1670 train_time:151299ms step_avg:98.37ms +step:1539/1670 train_time:151396ms step_avg:98.37ms +step:1540/1670 train_time:151495ms step_avg:98.37ms +step:1541/1670 train_time:151593ms step_avg:98.37ms +step:1542/1670 train_time:151693ms step_avg:98.37ms +step:1543/1670 train_time:151793ms step_avg:98.38ms +step:1544/1670 train_time:151893ms step_avg:98.38ms +step:1545/1670 train_time:151991ms step_avg:98.38ms +step:1546/1670 train_time:152090ms step_avg:98.38ms +step:1547/1670 train_time:152188ms step_avg:98.38ms +step:1548/1670 train_time:152284ms step_avg:98.37ms +step:1549/1670 train_time:152382ms step_avg:98.37ms +step:1550/1670 train_time:152480ms step_avg:98.37ms +step:1551/1670 train_time:152578ms step_avg:98.37ms +step:1552/1670 train_time:152677ms step_avg:98.37ms +step:1553/1670 train_time:152778ms step_avg:98.38ms +step:1554/1670 train_time:152877ms step_avg:98.38ms +step:1555/1670 train_time:152979ms step_avg:98.38ms +step:1556/1670 train_time:153078ms step_avg:98.38ms +step:1557/1670 train_time:153179ms step_avg:98.38ms +step:1558/1670 train_time:153278ms step_avg:98.38ms +step:1559/1670 train_time:153377ms step_avg:98.38ms +step:1560/1670 train_time:153476ms step_avg:98.38ms +step:1561/1670 train_time:153574ms step_avg:98.38ms +step:1562/1670 train_time:153672ms step_avg:98.38ms +step:1563/1670 train_time:153771ms step_avg:98.38ms +step:1564/1670 train_time:153870ms step_avg:98.38ms +step:1565/1670 train_time:153969ms step_avg:98.38ms +step:1566/1670 train_time:154067ms step_avg:98.38ms +step:1567/1670 train_time:154164ms step_avg:98.38ms +step:1568/1670 train_time:154262ms step_avg:98.38ms +step:1569/1670 train_time:154361ms step_avg:98.38ms +step:1570/1670 train_time:154459ms step_avg:98.38ms +step:1571/1670 train_time:154557ms step_avg:98.38ms +step:1572/1670 train_time:154655ms step_avg:98.38ms +step:1573/1670 train_time:154754ms step_avg:98.38ms +step:1574/1670 
train_time:154853ms step_avg:98.38ms +step:1575/1670 train_time:154951ms step_avg:98.38ms +step:1576/1670 train_time:155050ms step_avg:98.38ms +step:1577/1670 train_time:155149ms step_avg:98.38ms +step:1578/1670 train_time:155246ms step_avg:98.38ms +step:1579/1670 train_time:155343ms step_avg:98.38ms +step:1580/1670 train_time:155441ms step_avg:98.38ms +step:1581/1670 train_time:155539ms step_avg:98.38ms +step:1582/1670 train_time:155637ms step_avg:98.38ms +step:1583/1670 train_time:155735ms step_avg:98.38ms +step:1584/1670 train_time:155832ms step_avg:98.38ms +step:1585/1670 train_time:155931ms step_avg:98.38ms +step:1586/1670 train_time:156029ms step_avg:98.38ms +step:1587/1670 train_time:156127ms step_avg:98.38ms +step:1588/1670 train_time:156225ms step_avg:98.38ms +step:1589/1670 train_time:156323ms step_avg:98.38ms +step:1590/1670 train_time:156420ms step_avg:98.38ms +step:1591/1670 train_time:156519ms step_avg:98.38ms +step:1592/1670 train_time:156618ms step_avg:98.38ms +step:1593/1670 train_time:156716ms step_avg:98.38ms +step:1594/1670 train_time:156814ms step_avg:98.38ms +step:1595/1670 train_time:156914ms step_avg:98.38ms +step:1596/1670 train_time:157012ms step_avg:98.38ms +step:1597/1670 train_time:157111ms step_avg:98.38ms +step:1598/1670 train_time:157211ms step_avg:98.38ms +step:1599/1670 train_time:157310ms step_avg:98.38ms +step:1600/1670 train_time:157407ms step_avg:98.38ms +step:1601/1670 train_time:157504ms step_avg:98.38ms +step:1602/1670 train_time:157601ms step_avg:98.38ms +step:1603/1670 train_time:157699ms step_avg:98.38ms +step:1604/1670 train_time:157797ms step_avg:98.38ms +step:1605/1670 train_time:157896ms step_avg:98.38ms +step:1606/1670 train_time:157996ms step_avg:98.38ms +step:1607/1670 train_time:158096ms step_avg:98.38ms +step:1608/1670 train_time:158196ms step_avg:98.38ms +step:1609/1670 train_time:158294ms step_avg:98.38ms +step:1610/1670 train_time:158394ms step_avg:98.38ms +step:1611/1670 train_time:158491ms step_avg:98.38ms +step:1612/1670 train_time:158589ms step_avg:98.38ms +step:1613/1670 train_time:158686ms step_avg:98.38ms +step:1614/1670 train_time:158783ms step_avg:98.38ms +step:1615/1670 train_time:158880ms step_avg:98.38ms +step:1616/1670 train_time:158979ms step_avg:98.38ms +step:1617/1670 train_time:159078ms step_avg:98.38ms +step:1618/1670 train_time:159178ms step_avg:98.38ms +step:1619/1670 train_time:159276ms step_avg:98.38ms +step:1620/1670 train_time:159375ms step_avg:98.38ms +step:1621/1670 train_time:159474ms step_avg:98.38ms +step:1622/1670 train_time:159572ms step_avg:98.38ms +step:1623/1670 train_time:159670ms step_avg:98.38ms +step:1624/1670 train_time:159768ms step_avg:98.38ms +step:1625/1670 train_time:159865ms step_avg:98.38ms +step:1625/1670 val_loss:3.2856 train_time:159961ms step_avg:98.44ms +step:1626/1670 train_time:159983ms step_avg:98.39ms +step:1627/1670 train_time:160067ms step_avg:98.38ms +step:1628/1670 train_time:160167ms step_avg:98.38ms +step:1629/1670 train_time:160265ms step_avg:98.38ms +step:1630/1670 train_time:160363ms step_avg:98.38ms +step:1631/1670 train_time:160462ms step_avg:98.38ms +step:1632/1670 train_time:160559ms step_avg:98.38ms +step:1633/1670 train_time:160657ms step_avg:98.38ms +step:1634/1670 train_time:160756ms step_avg:98.38ms +step:1635/1670 train_time:160854ms step_avg:98.38ms +step:1636/1670 train_time:160954ms step_avg:98.38ms +step:1637/1670 train_time:161054ms step_avg:98.38ms +step:1638/1670 train_time:161154ms step_avg:98.38ms +step:1639/1670 train_time:161253ms step_avg:98.38ms 
+step:1640/1670 train_time:161350ms step_avg:98.38ms +step:1641/1670 train_time:161447ms step_avg:98.38ms +step:1642/1670 train_time:161545ms step_avg:98.38ms +step:1643/1670 train_time:161642ms step_avg:98.38ms +step:1644/1670 train_time:161739ms step_avg:98.38ms +step:1645/1670 train_time:161837ms step_avg:98.38ms +step:1646/1670 train_time:161936ms step_avg:98.38ms +step:1647/1670 train_time:162036ms step_avg:98.38ms +step:1648/1670 train_time:162138ms step_avg:98.38ms +step:1649/1670 train_time:162237ms step_avg:98.39ms +step:1650/1670 train_time:162335ms step_avg:98.38ms +step:1651/1670 train_time:162434ms step_avg:98.39ms +step:1652/1670 train_time:162532ms step_avg:98.39ms +step:1653/1670 train_time:162629ms step_avg:98.38ms +step:1654/1670 train_time:162726ms step_avg:98.38ms +step:1655/1670 train_time:162823ms step_avg:98.38ms +step:1656/1670 train_time:162921ms step_avg:98.38ms +step:1657/1670 train_time:163021ms step_avg:98.38ms +step:1658/1670 train_time:163121ms step_avg:98.38ms +step:1659/1670 train_time:163222ms step_avg:98.39ms +step:1660/1670 train_time:163321ms step_avg:98.39ms +step:1661/1670 train_time:163422ms step_avg:98.39ms +step:1662/1670 train_time:163521ms step_avg:98.39ms +step:1663/1670 train_time:163619ms step_avg:98.39ms +step:1664/1670 train_time:163717ms step_avg:98.39ms +step:1665/1670 train_time:163815ms step_avg:98.39ms +step:1666/1670 train_time:163913ms step_avg:98.39ms +step:1667/1670 train_time:164010ms step_avg:98.39ms +step:1668/1670 train_time:164108ms step_avg:98.39ms +step:1669/1670 train_time:164206ms step_avg:98.39ms +step:1670/1670 train_time:164304ms step_avg:98.39ms +step:1670/1670 val_loss:3.2778 train_time:164403ms step_avg:98.44ms +peak memory allocated: 34000 MiB reserved: 49576 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_3f42c181-6303-4ade-9f64-556d44d54065.txt b/records/090525_SkipMLPBlocks/comparison_3f42c181-6303-4ade-9f64-556d44d54065.txt new file mode 100644 index 000000000..3fe1483c6 --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_3f42c181-6303-4ade-9f64-556d44d54065.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + 
scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, 
a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we 
use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral 
norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
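+ # In outline: for each size-bucketed group, gradients are reduce-scattered so that
+ # rank r owns the averaged gradient of params[base_i + r]; each rank then applies
+ # decoupled weight decay, momentum, and Newton-Schulz orthogonalization to its own
+ # shard only, and the updated shards are all-gathered so every rank ends the step
+ # with identical parameters.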
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
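+# A worked example of BOSFinder.next_batch (hypothetical numbers): with
+# bos_idx = [0, 5, 9] in a 12-token shard, num_tokens_local=8 and max_seq_len=16,
+# rank 0 takes documents [0, 5) and [5, 9), so cur_len = 9 == num_tokens_local + 1:
+# the one extra token pays for the input/target shift applied by the caller below.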
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with the Beginning of Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # trim the final end index by one to account for the _targets shift
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local)
+            _targets = buf[1:].view(num_tokens_local)
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
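+# A minimal usage sketch of the generator above (hypothetical sizes, single process,
+# so rank=0 and world_size=1 per the fallbacks at the top of the function):
+#   loader = distributed_data_generator("data/*.bin", num_tokens=65536, max_seq_len=2048)
+#   inputs, targets, cum_seqlens = next(loader)  # one micro-batch, already on cuda
+#   inputs, targets, cum_seqlens = loader.send((131072, 4096, 1))  # .send() swaps in new
+#       (num_tokens, max_seq_len, grad_accum_steps) and also returns the next batch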
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11) # window sizes (in blocks) phased in over training by get_ws
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if 
master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:14:13 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 128W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 44C P0 132W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 84065 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 84066 C /usr/bin/python3 610MiB | +| 0 N/A N/A 84067 C /usr/bin/python3 610MiB | +| 0 N/A N/A 84068 C /usr/bin/python3 610MiB | +| 0 N/A N/A 84069 C /usr/bin/python3 610MiB | +| 0 N/A N/A 84070 C /usr/bin/python3 610MiB | +| 0 N/A N/A 84071 C /usr/bin/python3 610MiB | +| 0 N/A N/A 84072 C /usr/bin/python3 610MiB | +| 1 N/A N/A 84066 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 84067 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 84068 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 84069 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 84070 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 84071 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 84072 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:387ms step_avg:387.37ms +step:2/1670 train_time:407ms step_avg:203.50ms +step:3/1670 train_time:480ms step_avg:160.12ms +step:4/1670 train_time:574ms step_avg:143.57ms +step:5/1670 train_time:669ms step_avg:133.77ms +step:6/1670 train_time:764ms step_avg:127.36ms +step:7/1670 train_time:859ms step_avg:122.71ms +step:8/1670 
train_time:954ms step_avg:119.23ms +step:9/1670 train_time:1049ms step_avg:116.52ms +step:10/1670 train_time:1144ms step_avg:114.45ms +step:11/1670 train_time:1240ms step_avg:112.72ms +step:12/1670 train_time:1338ms step_avg:111.54ms +step:13/1670 train_time:1437ms step_avg:110.50ms +step:14/1670 train_time:1533ms step_avg:109.50ms +step:15/1670 train_time:1629ms step_avg:108.58ms +step:16/1670 train_time:1724ms step_avg:107.76ms +step:17/1670 train_time:1821ms step_avg:107.12ms +step:18/1670 train_time:1916ms step_avg:106.45ms +step:19/1670 train_time:2012ms step_avg:105.87ms +step:20/1670 train_time:2107ms step_avg:105.34ms +step:21/1670 train_time:2203ms step_avg:104.88ms +step:22/1670 train_time:2299ms step_avg:104.51ms +step:23/1670 train_time:2396ms step_avg:104.19ms +step:24/1670 train_time:2493ms step_avg:103.86ms +step:25/1670 train_time:2588ms step_avg:103.54ms +step:26/1670 train_time:2684ms step_avg:103.24ms +step:27/1670 train_time:2780ms step_avg:102.96ms +step:28/1670 train_time:2876ms step_avg:102.70ms +step:29/1670 train_time:2971ms step_avg:102.46ms +step:30/1670 train_time:3067ms step_avg:102.22ms +step:31/1670 train_time:3163ms step_avg:102.02ms +step:32/1670 train_time:3259ms step_avg:101.86ms +step:33/1670 train_time:3356ms step_avg:101.71ms +step:34/1670 train_time:3452ms step_avg:101.54ms +step:35/1670 train_time:3548ms step_avg:101.38ms +step:36/1670 train_time:3645ms step_avg:101.25ms +step:37/1670 train_time:3741ms step_avg:101.12ms +step:38/1670 train_time:3838ms step_avg:100.99ms +step:39/1670 train_time:3933ms step_avg:100.84ms +step:40/1670 train_time:4028ms step_avg:100.70ms +step:41/1670 train_time:4124ms step_avg:100.59ms +step:42/1670 train_time:4220ms step_avg:100.48ms +step:43/1670 train_time:4316ms step_avg:100.37ms +step:44/1670 train_time:4411ms step_avg:100.26ms +step:45/1670 train_time:4508ms step_avg:100.17ms +step:46/1670 train_time:4605ms step_avg:100.10ms +step:47/1670 train_time:4701ms step_avg:100.03ms +step:48/1670 train_time:4797ms step_avg:99.94ms +step:49/1670 train_time:4893ms step_avg:99.85ms +step:50/1670 train_time:4988ms step_avg:99.77ms +step:51/1670 train_time:5085ms step_avg:99.70ms +step:52/1670 train_time:5181ms step_avg:99.64ms +step:53/1670 train_time:5277ms step_avg:99.56ms +step:54/1670 train_time:5373ms step_avg:99.49ms +step:55/1670 train_time:5469ms step_avg:99.44ms +step:56/1670 train_time:5566ms step_avg:99.40ms +step:57/1670 train_time:5663ms step_avg:99.35ms +step:58/1670 train_time:5759ms step_avg:99.29ms +step:59/1670 train_time:5855ms step_avg:99.24ms +step:60/1670 train_time:5950ms step_avg:99.17ms +step:61/1670 train_time:6047ms step_avg:99.13ms +step:62/1670 train_time:6143ms step_avg:99.08ms +step:63/1670 train_time:6240ms step_avg:99.04ms +step:64/1670 train_time:6335ms step_avg:98.99ms +step:65/1670 train_time:6430ms step_avg:98.93ms +step:66/1670 train_time:6526ms step_avg:98.88ms +step:67/1670 train_time:6622ms step_avg:98.84ms +step:68/1670 train_time:6718ms step_avg:98.80ms +step:69/1670 train_time:6814ms step_avg:98.75ms +step:70/1670 train_time:6909ms step_avg:98.70ms +step:71/1670 train_time:7005ms step_avg:98.66ms +step:72/1670 train_time:7101ms step_avg:98.63ms +step:73/1670 train_time:7197ms step_avg:98.60ms +step:74/1670 train_time:7294ms step_avg:98.56ms +step:75/1670 train_time:7389ms step_avg:98.52ms +step:76/1670 train_time:7485ms step_avg:98.49ms +step:77/1670 train_time:7581ms step_avg:98.45ms +step:78/1670 train_time:7676ms step_avg:98.42ms +step:79/1670 train_time:7772ms step_avg:98.38ms 
+step:80/1670 train_time:7868ms step_avg:98.35ms +step:81/1670 train_time:7964ms step_avg:98.32ms +step:82/1670 train_time:8060ms step_avg:98.29ms +step:83/1670 train_time:8156ms step_avg:98.27ms +step:84/1670 train_time:8252ms step_avg:98.24ms +step:85/1670 train_time:8348ms step_avg:98.21ms +step:86/1670 train_time:8444ms step_avg:98.19ms +step:87/1670 train_time:8540ms step_avg:98.17ms +step:88/1670 train_time:8637ms step_avg:98.14ms +step:89/1670 train_time:8732ms step_avg:98.11ms +step:90/1670 train_time:8828ms step_avg:98.09ms +step:91/1670 train_time:8924ms step_avg:98.06ms +step:92/1670 train_time:9020ms step_avg:98.04ms +step:93/1670 train_time:9115ms step_avg:98.01ms +step:94/1670 train_time:9212ms step_avg:98.00ms +step:95/1670 train_time:9308ms step_avg:97.97ms +step:96/1670 train_time:9404ms step_avg:97.96ms +step:97/1670 train_time:9499ms step_avg:97.93ms +step:98/1670 train_time:9595ms step_avg:97.91ms +step:99/1670 train_time:9691ms step_avg:97.89ms +step:100/1670 train_time:9787ms step_avg:97.87ms +step:101/1670 train_time:9883ms step_avg:97.85ms +step:102/1670 train_time:9979ms step_avg:97.83ms +step:103/1670 train_time:10075ms step_avg:97.82ms +step:104/1670 train_time:10170ms step_avg:97.79ms +step:105/1670 train_time:10267ms step_avg:97.78ms +step:106/1670 train_time:10364ms step_avg:97.77ms +step:107/1670 train_time:10460ms step_avg:97.76ms +step:108/1670 train_time:10555ms step_avg:97.73ms +step:109/1670 train_time:10651ms step_avg:97.71ms +step:110/1670 train_time:10746ms step_avg:97.69ms +step:111/1670 train_time:10843ms step_avg:97.68ms +step:112/1670 train_time:10939ms step_avg:97.67ms +step:113/1670 train_time:11035ms step_avg:97.65ms +step:114/1670 train_time:11130ms step_avg:97.63ms +step:115/1670 train_time:11226ms step_avg:97.62ms +step:116/1670 train_time:11323ms step_avg:97.61ms +step:117/1670 train_time:11419ms step_avg:97.60ms +step:118/1670 train_time:11514ms step_avg:97.58ms +step:119/1670 train_time:11609ms step_avg:97.56ms +step:120/1670 train_time:11706ms step_avg:97.55ms +step:121/1670 train_time:11801ms step_avg:97.53ms +step:122/1670 train_time:11897ms step_avg:97.52ms +step:123/1670 train_time:11992ms step_avg:97.50ms +step:124/1670 train_time:12089ms step_avg:97.49ms +step:125/1670 train_time:12184ms step_avg:97.47ms +step:125/1670 val_loss:4.3072 train_time:12279ms step_avg:98.23ms +step:126/1670 train_time:12301ms step_avg:97.63ms +step:127/1670 train_time:12384ms step_avg:97.51ms +step:128/1670 train_time:12490ms step_avg:97.58ms +step:129/1670 train_time:12587ms step_avg:97.57ms +step:130/1670 train_time:12682ms step_avg:97.55ms +step:131/1670 train_time:12777ms step_avg:97.54ms +step:132/1670 train_time:12873ms step_avg:97.52ms +step:133/1670 train_time:12967ms step_avg:97.49ms +step:134/1670 train_time:13061ms step_avg:97.47ms +step:135/1670 train_time:13156ms step_avg:97.45ms +step:136/1670 train_time:13251ms step_avg:97.44ms +step:137/1670 train_time:13348ms step_avg:97.43ms +step:138/1670 train_time:13445ms step_avg:97.43ms +step:139/1670 train_time:13542ms step_avg:97.42ms +step:140/1670 train_time:13638ms step_avg:97.41ms +step:141/1670 train_time:13734ms step_avg:97.40ms +step:142/1670 train_time:13830ms step_avg:97.39ms +step:143/1670 train_time:13924ms step_avg:97.37ms +step:144/1670 train_time:14019ms step_avg:97.35ms +step:145/1670 train_time:14114ms step_avg:97.34ms +step:146/1670 train_time:14209ms step_avg:97.32ms +step:147/1670 train_time:14304ms step_avg:97.31ms +step:148/1670 train_time:14400ms step_avg:97.30ms 
+step:149/1670 train_time:14498ms step_avg:97.30ms +step:150/1670 train_time:14595ms step_avg:97.30ms +step:151/1670 train_time:14691ms step_avg:97.29ms +step:152/1670 train_time:14786ms step_avg:97.28ms +step:153/1670 train_time:14881ms step_avg:97.26ms +step:154/1670 train_time:14976ms step_avg:97.25ms +step:155/1670 train_time:15071ms step_avg:97.23ms +step:156/1670 train_time:15166ms step_avg:97.22ms +step:157/1670 train_time:15260ms step_avg:97.20ms +step:158/1670 train_time:15356ms step_avg:97.19ms +step:159/1670 train_time:15453ms step_avg:97.19ms +step:160/1670 train_time:15549ms step_avg:97.18ms +step:161/1670 train_time:15646ms step_avg:97.18ms +step:162/1670 train_time:15740ms step_avg:97.16ms +step:163/1670 train_time:15837ms step_avg:97.16ms +step:164/1670 train_time:15932ms step_avg:97.15ms +step:165/1670 train_time:16027ms step_avg:97.13ms +step:166/1670 train_time:16122ms step_avg:97.12ms +step:167/1670 train_time:16217ms step_avg:97.11ms +step:168/1670 train_time:16313ms step_avg:97.10ms +step:169/1670 train_time:16408ms step_avg:97.09ms +step:170/1670 train_time:16503ms step_avg:97.08ms +step:171/1670 train_time:16599ms step_avg:97.07ms +step:172/1670 train_time:16695ms step_avg:97.07ms +step:173/1670 train_time:16792ms step_avg:97.06ms +step:174/1670 train_time:16887ms step_avg:97.05ms +step:175/1670 train_time:16982ms step_avg:97.04ms +step:176/1670 train_time:17077ms step_avg:97.03ms +step:177/1670 train_time:17173ms step_avg:97.02ms +step:178/1670 train_time:17268ms step_avg:97.01ms +step:179/1670 train_time:17363ms step_avg:97.00ms +step:180/1670 train_time:17459ms step_avg:96.99ms +step:181/1670 train_time:17554ms step_avg:96.99ms +step:182/1670 train_time:17651ms step_avg:96.98ms +step:183/1670 train_time:17746ms step_avg:96.97ms +step:184/1670 train_time:17841ms step_avg:96.96ms +step:185/1670 train_time:17937ms step_avg:96.96ms +step:186/1670 train_time:18032ms step_avg:96.95ms +step:187/1670 train_time:18128ms step_avg:96.94ms +step:188/1670 train_time:18222ms step_avg:96.93ms +step:189/1670 train_time:18318ms step_avg:96.92ms +step:190/1670 train_time:18413ms step_avg:96.91ms +step:191/1670 train_time:18509ms step_avg:96.91ms +step:192/1670 train_time:18604ms step_avg:96.90ms +step:193/1670 train_time:18700ms step_avg:96.89ms +step:194/1670 train_time:18796ms step_avg:96.89ms +step:195/1670 train_time:18892ms step_avg:96.88ms +step:196/1670 train_time:18988ms step_avg:96.88ms +step:197/1670 train_time:19083ms step_avg:96.87ms +step:198/1670 train_time:19178ms step_avg:96.86ms +step:199/1670 train_time:19273ms step_avg:96.85ms +step:200/1670 train_time:19368ms step_avg:96.84ms +step:201/1670 train_time:19464ms step_avg:96.84ms +step:202/1670 train_time:19559ms step_avg:96.83ms +step:203/1670 train_time:19655ms step_avg:96.82ms +step:204/1670 train_time:19751ms step_avg:96.82ms +step:205/1670 train_time:19846ms step_avg:96.81ms +step:206/1670 train_time:19941ms step_avg:96.80ms +step:207/1670 train_time:20037ms step_avg:96.80ms +step:208/1670 train_time:20133ms step_avg:96.79ms +step:209/1670 train_time:20229ms step_avg:96.79ms +step:210/1670 train_time:20324ms step_avg:96.78ms +step:211/1670 train_time:20419ms step_avg:96.77ms +step:212/1670 train_time:20515ms step_avg:96.77ms +step:213/1670 train_time:20798ms step_avg:97.64ms +step:214/1670 train_time:20918ms step_avg:97.75ms +step:215/1670 train_time:21012ms step_avg:97.73ms +step:216/1670 train_time:21106ms step_avg:97.71ms +step:217/1670 train_time:21200ms step_avg:97.70ms +step:218/1670 train_time:21295ms 
step_avg:97.68ms +step:219/1670 train_time:21389ms step_avg:97.67ms +step:220/1670 train_time:21483ms step_avg:97.65ms +step:221/1670 train_time:21578ms step_avg:97.64ms +step:222/1670 train_time:21673ms step_avg:97.63ms +step:223/1670 train_time:21771ms step_avg:97.63ms +step:224/1670 train_time:21870ms step_avg:97.63ms +step:225/1670 train_time:21969ms step_avg:97.64ms +step:226/1670 train_time:22065ms step_avg:97.63ms +step:227/1670 train_time:22159ms step_avg:97.62ms +step:228/1670 train_time:22254ms step_avg:97.60ms +step:229/1670 train_time:22349ms step_avg:97.60ms +step:230/1670 train_time:22443ms step_avg:97.58ms +step:231/1670 train_time:22538ms step_avg:97.57ms +step:232/1670 train_time:22633ms step_avg:97.55ms +step:233/1670 train_time:22728ms step_avg:97.55ms +step:234/1670 train_time:22824ms step_avg:97.54ms +step:235/1670 train_time:22920ms step_avg:97.53ms +step:236/1670 train_time:23017ms step_avg:97.53ms +step:237/1670 train_time:23114ms step_avg:97.53ms +step:238/1670 train_time:23211ms step_avg:97.52ms +step:239/1670 train_time:23306ms step_avg:97.52ms +step:240/1670 train_time:23400ms step_avg:97.50ms +step:241/1670 train_time:23495ms step_avg:97.49ms +step:242/1670 train_time:23590ms step_avg:97.48ms +step:243/1670 train_time:23685ms step_avg:97.47ms +step:244/1670 train_time:23781ms step_avg:97.46ms +step:245/1670 train_time:23877ms step_avg:97.46ms +step:246/1670 train_time:23973ms step_avg:97.45ms +step:247/1670 train_time:24071ms step_avg:97.45ms +step:248/1670 train_time:24167ms step_avg:97.45ms +step:249/1670 train_time:24263ms step_avg:97.44ms +step:250/1670 train_time:24358ms step_avg:97.43ms +step:250/1670 val_loss:3.9738 train_time:24453ms step_avg:97.81ms +step:251/1670 train_time:24474ms step_avg:97.51ms +step:252/1670 train_time:24557ms step_avg:97.45ms +step:253/1670 train_time:24654ms step_avg:97.45ms +step:254/1670 train_time:24751ms step_avg:97.45ms +step:255/1670 train_time:24847ms step_avg:97.44ms +step:256/1670 train_time:24942ms step_avg:97.43ms +step:257/1670 train_time:25037ms step_avg:97.42ms +step:258/1670 train_time:25131ms step_avg:97.41ms +step:259/1670 train_time:25226ms step_avg:97.40ms +step:260/1670 train_time:25321ms step_avg:97.39ms +step:261/1670 train_time:25416ms step_avg:97.38ms +step:262/1670 train_time:25513ms step_avg:97.38ms +step:263/1670 train_time:25610ms step_avg:97.38ms +step:264/1670 train_time:25707ms step_avg:97.38ms +step:265/1670 train_time:25804ms step_avg:97.37ms +step:266/1670 train_time:25899ms step_avg:97.36ms +step:267/1670 train_time:25994ms step_avg:97.35ms +step:268/1670 train_time:26088ms step_avg:97.34ms +step:269/1670 train_time:26183ms step_avg:97.34ms +step:270/1670 train_time:26278ms step_avg:97.33ms +step:271/1670 train_time:26372ms step_avg:97.31ms +step:272/1670 train_time:26469ms step_avg:97.31ms +step:273/1670 train_time:26565ms step_avg:97.31ms +step:274/1670 train_time:26662ms step_avg:97.31ms +step:275/1670 train_time:26758ms step_avg:97.30ms +step:276/1670 train_time:26853ms step_avg:97.29ms +step:277/1670 train_time:26948ms step_avg:97.29ms +step:278/1670 train_time:27044ms step_avg:97.28ms +step:279/1670 train_time:27140ms step_avg:97.27ms +step:280/1670 train_time:27234ms step_avg:97.26ms +step:281/1670 train_time:27329ms step_avg:97.25ms +step:282/1670 train_time:27424ms step_avg:97.25ms +step:283/1670 train_time:27521ms step_avg:97.25ms +step:284/1670 train_time:27616ms step_avg:97.24ms +step:285/1670 train_time:27712ms step_avg:97.24ms +step:286/1670 train_time:27809ms step_avg:97.23ms 
+step:287/1670 train_time:27904ms step_avg:97.23ms +step:288/1670 train_time:28000ms step_avg:97.22ms +step:289/1670 train_time:28095ms step_avg:97.21ms +step:290/1670 train_time:28190ms step_avg:97.21ms +step:291/1670 train_time:28285ms step_avg:97.20ms +step:292/1670 train_time:28381ms step_avg:97.19ms +step:293/1670 train_time:28476ms step_avg:97.19ms +step:294/1670 train_time:28571ms step_avg:97.18ms +step:295/1670 train_time:28667ms step_avg:97.17ms +step:296/1670 train_time:28763ms step_avg:97.17ms +step:297/1670 train_time:28859ms step_avg:97.17ms +step:298/1670 train_time:28954ms step_avg:97.16ms +step:299/1670 train_time:29050ms step_avg:97.16ms +step:300/1670 train_time:29145ms step_avg:97.15ms +step:301/1670 train_time:29241ms step_avg:97.15ms +step:302/1670 train_time:29336ms step_avg:97.14ms +step:303/1670 train_time:29431ms step_avg:97.13ms +step:304/1670 train_time:29527ms step_avg:97.13ms +step:305/1670 train_time:29623ms step_avg:97.12ms +step:306/1670 train_time:29718ms step_avg:97.12ms +step:307/1670 train_time:29813ms step_avg:97.11ms +step:308/1670 train_time:29909ms step_avg:97.11ms +step:309/1670 train_time:30006ms step_avg:97.11ms +step:310/1670 train_time:30101ms step_avg:97.10ms +step:311/1670 train_time:30196ms step_avg:97.09ms +step:312/1670 train_time:30291ms step_avg:97.09ms +step:313/1670 train_time:30386ms step_avg:97.08ms +step:314/1670 train_time:30482ms step_avg:97.08ms +step:315/1670 train_time:30577ms step_avg:97.07ms +step:316/1670 train_time:30672ms step_avg:97.06ms +step:317/1670 train_time:30768ms step_avg:97.06ms +step:318/1670 train_time:30864ms step_avg:97.06ms +step:319/1670 train_time:30960ms step_avg:97.05ms +step:320/1670 train_time:31057ms step_avg:97.05ms +step:321/1670 train_time:31152ms step_avg:97.05ms +step:322/1670 train_time:31248ms step_avg:97.04ms +step:323/1670 train_time:31344ms step_avg:97.04ms +step:324/1670 train_time:31439ms step_avg:97.03ms +step:325/1670 train_time:31533ms step_avg:97.03ms +step:326/1670 train_time:31629ms step_avg:97.02ms +step:327/1670 train_time:31725ms step_avg:97.02ms +step:328/1670 train_time:31820ms step_avg:97.01ms +step:329/1670 train_time:31916ms step_avg:97.01ms +step:330/1670 train_time:32011ms step_avg:97.00ms +step:331/1670 train_time:32107ms step_avg:97.00ms +step:332/1670 train_time:32204ms step_avg:97.00ms +step:333/1670 train_time:32300ms step_avg:97.00ms +step:334/1670 train_time:32396ms step_avg:96.99ms +step:335/1670 train_time:32491ms step_avg:96.99ms +step:336/1670 train_time:32586ms step_avg:96.98ms +step:337/1670 train_time:32681ms step_avg:96.98ms +step:338/1670 train_time:32777ms step_avg:96.97ms +step:339/1670 train_time:32872ms step_avg:96.97ms +step:340/1670 train_time:32969ms step_avg:96.97ms +step:341/1670 train_time:33064ms step_avg:96.96ms +step:342/1670 train_time:33160ms step_avg:96.96ms +step:343/1670 train_time:33256ms step_avg:96.96ms +step:344/1670 train_time:33351ms step_avg:96.95ms +step:345/1670 train_time:33446ms step_avg:96.95ms +step:346/1670 train_time:33542ms step_avg:96.94ms +step:347/1670 train_time:33637ms step_avg:96.94ms +step:348/1670 train_time:33731ms step_avg:96.93ms +step:349/1670 train_time:33826ms step_avg:96.92ms +step:350/1670 train_time:33923ms step_avg:96.92ms +step:351/1670 train_time:34019ms step_avg:96.92ms +step:352/1670 train_time:34114ms step_avg:96.91ms +step:353/1670 train_time:34210ms step_avg:96.91ms +step:354/1670 train_time:34306ms step_avg:96.91ms +step:355/1670 train_time:34403ms step_avg:96.91ms +step:356/1670 train_time:34499ms 
step_avg:96.91ms +step:357/1670 train_time:34593ms step_avg:96.90ms +step:358/1670 train_time:34689ms step_avg:96.90ms +step:359/1670 train_time:34784ms step_avg:96.89ms +step:360/1670 train_time:34879ms step_avg:96.89ms +step:361/1670 train_time:34975ms step_avg:96.88ms +step:362/1670 train_time:35070ms step_avg:96.88ms +step:363/1670 train_time:35166ms step_avg:96.88ms +step:364/1670 train_time:35263ms step_avg:96.88ms +step:365/1670 train_time:35359ms step_avg:96.87ms +step:366/1670 train_time:35454ms step_avg:96.87ms +step:367/1670 train_time:35549ms step_avg:96.86ms +step:368/1670 train_time:35645ms step_avg:96.86ms +step:369/1670 train_time:35740ms step_avg:96.86ms +step:370/1670 train_time:35836ms step_avg:96.85ms +step:371/1670 train_time:35931ms step_avg:96.85ms +step:372/1670 train_time:36027ms step_avg:96.85ms +step:373/1670 train_time:36123ms step_avg:96.84ms +step:374/1670 train_time:36219ms step_avg:96.84ms +step:375/1670 train_time:36315ms step_avg:96.84ms +step:375/1670 val_loss:3.8193 train_time:36410ms step_avg:97.09ms +step:376/1670 train_time:36431ms step_avg:96.89ms +step:377/1670 train_time:36513ms step_avg:96.85ms +step:378/1670 train_time:36614ms step_avg:96.86ms +step:379/1670 train_time:36710ms step_avg:96.86ms +step:380/1670 train_time:36805ms step_avg:96.86ms +step:381/1670 train_time:36900ms step_avg:96.85ms +step:382/1670 train_time:36994ms step_avg:96.84ms +step:383/1670 train_time:37089ms step_avg:96.84ms +step:384/1670 train_time:37184ms step_avg:96.83ms +step:385/1670 train_time:37279ms step_avg:96.83ms +step:386/1670 train_time:37375ms step_avg:96.83ms +step:387/1670 train_time:37474ms step_avg:96.83ms +step:388/1670 train_time:37573ms step_avg:96.84ms +step:389/1670 train_time:37671ms step_avg:96.84ms +step:390/1670 train_time:37767ms step_avg:96.84ms +step:391/1670 train_time:37862ms step_avg:96.83ms +step:392/1670 train_time:37957ms step_avg:96.83ms +step:393/1670 train_time:38052ms step_avg:96.82ms +step:394/1670 train_time:38147ms step_avg:96.82ms +step:395/1670 train_time:38242ms step_avg:96.81ms +step:396/1670 train_time:38336ms step_avg:96.81ms +step:397/1670 train_time:38433ms step_avg:96.81ms +step:398/1670 train_time:38530ms step_avg:96.81ms +step:399/1670 train_time:38626ms step_avg:96.81ms +step:400/1670 train_time:38723ms step_avg:96.81ms +step:401/1670 train_time:38818ms step_avg:96.80ms +step:402/1670 train_time:38913ms step_avg:96.80ms +step:403/1670 train_time:39008ms step_avg:96.79ms +step:404/1670 train_time:39103ms step_avg:96.79ms +step:405/1670 train_time:39198ms step_avg:96.79ms +step:406/1670 train_time:39293ms step_avg:96.78ms +step:407/1670 train_time:39389ms step_avg:96.78ms +step:408/1670 train_time:39484ms step_avg:96.78ms +step:409/1670 train_time:39580ms step_avg:96.77ms +step:410/1670 train_time:39678ms step_avg:96.77ms +step:411/1670 train_time:39774ms step_avg:96.77ms +step:412/1670 train_time:39871ms step_avg:96.77ms +step:413/1670 train_time:39967ms step_avg:96.77ms +step:414/1670 train_time:40062ms step_avg:96.77ms +step:415/1670 train_time:40157ms step_avg:96.76ms +step:416/1670 train_time:40252ms step_avg:96.76ms +step:417/1670 train_time:40348ms step_avg:96.76ms +step:418/1670 train_time:40443ms step_avg:96.75ms +step:419/1670 train_time:40539ms step_avg:96.75ms +step:420/1670 train_time:40636ms step_avg:96.75ms +step:421/1670 train_time:40732ms step_avg:96.75ms +step:422/1670 train_time:40827ms step_avg:96.75ms +step:423/1670 train_time:40923ms step_avg:96.75ms +step:424/1670 train_time:41018ms step_avg:96.74ms 
+step:425/1670 train_time:41298ms step_avg:97.17ms +step:426/1670 train_time:41486ms step_avg:97.39ms +step:427/1670 train_time:41580ms step_avg:97.38ms +step:428/1670 train_time:41675ms step_avg:97.37ms +step:429/1670 train_time:41770ms step_avg:97.36ms +step:430/1670 train_time:41863ms step_avg:97.36ms +step:431/1670 train_time:41958ms step_avg:97.35ms +step:432/1670 train_time:42053ms step_avg:97.35ms +step:433/1670 train_time:42148ms step_avg:97.34ms +step:434/1670 train_time:42243ms step_avg:97.33ms +step:435/1670 train_time:42339ms step_avg:97.33ms +step:436/1670 train_time:42440ms step_avg:97.34ms +step:437/1670 train_time:42538ms step_avg:97.34ms +step:438/1670 train_time:42635ms step_avg:97.34ms +step:439/1670 train_time:42731ms step_avg:97.34ms +step:440/1670 train_time:42827ms step_avg:97.33ms +step:441/1670 train_time:42921ms step_avg:97.33ms +step:442/1670 train_time:43016ms step_avg:97.32ms +step:443/1670 train_time:43111ms step_avg:97.32ms +step:444/1670 train_time:43206ms step_avg:97.31ms +step:445/1670 train_time:43301ms step_avg:97.31ms +step:446/1670 train_time:43397ms step_avg:97.30ms +step:447/1670 train_time:43494ms step_avg:97.30ms +step:448/1670 train_time:43591ms step_avg:97.30ms +step:449/1670 train_time:43688ms step_avg:97.30ms +step:450/1670 train_time:43783ms step_avg:97.30ms +step:451/1670 train_time:43879ms step_avg:97.29ms +step:452/1670 train_time:43975ms step_avg:97.29ms +step:453/1670 train_time:44070ms step_avg:97.29ms +step:454/1670 train_time:44166ms step_avg:97.28ms +step:455/1670 train_time:44260ms step_avg:97.28ms +step:456/1670 train_time:44355ms step_avg:97.27ms +step:457/1670 train_time:44452ms step_avg:97.27ms +step:458/1670 train_time:44549ms step_avg:97.27ms +step:459/1670 train_time:44645ms step_avg:97.27ms +step:460/1670 train_time:44740ms step_avg:97.26ms +step:461/1670 train_time:44836ms step_avg:97.26ms +step:462/1670 train_time:44933ms step_avg:97.26ms +step:463/1670 train_time:45029ms step_avg:97.25ms +step:464/1670 train_time:45124ms step_avg:97.25ms +step:465/1670 train_time:45219ms step_avg:97.24ms +step:466/1670 train_time:45314ms step_avg:97.24ms +step:467/1670 train_time:45410ms step_avg:97.24ms +step:468/1670 train_time:45506ms step_avg:97.24ms +step:469/1670 train_time:45601ms step_avg:97.23ms +step:470/1670 train_time:45697ms step_avg:97.23ms +step:471/1670 train_time:45793ms step_avg:97.23ms +step:472/1670 train_time:45890ms step_avg:97.22ms +step:473/1670 train_time:45985ms step_avg:97.22ms +step:474/1670 train_time:46080ms step_avg:97.22ms +step:475/1670 train_time:46176ms step_avg:97.21ms +step:476/1670 train_time:46272ms step_avg:97.21ms +step:477/1670 train_time:46368ms step_avg:97.21ms +step:478/1670 train_time:46464ms step_avg:97.21ms +step:479/1670 train_time:46559ms step_avg:97.20ms +step:480/1670 train_time:46655ms step_avg:97.20ms +step:481/1670 train_time:46751ms step_avg:97.19ms +step:482/1670 train_time:46847ms step_avg:97.19ms +step:483/1670 train_time:46943ms step_avg:97.19ms +step:484/1670 train_time:47038ms step_avg:97.19ms +step:485/1670 train_time:47134ms step_avg:97.18ms +step:486/1670 train_time:47230ms step_avg:97.18ms +step:487/1670 train_time:47325ms step_avg:97.18ms +step:488/1670 train_time:47420ms step_avg:97.17ms +step:489/1670 train_time:47516ms step_avg:97.17ms +step:490/1670 train_time:47612ms step_avg:97.17ms +step:491/1670 train_time:47709ms step_avg:97.17ms +step:492/1670 train_time:47804ms step_avg:97.16ms +step:493/1670 train_time:47899ms step_avg:97.16ms +step:494/1670 train_time:47995ms 
step_avg:97.16ms +step:495/1670 train_time:48091ms step_avg:97.15ms +step:496/1670 train_time:48186ms step_avg:97.15ms +step:497/1670 train_time:48281ms step_avg:97.15ms +step:498/1670 train_time:48378ms step_avg:97.15ms +step:499/1670 train_time:48475ms step_avg:97.14ms +step:500/1670 train_time:48570ms step_avg:97.14ms +step:500/1670 val_loss:3.7143 train_time:48665ms step_avg:97.33ms +step:501/1670 train_time:48686ms step_avg:97.18ms +step:502/1670 train_time:48768ms step_avg:97.15ms +step:503/1670 train_time:48871ms step_avg:97.16ms +step:504/1670 train_time:48969ms step_avg:97.16ms +step:505/1670 train_time:49064ms step_avg:97.16ms +step:506/1670 train_time:49159ms step_avg:97.15ms +step:507/1670 train_time:49253ms step_avg:97.15ms +step:508/1670 train_time:49348ms step_avg:97.14ms +step:509/1670 train_time:49443ms step_avg:97.14ms +step:510/1670 train_time:49538ms step_avg:97.13ms +step:511/1670 train_time:49634ms step_avg:97.13ms +step:512/1670 train_time:49731ms step_avg:97.13ms +step:513/1670 train_time:49829ms step_avg:97.13ms +step:514/1670 train_time:49927ms step_avg:97.14ms +step:515/1670 train_time:50024ms step_avg:97.13ms +step:516/1670 train_time:50120ms step_avg:97.13ms +step:517/1670 train_time:50214ms step_avg:97.13ms +step:518/1670 train_time:50309ms step_avg:97.12ms +step:519/1670 train_time:50404ms step_avg:97.12ms +step:520/1670 train_time:50499ms step_avg:97.11ms +step:521/1670 train_time:50595ms step_avg:97.11ms +step:522/1670 train_time:50691ms step_avg:97.11ms +step:523/1670 train_time:50788ms step_avg:97.11ms +step:524/1670 train_time:50885ms step_avg:97.11ms +step:525/1670 train_time:50982ms step_avg:97.11ms +step:526/1670 train_time:51077ms step_avg:97.11ms +step:527/1670 train_time:51173ms step_avg:97.10ms +step:528/1670 train_time:51268ms step_avg:97.10ms +step:529/1670 train_time:51363ms step_avg:97.09ms +step:530/1670 train_time:51459ms step_avg:97.09ms +step:531/1670 train_time:51554ms step_avg:97.09ms +step:532/1670 train_time:51649ms step_avg:97.08ms +step:533/1670 train_time:51745ms step_avg:97.08ms +step:534/1670 train_time:51842ms step_avg:97.08ms +step:535/1670 train_time:51938ms step_avg:97.08ms +step:536/1670 train_time:52034ms step_avg:97.08ms +step:537/1670 train_time:52129ms step_avg:97.07ms +step:538/1670 train_time:52225ms step_avg:97.07ms +step:539/1670 train_time:52320ms step_avg:97.07ms +step:540/1670 train_time:52416ms step_avg:97.07ms +step:541/1670 train_time:52512ms step_avg:97.06ms +step:542/1670 train_time:52608ms step_avg:97.06ms +step:543/1670 train_time:52703ms step_avg:97.06ms +step:544/1670 train_time:52799ms step_avg:97.06ms +step:545/1670 train_time:52895ms step_avg:97.06ms +step:546/1670 train_time:52991ms step_avg:97.05ms +step:547/1670 train_time:53087ms step_avg:97.05ms +step:548/1670 train_time:53183ms step_avg:97.05ms +step:549/1670 train_time:53279ms step_avg:97.05ms +step:550/1670 train_time:53374ms step_avg:97.04ms +step:551/1670 train_time:53470ms step_avg:97.04ms +step:552/1670 train_time:53566ms step_avg:97.04ms +step:553/1670 train_time:53662ms step_avg:97.04ms +step:554/1670 train_time:53759ms step_avg:97.04ms +step:555/1670 train_time:53855ms step_avg:97.04ms +step:556/1670 train_time:53951ms step_avg:97.03ms +step:557/1670 train_time:54047ms step_avg:97.03ms +step:558/1670 train_time:54143ms step_avg:97.03ms +step:559/1670 train_time:54240ms step_avg:97.03ms +step:560/1670 train_time:54337ms step_avg:97.03ms +step:561/1670 train_time:54434ms step_avg:97.03ms +step:562/1670 train_time:54530ms step_avg:97.03ms 
+step:563/1670 train_time:54626ms step_avg:97.03ms +step:564/1670 train_time:54723ms step_avg:97.03ms +step:565/1670 train_time:54821ms step_avg:97.03ms +step:566/1670 train_time:54918ms step_avg:97.03ms +step:567/1670 train_time:55017ms step_avg:97.03ms +step:568/1670 train_time:55113ms step_avg:97.03ms +step:569/1670 train_time:55209ms step_avg:97.03ms +step:570/1670 train_time:55306ms step_avg:97.03ms +step:571/1670 train_time:55404ms step_avg:97.03ms +step:572/1670 train_time:55501ms step_avg:97.03ms +step:573/1670 train_time:55598ms step_avg:97.03ms +step:574/1670 train_time:55695ms step_avg:97.03ms +step:575/1670 train_time:55792ms step_avg:97.03ms +step:576/1670 train_time:55889ms step_avg:97.03ms +step:577/1670 train_time:55987ms step_avg:97.03ms +step:578/1670 train_time:56085ms step_avg:97.03ms +step:579/1670 train_time:56183ms step_avg:97.04ms +step:580/1670 train_time:56281ms step_avg:97.04ms +step:581/1670 train_time:56379ms step_avg:97.04ms +step:582/1670 train_time:56476ms step_avg:97.04ms +step:583/1670 train_time:56572ms step_avg:97.04ms +step:584/1670 train_time:56669ms step_avg:97.04ms +step:585/1670 train_time:56767ms step_avg:97.04ms +step:586/1670 train_time:56864ms step_avg:97.04ms +step:587/1670 train_time:56962ms step_avg:97.04ms +step:588/1670 train_time:57060ms step_avg:97.04ms +step:589/1670 train_time:57158ms step_avg:97.04ms +step:590/1670 train_time:57255ms step_avg:97.04ms +step:591/1670 train_time:57352ms step_avg:97.04ms +step:592/1670 train_time:57449ms step_avg:97.04ms +step:593/1670 train_time:57546ms step_avg:97.04ms +step:594/1670 train_time:57643ms step_avg:97.04ms +step:595/1670 train_time:57741ms step_avg:97.04ms +step:596/1670 train_time:57838ms step_avg:97.04ms +step:597/1670 train_time:57934ms step_avg:97.04ms +step:598/1670 train_time:58031ms step_avg:97.04ms +step:599/1670 train_time:58128ms step_avg:97.04ms +step:600/1670 train_time:58227ms step_avg:97.05ms +step:601/1670 train_time:58324ms step_avg:97.04ms +step:602/1670 train_time:58421ms step_avg:97.04ms +step:603/1670 train_time:58518ms step_avg:97.05ms +step:604/1670 train_time:58616ms step_avg:97.05ms +step:605/1670 train_time:58712ms step_avg:97.04ms +step:606/1670 train_time:58809ms step_avg:97.04ms +step:607/1670 train_time:58906ms step_avg:97.05ms +step:608/1670 train_time:59004ms step_avg:97.05ms +step:609/1670 train_time:59102ms step_avg:97.05ms +step:610/1670 train_time:59199ms step_avg:97.05ms +step:611/1670 train_time:59296ms step_avg:97.05ms +step:612/1670 train_time:59393ms step_avg:97.05ms +step:613/1670 train_time:59490ms step_avg:97.05ms +step:614/1670 train_time:59587ms step_avg:97.05ms +step:615/1670 train_time:59685ms step_avg:97.05ms +step:616/1670 train_time:59782ms step_avg:97.05ms +step:617/1670 train_time:59880ms step_avg:97.05ms +step:618/1670 train_time:59977ms step_avg:97.05ms +step:619/1670 train_time:60074ms step_avg:97.05ms +step:620/1670 train_time:60170ms step_avg:97.05ms +step:621/1670 train_time:60268ms step_avg:97.05ms +step:622/1670 train_time:60366ms step_avg:97.05ms +step:623/1670 train_time:60463ms step_avg:97.05ms +step:624/1670 train_time:60561ms step_avg:97.05ms +step:625/1670 train_time:60658ms step_avg:97.05ms +step:625/1670 val_loss:3.6151 train_time:60754ms step_avg:97.21ms +step:626/1670 train_time:60776ms step_avg:97.09ms +step:627/1670 train_time:60861ms step_avg:97.07ms +step:628/1670 train_time:60959ms step_avg:97.07ms +step:629/1670 train_time:61057ms step_avg:97.07ms +step:630/1670 train_time:61153ms step_avg:97.07ms +step:631/1670 
train_time:61249ms step_avg:97.07ms +step:632/1670 train_time:61344ms step_avg:97.06ms +step:633/1670 train_time:61440ms step_avg:97.06ms +step:634/1670 train_time:61536ms step_avg:97.06ms +step:635/1670 train_time:61632ms step_avg:97.06ms +step:636/1670 train_time:61731ms step_avg:97.06ms +step:637/1670 train_time:61829ms step_avg:97.06ms +step:638/1670 train_time:61927ms step_avg:97.06ms +step:639/1670 train_time:62299ms step_avg:97.49ms +step:640/1670 train_time:62381ms step_avg:97.47ms +step:641/1670 train_time:62477ms step_avg:97.47ms +step:642/1670 train_time:62573ms step_avg:97.47ms +step:643/1670 train_time:62669ms step_avg:97.46ms +step:644/1670 train_time:62765ms step_avg:97.46ms +step:645/1670 train_time:62862ms step_avg:97.46ms +step:646/1670 train_time:62958ms step_avg:97.46ms +step:647/1670 train_time:63055ms step_avg:97.46ms +step:648/1670 train_time:63151ms step_avg:97.45ms +step:649/1670 train_time:63251ms step_avg:97.46ms +step:650/1670 train_time:63350ms step_avg:97.46ms +step:651/1670 train_time:63447ms step_avg:97.46ms +step:652/1670 train_time:63543ms step_avg:97.46ms +step:653/1670 train_time:63640ms step_avg:97.46ms +step:654/1670 train_time:63738ms step_avg:97.46ms +step:655/1670 train_time:63834ms step_avg:97.46ms +step:656/1670 train_time:63930ms step_avg:97.46ms +step:657/1670 train_time:64026ms step_avg:97.45ms +step:658/1670 train_time:64124ms step_avg:97.45ms +step:659/1670 train_time:64223ms step_avg:97.46ms +step:660/1670 train_time:64324ms step_avg:97.46ms +step:661/1670 train_time:64424ms step_avg:97.46ms +step:662/1670 train_time:64521ms step_avg:97.46ms +step:663/1670 train_time:64619ms step_avg:97.46ms +step:664/1670 train_time:64715ms step_avg:97.46ms +step:665/1670 train_time:64812ms step_avg:97.46ms +step:666/1670 train_time:64908ms step_avg:97.46ms +step:667/1670 train_time:65004ms step_avg:97.46ms +step:668/1670 train_time:65102ms step_avg:97.46ms +step:669/1670 train_time:65200ms step_avg:97.46ms +step:670/1670 train_time:65299ms step_avg:97.46ms +step:671/1670 train_time:65398ms step_avg:97.46ms +step:672/1670 train_time:65496ms step_avg:97.46ms +step:673/1670 train_time:65594ms step_avg:97.47ms +step:674/1670 train_time:65691ms step_avg:97.46ms +step:675/1670 train_time:65787ms step_avg:97.46ms +step:676/1670 train_time:65884ms step_avg:97.46ms +step:677/1670 train_time:65980ms step_avg:97.46ms +step:678/1670 train_time:66077ms step_avg:97.46ms +step:679/1670 train_time:66175ms step_avg:97.46ms +step:680/1670 train_time:66272ms step_avg:97.46ms +step:681/1670 train_time:66370ms step_avg:97.46ms +step:682/1670 train_time:66467ms step_avg:97.46ms +step:683/1670 train_time:66565ms step_avg:97.46ms +step:684/1670 train_time:66663ms step_avg:97.46ms +step:685/1670 train_time:66759ms step_avg:97.46ms +step:686/1670 train_time:66856ms step_avg:97.46ms +step:687/1670 train_time:66953ms step_avg:97.46ms +step:688/1670 train_time:67050ms step_avg:97.46ms +step:689/1670 train_time:67145ms step_avg:97.45ms +step:690/1670 train_time:67242ms step_avg:97.45ms +step:691/1670 train_time:67341ms step_avg:97.45ms +step:692/1670 train_time:67438ms step_avg:97.45ms +step:693/1670 train_time:67537ms step_avg:97.46ms +step:694/1670 train_time:67634ms step_avg:97.46ms +step:695/1670 train_time:67731ms step_avg:97.46ms +step:696/1670 train_time:67828ms step_avg:97.45ms +step:697/1670 train_time:67924ms step_avg:97.45ms +step:698/1670 train_time:68021ms step_avg:97.45ms +step:699/1670 train_time:68120ms step_avg:97.45ms +step:700/1670 train_time:68218ms step_avg:97.45ms 
+step:701/1670 train_time:68315ms step_avg:97.45ms +step:702/1670 train_time:68412ms step_avg:97.45ms +step:703/1670 train_time:68509ms step_avg:97.45ms +step:704/1670 train_time:68605ms step_avg:97.45ms +step:705/1670 train_time:68702ms step_avg:97.45ms +step:706/1670 train_time:68800ms step_avg:97.45ms +step:707/1670 train_time:68898ms step_avg:97.45ms +step:708/1670 train_time:68995ms step_avg:97.45ms +step:709/1670 train_time:69092ms step_avg:97.45ms +step:710/1670 train_time:69188ms step_avg:97.45ms +step:711/1670 train_time:69284ms step_avg:97.45ms +step:712/1670 train_time:69382ms step_avg:97.45ms +step:713/1670 train_time:69480ms step_avg:97.45ms +step:714/1670 train_time:69578ms step_avg:97.45ms +step:715/1670 train_time:69675ms step_avg:97.45ms +step:716/1670 train_time:69773ms step_avg:97.45ms +step:717/1670 train_time:69869ms step_avg:97.45ms +step:718/1670 train_time:69966ms step_avg:97.45ms +step:719/1670 train_time:70062ms step_avg:97.44ms +step:720/1670 train_time:70160ms step_avg:97.44ms +step:721/1670 train_time:70258ms step_avg:97.44ms +step:722/1670 train_time:70355ms step_avg:97.44ms +step:723/1670 train_time:70452ms step_avg:97.44ms +step:724/1670 train_time:70548ms step_avg:97.44ms +step:725/1670 train_time:70645ms step_avg:97.44ms +step:726/1670 train_time:70743ms step_avg:97.44ms +step:727/1670 train_time:70840ms step_avg:97.44ms +step:728/1670 train_time:70939ms step_avg:97.44ms +step:729/1670 train_time:71037ms step_avg:97.44ms +step:730/1670 train_time:71135ms step_avg:97.44ms +step:731/1670 train_time:71231ms step_avg:97.44ms +step:732/1670 train_time:71328ms step_avg:97.44ms +step:733/1670 train_time:71426ms step_avg:97.44ms +step:734/1670 train_time:71522ms step_avg:97.44ms +step:735/1670 train_time:71620ms step_avg:97.44ms +step:736/1670 train_time:71718ms step_avg:97.44ms +step:737/1670 train_time:71816ms step_avg:97.44ms +step:738/1670 train_time:71913ms step_avg:97.44ms +step:739/1670 train_time:72008ms step_avg:97.44ms +step:740/1670 train_time:72106ms step_avg:97.44ms +step:741/1670 train_time:72204ms step_avg:97.44ms +step:742/1670 train_time:72302ms step_avg:97.44ms +step:743/1670 train_time:72400ms step_avg:97.44ms +step:744/1670 train_time:72498ms step_avg:97.44ms +step:745/1670 train_time:72595ms step_avg:97.44ms +step:746/1670 train_time:72692ms step_avg:97.44ms +step:747/1670 train_time:72788ms step_avg:97.44ms +step:748/1670 train_time:72885ms step_avg:97.44ms +step:749/1670 train_time:72982ms step_avg:97.44ms +step:750/1670 train_time:73081ms step_avg:97.44ms +step:750/1670 val_loss:3.5616 train_time:73177ms step_avg:97.57ms +step:751/1670 train_time:73200ms step_avg:97.47ms +step:752/1670 train_time:73283ms step_avg:97.45ms +step:753/1670 train_time:73381ms step_avg:97.45ms +step:754/1670 train_time:73478ms step_avg:97.45ms +step:755/1670 train_time:73575ms step_avg:97.45ms +step:756/1670 train_time:73672ms step_avg:97.45ms +step:757/1670 train_time:73768ms step_avg:97.45ms +step:758/1670 train_time:73865ms step_avg:97.45ms +step:759/1670 train_time:73961ms step_avg:97.45ms +step:760/1670 train_time:74057ms step_avg:97.44ms +step:761/1670 train_time:74157ms step_avg:97.45ms +step:762/1670 train_time:74259ms step_avg:97.45ms +step:763/1670 train_time:74358ms step_avg:97.45ms +step:764/1670 train_time:74456ms step_avg:97.46ms +step:765/1670 train_time:74553ms step_avg:97.46ms +step:766/1670 train_time:74650ms step_avg:97.45ms +step:767/1670 train_time:74747ms step_avg:97.45ms +step:768/1670 train_time:74843ms step_avg:97.45ms +step:769/1670 
train_time:74939ms step_avg:97.45ms +step:770/1670 train_time:75035ms step_avg:97.45ms +step:771/1670 train_time:75133ms step_avg:97.45ms +step:772/1670 train_time:75233ms step_avg:97.45ms +step:773/1670 train_time:75333ms step_avg:97.46ms +step:774/1670 train_time:75431ms step_avg:97.46ms +step:775/1670 train_time:75529ms step_avg:97.46ms +step:776/1670 train_time:75626ms step_avg:97.46ms +step:777/1670 train_time:75722ms step_avg:97.45ms +step:778/1670 train_time:75818ms step_avg:97.45ms +step:779/1670 train_time:75915ms step_avg:97.45ms +step:780/1670 train_time:76012ms step_avg:97.45ms +step:781/1670 train_time:76110ms step_avg:97.45ms +step:782/1670 train_time:76207ms step_avg:97.45ms +step:783/1670 train_time:76305ms step_avg:97.45ms +step:784/1670 train_time:76402ms step_avg:97.45ms +step:785/1670 train_time:76499ms step_avg:97.45ms +step:786/1670 train_time:76596ms step_avg:97.45ms +step:787/1670 train_time:76694ms step_avg:97.45ms +step:788/1670 train_time:76792ms step_avg:97.45ms +step:789/1670 train_time:76888ms step_avg:97.45ms +step:790/1670 train_time:76985ms step_avg:97.45ms +step:791/1670 train_time:77081ms step_avg:97.45ms +step:792/1670 train_time:77178ms step_avg:97.45ms +step:793/1670 train_time:77276ms step_avg:97.45ms +step:794/1670 train_time:77374ms step_avg:97.45ms +step:795/1670 train_time:77472ms step_avg:97.45ms +step:796/1670 train_time:77569ms step_avg:97.45ms +step:797/1670 train_time:77666ms step_avg:97.45ms +step:798/1670 train_time:77762ms step_avg:97.45ms +step:799/1670 train_time:77858ms step_avg:97.44ms +step:800/1670 train_time:77955ms step_avg:97.44ms +step:801/1670 train_time:78052ms step_avg:97.44ms +step:802/1670 train_time:78150ms step_avg:97.44ms +step:803/1670 train_time:78248ms step_avg:97.44ms +step:804/1670 train_time:78345ms step_avg:97.44ms +step:805/1670 train_time:78442ms step_avg:97.44ms +step:806/1670 train_time:78539ms step_avg:97.44ms +step:807/1670 train_time:78637ms step_avg:97.44ms +step:808/1670 train_time:78734ms step_avg:97.44ms +step:809/1670 train_time:78832ms step_avg:97.44ms +step:810/1670 train_time:78929ms step_avg:97.44ms +step:811/1670 train_time:79026ms step_avg:97.44ms +step:812/1670 train_time:79123ms step_avg:97.44ms +step:813/1670 train_time:79219ms step_avg:97.44ms +step:814/1670 train_time:79316ms step_avg:97.44ms +step:815/1670 train_time:79414ms step_avg:97.44ms +step:816/1670 train_time:79513ms step_avg:97.44ms +step:817/1670 train_time:79612ms step_avg:97.44ms +step:818/1670 train_time:79708ms step_avg:97.44ms +step:819/1670 train_time:79806ms step_avg:97.44ms +step:820/1670 train_time:79902ms step_avg:97.44ms +step:821/1670 train_time:79999ms step_avg:97.44ms +step:822/1670 train_time:80096ms step_avg:97.44ms +step:823/1670 train_time:80195ms step_avg:97.44ms +step:824/1670 train_time:80292ms step_avg:97.44ms +step:825/1670 train_time:80391ms step_avg:97.44ms +step:826/1670 train_time:80489ms step_avg:97.44ms +step:827/1670 train_time:80586ms step_avg:97.44ms +step:828/1670 train_time:80683ms step_avg:97.44ms +step:829/1670 train_time:80780ms step_avg:97.44ms +step:830/1670 train_time:80877ms step_avg:97.44ms +step:831/1670 train_time:80974ms step_avg:97.44ms +step:832/1670 train_time:81072ms step_avg:97.44ms +step:833/1670 train_time:81170ms step_avg:97.44ms +step:834/1670 train_time:81267ms step_avg:97.44ms +step:835/1670 train_time:81364ms step_avg:97.44ms +step:836/1670 train_time:81461ms step_avg:97.44ms +step:837/1670 train_time:81557ms step_avg:97.44ms +step:838/1670 train_time:81655ms step_avg:97.44ms 
+step:839/1670 train_time:81753ms step_avg:97.44ms +step:840/1670 train_time:81850ms step_avg:97.44ms +step:841/1670 train_time:81948ms step_avg:97.44ms +step:842/1670 train_time:82045ms step_avg:97.44ms +step:843/1670 train_time:82143ms step_avg:97.44ms +step:844/1670 train_time:82240ms step_avg:97.44ms +step:845/1670 train_time:82338ms step_avg:97.44ms +step:846/1670 train_time:82435ms step_avg:97.44ms +step:847/1670 train_time:82533ms step_avg:97.44ms +step:848/1670 train_time:82631ms step_avg:97.44ms +step:849/1670 train_time:82728ms step_avg:97.44ms +step:850/1670 train_time:82825ms step_avg:97.44ms +step:851/1670 train_time:83093ms step_avg:97.64ms +step:852/1670 train_time:83284ms step_avg:97.75ms +step:853/1670 train_time:83379ms step_avg:97.75ms +step:854/1670 train_time:83476ms step_avg:97.75ms +step:855/1670 train_time:83572ms step_avg:97.75ms +step:856/1670 train_time:83669ms step_avg:97.74ms +step:857/1670 train_time:83765ms step_avg:97.74ms +step:858/1670 train_time:83860ms step_avg:97.74ms +step:859/1670 train_time:83957ms step_avg:97.74ms +step:860/1670 train_time:84053ms step_avg:97.74ms +step:861/1670 train_time:84155ms step_avg:97.74ms +step:862/1670 train_time:84256ms step_avg:97.75ms +step:863/1670 train_time:84356ms step_avg:97.75ms +step:864/1670 train_time:84455ms step_avg:97.75ms +step:865/1670 train_time:84553ms step_avg:97.75ms +step:866/1670 train_time:84649ms step_avg:97.75ms +step:867/1670 train_time:84746ms step_avg:97.75ms +step:868/1670 train_time:84842ms step_avg:97.74ms +step:869/1670 train_time:84938ms step_avg:97.74ms +step:870/1670 train_time:85035ms step_avg:97.74ms +step:871/1670 train_time:85133ms step_avg:97.74ms +step:872/1670 train_time:85233ms step_avg:97.74ms +step:873/1670 train_time:85333ms step_avg:97.75ms +step:874/1670 train_time:85432ms step_avg:97.75ms +step:875/1670 train_time:85531ms step_avg:97.75ms +step:875/1670 val_loss:3.5198 train_time:85627ms step_avg:97.86ms +step:876/1670 train_time:85648ms step_avg:97.77ms +step:877/1670 train_time:85733ms step_avg:97.76ms +step:878/1670 train_time:85833ms step_avg:97.76ms +step:879/1670 train_time:85930ms step_avg:97.76ms +step:880/1670 train_time:86026ms step_avg:97.76ms +step:881/1670 train_time:86122ms step_avg:97.75ms +step:882/1670 train_time:86218ms step_avg:97.75ms +step:883/1670 train_time:86314ms step_avg:97.75ms +step:884/1670 train_time:86410ms step_avg:97.75ms +step:885/1670 train_time:86508ms step_avg:97.75ms +step:886/1670 train_time:86608ms step_avg:97.75ms +step:887/1670 train_time:86709ms step_avg:97.76ms +step:888/1670 train_time:86809ms step_avg:97.76ms +step:889/1670 train_time:86907ms step_avg:97.76ms +step:890/1670 train_time:87004ms step_avg:97.76ms +step:891/1670 train_time:87101ms step_avg:97.76ms +step:892/1670 train_time:87197ms step_avg:97.75ms +step:893/1670 train_time:87293ms step_avg:97.75ms +step:894/1670 train_time:87389ms step_avg:97.75ms +step:895/1670 train_time:87486ms step_avg:97.75ms +step:896/1670 train_time:87584ms step_avg:97.75ms +step:897/1670 train_time:87683ms step_avg:97.75ms +step:898/1670 train_time:87780ms step_avg:97.75ms +step:899/1670 train_time:87877ms step_avg:97.75ms +step:900/1670 train_time:87975ms step_avg:97.75ms +step:901/1670 train_time:88072ms step_avg:97.75ms +step:902/1670 train_time:88170ms step_avg:97.75ms +step:903/1670 train_time:88266ms step_avg:97.75ms +step:904/1670 train_time:88363ms step_avg:97.75ms +step:905/1670 train_time:88460ms step_avg:97.75ms +step:906/1670 train_time:88556ms step_avg:97.74ms +step:907/1670 
train_time:88654ms step_avg:97.74ms +step:908/1670 train_time:88753ms step_avg:97.75ms +step:909/1670 train_time:88850ms step_avg:97.74ms +step:910/1670 train_time:88949ms step_avg:97.75ms +step:911/1670 train_time:89045ms step_avg:97.74ms +step:912/1670 train_time:89143ms step_avg:97.74ms +step:913/1670 train_time:89239ms step_avg:97.74ms +step:914/1670 train_time:89335ms step_avg:97.74ms +step:915/1670 train_time:89433ms step_avg:97.74ms +step:916/1670 train_time:89530ms step_avg:97.74ms +step:917/1670 train_time:89629ms step_avg:97.74ms +step:918/1670 train_time:89727ms step_avg:97.74ms +step:919/1670 train_time:89826ms step_avg:97.74ms +step:920/1670 train_time:89925ms step_avg:97.74ms +step:921/1670 train_time:90022ms step_avg:97.74ms +step:922/1670 train_time:90118ms step_avg:97.74ms +step:923/1670 train_time:90215ms step_avg:97.74ms +step:924/1670 train_time:90312ms step_avg:97.74ms +step:925/1670 train_time:90410ms step_avg:97.74ms +step:926/1670 train_time:90507ms step_avg:97.74ms +step:927/1670 train_time:90605ms step_avg:97.74ms +step:928/1670 train_time:90702ms step_avg:97.74ms +step:929/1670 train_time:90800ms step_avg:97.74ms +step:930/1670 train_time:90897ms step_avg:97.74ms +step:931/1670 train_time:90994ms step_avg:97.74ms +step:932/1670 train_time:91091ms step_avg:97.74ms +step:933/1670 train_time:91189ms step_avg:97.74ms +step:934/1670 train_time:91285ms step_avg:97.74ms +step:935/1670 train_time:91382ms step_avg:97.73ms +step:936/1670 train_time:91479ms step_avg:97.73ms +step:937/1670 train_time:91575ms step_avg:97.73ms +step:938/1670 train_time:91673ms step_avg:97.73ms +step:939/1670 train_time:91771ms step_avg:97.73ms +step:940/1670 train_time:91869ms step_avg:97.73ms +step:941/1670 train_time:91969ms step_avg:97.74ms +step:942/1670 train_time:92068ms step_avg:97.74ms +step:943/1670 train_time:92165ms step_avg:97.74ms +step:944/1670 train_time:92262ms step_avg:97.74ms +step:945/1670 train_time:92358ms step_avg:97.73ms +step:946/1670 train_time:92455ms step_avg:97.73ms +step:947/1670 train_time:92552ms step_avg:97.73ms +step:948/1670 train_time:92649ms step_avg:97.73ms +step:949/1670 train_time:92746ms step_avg:97.73ms +step:950/1670 train_time:92844ms step_avg:97.73ms +step:951/1670 train_time:92942ms step_avg:97.73ms +step:952/1670 train_time:93038ms step_avg:97.73ms +step:953/1670 train_time:93135ms step_avg:97.73ms +step:954/1670 train_time:93233ms step_avg:97.73ms +step:955/1670 train_time:93330ms step_avg:97.73ms +step:956/1670 train_time:93429ms step_avg:97.73ms +step:957/1670 train_time:93526ms step_avg:97.73ms +step:958/1670 train_time:93623ms step_avg:97.73ms +step:959/1670 train_time:93720ms step_avg:97.73ms +step:960/1670 train_time:93817ms step_avg:97.73ms +step:961/1670 train_time:93915ms step_avg:97.73ms +step:962/1670 train_time:94014ms step_avg:97.73ms +step:963/1670 train_time:94111ms step_avg:97.73ms +step:964/1670 train_time:94209ms step_avg:97.73ms +step:965/1670 train_time:94307ms step_avg:97.73ms +step:966/1670 train_time:94404ms step_avg:97.73ms +step:967/1670 train_time:94501ms step_avg:97.73ms +step:968/1670 train_time:94598ms step_avg:97.73ms +step:969/1670 train_time:94695ms step_avg:97.72ms +step:970/1670 train_time:94792ms step_avg:97.72ms +step:971/1670 train_time:94891ms step_avg:97.72ms +step:972/1670 train_time:94988ms step_avg:97.72ms +step:973/1670 train_time:95085ms step_avg:97.72ms +step:974/1670 train_time:95183ms step_avg:97.72ms +step:975/1670 train_time:95280ms step_avg:97.72ms +step:976/1670 train_time:95376ms step_avg:97.72ms 
+step:977/1670 train_time:95473ms step_avg:97.72ms +step:978/1670 train_time:95571ms step_avg:97.72ms +step:979/1670 train_time:95669ms step_avg:97.72ms +step:980/1670 train_time:95767ms step_avg:97.72ms +step:981/1670 train_time:95865ms step_avg:97.72ms +step:982/1670 train_time:95962ms step_avg:97.72ms +step:983/1670 train_time:96059ms step_avg:97.72ms +step:984/1670 train_time:96156ms step_avg:97.72ms +step:985/1670 train_time:96254ms step_avg:97.72ms +step:986/1670 train_time:96351ms step_avg:97.72ms +step:987/1670 train_time:96448ms step_avg:97.72ms +step:988/1670 train_time:96546ms step_avg:97.72ms +step:989/1670 train_time:96643ms step_avg:97.72ms +step:990/1670 train_time:96739ms step_avg:97.72ms +step:991/1670 train_time:96836ms step_avg:97.72ms +step:992/1670 train_time:96934ms step_avg:97.72ms +step:993/1670 train_time:97032ms step_avg:97.72ms +step:994/1670 train_time:97130ms step_avg:97.72ms +step:995/1670 train_time:97227ms step_avg:97.72ms +step:996/1670 train_time:97325ms step_avg:97.72ms +step:997/1670 train_time:97423ms step_avg:97.72ms +step:998/1670 train_time:97520ms step_avg:97.71ms +step:999/1670 train_time:97616ms step_avg:97.71ms +step:1000/1670 train_time:97713ms step_avg:97.71ms +step:1000/1670 val_loss:3.4779 train_time:97810ms step_avg:97.81ms +step:1001/1670 train_time:97831ms step_avg:97.73ms +step:1002/1670 train_time:97916ms step_avg:97.72ms +step:1003/1670 train_time:98017ms step_avg:97.72ms +step:1004/1670 train_time:98113ms step_avg:97.72ms +step:1005/1670 train_time:98209ms step_avg:97.72ms +step:1006/1670 train_time:98306ms step_avg:97.72ms +step:1007/1670 train_time:98401ms step_avg:97.72ms +step:1008/1670 train_time:98497ms step_avg:97.72ms +step:1009/1670 train_time:98594ms step_avg:97.71ms +step:1010/1670 train_time:98691ms step_avg:97.71ms +step:1011/1670 train_time:98788ms step_avg:97.71ms +step:1012/1670 train_time:98889ms step_avg:97.72ms +step:1013/1670 train_time:98991ms step_avg:97.72ms +step:1014/1670 train_time:99089ms step_avg:97.72ms +step:1015/1670 train_time:99187ms step_avg:97.72ms +step:1016/1670 train_time:99283ms step_avg:97.72ms +step:1017/1670 train_time:99380ms step_avg:97.72ms +step:1018/1670 train_time:99477ms step_avg:97.72ms +step:1019/1670 train_time:99574ms step_avg:97.72ms +step:1020/1670 train_time:99670ms step_avg:97.72ms +step:1021/1670 train_time:99767ms step_avg:97.71ms +step:1022/1670 train_time:99865ms step_avg:97.72ms +step:1023/1670 train_time:99966ms step_avg:97.72ms +step:1024/1670 train_time:100065ms step_avg:97.72ms +step:1025/1670 train_time:100163ms step_avg:97.72ms +step:1026/1670 train_time:100260ms step_avg:97.72ms +step:1027/1670 train_time:100356ms step_avg:97.72ms +step:1028/1670 train_time:100452ms step_avg:97.72ms +step:1029/1670 train_time:100549ms step_avg:97.72ms +step:1030/1670 train_time:100646ms step_avg:97.71ms +step:1031/1670 train_time:100743ms step_avg:97.71ms +step:1032/1670 train_time:100841ms step_avg:97.71ms +step:1033/1670 train_time:100940ms step_avg:97.72ms +step:1034/1670 train_time:101038ms step_avg:97.72ms +step:1035/1670 train_time:101136ms step_avg:97.72ms +step:1036/1670 train_time:101232ms step_avg:97.71ms +step:1037/1670 train_time:101329ms step_avg:97.71ms +step:1038/1670 train_time:101426ms step_avg:97.71ms +step:1039/1670 train_time:101523ms step_avg:97.71ms +step:1040/1670 train_time:101619ms step_avg:97.71ms +step:1041/1670 train_time:101716ms step_avg:97.71ms +step:1042/1670 train_time:101813ms step_avg:97.71ms +step:1043/1670 train_time:101911ms step_avg:97.71ms 
+step:1044/1670 train_time:102009ms step_avg:97.71ms +step:1045/1670 train_time:102107ms step_avg:97.71ms +step:1046/1670 train_time:102205ms step_avg:97.71ms +step:1047/1670 train_time:102304ms step_avg:97.71ms +step:1048/1670 train_time:102400ms step_avg:97.71ms +step:1049/1670 train_time:102496ms step_avg:97.71ms +step:1050/1670 train_time:102593ms step_avg:97.71ms +step:1051/1670 train_time:102689ms step_avg:97.71ms +step:1052/1670 train_time:102787ms step_avg:97.71ms +step:1053/1670 train_time:102885ms step_avg:97.71ms +step:1054/1670 train_time:102983ms step_avg:97.71ms +step:1055/1670 train_time:103082ms step_avg:97.71ms +step:1056/1670 train_time:103180ms step_avg:97.71ms +step:1057/1670 train_time:103278ms step_avg:97.71ms +step:1058/1670 train_time:103375ms step_avg:97.71ms +step:1059/1670 train_time:103472ms step_avg:97.71ms +step:1060/1670 train_time:103569ms step_avg:97.71ms +step:1061/1670 train_time:103666ms step_avg:97.71ms +step:1062/1670 train_time:103930ms step_avg:97.86ms +step:1063/1670 train_time:104020ms step_avg:97.86ms +step:1064/1670 train_time:104116ms step_avg:97.85ms +step:1065/1670 train_time:104212ms step_avg:97.85ms +step:1066/1670 train_time:104309ms step_avg:97.85ms +step:1067/1670 train_time:104405ms step_avg:97.85ms +step:1068/1670 train_time:104501ms step_avg:97.85ms +step:1069/1670 train_time:104597ms step_avg:97.85ms +step:1070/1670 train_time:104693ms step_avg:97.84ms +step:1071/1670 train_time:104789ms step_avg:97.84ms +step:1072/1670 train_time:104891ms step_avg:97.85ms +step:1073/1670 train_time:104990ms step_avg:97.85ms +step:1074/1670 train_time:105088ms step_avg:97.85ms +step:1075/1670 train_time:105187ms step_avg:97.85ms +step:1076/1670 train_time:105284ms step_avg:97.85ms +step:1077/1670 train_time:105383ms step_avg:97.85ms +step:1078/1670 train_time:105480ms step_avg:97.85ms +step:1079/1670 train_time:105576ms step_avg:97.85ms +step:1080/1670 train_time:105673ms step_avg:97.85ms +step:1081/1670 train_time:105770ms step_avg:97.84ms +step:1082/1670 train_time:105868ms step_avg:97.84ms +step:1083/1670 train_time:105966ms step_avg:97.85ms +step:1084/1670 train_time:106066ms step_avg:97.85ms +step:1085/1670 train_time:106164ms step_avg:97.85ms +step:1086/1670 train_time:106261ms step_avg:97.85ms +step:1087/1670 train_time:106358ms step_avg:97.85ms +step:1088/1670 train_time:106454ms step_avg:97.84ms +step:1089/1670 train_time:106550ms step_avg:97.84ms +step:1090/1670 train_time:106648ms step_avg:97.84ms +step:1091/1670 train_time:106745ms step_avg:97.84ms +step:1092/1670 train_time:106843ms step_avg:97.84ms +step:1093/1670 train_time:106940ms step_avg:97.84ms +step:1094/1670 train_time:107038ms step_avg:97.84ms +step:1095/1670 train_time:107135ms step_avg:97.84ms +step:1096/1670 train_time:107232ms step_avg:97.84ms +step:1097/1670 train_time:107329ms step_avg:97.84ms +step:1098/1670 train_time:107427ms step_avg:97.84ms +step:1099/1670 train_time:107524ms step_avg:97.84ms +step:1100/1670 train_time:107621ms step_avg:97.84ms +step:1101/1670 train_time:107718ms step_avg:97.84ms +step:1102/1670 train_time:107815ms step_avg:97.84ms +step:1103/1670 train_time:107911ms step_avg:97.83ms +step:1104/1670 train_time:108008ms step_avg:97.83ms +step:1105/1670 train_time:108106ms step_avg:97.83ms +step:1106/1670 train_time:108204ms step_avg:97.83ms +step:1107/1670 train_time:108302ms step_avg:97.83ms +step:1108/1670 train_time:108399ms step_avg:97.83ms +step:1109/1670 train_time:108496ms step_avg:97.83ms +step:1110/1670 train_time:108592ms step_avg:97.83ms 
+step:1111/1670 train_time:108689ms step_avg:97.83ms +step:1112/1670 train_time:108787ms step_avg:97.83ms +step:1113/1670 train_time:108885ms step_avg:97.83ms +step:1114/1670 train_time:108984ms step_avg:97.83ms +step:1115/1670 train_time:109081ms step_avg:97.83ms +step:1116/1670 train_time:109178ms step_avg:97.83ms +step:1117/1670 train_time:109276ms step_avg:97.83ms +step:1118/1670 train_time:109373ms step_avg:97.83ms +step:1119/1670 train_time:109473ms step_avg:97.83ms +step:1120/1670 train_time:109571ms step_avg:97.83ms +step:1121/1670 train_time:109668ms step_avg:97.83ms +step:1122/1670 train_time:109767ms step_avg:97.83ms +step:1123/1670 train_time:109865ms step_avg:97.83ms +step:1124/1670 train_time:109963ms step_avg:97.83ms +step:1125/1670 train_time:110062ms step_avg:97.83ms +step:1125/1670 val_loss:3.4247 train_time:110161ms step_avg:97.92ms +step:1126/1670 train_time:110183ms step_avg:97.85ms +step:1127/1670 train_time:110267ms step_avg:97.84ms +step:1128/1670 train_time:110367ms step_avg:97.84ms +step:1129/1670 train_time:110465ms step_avg:97.84ms +step:1130/1670 train_time:110561ms step_avg:97.84ms +step:1131/1670 train_time:110657ms step_avg:97.84ms +step:1132/1670 train_time:110754ms step_avg:97.84ms +step:1133/1670 train_time:110852ms step_avg:97.84ms +step:1134/1670 train_time:110949ms step_avg:97.84ms +step:1135/1670 train_time:111046ms step_avg:97.84ms +step:1136/1670 train_time:111150ms step_avg:97.84ms +step:1137/1670 train_time:111253ms step_avg:97.85ms +step:1138/1670 train_time:111353ms step_avg:97.85ms +step:1139/1670 train_time:111453ms step_avg:97.85ms +step:1140/1670 train_time:111553ms step_avg:97.85ms +step:1141/1670 train_time:111651ms step_avg:97.85ms +step:1142/1670 train_time:111748ms step_avg:97.85ms +step:1143/1670 train_time:111844ms step_avg:97.85ms +step:1144/1670 train_time:111941ms step_avg:97.85ms +step:1145/1670 train_time:112039ms step_avg:97.85ms +step:1146/1670 train_time:112139ms step_avg:97.85ms +step:1147/1670 train_time:112236ms step_avg:97.85ms +step:1148/1670 train_time:112334ms step_avg:97.85ms +step:1149/1670 train_time:112433ms step_avg:97.85ms +step:1150/1670 train_time:112532ms step_avg:97.85ms +step:1151/1670 train_time:112631ms step_avg:97.85ms +step:1152/1670 train_time:112728ms step_avg:97.85ms +step:1153/1670 train_time:112825ms step_avg:97.85ms +step:1154/1670 train_time:112924ms step_avg:97.85ms +step:1155/1670 train_time:113021ms step_avg:97.85ms +step:1156/1670 train_time:113119ms step_avg:97.85ms +step:1157/1670 train_time:113217ms step_avg:97.85ms +step:1158/1670 train_time:113316ms step_avg:97.85ms +step:1159/1670 train_time:113414ms step_avg:97.86ms +step:1160/1670 train_time:113512ms step_avg:97.85ms +step:1161/1670 train_time:113609ms step_avg:97.85ms +step:1162/1670 train_time:113708ms step_avg:97.86ms +step:1163/1670 train_time:113806ms step_avg:97.86ms +step:1164/1670 train_time:113904ms step_avg:97.86ms +step:1165/1670 train_time:114001ms step_avg:97.86ms +step:1166/1670 train_time:114099ms step_avg:97.86ms +step:1167/1670 train_time:114197ms step_avg:97.86ms +step:1168/1670 train_time:114296ms step_avg:97.86ms +step:1169/1670 train_time:114394ms step_avg:97.86ms +step:1170/1670 train_time:114492ms step_avg:97.86ms +step:1171/1670 train_time:114591ms step_avg:97.86ms +step:1172/1670 train_time:114689ms step_avg:97.86ms +step:1173/1670 train_time:114786ms step_avg:97.86ms +step:1174/1670 train_time:114884ms step_avg:97.86ms +step:1175/1670 train_time:114983ms step_avg:97.86ms +step:1176/1670 train_time:115080ms 
step_avg:97.86ms +step:1177/1670 train_time:115178ms step_avg:97.86ms +step:1178/1670 train_time:115276ms step_avg:97.86ms +step:1179/1670 train_time:115374ms step_avg:97.86ms +step:1180/1670 train_time:115473ms step_avg:97.86ms +step:1181/1670 train_time:115571ms step_avg:97.86ms +step:1182/1670 train_time:115668ms step_avg:97.86ms +step:1183/1670 train_time:115767ms step_avg:97.86ms +step:1184/1670 train_time:115865ms step_avg:97.86ms +step:1185/1670 train_time:115962ms step_avg:97.86ms +step:1186/1670 train_time:116059ms step_avg:97.86ms +step:1187/1670 train_time:116157ms step_avg:97.86ms +step:1188/1670 train_time:116255ms step_avg:97.86ms +step:1189/1670 train_time:116353ms step_avg:97.86ms +step:1190/1670 train_time:116451ms step_avg:97.86ms +step:1191/1670 train_time:116550ms step_avg:97.86ms +step:1192/1670 train_time:116649ms step_avg:97.86ms +step:1193/1670 train_time:116747ms step_avg:97.86ms +step:1194/1670 train_time:116845ms step_avg:97.86ms +step:1195/1670 train_time:116942ms step_avg:97.86ms +step:1196/1670 train_time:117040ms step_avg:97.86ms +step:1197/1670 train_time:117138ms step_avg:97.86ms +step:1198/1670 train_time:117236ms step_avg:97.86ms +step:1199/1670 train_time:117334ms step_avg:97.86ms +step:1200/1670 train_time:117431ms step_avg:97.86ms +step:1201/1670 train_time:117529ms step_avg:97.86ms +step:1202/1670 train_time:117627ms step_avg:97.86ms +step:1203/1670 train_time:117726ms step_avg:97.86ms +step:1204/1670 train_time:117824ms step_avg:97.86ms +step:1205/1670 train_time:117921ms step_avg:97.86ms +step:1206/1670 train_time:118019ms step_avg:97.86ms +step:1207/1670 train_time:118117ms step_avg:97.86ms +step:1208/1670 train_time:118215ms step_avg:97.86ms +step:1209/1670 train_time:118313ms step_avg:97.86ms +step:1210/1670 train_time:118410ms step_avg:97.86ms +step:1211/1670 train_time:118508ms step_avg:97.86ms +step:1212/1670 train_time:118607ms step_avg:97.86ms +step:1213/1670 train_time:118705ms step_avg:97.86ms +step:1214/1670 train_time:118802ms step_avg:97.86ms +step:1215/1670 train_time:118899ms step_avg:97.86ms +step:1216/1670 train_time:118997ms step_avg:97.86ms +step:1217/1670 train_time:119095ms step_avg:97.86ms +step:1218/1670 train_time:119194ms step_avg:97.86ms +step:1219/1670 train_time:119293ms step_avg:97.86ms +step:1220/1670 train_time:119391ms step_avg:97.86ms +step:1221/1670 train_time:119489ms step_avg:97.86ms +step:1222/1670 train_time:119587ms step_avg:97.86ms +step:1223/1670 train_time:119685ms step_avg:97.86ms +step:1224/1670 train_time:119783ms step_avg:97.86ms +step:1225/1670 train_time:119881ms step_avg:97.86ms +step:1226/1670 train_time:119979ms step_avg:97.86ms +step:1227/1670 train_time:120077ms step_avg:97.86ms +step:1228/1670 train_time:120176ms step_avg:97.86ms +step:1229/1670 train_time:120272ms step_avg:97.86ms +step:1230/1670 train_time:120371ms step_avg:97.86ms +step:1231/1670 train_time:120469ms step_avg:97.86ms +step:1232/1670 train_time:120567ms step_avg:97.86ms +step:1233/1670 train_time:120666ms step_avg:97.86ms +step:1234/1670 train_time:120765ms step_avg:97.86ms +step:1235/1670 train_time:120863ms step_avg:97.86ms +step:1236/1670 train_time:120961ms step_avg:97.86ms +step:1237/1670 train_time:121059ms step_avg:97.87ms +step:1238/1670 train_time:121157ms step_avg:97.86ms +step:1239/1670 train_time:121254ms step_avg:97.86ms +step:1240/1670 train_time:121351ms step_avg:97.86ms +step:1241/1670 train_time:121449ms step_avg:97.86ms +step:1242/1670 train_time:121548ms step_avg:97.86ms +step:1243/1670 train_time:121645ms 
step_avg:97.86ms +step:1244/1670 train_time:121743ms step_avg:97.86ms +step:1245/1670 train_time:121841ms step_avg:97.86ms +step:1246/1670 train_time:121939ms step_avg:97.86ms +step:1247/1670 train_time:122038ms step_avg:97.87ms +step:1248/1670 train_time:122136ms step_avg:97.87ms +step:1249/1670 train_time:122234ms step_avg:97.87ms +step:1250/1670 train_time:122332ms step_avg:97.87ms +step:1250/1670 val_loss:3.3807 train_time:122428ms step_avg:97.94ms +step:1251/1670 train_time:122450ms step_avg:97.88ms +step:1252/1670 train_time:122535ms step_avg:97.87ms +step:1253/1670 train_time:122634ms step_avg:97.87ms +step:1254/1670 train_time:122732ms step_avg:97.87ms +step:1255/1670 train_time:122830ms step_avg:97.87ms +step:1256/1670 train_time:122929ms step_avg:97.87ms +step:1257/1670 train_time:123026ms step_avg:97.87ms +step:1258/1670 train_time:123122ms step_avg:97.87ms +step:1259/1670 train_time:123219ms step_avg:97.87ms +step:1260/1670 train_time:123316ms step_avg:97.87ms +step:1261/1670 train_time:123415ms step_avg:97.87ms +step:1262/1670 train_time:123515ms step_avg:97.87ms +step:1263/1670 train_time:123615ms step_avg:97.87ms +step:1264/1670 train_time:123713ms step_avg:97.87ms +step:1265/1670 train_time:123812ms step_avg:97.87ms +step:1266/1670 train_time:123909ms step_avg:97.87ms +step:1267/1670 train_time:124006ms step_avg:97.87ms +step:1268/1670 train_time:124104ms step_avg:97.87ms +step:1269/1670 train_time:124201ms step_avg:97.87ms +step:1270/1670 train_time:124298ms step_avg:97.87ms +step:1271/1670 train_time:124398ms step_avg:97.87ms +step:1272/1670 train_time:124497ms step_avg:97.88ms +step:1273/1670 train_time:124596ms step_avg:97.88ms +step:1274/1670 train_time:124857ms step_avg:98.00ms +step:1275/1670 train_time:125059ms step_avg:98.09ms +step:1276/1670 train_time:125155ms step_avg:98.08ms +step:1277/1670 train_time:125252ms step_avg:98.08ms +step:1278/1670 train_time:125349ms step_avg:98.08ms +step:1279/1670 train_time:125445ms step_avg:98.08ms +step:1280/1670 train_time:125542ms step_avg:98.08ms +step:1281/1670 train_time:125638ms step_avg:98.08ms +step:1282/1670 train_time:125735ms step_avg:98.08ms +step:1283/1670 train_time:125834ms step_avg:98.08ms +step:1284/1670 train_time:125937ms step_avg:98.08ms +step:1285/1670 train_time:126039ms step_avg:98.09ms +step:1286/1670 train_time:126138ms step_avg:98.09ms +step:1287/1670 train_time:126236ms step_avg:98.09ms +step:1288/1670 train_time:126334ms step_avg:98.09ms +step:1289/1670 train_time:126432ms step_avg:98.09ms +step:1290/1670 train_time:126530ms step_avg:98.09ms +step:1291/1670 train_time:126628ms step_avg:98.09ms +step:1292/1670 train_time:126725ms step_avg:98.08ms +step:1293/1670 train_time:126823ms step_avg:98.08ms +step:1294/1670 train_time:126924ms step_avg:98.09ms +step:1295/1670 train_time:127025ms step_avg:98.09ms +step:1296/1670 train_time:127124ms step_avg:98.09ms +step:1297/1670 train_time:127221ms step_avg:98.09ms +step:1298/1670 train_time:127318ms step_avg:98.09ms +step:1299/1670 train_time:127416ms step_avg:98.09ms +step:1300/1670 train_time:127512ms step_avg:98.09ms +step:1301/1670 train_time:127611ms step_avg:98.09ms +step:1302/1670 train_time:127708ms step_avg:98.09ms +step:1303/1670 train_time:127806ms step_avg:98.09ms +step:1304/1670 train_time:127906ms step_avg:98.09ms +step:1305/1670 train_time:128006ms step_avg:98.09ms +step:1306/1670 train_time:128104ms step_avg:98.09ms +step:1307/1670 train_time:128204ms step_avg:98.09ms +step:1308/1670 train_time:128303ms step_avg:98.09ms +step:1309/1670 
train_time:128400ms step_avg:98.09ms +step:1310/1670 train_time:128497ms step_avg:98.09ms +step:1311/1670 train_time:128594ms step_avg:98.09ms +step:1312/1670 train_time:128692ms step_avg:98.09ms +step:1313/1670 train_time:128789ms step_avg:98.09ms +step:1314/1670 train_time:128887ms step_avg:98.09ms +step:1315/1670 train_time:128986ms step_avg:98.09ms +step:1316/1670 train_time:129085ms step_avg:98.09ms +step:1317/1670 train_time:129184ms step_avg:98.09ms +step:1318/1670 train_time:129283ms step_avg:98.09ms +step:1319/1670 train_time:129382ms step_avg:98.09ms +step:1320/1670 train_time:129479ms step_avg:98.09ms +step:1321/1670 train_time:129576ms step_avg:98.09ms +step:1322/1670 train_time:129673ms step_avg:98.09ms +step:1323/1670 train_time:129772ms step_avg:98.09ms +step:1324/1670 train_time:129869ms step_avg:98.09ms +step:1325/1670 train_time:129968ms step_avg:98.09ms +step:1326/1670 train_time:130066ms step_avg:98.09ms +step:1327/1670 train_time:130164ms step_avg:98.09ms +step:1328/1670 train_time:130262ms step_avg:98.09ms +step:1329/1670 train_time:130362ms step_avg:98.09ms +step:1330/1670 train_time:130460ms step_avg:98.09ms +step:1331/1670 train_time:130558ms step_avg:98.09ms +step:1332/1670 train_time:130656ms step_avg:98.09ms +step:1333/1670 train_time:130752ms step_avg:98.09ms +step:1334/1670 train_time:130850ms step_avg:98.09ms +step:1335/1670 train_time:130948ms step_avg:98.09ms +step:1336/1670 train_time:131046ms step_avg:98.09ms +step:1337/1670 train_time:131146ms step_avg:98.09ms +step:1338/1670 train_time:131246ms step_avg:98.09ms +step:1339/1670 train_time:131346ms step_avg:98.09ms +step:1340/1670 train_time:131446ms step_avg:98.09ms +step:1341/1670 train_time:131545ms step_avg:98.09ms +step:1342/1670 train_time:131643ms step_avg:98.09ms +step:1343/1670 train_time:131741ms step_avg:98.09ms +step:1344/1670 train_time:131837ms step_avg:98.09ms +step:1345/1670 train_time:131934ms step_avg:98.09ms +step:1346/1670 train_time:132032ms step_avg:98.09ms +step:1347/1670 train_time:132132ms step_avg:98.09ms +step:1348/1670 train_time:132232ms step_avg:98.10ms +step:1349/1670 train_time:132331ms step_avg:98.10ms +step:1350/1670 train_time:132431ms step_avg:98.10ms +step:1351/1670 train_time:132531ms step_avg:98.10ms +step:1352/1670 train_time:132629ms step_avg:98.10ms +step:1353/1670 train_time:132729ms step_avg:98.10ms +step:1354/1670 train_time:132829ms step_avg:98.10ms +step:1355/1670 train_time:132927ms step_avg:98.10ms +step:1356/1670 train_time:133025ms step_avg:98.10ms +step:1357/1670 train_time:133122ms step_avg:98.10ms +step:1358/1670 train_time:133220ms step_avg:98.10ms +step:1359/1670 train_time:133318ms step_avg:98.10ms +step:1360/1670 train_time:133417ms step_avg:98.10ms +step:1361/1670 train_time:133515ms step_avg:98.10ms +step:1362/1670 train_time:133613ms step_avg:98.10ms +step:1363/1670 train_time:133711ms step_avg:98.10ms +step:1364/1670 train_time:133809ms step_avg:98.10ms +step:1365/1670 train_time:133907ms step_avg:98.10ms +step:1366/1670 train_time:134005ms step_avg:98.10ms +step:1367/1670 train_time:134102ms step_avg:98.10ms +step:1368/1670 train_time:134200ms step_avg:98.10ms +step:1369/1670 train_time:134298ms step_avg:98.10ms +step:1370/1670 train_time:134395ms step_avg:98.10ms +step:1371/1670 train_time:134494ms step_avg:98.10ms +step:1372/1670 train_time:134592ms step_avg:98.10ms +step:1373/1670 train_time:134690ms step_avg:98.10ms +step:1374/1670 train_time:134788ms step_avg:98.10ms +step:1375/1670 train_time:134886ms step_avg:98.10ms +step:1375/1670 
val_loss:3.3438 train_time:134983ms step_avg:98.17ms +step:1376/1670 train_time:135005ms step_avg:98.11ms +step:1377/1670 train_time:135088ms step_avg:98.10ms +step:1378/1670 train_time:135191ms step_avg:98.11ms +step:1379/1670 train_time:135288ms step_avg:98.11ms +step:1380/1670 train_time:135386ms step_avg:98.11ms +step:1381/1670 train_time:135483ms step_avg:98.11ms +step:1382/1670 train_time:135580ms step_avg:98.10ms +step:1383/1670 train_time:135677ms step_avg:98.10ms +step:1384/1670 train_time:135774ms step_avg:98.10ms +step:1385/1670 train_time:135871ms step_avg:98.10ms +step:1386/1670 train_time:135971ms step_avg:98.10ms +step:1387/1670 train_time:136071ms step_avg:98.10ms +step:1388/1670 train_time:136172ms step_avg:98.11ms +step:1389/1670 train_time:136272ms step_avg:98.11ms +step:1390/1670 train_time:136373ms step_avg:98.11ms +step:1391/1670 train_time:136472ms step_avg:98.11ms +step:1392/1670 train_time:136570ms step_avg:98.11ms +step:1393/1670 train_time:136668ms step_avg:98.11ms +step:1394/1670 train_time:136765ms step_avg:98.11ms +step:1395/1670 train_time:136862ms step_avg:98.11ms +step:1396/1670 train_time:136959ms step_avg:98.11ms +step:1397/1670 train_time:137058ms step_avg:98.11ms +step:1398/1670 train_time:137156ms step_avg:98.11ms +step:1399/1670 train_time:137255ms step_avg:98.11ms +step:1400/1670 train_time:137354ms step_avg:98.11ms +step:1401/1670 train_time:137452ms step_avg:98.11ms +step:1402/1670 train_time:137550ms step_avg:98.11ms +step:1403/1670 train_time:137648ms step_avg:98.11ms +step:1404/1670 train_time:137746ms step_avg:98.11ms +step:1405/1670 train_time:137844ms step_avg:98.11ms +step:1406/1670 train_time:137942ms step_avg:98.11ms +step:1407/1670 train_time:138039ms step_avg:98.11ms +step:1408/1670 train_time:138137ms step_avg:98.11ms +step:1409/1670 train_time:138237ms step_avg:98.11ms +step:1410/1670 train_time:138337ms step_avg:98.11ms +step:1411/1670 train_time:138436ms step_avg:98.11ms +step:1412/1670 train_time:138535ms step_avg:98.11ms +step:1413/1670 train_time:138633ms step_avg:98.11ms +step:1414/1670 train_time:138731ms step_avg:98.11ms +step:1415/1670 train_time:138829ms step_avg:98.11ms +step:1416/1670 train_time:138928ms step_avg:98.11ms +step:1417/1670 train_time:139027ms step_avg:98.11ms +step:1418/1670 train_time:139125ms step_avg:98.11ms +step:1419/1670 train_time:139223ms step_avg:98.11ms +step:1420/1670 train_time:139320ms step_avg:98.11ms +step:1421/1670 train_time:139418ms step_avg:98.11ms +step:1422/1670 train_time:139516ms step_avg:98.11ms +step:1423/1670 train_time:139614ms step_avg:98.11ms +step:1424/1670 train_time:139711ms step_avg:98.11ms +step:1425/1670 train_time:139810ms step_avg:98.11ms +step:1426/1670 train_time:139908ms step_avg:98.11ms +step:1427/1670 train_time:140006ms step_avg:98.11ms +step:1428/1670 train_time:140103ms step_avg:98.11ms +step:1429/1670 train_time:140201ms step_avg:98.11ms +step:1430/1670 train_time:140299ms step_avg:98.11ms +step:1431/1670 train_time:140397ms step_avg:98.11ms +step:1432/1670 train_time:140495ms step_avg:98.11ms +step:1433/1670 train_time:140594ms step_avg:98.11ms +step:1434/1670 train_time:140693ms step_avg:98.11ms +step:1435/1670 train_time:140793ms step_avg:98.11ms +step:1436/1670 train_time:140892ms step_avg:98.11ms +step:1437/1670 train_time:140992ms step_avg:98.12ms +step:1438/1670 train_time:141090ms step_avg:98.12ms +step:1439/1670 train_time:141190ms step_avg:98.12ms +step:1440/1670 train_time:141291ms step_avg:98.12ms +step:1441/1670 train_time:141389ms step_avg:98.12ms 
+step:1442/1670 train_time:141489ms step_avg:98.12ms +step:1443/1670 train_time:141586ms step_avg:98.12ms +step:1444/1670 train_time:141684ms step_avg:98.12ms +step:1445/1670 train_time:141781ms step_avg:98.12ms +step:1446/1670 train_time:141878ms step_avg:98.12ms +step:1447/1670 train_time:141977ms step_avg:98.12ms +step:1448/1670 train_time:142076ms step_avg:98.12ms +step:1449/1670 train_time:142176ms step_avg:98.12ms +step:1450/1670 train_time:142275ms step_avg:98.12ms +step:1451/1670 train_time:142373ms step_avg:98.12ms +step:1452/1670 train_time:142472ms step_avg:98.12ms +step:1453/1670 train_time:142571ms step_avg:98.12ms +step:1454/1670 train_time:142670ms step_avg:98.12ms +step:1455/1670 train_time:142768ms step_avg:98.12ms +step:1456/1670 train_time:142866ms step_avg:98.12ms +step:1457/1670 train_time:142964ms step_avg:98.12ms +step:1458/1670 train_time:143061ms step_avg:98.12ms +step:1459/1670 train_time:143159ms step_avg:98.12ms +step:1460/1670 train_time:143258ms step_avg:98.12ms +step:1461/1670 train_time:143358ms step_avg:98.12ms +step:1462/1670 train_time:143456ms step_avg:98.12ms +step:1463/1670 train_time:143554ms step_avg:98.12ms +step:1464/1670 train_time:143653ms step_avg:98.12ms +step:1465/1670 train_time:143753ms step_avg:98.12ms +step:1466/1670 train_time:143852ms step_avg:98.13ms +step:1467/1670 train_time:143951ms step_avg:98.13ms +step:1468/1670 train_time:144051ms step_avg:98.13ms +step:1469/1670 train_time:144150ms step_avg:98.13ms +step:1470/1670 train_time:144249ms step_avg:98.13ms +step:1471/1670 train_time:144347ms step_avg:98.13ms +step:1472/1670 train_time:144445ms step_avg:98.13ms +step:1473/1670 train_time:144541ms step_avg:98.13ms +step:1474/1670 train_time:144639ms step_avg:98.13ms +step:1475/1670 train_time:144737ms step_avg:98.13ms +step:1476/1670 train_time:144837ms step_avg:98.13ms +step:1477/1670 train_time:144935ms step_avg:98.13ms +step:1478/1670 train_time:145034ms step_avg:98.13ms +step:1479/1670 train_time:145133ms step_avg:98.13ms +step:1480/1670 train_time:145233ms step_avg:98.13ms +step:1481/1670 train_time:145333ms step_avg:98.13ms +step:1482/1670 train_time:145432ms step_avg:98.13ms +step:1483/1670 train_time:145530ms step_avg:98.13ms +step:1484/1670 train_time:145628ms step_avg:98.13ms +step:1485/1670 train_time:145886ms step_avg:98.24ms +step:1486/1670 train_time:146095ms step_avg:98.31ms +step:1487/1670 train_time:146192ms step_avg:98.31ms +step:1488/1670 train_time:146289ms step_avg:98.31ms +step:1489/1670 train_time:146385ms step_avg:98.31ms +step:1490/1670 train_time:146482ms step_avg:98.31ms +step:1491/1670 train_time:146579ms step_avg:98.31ms +step:1492/1670 train_time:146676ms step_avg:98.31ms +step:1493/1670 train_time:146773ms step_avg:98.31ms +step:1494/1670 train_time:146873ms step_avg:98.31ms +step:1495/1670 train_time:146979ms step_avg:98.31ms +step:1496/1670 train_time:147080ms step_avg:98.32ms +step:1497/1670 train_time:147178ms step_avg:98.32ms +step:1498/1670 train_time:147276ms step_avg:98.32ms +step:1499/1670 train_time:147375ms step_avg:98.32ms +step:1500/1670 train_time:147473ms step_avg:98.32ms +step:1500/1670 val_loss:3.3113 train_time:147570ms step_avg:98.38ms +step:1501/1670 train_time:147592ms step_avg:98.33ms +step:1502/1670 train_time:147675ms step_avg:98.32ms +step:1503/1670 train_time:147776ms step_avg:98.32ms +step:1504/1670 train_time:147873ms step_avg:98.32ms +step:1505/1670 train_time:147970ms step_avg:98.32ms +step:1506/1670 train_time:148067ms step_avg:98.32ms +step:1507/1670 train_time:148164ms 
step_avg:98.32ms +step:1508/1670 train_time:148260ms step_avg:98.32ms +step:1509/1670 train_time:148357ms step_avg:98.31ms +step:1510/1670 train_time:148455ms step_avg:98.31ms +step:1511/1670 train_time:148555ms step_avg:98.32ms +step:1512/1670 train_time:148656ms step_avg:98.32ms +step:1513/1670 train_time:148754ms step_avg:98.32ms +step:1514/1670 train_time:148854ms step_avg:98.32ms +step:1515/1670 train_time:148952ms step_avg:98.32ms +step:1516/1670 train_time:149051ms step_avg:98.32ms +step:1517/1670 train_time:149148ms step_avg:98.32ms +step:1518/1670 train_time:149246ms step_avg:98.32ms +step:1519/1670 train_time:149344ms step_avg:98.32ms +step:1520/1670 train_time:149442ms step_avg:98.32ms +step:1521/1670 train_time:149541ms step_avg:98.32ms +step:1522/1670 train_time:149641ms step_avg:98.32ms +step:1523/1670 train_time:149739ms step_avg:98.32ms +step:1524/1670 train_time:149837ms step_avg:98.32ms +step:1525/1670 train_time:149934ms step_avg:98.32ms +step:1526/1670 train_time:150032ms step_avg:98.32ms +step:1527/1670 train_time:150131ms step_avg:98.32ms +step:1528/1670 train_time:150230ms step_avg:98.32ms +step:1529/1670 train_time:150328ms step_avg:98.32ms +step:1530/1670 train_time:150428ms step_avg:98.32ms +step:1531/1670 train_time:150528ms step_avg:98.32ms +step:1532/1670 train_time:150626ms step_avg:98.32ms +step:1533/1670 train_time:150726ms step_avg:98.32ms +step:1534/1670 train_time:150825ms step_avg:98.32ms +step:1535/1670 train_time:150922ms step_avg:98.32ms +step:1536/1670 train_time:151021ms step_avg:98.32ms +step:1537/1670 train_time:151117ms step_avg:98.32ms +step:1538/1670 train_time:151214ms step_avg:98.32ms +step:1539/1670 train_time:151311ms step_avg:98.32ms +step:1540/1670 train_time:151411ms step_avg:98.32ms +step:1541/1670 train_time:151511ms step_avg:98.32ms +step:1542/1670 train_time:151610ms step_avg:98.32ms +step:1543/1670 train_time:151709ms step_avg:98.32ms +step:1544/1670 train_time:151809ms step_avg:98.32ms +step:1545/1670 train_time:151910ms step_avg:98.32ms +step:1546/1670 train_time:152011ms step_avg:98.33ms +step:1547/1670 train_time:152111ms step_avg:98.33ms +step:1548/1670 train_time:152209ms step_avg:98.33ms +step:1549/1670 train_time:152306ms step_avg:98.33ms +step:1550/1670 train_time:152405ms step_avg:98.33ms +step:1551/1670 train_time:152503ms step_avg:98.33ms +step:1552/1670 train_time:152601ms step_avg:98.33ms +step:1553/1670 train_time:152698ms step_avg:98.32ms +step:1554/1670 train_time:152796ms step_avg:98.32ms +step:1555/1670 train_time:152895ms step_avg:98.32ms +step:1556/1670 train_time:152995ms step_avg:98.33ms +step:1557/1670 train_time:153094ms step_avg:98.33ms +step:1558/1670 train_time:153193ms step_avg:98.33ms +step:1559/1670 train_time:153292ms step_avg:98.33ms +step:1560/1670 train_time:153391ms step_avg:98.33ms +step:1561/1670 train_time:153489ms step_avg:98.33ms +step:1562/1670 train_time:153589ms step_avg:98.33ms +step:1563/1670 train_time:153687ms step_avg:98.33ms +step:1564/1670 train_time:153788ms step_avg:98.33ms +step:1565/1670 train_time:153889ms step_avg:98.33ms +step:1566/1670 train_time:153988ms step_avg:98.33ms +step:1567/1670 train_time:154088ms step_avg:98.33ms +step:1568/1670 train_time:154186ms step_avg:98.33ms +step:1569/1670 train_time:154284ms step_avg:98.33ms +step:1570/1670 train_time:154382ms step_avg:98.33ms +step:1571/1670 train_time:154479ms step_avg:98.33ms +step:1572/1670 train_time:154576ms step_avg:98.33ms +step:1573/1670 train_time:154676ms step_avg:98.33ms +step:1574/1670 train_time:154774ms 
step_avg:98.33ms +step:1575/1670 train_time:154872ms step_avg:98.33ms +step:1576/1670 train_time:154971ms step_avg:98.33ms +step:1577/1670 train_time:155069ms step_avg:98.33ms +step:1578/1670 train_time:155168ms step_avg:98.33ms +step:1579/1670 train_time:155267ms step_avg:98.33ms +step:1580/1670 train_time:155365ms step_avg:98.33ms +step:1581/1670 train_time:155465ms step_avg:98.33ms +step:1582/1670 train_time:155563ms step_avg:98.33ms +step:1583/1670 train_time:155662ms step_avg:98.33ms +step:1584/1670 train_time:155760ms step_avg:98.33ms +step:1585/1670 train_time:155857ms step_avg:98.33ms +step:1586/1670 train_time:155954ms step_avg:98.33ms +step:1587/1670 train_time:156052ms step_avg:98.33ms +step:1588/1670 train_time:156151ms step_avg:98.33ms +step:1589/1670 train_time:156250ms step_avg:98.33ms +step:1590/1670 train_time:156349ms step_avg:98.33ms +step:1591/1670 train_time:156449ms step_avg:98.33ms +step:1592/1670 train_time:156549ms step_avg:98.33ms +step:1593/1670 train_time:156649ms step_avg:98.34ms +step:1594/1670 train_time:156748ms step_avg:98.34ms +step:1595/1670 train_time:156847ms step_avg:98.34ms +step:1596/1670 train_time:156946ms step_avg:98.34ms +step:1597/1670 train_time:157045ms step_avg:98.34ms +step:1598/1670 train_time:157142ms step_avg:98.34ms +step:1599/1670 train_time:157239ms step_avg:98.34ms +step:1600/1670 train_time:157336ms step_avg:98.33ms +step:1601/1670 train_time:157434ms step_avg:98.33ms +step:1602/1670 train_time:157532ms step_avg:98.33ms +step:1603/1670 train_time:157631ms step_avg:98.33ms +step:1604/1670 train_time:157730ms step_avg:98.34ms +step:1605/1670 train_time:157830ms step_avg:98.34ms +step:1606/1670 train_time:157929ms step_avg:98.34ms +step:1607/1670 train_time:158028ms step_avg:98.34ms +step:1608/1670 train_time:158128ms step_avg:98.34ms +step:1609/1670 train_time:158228ms step_avg:98.34ms +step:1610/1670 train_time:158326ms step_avg:98.34ms +step:1611/1670 train_time:158424ms step_avg:98.34ms +step:1612/1670 train_time:158522ms step_avg:98.34ms +step:1613/1670 train_time:158620ms step_avg:98.34ms +step:1614/1670 train_time:158719ms step_avg:98.34ms +step:1615/1670 train_time:158816ms step_avg:98.34ms +step:1616/1670 train_time:158916ms step_avg:98.34ms +step:1617/1670 train_time:159015ms step_avg:98.34ms +step:1618/1670 train_time:159114ms step_avg:98.34ms +step:1619/1670 train_time:159213ms step_avg:98.34ms +step:1620/1670 train_time:159311ms step_avg:98.34ms +step:1621/1670 train_time:159409ms step_avg:98.34ms +step:1622/1670 train_time:159508ms step_avg:98.34ms +step:1623/1670 train_time:159606ms step_avg:98.34ms +step:1624/1670 train_time:159705ms step_avg:98.34ms +step:1625/1670 train_time:159805ms step_avg:98.34ms +step:1625/1670 val_loss:3.2846 train_time:159905ms step_avg:98.40ms +step:1626/1670 train_time:159926ms step_avg:98.36ms +step:1627/1670 train_time:160012ms step_avg:98.35ms +step:1628/1670 train_time:160113ms step_avg:98.35ms +step:1629/1670 train_time:160212ms step_avg:98.35ms +step:1630/1670 train_time:160310ms step_avg:98.35ms +step:1631/1670 train_time:160407ms step_avg:98.35ms +step:1632/1670 train_time:160504ms step_avg:98.35ms +step:1633/1670 train_time:160601ms step_avg:98.35ms +step:1634/1670 train_time:160698ms step_avg:98.35ms +step:1635/1670 train_time:160795ms step_avg:98.35ms +step:1636/1670 train_time:160895ms step_avg:98.35ms +step:1637/1670 train_time:160999ms step_avg:98.35ms +step:1638/1670 train_time:161099ms step_avg:98.35ms +step:1639/1670 train_time:161198ms step_avg:98.35ms +step:1640/1670 
train_time:161297ms step_avg:98.35ms +step:1641/1670 train_time:161396ms step_avg:98.35ms +step:1642/1670 train_time:161493ms step_avg:98.35ms +step:1643/1670 train_time:161590ms step_avg:98.35ms +step:1644/1670 train_time:161687ms step_avg:98.35ms +step:1645/1670 train_time:161785ms step_avg:98.35ms +step:1646/1670 train_time:161882ms step_avg:98.35ms +step:1647/1670 train_time:161981ms step_avg:98.35ms +step:1648/1670 train_time:162079ms step_avg:98.35ms +step:1649/1670 train_time:162178ms step_avg:98.35ms +step:1650/1670 train_time:162276ms step_avg:98.35ms +step:1651/1670 train_time:162375ms step_avg:98.35ms +step:1652/1670 train_time:162473ms step_avg:98.35ms +step:1653/1670 train_time:162571ms step_avg:98.35ms +step:1654/1670 train_time:162670ms step_avg:98.35ms +step:1655/1670 train_time:162768ms step_avg:98.35ms +step:1656/1670 train_time:162867ms step_avg:98.35ms +step:1657/1670 train_time:162966ms step_avg:98.35ms +step:1658/1670 train_time:163064ms step_avg:98.35ms +step:1659/1670 train_time:163162ms step_avg:98.35ms +step:1660/1670 train_time:163260ms step_avg:98.35ms +step:1661/1670 train_time:163358ms step_avg:98.35ms +step:1662/1670 train_time:163456ms step_avg:98.35ms +step:1663/1670 train_time:163553ms step_avg:98.35ms +step:1664/1670 train_time:163652ms step_avg:98.35ms +step:1665/1670 train_time:163750ms step_avg:98.35ms +step:1666/1670 train_time:163849ms step_avg:98.35ms +step:1667/1670 train_time:163949ms step_avg:98.35ms +step:1668/1670 train_time:164048ms step_avg:98.35ms +step:1669/1670 train_time:164149ms step_avg:98.35ms +step:1670/1670 train_time:164249ms step_avg:98.35ms +step:1670/1670 val_loss:3.2766 train_time:164348ms step_avg:98.41ms +peak memory allocated: 34613 MiB reserved: 50216 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_50e5b966-21a9-4545-8c88-91308e140958.txt b/records/090525_SkipMLPBlocks/comparison_50e5b966-21a9-4545-8c88-91308e140958.txt new file mode 100644 index 000000000..b02e2101d --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_50e5b966-21a9-4545-8c88-91308e140958.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, 
dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + 
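+    # stride suffixes: _b = batch, _r = row, _c = column (strides in elements)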
c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels 
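+    # Here A = X @ X.mT from ns_line_1 is symmetric, so A @ A.T == A @ A and the
+    # same half-matrix trick applies: compute only the blocks on one side of the
+    # diagonal and mirror each finished output block across it.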
+ pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
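+        # Work is sharded round-robin within each equal-shape group: rank r owns
+        # params[base_i + r]. Gradients are averaged via async reduce_scatter,
+        # each rank runs momentum + Newton-Schulz on its own parameters only,
+        # and async all_gather redistributes the updated weights to every rank.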
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
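+                    # (moment buffers, like p_slice, hold only this rank's shard,
+                    # so Adam state is split world_size ways across the GPUs)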
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of position {cur}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each 
sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1670 # number of iterations to run + cooldown_frac: int = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if 
master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:57:36 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 130W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 126W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 42C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 34C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 33C P0 123W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 77482 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 77483 C /usr/bin/python3 610MiB | +| 0 N/A N/A 77484 C /usr/bin/python3 610MiB | +| 0 N/A N/A 77485 C /usr/bin/python3 610MiB | +| 0 N/A N/A 77486 C /usr/bin/python3 610MiB | +| 0 N/A N/A 77487 C /usr/bin/python3 610MiB | +| 0 N/A N/A 77488 C /usr/bin/python3 610MiB | +| 0 N/A N/A 77489 C /usr/bin/python3 610MiB | +| 1 N/A N/A 77483 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 77484 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 77485 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 77486 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 77487 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 77488 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 77489 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:418ms step_avg:417.70ms +step:2/1670 train_time:438ms step_avg:219.23ms +step:3/1670 train_time:511ms step_avg:170.23ms +step:4/1670 train_time:605ms step_avg:151.19ms +step:5/1670 train_time:699ms step_avg:139.84ms +step:6/1670 train_time:794ms step_avg:132.29ms +step:7/1670 train_time:889ms step_avg:126.98ms +step:8/1670 
train_time:984ms step_avg:122.99ms +step:9/1670 train_time:1079ms step_avg:119.87ms +step:10/1670 train_time:1174ms step_avg:117.36ms +step:11/1670 train_time:1269ms step_avg:115.34ms +step:12/1670 train_time:1369ms step_avg:114.10ms +step:13/1670 train_time:1468ms step_avg:112.90ms +step:14/1670 train_time:1565ms step_avg:111.77ms +step:15/1670 train_time:1661ms step_avg:110.70ms +step:16/1670 train_time:1756ms step_avg:109.75ms +step:17/1670 train_time:1852ms step_avg:108.96ms +step:18/1670 train_time:1948ms step_avg:108.23ms +step:19/1670 train_time:2044ms step_avg:107.60ms +step:20/1670 train_time:2141ms step_avg:107.04ms +step:21/1670 train_time:2236ms step_avg:106.47ms +step:22/1670 train_time:2332ms step_avg:105.99ms +step:23/1670 train_time:2430ms step_avg:105.64ms +step:24/1670 train_time:2527ms step_avg:105.30ms +step:25/1670 train_time:2625ms step_avg:104.98ms +step:26/1670 train_time:2721ms step_avg:104.64ms +step:27/1670 train_time:2816ms step_avg:104.31ms +step:28/1670 train_time:2912ms step_avg:104.01ms +step:29/1670 train_time:3008ms step_avg:103.74ms +step:30/1670 train_time:3105ms step_avg:103.50ms +step:31/1670 train_time:3202ms step_avg:103.28ms +step:32/1670 train_time:3298ms step_avg:103.07ms +step:33/1670 train_time:3394ms step_avg:102.85ms +step:34/1670 train_time:3490ms step_avg:102.65ms +step:35/1670 train_time:3587ms step_avg:102.48ms +step:36/1670 train_time:3683ms step_avg:102.31ms +step:37/1670 train_time:3779ms step_avg:102.14ms +step:38/1670 train_time:3874ms step_avg:101.96ms +step:39/1670 train_time:3970ms step_avg:101.78ms +step:40/1670 train_time:4066ms step_avg:101.65ms +step:41/1670 train_time:4162ms step_avg:101.51ms +step:42/1670 train_time:4259ms step_avg:101.41ms +step:43/1670 train_time:4355ms step_avg:101.28ms +step:44/1670 train_time:4451ms step_avg:101.16ms +step:45/1670 train_time:4548ms step_avg:101.07ms +step:46/1670 train_time:4643ms step_avg:100.94ms +step:47/1670 train_time:4740ms step_avg:100.84ms +step:48/1670 train_time:4835ms step_avg:100.73ms +step:49/1670 train_time:4931ms step_avg:100.63ms +step:50/1670 train_time:5027ms step_avg:100.53ms +step:51/1670 train_time:5122ms step_avg:100.44ms +step:52/1670 train_time:5218ms step_avg:100.35ms +step:53/1670 train_time:5314ms step_avg:100.27ms +step:54/1670 train_time:5411ms step_avg:100.20ms +step:55/1670 train_time:5508ms step_avg:100.14ms +step:56/1670 train_time:5604ms step_avg:100.07ms +step:57/1670 train_time:5700ms step_avg:100.01ms +step:58/1670 train_time:5796ms step_avg:99.93ms +step:59/1670 train_time:5891ms step_avg:99.85ms +step:60/1670 train_time:5987ms step_avg:99.78ms +step:61/1670 train_time:6082ms step_avg:99.70ms +step:62/1670 train_time:6178ms step_avg:99.65ms +step:63/1670 train_time:6274ms step_avg:99.58ms +step:64/1670 train_time:6370ms step_avg:99.53ms +step:65/1670 train_time:6466ms step_avg:99.48ms +step:66/1670 train_time:6564ms step_avg:99.45ms +step:67/1670 train_time:6661ms step_avg:99.41ms +step:68/1670 train_time:6757ms step_avg:99.37ms +step:69/1670 train_time:6853ms step_avg:99.31ms +step:70/1670 train_time:6949ms step_avg:99.27ms +step:71/1670 train_time:7045ms step_avg:99.22ms +step:72/1670 train_time:7141ms step_avg:99.18ms +step:73/1670 train_time:7237ms step_avg:99.13ms +step:74/1670 train_time:7332ms step_avg:99.08ms +step:75/1670 train_time:7428ms step_avg:99.04ms +step:76/1670 train_time:7524ms step_avg:99.01ms +step:77/1670 train_time:7621ms step_avg:98.98ms +step:78/1670 train_time:7716ms step_avg:98.93ms +step:79/1670 train_time:7811ms 
step_avg:98.88ms +step:80/1670 train_time:7907ms step_avg:98.84ms +step:81/1670 train_time:8004ms step_avg:98.81ms +step:82/1670 train_time:8100ms step_avg:98.78ms +step:83/1670 train_time:8196ms step_avg:98.74ms +step:84/1670 train_time:8291ms step_avg:98.70ms +step:85/1670 train_time:8387ms step_avg:98.67ms +step:86/1670 train_time:8482ms step_avg:98.63ms +step:87/1670 train_time:8579ms step_avg:98.61ms +step:88/1670 train_time:8674ms step_avg:98.57ms +step:89/1670 train_time:8770ms step_avg:98.53ms +step:90/1670 train_time:8866ms step_avg:98.51ms +step:91/1670 train_time:8963ms step_avg:98.49ms +step:92/1670 train_time:9059ms step_avg:98.47ms +step:93/1670 train_time:9154ms step_avg:98.43ms +step:94/1670 train_time:9250ms step_avg:98.41ms +step:95/1670 train_time:9345ms step_avg:98.37ms +step:96/1670 train_time:9440ms step_avg:98.34ms +step:97/1670 train_time:9536ms step_avg:98.31ms +step:98/1670 train_time:9631ms step_avg:98.28ms +step:99/1670 train_time:9727ms step_avg:98.26ms +step:100/1670 train_time:9823ms step_avg:98.23ms +step:101/1670 train_time:9919ms step_avg:98.21ms +step:102/1670 train_time:10015ms step_avg:98.18ms +step:103/1670 train_time:10111ms step_avg:98.16ms +step:104/1670 train_time:10207ms step_avg:98.15ms +step:105/1670 train_time:10303ms step_avg:98.13ms +step:106/1670 train_time:10400ms step_avg:98.11ms +step:107/1670 train_time:10495ms step_avg:98.09ms +step:108/1670 train_time:10591ms step_avg:98.06ms +step:109/1670 train_time:10686ms step_avg:98.04ms +step:110/1670 train_time:10781ms step_avg:98.01ms +step:111/1670 train_time:10877ms step_avg:97.99ms +step:112/1670 train_time:10972ms step_avg:97.96ms +step:113/1670 train_time:11069ms step_avg:97.95ms +step:114/1670 train_time:11164ms step_avg:97.93ms +step:115/1670 train_time:11260ms step_avg:97.91ms +step:116/1670 train_time:11356ms step_avg:97.90ms +step:117/1670 train_time:11452ms step_avg:97.88ms +step:118/1670 train_time:11548ms step_avg:97.86ms +step:119/1670 train_time:11643ms step_avg:97.84ms +step:120/1670 train_time:11739ms step_avg:97.82ms +step:121/1670 train_time:11834ms step_avg:97.80ms +step:122/1670 train_time:11929ms step_avg:97.78ms +step:123/1670 train_time:12025ms step_avg:97.77ms +step:124/1670 train_time:12122ms step_avg:97.75ms +step:125/1670 train_time:12217ms step_avg:97.74ms +step:125/1670 val_loss:4.2904 train_time:12312ms step_avg:98.49ms +step:126/1670 train_time:12333ms step_avg:97.88ms +step:127/1670 train_time:12418ms step_avg:97.78ms +step:128/1670 train_time:12523ms step_avg:97.84ms +step:129/1670 train_time:12621ms step_avg:97.83ms +step:130/1670 train_time:12717ms step_avg:97.82ms +step:131/1670 train_time:12812ms step_avg:97.80ms +step:132/1670 train_time:12906ms step_avg:97.78ms +step:133/1670 train_time:13001ms step_avg:97.75ms +step:134/1670 train_time:13095ms step_avg:97.73ms +step:135/1670 train_time:13190ms step_avg:97.70ms +step:136/1670 train_time:13284ms step_avg:97.68ms +step:137/1670 train_time:13380ms step_avg:97.67ms +step:138/1670 train_time:13478ms step_avg:97.67ms +step:139/1670 train_time:13575ms step_avg:97.66ms +step:140/1670 train_time:13672ms step_avg:97.66ms +step:141/1670 train_time:13769ms step_avg:97.65ms +step:142/1670 train_time:13865ms step_avg:97.64ms +step:143/1670 train_time:13960ms step_avg:97.62ms +step:144/1670 train_time:14054ms step_avg:97.60ms +step:145/1670 train_time:14149ms step_avg:97.58ms +step:146/1670 train_time:14244ms step_avg:97.56ms +step:147/1670 train_time:14338ms step_avg:97.54ms +step:148/1670 train_time:14434ms 
step_avg:97.53ms +step:149/1670 train_time:14530ms step_avg:97.52ms +step:150/1670 train_time:14627ms step_avg:97.52ms +step:151/1670 train_time:14724ms step_avg:97.51ms +step:152/1670 train_time:14820ms step_avg:97.50ms +step:153/1670 train_time:14916ms step_avg:97.49ms +step:154/1670 train_time:15011ms step_avg:97.48ms +step:155/1670 train_time:15107ms step_avg:97.46ms +step:156/1670 train_time:15202ms step_avg:97.45ms +step:157/1670 train_time:15298ms step_avg:97.44ms +step:158/1670 train_time:15394ms step_avg:97.43ms +step:159/1670 train_time:15489ms step_avg:97.42ms +step:160/1670 train_time:15585ms step_avg:97.41ms +step:161/1670 train_time:15681ms step_avg:97.39ms +step:162/1670 train_time:15776ms step_avg:97.38ms +step:163/1670 train_time:15872ms step_avg:97.37ms +step:164/1670 train_time:15968ms step_avg:97.36ms +step:165/1670 train_time:16063ms step_avg:97.35ms +step:166/1670 train_time:16158ms step_avg:97.34ms +step:167/1670 train_time:16254ms step_avg:97.33ms +step:168/1670 train_time:16348ms step_avg:97.31ms +step:169/1670 train_time:16445ms step_avg:97.31ms +step:170/1670 train_time:16540ms step_avg:97.30ms +step:171/1670 train_time:16636ms step_avg:97.29ms +step:172/1670 train_time:16732ms step_avg:97.28ms +step:173/1670 train_time:16828ms step_avg:97.27ms +step:174/1670 train_time:16924ms step_avg:97.26ms +step:175/1670 train_time:17019ms step_avg:97.25ms +step:176/1670 train_time:17114ms step_avg:97.24ms +step:177/1670 train_time:17209ms step_avg:97.23ms +step:178/1670 train_time:17304ms step_avg:97.22ms +step:179/1670 train_time:17400ms step_avg:97.21ms +step:180/1670 train_time:17495ms step_avg:97.19ms +step:181/1670 train_time:17591ms step_avg:97.19ms +step:182/1670 train_time:17687ms step_avg:97.18ms +step:183/1670 train_time:17783ms step_avg:97.17ms +step:184/1670 train_time:17879ms step_avg:97.17ms +step:185/1670 train_time:17974ms step_avg:97.16ms +step:186/1670 train_time:18069ms step_avg:97.14ms +step:187/1670 train_time:18164ms step_avg:97.13ms +step:188/1670 train_time:18260ms step_avg:97.13ms +step:189/1670 train_time:18355ms step_avg:97.12ms +step:190/1670 train_time:18451ms step_avg:97.11ms +step:191/1670 train_time:18547ms step_avg:97.11ms +step:192/1670 train_time:18642ms step_avg:97.09ms +step:193/1670 train_time:18738ms step_avg:97.09ms +step:194/1670 train_time:18834ms step_avg:97.08ms +step:195/1670 train_time:18930ms step_avg:97.08ms +step:196/1670 train_time:19026ms step_avg:97.07ms +step:197/1670 train_time:19122ms step_avg:97.06ms +step:198/1670 train_time:19217ms step_avg:97.05ms +step:199/1670 train_time:19313ms step_avg:97.05ms +step:200/1670 train_time:19408ms step_avg:97.04ms +step:201/1670 train_time:19505ms step_avg:97.04ms +step:202/1670 train_time:19600ms step_avg:97.03ms +step:203/1670 train_time:19695ms step_avg:97.02ms +step:204/1670 train_time:19790ms step_avg:97.01ms +step:205/1670 train_time:19886ms step_avg:97.01ms +step:206/1670 train_time:19982ms step_avg:97.00ms +step:207/1670 train_time:20078ms step_avg:96.99ms +step:208/1670 train_time:20173ms step_avg:96.99ms +step:209/1670 train_time:20268ms step_avg:96.98ms +step:210/1670 train_time:20363ms step_avg:96.97ms +step:211/1670 train_time:20459ms step_avg:96.96ms +step:212/1670 train_time:20555ms step_avg:96.96ms +step:213/1670 train_time:20884ms step_avg:98.05ms +step:214/1670 train_time:20972ms step_avg:98.00ms +step:215/1670 train_time:21066ms step_avg:97.98ms +step:216/1670 train_time:21161ms step_avg:97.97ms +step:217/1670 train_time:21256ms step_avg:97.95ms +step:218/1670 
train_time:21351ms step_avg:97.94ms +step:219/1670 train_time:21446ms step_avg:97.93ms +step:220/1670 train_time:21540ms step_avg:97.91ms +step:221/1670 train_time:21635ms step_avg:97.89ms +step:222/1670 train_time:21729ms step_avg:97.88ms +step:223/1670 train_time:21828ms step_avg:97.88ms +step:224/1670 train_time:21928ms step_avg:97.89ms +step:225/1670 train_time:22026ms step_avg:97.89ms +step:226/1670 train_time:22122ms step_avg:97.88ms +step:227/1670 train_time:22216ms step_avg:97.87ms +step:228/1670 train_time:22311ms step_avg:97.86ms +step:229/1670 train_time:22406ms step_avg:97.84ms +step:230/1670 train_time:22501ms step_avg:97.83ms +step:231/1670 train_time:22595ms step_avg:97.81ms +step:232/1670 train_time:22690ms step_avg:97.80ms +step:233/1670 train_time:22786ms step_avg:97.79ms +step:234/1670 train_time:22883ms step_avg:97.79ms +step:235/1670 train_time:22979ms step_avg:97.78ms +step:236/1670 train_time:23075ms step_avg:97.77ms +step:237/1670 train_time:23171ms step_avg:97.77ms +step:238/1670 train_time:23268ms step_avg:97.76ms +step:239/1670 train_time:23362ms step_avg:97.75ms +step:240/1670 train_time:23457ms step_avg:97.74ms +step:241/1670 train_time:23551ms step_avg:97.72ms +step:242/1670 train_time:23646ms step_avg:97.71ms +step:243/1670 train_time:23741ms step_avg:97.70ms +step:244/1670 train_time:23837ms step_avg:97.69ms +step:245/1670 train_time:23934ms step_avg:97.69ms +step:246/1670 train_time:24030ms step_avg:97.68ms +step:247/1670 train_time:24126ms step_avg:97.68ms +step:248/1670 train_time:24222ms step_avg:97.67ms +step:249/1670 train_time:24317ms step_avg:97.66ms +step:250/1670 train_time:24412ms step_avg:97.65ms +step:250/1670 val_loss:3.9654 train_time:24506ms step_avg:98.02ms +step:251/1670 train_time:24527ms step_avg:97.72ms +step:252/1670 train_time:24608ms step_avg:97.65ms +step:253/1670 train_time:24708ms step_avg:97.66ms +step:254/1670 train_time:24803ms step_avg:97.65ms +step:255/1670 train_time:24898ms step_avg:97.64ms +step:256/1670 train_time:24993ms step_avg:97.63ms +step:257/1670 train_time:25088ms step_avg:97.62ms +step:258/1670 train_time:25183ms step_avg:97.61ms +step:259/1670 train_time:25277ms step_avg:97.60ms +step:260/1670 train_time:25372ms step_avg:97.59ms +step:261/1670 train_time:25467ms step_avg:97.57ms +step:262/1670 train_time:25564ms step_avg:97.57ms +step:263/1670 train_time:25662ms step_avg:97.57ms +step:264/1670 train_time:25758ms step_avg:97.57ms +step:265/1670 train_time:25854ms step_avg:97.56ms +step:266/1670 train_time:25950ms step_avg:97.56ms +step:267/1670 train_time:26045ms step_avg:97.55ms +step:268/1670 train_time:26140ms step_avg:97.54ms +step:269/1670 train_time:26234ms step_avg:97.52ms +step:270/1670 train_time:26329ms step_avg:97.52ms +step:271/1670 train_time:26424ms step_avg:97.51ms +step:272/1670 train_time:26520ms step_avg:97.50ms +step:273/1670 train_time:26617ms step_avg:97.50ms +step:274/1670 train_time:26713ms step_avg:97.49ms +step:275/1670 train_time:26809ms step_avg:97.49ms +step:276/1670 train_time:26905ms step_avg:97.48ms +step:277/1670 train_time:27000ms step_avg:97.47ms +step:278/1670 train_time:27095ms step_avg:97.47ms +step:279/1670 train_time:27190ms step_avg:97.46ms +step:280/1670 train_time:27286ms step_avg:97.45ms +step:281/1670 train_time:27380ms step_avg:97.44ms +step:282/1670 train_time:27475ms step_avg:97.43ms +step:283/1670 train_time:27571ms step_avg:97.43ms +step:284/1670 train_time:27668ms step_avg:97.42ms +step:285/1670 train_time:27763ms step_avg:97.41ms +step:286/1670 train_time:27859ms 
step_avg:97.41ms +step:287/1670 train_time:27955ms step_avg:97.40ms +step:288/1670 train_time:28051ms step_avg:97.40ms +step:289/1670 train_time:28146ms step_avg:97.39ms +step:290/1670 train_time:28240ms step_avg:97.38ms +step:291/1670 train_time:28335ms step_avg:97.37ms +step:292/1670 train_time:28430ms step_avg:97.36ms +step:293/1670 train_time:28525ms step_avg:97.36ms +step:294/1670 train_time:28621ms step_avg:97.35ms +step:295/1670 train_time:28717ms step_avg:97.35ms +step:296/1670 train_time:28813ms step_avg:97.34ms +step:297/1670 train_time:28909ms step_avg:97.34ms +step:298/1670 train_time:29005ms step_avg:97.33ms +step:299/1670 train_time:29100ms step_avg:97.32ms +step:300/1670 train_time:29196ms step_avg:97.32ms +step:301/1670 train_time:29292ms step_avg:97.31ms +step:302/1670 train_time:29387ms step_avg:97.31ms +step:303/1670 train_time:29481ms step_avg:97.30ms +step:304/1670 train_time:29577ms step_avg:97.29ms +step:305/1670 train_time:29674ms step_avg:97.29ms +step:306/1670 train_time:29769ms step_avg:97.28ms +step:307/1670 train_time:29864ms step_avg:97.28ms +step:308/1670 train_time:29960ms step_avg:97.27ms +step:309/1670 train_time:30056ms step_avg:97.27ms +step:310/1670 train_time:30151ms step_avg:97.26ms +step:311/1670 train_time:30247ms step_avg:97.26ms +step:312/1670 train_time:30343ms step_avg:97.25ms +step:313/1670 train_time:30438ms step_avg:97.25ms +step:314/1670 train_time:30533ms step_avg:97.24ms +step:315/1670 train_time:30629ms step_avg:97.23ms +step:316/1670 train_time:30724ms step_avg:97.23ms +step:317/1670 train_time:30819ms step_avg:97.22ms +step:318/1670 train_time:30916ms step_avg:97.22ms +step:319/1670 train_time:31011ms step_avg:97.21ms +step:320/1670 train_time:31106ms step_avg:97.21ms +step:321/1670 train_time:31203ms step_avg:97.20ms +step:322/1670 train_time:31298ms step_avg:97.20ms +step:323/1670 train_time:31394ms step_avg:97.20ms +step:324/1670 train_time:31490ms step_avg:97.19ms +step:325/1670 train_time:31585ms step_avg:97.19ms +step:326/1670 train_time:31680ms step_avg:97.18ms +step:327/1670 train_time:31776ms step_avg:97.17ms +step:328/1670 train_time:31872ms step_avg:97.17ms +step:329/1670 train_time:31967ms step_avg:97.17ms +step:330/1670 train_time:32063ms step_avg:97.16ms +step:331/1670 train_time:32158ms step_avg:97.15ms +step:332/1670 train_time:32255ms step_avg:97.15ms +step:333/1670 train_time:32350ms step_avg:97.15ms +step:334/1670 train_time:32446ms step_avg:97.14ms +step:335/1670 train_time:32541ms step_avg:97.14ms +step:336/1670 train_time:32638ms step_avg:97.14ms +step:337/1670 train_time:32733ms step_avg:97.13ms +step:338/1670 train_time:32829ms step_avg:97.13ms +step:339/1670 train_time:32924ms step_avg:97.12ms +step:340/1670 train_time:33019ms step_avg:97.12ms +step:341/1670 train_time:33115ms step_avg:97.11ms +step:342/1670 train_time:33211ms step_avg:97.11ms +step:343/1670 train_time:33307ms step_avg:97.10ms +step:344/1670 train_time:33401ms step_avg:97.10ms +step:345/1670 train_time:33497ms step_avg:97.09ms +step:346/1670 train_time:33594ms step_avg:97.09ms +step:347/1670 train_time:33690ms step_avg:97.09ms +step:348/1670 train_time:33787ms step_avg:97.09ms +step:349/1670 train_time:33882ms step_avg:97.08ms +step:350/1670 train_time:33977ms step_avg:97.08ms +step:351/1670 train_time:34072ms step_avg:97.07ms +step:352/1670 train_time:34168ms step_avg:97.07ms +step:353/1670 train_time:34264ms step_avg:97.06ms +step:354/1670 train_time:34359ms step_avg:97.06ms +step:355/1670 train_time:34455ms step_avg:97.06ms +step:356/1670 
train_time:34550ms step_avg:97.05ms +step:357/1670 train_time:34645ms step_avg:97.05ms +step:358/1670 train_time:34741ms step_avg:97.04ms +step:359/1670 train_time:34837ms step_avg:97.04ms +step:360/1670 train_time:34933ms step_avg:97.04ms +step:361/1670 train_time:35029ms step_avg:97.03ms +step:362/1670 train_time:35124ms step_avg:97.03ms +step:363/1670 train_time:35220ms step_avg:97.03ms +step:364/1670 train_time:35315ms step_avg:97.02ms +step:365/1670 train_time:35411ms step_avg:97.02ms +step:366/1670 train_time:35506ms step_avg:97.01ms +step:367/1670 train_time:35601ms step_avg:97.01ms +step:368/1670 train_time:35697ms step_avg:97.00ms +step:369/1670 train_time:35793ms step_avg:97.00ms +step:370/1670 train_time:35889ms step_avg:97.00ms +step:371/1670 train_time:35985ms step_avg:97.00ms +step:372/1670 train_time:36080ms step_avg:96.99ms +step:373/1670 train_time:36176ms step_avg:96.99ms +step:374/1670 train_time:36272ms step_avg:96.98ms +step:375/1670 train_time:36368ms step_avg:96.98ms +step:375/1670 val_loss:3.8173 train_time:36462ms step_avg:97.23ms +step:376/1670 train_time:36485ms step_avg:97.03ms +step:377/1670 train_time:36566ms step_avg:96.99ms +step:378/1670 train_time:36662ms step_avg:96.99ms +step:379/1670 train_time:36759ms step_avg:96.99ms +step:380/1670 train_time:36855ms step_avg:96.99ms +step:381/1670 train_time:36950ms step_avg:96.98ms +step:382/1670 train_time:37045ms step_avg:96.98ms +step:383/1670 train_time:37140ms step_avg:96.97ms +step:384/1670 train_time:37235ms step_avg:96.97ms +step:385/1670 train_time:37329ms step_avg:96.96ms +step:386/1670 train_time:37425ms step_avg:96.96ms +step:387/1670 train_time:37523ms step_avg:96.96ms +step:388/1670 train_time:37621ms step_avg:96.96ms +step:389/1670 train_time:37718ms step_avg:96.96ms +step:390/1670 train_time:37813ms step_avg:96.96ms +step:391/1670 train_time:37908ms step_avg:96.95ms +step:392/1670 train_time:38003ms step_avg:96.95ms +step:393/1670 train_time:38098ms step_avg:96.94ms +step:394/1670 train_time:38193ms step_avg:96.94ms +step:395/1670 train_time:38287ms step_avg:96.93ms +step:396/1670 train_time:38382ms step_avg:96.93ms +step:397/1670 train_time:38479ms step_avg:96.92ms +step:398/1670 train_time:38575ms step_avg:96.92ms +step:399/1670 train_time:38672ms step_avg:96.92ms +step:400/1670 train_time:38768ms step_avg:96.92ms +step:401/1670 train_time:38864ms step_avg:96.92ms +step:402/1670 train_time:38959ms step_avg:96.91ms +step:403/1670 train_time:39055ms step_avg:96.91ms +step:404/1670 train_time:39150ms step_avg:96.91ms +step:405/1670 train_time:39245ms step_avg:96.90ms +step:406/1670 train_time:39340ms step_avg:96.90ms +step:407/1670 train_time:39436ms step_avg:96.89ms +step:408/1670 train_time:39532ms step_avg:96.89ms +step:409/1670 train_time:39629ms step_avg:96.89ms +step:410/1670 train_time:39724ms step_avg:96.89ms +step:411/1670 train_time:39820ms step_avg:96.89ms +step:412/1670 train_time:39916ms step_avg:96.88ms +step:413/1670 train_time:40011ms step_avg:96.88ms +step:414/1670 train_time:40107ms step_avg:96.88ms +step:415/1670 train_time:40202ms step_avg:96.87ms +step:416/1670 train_time:40298ms step_avg:96.87ms +step:417/1670 train_time:40393ms step_avg:96.87ms +step:418/1670 train_time:40488ms step_avg:96.86ms +step:419/1670 train_time:40584ms step_avg:96.86ms +step:420/1670 train_time:40680ms step_avg:96.86ms +step:421/1670 train_time:40776ms step_avg:96.85ms +step:422/1670 train_time:40872ms step_avg:96.85ms +step:423/1670 train_time:40967ms step_avg:96.85ms +step:424/1670 train_time:41063ms 
step_avg:96.85ms +step:425/1670 train_time:41326ms step_avg:97.24ms +step:426/1670 train_time:41451ms step_avg:97.30ms +step:427/1670 train_time:41544ms step_avg:97.29ms +step:428/1670 train_time:41639ms step_avg:97.29ms +step:429/1670 train_time:41734ms step_avg:97.28ms +step:430/1670 train_time:41829ms step_avg:97.28ms +step:431/1670 train_time:41923ms step_avg:97.27ms +step:432/1670 train_time:42018ms step_avg:97.26ms +step:433/1670 train_time:42113ms step_avg:97.26ms +step:434/1670 train_time:42209ms step_avg:97.25ms +step:435/1670 train_time:42305ms step_avg:97.25ms +step:436/1670 train_time:42404ms step_avg:97.26ms +step:437/1670 train_time:42502ms step_avg:97.26ms +step:438/1670 train_time:42597ms step_avg:97.25ms +step:439/1670 train_time:42692ms step_avg:97.25ms +step:440/1670 train_time:42788ms step_avg:97.24ms +step:441/1670 train_time:42883ms step_avg:97.24ms +step:442/1670 train_time:42978ms step_avg:97.24ms +step:443/1670 train_time:43073ms step_avg:97.23ms +step:444/1670 train_time:43168ms step_avg:97.23ms +step:445/1670 train_time:43263ms step_avg:97.22ms +step:446/1670 train_time:43361ms step_avg:97.22ms +step:447/1670 train_time:43459ms step_avg:97.22ms +step:448/1670 train_time:43556ms step_avg:97.22ms +step:449/1670 train_time:43652ms step_avg:97.22ms +step:450/1670 train_time:43747ms step_avg:97.22ms +step:451/1670 train_time:43843ms step_avg:97.21ms +step:452/1670 train_time:43937ms step_avg:97.21ms +step:453/1670 train_time:44033ms step_avg:97.20ms +step:454/1670 train_time:44127ms step_avg:97.20ms +step:455/1670 train_time:44223ms step_avg:97.19ms +step:456/1670 train_time:44319ms step_avg:97.19ms +step:457/1670 train_time:44415ms step_avg:97.19ms +step:458/1670 train_time:44512ms step_avg:97.19ms +step:459/1670 train_time:44608ms step_avg:97.18ms +step:460/1670 train_time:44703ms step_avg:97.18ms +step:461/1670 train_time:44798ms step_avg:97.18ms +step:462/1670 train_time:44894ms step_avg:97.17ms +step:463/1670 train_time:44989ms step_avg:97.17ms +step:464/1670 train_time:45083ms step_avg:97.16ms +step:465/1670 train_time:45179ms step_avg:97.16ms +step:466/1670 train_time:45274ms step_avg:97.16ms +step:467/1670 train_time:45371ms step_avg:97.15ms +step:468/1670 train_time:45466ms step_avg:97.15ms +step:469/1670 train_time:45562ms step_avg:97.15ms +step:470/1670 train_time:45659ms step_avg:97.15ms +step:471/1670 train_time:45755ms step_avg:97.14ms +step:472/1670 train_time:45850ms step_avg:97.14ms +step:473/1670 train_time:45946ms step_avg:97.14ms +step:474/1670 train_time:46041ms step_avg:97.13ms +step:475/1670 train_time:46136ms step_avg:97.13ms +step:476/1670 train_time:46232ms step_avg:97.13ms +step:477/1670 train_time:46328ms step_avg:97.12ms +step:478/1670 train_time:46423ms step_avg:97.12ms +step:479/1670 train_time:46520ms step_avg:97.12ms +step:480/1670 train_time:46616ms step_avg:97.12ms +step:481/1670 train_time:46712ms step_avg:97.11ms +step:482/1670 train_time:46808ms step_avg:97.11ms +step:483/1670 train_time:46903ms step_avg:97.11ms +step:484/1670 train_time:46998ms step_avg:97.10ms +step:485/1670 train_time:47093ms step_avg:97.10ms +step:486/1670 train_time:47189ms step_avg:97.10ms +step:487/1670 train_time:47283ms step_avg:97.09ms +step:488/1670 train_time:47379ms step_avg:97.09ms +step:489/1670 train_time:47476ms step_avg:97.09ms +step:490/1670 train_time:47572ms step_avg:97.09ms +step:491/1670 train_time:47668ms step_avg:97.08ms +step:492/1670 train_time:47764ms step_avg:97.08ms +step:493/1670 train_time:47860ms step_avg:97.08ms +step:494/1670 
train_time:47957ms step_avg:97.08ms +step:495/1670 train_time:48053ms step_avg:97.08ms +step:496/1670 train_time:48147ms step_avg:97.07ms +step:497/1670 train_time:48243ms step_avg:97.07ms +step:498/1670 train_time:48339ms step_avg:97.07ms +step:499/1670 train_time:48435ms step_avg:97.06ms +step:500/1670 train_time:48532ms step_avg:97.06ms +step:500/1670 val_loss:3.7160 train_time:48627ms step_avg:97.25ms +step:501/1670 train_time:48648ms step_avg:97.10ms +step:502/1670 train_time:48732ms step_avg:97.08ms +step:503/1670 train_time:48834ms step_avg:97.09ms +step:504/1670 train_time:48931ms step_avg:97.08ms +step:505/1670 train_time:49026ms step_avg:97.08ms +step:506/1670 train_time:49120ms step_avg:97.08ms +step:507/1670 train_time:49215ms step_avg:97.07ms +step:508/1670 train_time:49309ms step_avg:97.07ms +step:509/1670 train_time:49404ms step_avg:97.06ms +step:510/1670 train_time:49498ms step_avg:97.05ms +step:511/1670 train_time:49594ms step_avg:97.05ms +step:512/1670 train_time:49692ms step_avg:97.05ms +step:513/1670 train_time:49789ms step_avg:97.06ms +step:514/1670 train_time:49886ms step_avg:97.06ms +step:515/1670 train_time:49982ms step_avg:97.05ms +step:516/1670 train_time:50078ms step_avg:97.05ms +step:517/1670 train_time:50173ms step_avg:97.05ms +step:518/1670 train_time:50269ms step_avg:97.04ms +step:519/1670 train_time:50364ms step_avg:97.04ms +step:520/1670 train_time:50459ms step_avg:97.04ms +step:521/1670 train_time:50554ms step_avg:97.03ms +step:522/1670 train_time:50651ms step_avg:97.03ms +step:523/1670 train_time:50747ms step_avg:97.03ms +step:524/1670 train_time:50844ms step_avg:97.03ms +step:525/1670 train_time:50940ms step_avg:97.03ms +step:526/1670 train_time:51035ms step_avg:97.03ms +step:527/1670 train_time:51131ms step_avg:97.02ms +step:528/1670 train_time:51227ms step_avg:97.02ms +step:529/1670 train_time:51322ms step_avg:97.02ms +step:530/1670 train_time:51417ms step_avg:97.01ms +step:531/1670 train_time:51512ms step_avg:97.01ms +step:532/1670 train_time:51608ms step_avg:97.01ms +step:533/1670 train_time:51704ms step_avg:97.01ms +step:534/1670 train_time:51801ms step_avg:97.00ms +step:535/1670 train_time:51897ms step_avg:97.00ms +step:536/1670 train_time:51993ms step_avg:97.00ms +step:537/1670 train_time:52089ms step_avg:97.00ms +step:538/1670 train_time:52184ms step_avg:97.00ms +step:539/1670 train_time:52280ms step_avg:96.99ms +step:540/1670 train_time:52375ms step_avg:96.99ms +step:541/1670 train_time:52470ms step_avg:96.99ms +step:542/1670 train_time:52566ms step_avg:96.98ms +step:543/1670 train_time:52661ms step_avg:96.98ms +step:544/1670 train_time:52756ms step_avg:96.98ms +step:545/1670 train_time:52852ms step_avg:96.98ms +step:546/1670 train_time:52949ms step_avg:96.98ms +step:547/1670 train_time:53045ms step_avg:96.97ms +step:548/1670 train_time:53141ms step_avg:96.97ms +step:549/1670 train_time:53237ms step_avg:96.97ms +step:550/1670 train_time:53333ms step_avg:96.97ms +step:551/1670 train_time:53429ms step_avg:96.97ms +step:552/1670 train_time:53524ms step_avg:96.96ms +step:553/1670 train_time:53619ms step_avg:96.96ms +step:554/1670 train_time:53714ms step_avg:96.96ms +step:555/1670 train_time:53810ms step_avg:96.95ms +step:556/1670 train_time:53906ms step_avg:96.95ms +step:557/1670 train_time:54003ms step_avg:96.95ms +step:558/1670 train_time:54099ms step_avg:96.95ms +step:559/1670 train_time:54196ms step_avg:96.95ms +step:560/1670 train_time:54293ms step_avg:96.95ms +step:561/1670 train_time:54390ms step_avg:96.95ms +step:562/1670 train_time:54487ms 
step_avg:96.95ms +step:563/1670 train_time:54584ms step_avg:96.95ms +step:564/1670 train_time:54681ms step_avg:96.95ms +step:565/1670 train_time:54778ms step_avg:96.95ms +step:566/1670 train_time:54875ms step_avg:96.95ms +step:567/1670 train_time:54973ms step_avg:96.95ms +step:568/1670 train_time:55071ms step_avg:96.96ms +step:569/1670 train_time:55168ms step_avg:96.96ms +step:570/1670 train_time:55265ms step_avg:96.96ms +step:571/1670 train_time:55362ms step_avg:96.96ms +step:572/1670 train_time:55459ms step_avg:96.96ms +step:573/1670 train_time:55556ms step_avg:96.96ms +step:574/1670 train_time:55652ms step_avg:96.96ms +step:575/1670 train_time:55751ms step_avg:96.96ms +step:576/1670 train_time:55850ms step_avg:96.96ms +step:577/1670 train_time:55948ms step_avg:96.96ms +step:578/1670 train_time:56045ms step_avg:96.96ms +step:579/1670 train_time:56142ms step_avg:96.96ms +step:580/1670 train_time:56240ms step_avg:96.97ms +step:581/1670 train_time:56336ms step_avg:96.96ms +step:582/1670 train_time:56432ms step_avg:96.96ms +step:583/1670 train_time:56529ms step_avg:96.96ms +step:584/1670 train_time:56626ms step_avg:96.96ms +step:585/1670 train_time:56723ms step_avg:96.96ms +step:586/1670 train_time:56820ms step_avg:96.96ms +step:587/1670 train_time:56918ms step_avg:96.96ms +step:588/1670 train_time:57015ms step_avg:96.96ms +step:589/1670 train_time:57113ms step_avg:96.97ms +step:590/1670 train_time:57211ms step_avg:96.97ms +step:591/1670 train_time:57309ms step_avg:96.97ms +step:592/1670 train_time:57406ms step_avg:96.97ms +step:593/1670 train_time:57503ms step_avg:96.97ms +step:594/1670 train_time:57600ms step_avg:96.97ms +step:595/1670 train_time:57697ms step_avg:96.97ms +step:596/1670 train_time:57793ms step_avg:96.97ms +step:597/1670 train_time:57892ms step_avg:96.97ms +step:598/1670 train_time:57989ms step_avg:96.97ms +step:599/1670 train_time:58087ms step_avg:96.97ms +step:600/1670 train_time:58184ms step_avg:96.97ms +step:601/1670 train_time:58281ms step_avg:96.97ms +step:602/1670 train_time:58378ms step_avg:96.97ms +step:603/1670 train_time:58475ms step_avg:96.97ms +step:604/1670 train_time:58572ms step_avg:96.97ms +step:605/1670 train_time:58670ms step_avg:96.98ms +step:606/1670 train_time:58768ms step_avg:96.98ms +step:607/1670 train_time:58865ms step_avg:96.98ms +step:608/1670 train_time:58962ms step_avg:96.98ms +step:609/1670 train_time:59059ms step_avg:96.98ms +step:610/1670 train_time:59156ms step_avg:96.98ms +step:611/1670 train_time:59254ms step_avg:96.98ms +step:612/1670 train_time:59351ms step_avg:96.98ms +step:613/1670 train_time:59448ms step_avg:96.98ms +step:614/1670 train_time:59545ms step_avg:96.98ms +step:615/1670 train_time:59642ms step_avg:96.98ms +step:616/1670 train_time:59739ms step_avg:96.98ms +step:617/1670 train_time:59836ms step_avg:96.98ms +step:618/1670 train_time:59933ms step_avg:96.98ms +step:619/1670 train_time:60030ms step_avg:96.98ms +step:620/1670 train_time:60129ms step_avg:96.98ms +step:621/1670 train_time:60225ms step_avg:96.98ms +step:622/1670 train_time:60322ms step_avg:96.98ms +step:623/1670 train_time:60418ms step_avg:96.98ms +step:624/1670 train_time:60515ms step_avg:96.98ms +step:625/1670 train_time:60612ms step_avg:96.98ms +step:625/1670 val_loss:3.6152 train_time:60710ms step_avg:97.14ms +step:626/1670 train_time:60731ms step_avg:97.01ms +step:627/1670 train_time:60820ms step_avg:97.00ms +step:628/1670 train_time:60918ms step_avg:97.00ms +step:629/1670 train_time:61014ms step_avg:97.00ms +step:630/1670 train_time:61110ms step_avg:97.00ms 
+step:631/1670 train_time:61206ms step_avg:97.00ms +step:632/1670 train_time:61301ms step_avg:97.00ms +step:633/1670 train_time:61397ms step_avg:96.99ms +step:634/1670 train_time:61493ms step_avg:96.99ms +step:635/1670 train_time:61589ms step_avg:96.99ms +step:636/1670 train_time:61687ms step_avg:96.99ms +step:637/1670 train_time:61786ms step_avg:97.00ms +step:638/1670 train_time:61886ms step_avg:97.00ms +step:639/1670 train_time:62255ms step_avg:97.43ms +step:640/1670 train_time:62349ms step_avg:97.42ms +step:641/1670 train_time:62444ms step_avg:97.42ms +step:642/1670 train_time:62540ms step_avg:97.41ms +step:643/1670 train_time:62636ms step_avg:97.41ms +step:644/1670 train_time:62733ms step_avg:97.41ms +step:645/1670 train_time:62828ms step_avg:97.41ms +step:646/1670 train_time:62924ms step_avg:97.41ms +step:647/1670 train_time:63020ms step_avg:97.40ms +step:648/1670 train_time:63116ms step_avg:97.40ms +step:649/1670 train_time:63220ms step_avg:97.41ms +step:650/1670 train_time:63320ms step_avg:97.42ms +step:651/1670 train_time:63418ms step_avg:97.42ms +step:652/1670 train_time:63515ms step_avg:97.42ms +step:653/1670 train_time:63612ms step_avg:97.42ms +step:654/1670 train_time:63709ms step_avg:97.42ms +step:655/1670 train_time:63805ms step_avg:97.41ms +step:656/1670 train_time:63900ms step_avg:97.41ms +step:657/1670 train_time:63996ms step_avg:97.41ms +step:658/1670 train_time:64093ms step_avg:97.41ms +step:659/1670 train_time:64195ms step_avg:97.41ms +step:660/1670 train_time:64295ms step_avg:97.42ms +step:661/1670 train_time:64394ms step_avg:97.42ms +step:662/1670 train_time:64491ms step_avg:97.42ms +step:663/1670 train_time:64589ms step_avg:97.42ms +step:664/1670 train_time:64686ms step_avg:97.42ms +step:665/1670 train_time:64782ms step_avg:97.42ms +step:666/1670 train_time:64878ms step_avg:97.41ms +step:667/1670 train_time:64974ms step_avg:97.41ms +step:668/1670 train_time:65070ms step_avg:97.41ms +step:669/1670 train_time:65168ms step_avg:97.41ms +step:670/1670 train_time:65267ms step_avg:97.41ms +step:671/1670 train_time:65366ms step_avg:97.42ms +step:672/1670 train_time:65464ms step_avg:97.42ms +step:673/1670 train_time:65562ms step_avg:97.42ms +step:674/1670 train_time:65659ms step_avg:97.42ms +step:675/1670 train_time:65757ms step_avg:97.42ms +step:676/1670 train_time:65854ms step_avg:97.42ms +step:677/1670 train_time:65951ms step_avg:97.42ms +step:678/1670 train_time:66048ms step_avg:97.42ms +step:679/1670 train_time:66145ms step_avg:97.42ms +step:680/1670 train_time:66242ms step_avg:97.41ms +step:681/1670 train_time:66340ms step_avg:97.42ms +step:682/1670 train_time:66439ms step_avg:97.42ms +step:683/1670 train_time:66536ms step_avg:97.42ms +step:684/1670 train_time:66634ms step_avg:97.42ms +step:685/1670 train_time:66731ms step_avg:97.42ms +step:686/1670 train_time:66829ms step_avg:97.42ms +step:687/1670 train_time:66926ms step_avg:97.42ms +step:688/1670 train_time:67022ms step_avg:97.41ms +step:689/1670 train_time:67118ms step_avg:97.41ms +step:690/1670 train_time:67215ms step_avg:97.41ms +step:691/1670 train_time:67314ms step_avg:97.42ms +step:692/1670 train_time:67413ms step_avg:97.42ms +step:693/1670 train_time:67511ms step_avg:97.42ms +step:694/1670 train_time:67608ms step_avg:97.42ms +step:695/1670 train_time:67705ms step_avg:97.42ms +step:696/1670 train_time:67802ms step_avg:97.42ms +step:697/1670 train_time:67899ms step_avg:97.42ms +step:698/1670 train_time:67995ms step_avg:97.41ms +step:699/1670 train_time:68094ms step_avg:97.42ms +step:700/1670 train_time:68192ms 
step_avg:97.42ms +step:701/1670 train_time:68290ms step_avg:97.42ms +step:702/1670 train_time:68388ms step_avg:97.42ms +step:703/1670 train_time:68486ms step_avg:97.42ms +step:704/1670 train_time:68584ms step_avg:97.42ms +step:705/1670 train_time:68681ms step_avg:97.42ms +step:706/1670 train_time:68778ms step_avg:97.42ms +step:707/1670 train_time:68875ms step_avg:97.42ms +step:708/1670 train_time:68973ms step_avg:97.42ms +step:709/1670 train_time:69070ms step_avg:97.42ms +step:710/1670 train_time:69166ms step_avg:97.42ms +step:711/1670 train_time:69264ms step_avg:97.42ms +step:712/1670 train_time:69361ms step_avg:97.42ms +step:713/1670 train_time:69458ms step_avg:97.42ms +step:714/1670 train_time:69555ms step_avg:97.42ms +step:715/1670 train_time:69654ms step_avg:97.42ms +step:716/1670 train_time:69752ms step_avg:97.42ms +step:717/1670 train_time:69849ms step_avg:97.42ms +step:718/1670 train_time:69946ms step_avg:97.42ms +step:719/1670 train_time:70042ms step_avg:97.42ms +step:720/1670 train_time:70139ms step_avg:97.41ms +step:721/1670 train_time:70236ms step_avg:97.42ms +step:722/1670 train_time:70334ms step_avg:97.41ms +step:723/1670 train_time:70431ms step_avg:97.42ms +step:724/1670 train_time:70528ms step_avg:97.41ms +step:725/1670 train_time:70626ms step_avg:97.42ms +step:726/1670 train_time:70724ms step_avg:97.42ms +step:727/1670 train_time:70821ms step_avg:97.42ms +step:728/1670 train_time:70918ms step_avg:97.42ms +step:729/1670 train_time:71015ms step_avg:97.41ms +step:730/1670 train_time:71113ms step_avg:97.42ms +step:731/1670 train_time:71211ms step_avg:97.42ms +step:732/1670 train_time:71308ms step_avg:97.42ms +step:733/1670 train_time:71406ms step_avg:97.42ms +step:734/1670 train_time:71503ms step_avg:97.41ms +step:735/1670 train_time:71599ms step_avg:97.41ms +step:736/1670 train_time:71697ms step_avg:97.41ms +step:737/1670 train_time:71795ms step_avg:97.42ms +step:738/1670 train_time:71894ms step_avg:97.42ms +step:739/1670 train_time:71991ms step_avg:97.42ms +step:740/1670 train_time:72088ms step_avg:97.42ms +step:741/1670 train_time:72185ms step_avg:97.42ms +step:742/1670 train_time:72282ms step_avg:97.42ms +step:743/1670 train_time:72378ms step_avg:97.41ms +step:744/1670 train_time:72476ms step_avg:97.41ms +step:745/1670 train_time:72574ms step_avg:97.41ms +step:746/1670 train_time:72671ms step_avg:97.41ms +step:747/1670 train_time:72769ms step_avg:97.41ms +step:748/1670 train_time:72866ms step_avg:97.42ms +step:749/1670 train_time:72964ms step_avg:97.42ms +step:750/1670 train_time:73061ms step_avg:97.41ms +step:750/1670 val_loss:3.5627 train_time:73157ms step_avg:97.54ms +step:751/1670 train_time:73179ms step_avg:97.44ms +step:752/1670 train_time:73261ms step_avg:97.42ms +step:753/1670 train_time:73364ms step_avg:97.43ms +step:754/1670 train_time:73461ms step_avg:97.43ms +step:755/1670 train_time:73558ms step_avg:97.43ms +step:756/1670 train_time:73654ms step_avg:97.43ms +step:757/1670 train_time:73750ms step_avg:97.42ms +step:758/1670 train_time:73846ms step_avg:97.42ms +step:759/1670 train_time:73943ms step_avg:97.42ms +step:760/1670 train_time:74039ms step_avg:97.42ms +step:761/1670 train_time:74136ms step_avg:97.42ms +step:762/1670 train_time:74235ms step_avg:97.42ms +step:763/1670 train_time:74335ms step_avg:97.42ms +step:764/1670 train_time:74434ms step_avg:97.43ms +step:765/1670 train_time:74530ms step_avg:97.43ms +step:766/1670 train_time:74627ms step_avg:97.42ms +step:767/1670 train_time:74724ms step_avg:97.42ms +step:768/1670 train_time:74820ms step_avg:97.42ms 
+step:769/1670 train_time:74916ms step_avg:97.42ms +step:770/1670 train_time:75012ms step_avg:97.42ms +step:771/1670 train_time:75109ms step_avg:97.42ms +step:772/1670 train_time:75208ms step_avg:97.42ms +step:773/1670 train_time:75307ms step_avg:97.42ms +step:774/1670 train_time:75405ms step_avg:97.42ms +step:775/1670 train_time:75504ms step_avg:97.42ms +step:776/1670 train_time:75601ms step_avg:97.42ms +step:777/1670 train_time:75698ms step_avg:97.42ms +step:778/1670 train_time:75794ms step_avg:97.42ms +step:779/1670 train_time:75890ms step_avg:97.42ms +step:780/1670 train_time:75987ms step_avg:97.42ms +step:781/1670 train_time:76084ms step_avg:97.42ms +step:782/1670 train_time:76183ms step_avg:97.42ms +step:783/1670 train_time:76281ms step_avg:97.42ms +step:784/1670 train_time:76380ms step_avg:97.42ms +step:785/1670 train_time:76477ms step_avg:97.42ms +step:786/1670 train_time:76576ms step_avg:97.42ms +step:787/1670 train_time:76673ms step_avg:97.42ms +step:788/1670 train_time:76770ms step_avg:97.42ms +step:789/1670 train_time:76867ms step_avg:97.42ms +step:790/1670 train_time:76962ms step_avg:97.42ms +step:791/1670 train_time:77059ms step_avg:97.42ms +step:792/1670 train_time:77157ms step_avg:97.42ms +step:793/1670 train_time:77255ms step_avg:97.42ms +step:794/1670 train_time:77353ms step_avg:97.42ms +step:795/1670 train_time:77451ms step_avg:97.42ms +step:796/1670 train_time:77548ms step_avg:97.42ms +step:797/1670 train_time:77645ms step_avg:97.42ms +step:798/1670 train_time:77744ms step_avg:97.42ms +step:799/1670 train_time:77841ms step_avg:97.42ms +step:800/1670 train_time:77937ms step_avg:97.42ms +step:801/1670 train_time:78034ms step_avg:97.42ms +step:802/1670 train_time:78131ms step_avg:97.42ms +step:803/1670 train_time:78228ms step_avg:97.42ms +step:804/1670 train_time:78326ms step_avg:97.42ms +step:805/1670 train_time:78424ms step_avg:97.42ms +step:806/1670 train_time:78522ms step_avg:97.42ms +step:807/1670 train_time:78619ms step_avg:97.42ms +step:808/1670 train_time:78716ms step_avg:97.42ms +step:809/1670 train_time:78813ms step_avg:97.42ms +step:810/1670 train_time:78909ms step_avg:97.42ms +step:811/1670 train_time:79006ms step_avg:97.42ms +step:812/1670 train_time:79103ms step_avg:97.42ms +step:813/1670 train_time:79200ms step_avg:97.42ms +step:814/1670 train_time:79298ms step_avg:97.42ms +step:815/1670 train_time:79396ms step_avg:97.42ms +step:816/1670 train_time:79495ms step_avg:97.42ms +step:817/1670 train_time:79592ms step_avg:97.42ms +step:818/1670 train_time:79688ms step_avg:97.42ms +step:819/1670 train_time:79785ms step_avg:97.42ms +step:820/1670 train_time:79883ms step_avg:97.42ms +step:821/1670 train_time:79980ms step_avg:97.42ms +step:822/1670 train_time:80077ms step_avg:97.42ms +step:823/1670 train_time:80173ms step_avg:97.42ms +step:824/1670 train_time:80270ms step_avg:97.41ms +step:825/1670 train_time:80368ms step_avg:97.42ms +step:826/1670 train_time:80466ms step_avg:97.42ms +step:827/1670 train_time:80565ms step_avg:97.42ms +step:828/1670 train_time:80663ms step_avg:97.42ms +step:829/1670 train_time:80759ms step_avg:97.42ms +step:830/1670 train_time:80857ms step_avg:97.42ms +step:831/1670 train_time:80953ms step_avg:97.42ms +step:832/1670 train_time:81050ms step_avg:97.42ms +step:833/1670 train_time:81148ms step_avg:97.42ms +step:834/1670 train_time:81245ms step_avg:97.42ms +step:835/1670 train_time:81343ms step_avg:97.42ms +step:836/1670 train_time:81441ms step_avg:97.42ms +step:837/1670 train_time:81539ms step_avg:97.42ms +step:838/1670 train_time:81635ms 
step_avg:97.42ms +step:839/1670 train_time:81732ms step_avg:97.42ms +step:840/1670 train_time:81830ms step_avg:97.42ms +step:841/1670 train_time:81926ms step_avg:97.42ms +step:842/1670 train_time:82024ms step_avg:97.42ms +step:843/1670 train_time:82122ms step_avg:97.42ms +step:844/1670 train_time:82219ms step_avg:97.42ms +step:845/1670 train_time:82316ms step_avg:97.42ms +step:846/1670 train_time:82413ms step_avg:97.42ms +step:847/1670 train_time:82511ms step_avg:97.42ms +step:848/1670 train_time:82608ms step_avg:97.41ms +step:849/1670 train_time:82705ms step_avg:97.42ms +step:850/1670 train_time:82804ms step_avg:97.42ms +step:851/1670 train_time:83075ms step_avg:97.62ms +step:852/1670 train_time:83271ms step_avg:97.74ms +step:853/1670 train_time:83366ms step_avg:97.73ms +step:854/1670 train_time:83462ms step_avg:97.73ms +step:855/1670 train_time:83558ms step_avg:97.73ms +step:856/1670 train_time:83654ms step_avg:97.73ms +step:857/1670 train_time:83750ms step_avg:97.72ms +step:858/1670 train_time:83846ms step_avg:97.72ms +step:859/1670 train_time:83943ms step_avg:97.72ms +step:860/1670 train_time:84039ms step_avg:97.72ms +step:861/1670 train_time:84138ms step_avg:97.72ms +step:862/1670 train_time:84241ms step_avg:97.73ms +step:863/1670 train_time:84340ms step_avg:97.73ms +step:864/1670 train_time:84438ms step_avg:97.73ms +step:865/1670 train_time:84535ms step_avg:97.73ms +step:866/1670 train_time:84631ms step_avg:97.73ms +step:867/1670 train_time:84727ms step_avg:97.72ms +step:868/1670 train_time:84823ms step_avg:97.72ms +step:869/1670 train_time:84920ms step_avg:97.72ms +step:870/1670 train_time:85016ms step_avg:97.72ms +step:871/1670 train_time:85113ms step_avg:97.72ms +step:872/1670 train_time:85212ms step_avg:97.72ms +step:873/1670 train_time:85310ms step_avg:97.72ms +step:874/1670 train_time:85409ms step_avg:97.72ms +step:875/1670 train_time:85507ms step_avg:97.72ms +step:875/1670 val_loss:3.5202 train_time:85604ms step_avg:97.83ms +step:876/1670 train_time:85625ms step_avg:97.75ms +step:877/1670 train_time:85710ms step_avg:97.73ms +step:878/1670 train_time:85810ms step_avg:97.73ms +step:879/1670 train_time:85908ms step_avg:97.73ms +step:880/1670 train_time:86005ms step_avg:97.73ms +step:881/1670 train_time:86101ms step_avg:97.73ms +step:882/1670 train_time:86197ms step_avg:97.73ms +step:883/1670 train_time:86294ms step_avg:97.73ms +step:884/1670 train_time:86389ms step_avg:97.73ms +step:885/1670 train_time:86485ms step_avg:97.72ms +step:886/1670 train_time:86583ms step_avg:97.72ms +step:887/1670 train_time:86685ms step_avg:97.73ms +step:888/1670 train_time:86784ms step_avg:97.73ms +step:889/1670 train_time:86882ms step_avg:97.73ms +step:890/1670 train_time:86979ms step_avg:97.73ms +step:891/1670 train_time:87077ms step_avg:97.73ms +step:892/1670 train_time:87174ms step_avg:97.73ms +step:893/1670 train_time:87270ms step_avg:97.73ms +step:894/1670 train_time:87366ms step_avg:97.72ms +step:895/1670 train_time:87463ms step_avg:97.72ms +step:896/1670 train_time:87560ms step_avg:97.72ms +step:897/1670 train_time:87659ms step_avg:97.73ms +step:898/1670 train_time:87759ms step_avg:97.73ms +step:899/1670 train_time:87858ms step_avg:97.73ms +step:900/1670 train_time:87956ms step_avg:97.73ms +step:901/1670 train_time:88054ms step_avg:97.73ms +step:902/1670 train_time:88150ms step_avg:97.73ms +step:903/1670 train_time:88247ms step_avg:97.73ms +step:904/1670 train_time:88343ms step_avg:97.72ms +step:905/1670 train_time:88439ms step_avg:97.72ms +step:906/1670 train_time:88536ms step_avg:97.72ms 
+step:907/1670 train_time:88634ms step_avg:97.72ms +step:908/1670 train_time:88732ms step_avg:97.72ms +step:909/1670 train_time:88829ms step_avg:97.72ms +step:910/1670 train_time:88926ms step_avg:97.72ms +step:911/1670 train_time:89024ms step_avg:97.72ms +step:912/1670 train_time:89121ms step_avg:97.72ms +step:913/1670 train_time:89219ms step_avg:97.72ms +step:914/1670 train_time:89317ms step_avg:97.72ms +step:915/1670 train_time:89413ms step_avg:97.72ms +step:916/1670 train_time:89510ms step_avg:97.72ms +step:917/1670 train_time:89607ms step_avg:97.72ms +step:918/1670 train_time:89704ms step_avg:97.72ms +step:919/1670 train_time:89803ms step_avg:97.72ms +step:920/1670 train_time:89901ms step_avg:97.72ms +step:921/1670 train_time:89999ms step_avg:97.72ms +step:922/1670 train_time:90096ms step_avg:97.72ms +step:923/1670 train_time:90193ms step_avg:97.72ms +step:924/1670 train_time:90290ms step_avg:97.72ms +step:925/1670 train_time:90387ms step_avg:97.72ms +step:926/1670 train_time:90483ms step_avg:97.71ms +step:927/1670 train_time:90581ms step_avg:97.71ms +step:928/1670 train_time:90679ms step_avg:97.71ms +step:929/1670 train_time:90777ms step_avg:97.72ms +step:930/1670 train_time:90876ms step_avg:97.72ms +step:931/1670 train_time:90973ms step_avg:97.71ms +step:932/1670 train_time:91070ms step_avg:97.71ms +step:933/1670 train_time:91167ms step_avg:97.71ms +step:934/1670 train_time:91265ms step_avg:97.71ms +step:935/1670 train_time:91361ms step_avg:97.71ms +step:936/1670 train_time:91458ms step_avg:97.71ms +step:937/1670 train_time:91556ms step_avg:97.71ms +step:938/1670 train_time:91655ms step_avg:97.71ms +step:939/1670 train_time:91752ms step_avg:97.71ms +step:940/1670 train_time:91849ms step_avg:97.71ms +step:941/1670 train_time:91947ms step_avg:97.71ms +step:942/1670 train_time:92043ms step_avg:97.71ms +step:943/1670 train_time:92141ms step_avg:97.71ms +step:944/1670 train_time:92238ms step_avg:97.71ms +step:945/1670 train_time:92335ms step_avg:97.71ms +step:946/1670 train_time:92432ms step_avg:97.71ms +step:947/1670 train_time:92529ms step_avg:97.71ms +step:948/1670 train_time:92626ms step_avg:97.71ms +step:949/1670 train_time:92724ms step_avg:97.71ms +step:950/1670 train_time:92822ms step_avg:97.71ms +step:951/1670 train_time:92919ms step_avg:97.71ms +step:952/1670 train_time:93017ms step_avg:97.71ms +step:953/1670 train_time:93115ms step_avg:97.71ms +step:954/1670 train_time:93213ms step_avg:97.71ms +step:955/1670 train_time:93311ms step_avg:97.71ms +step:956/1670 train_time:93407ms step_avg:97.71ms +step:957/1670 train_time:93504ms step_avg:97.71ms +step:958/1670 train_time:93601ms step_avg:97.70ms +step:959/1670 train_time:93699ms step_avg:97.70ms +step:960/1670 train_time:93797ms step_avg:97.70ms +step:961/1670 train_time:93894ms step_avg:97.70ms +step:962/1670 train_time:93993ms step_avg:97.71ms +step:963/1670 train_time:94090ms step_avg:97.71ms +step:964/1670 train_time:94187ms step_avg:97.70ms +step:965/1670 train_time:94285ms step_avg:97.70ms +step:966/1670 train_time:94382ms step_avg:97.70ms +step:967/1670 train_time:94479ms step_avg:97.70ms +step:968/1670 train_time:94577ms step_avg:97.70ms +step:969/1670 train_time:94674ms step_avg:97.70ms +step:970/1670 train_time:94772ms step_avg:97.70ms +step:971/1670 train_time:94868ms step_avg:97.70ms +step:972/1670 train_time:94965ms step_avg:97.70ms +step:973/1670 train_time:95062ms step_avg:97.70ms +step:974/1670 train_time:95160ms step_avg:97.70ms +step:975/1670 train_time:95257ms step_avg:97.70ms +step:976/1670 train_time:95355ms 
step_avg:97.70ms +step:977/1670 train_time:95452ms step_avg:97.70ms +step:978/1670 train_time:95548ms step_avg:97.70ms +step:979/1670 train_time:95646ms step_avg:97.70ms +step:980/1670 train_time:95743ms step_avg:97.70ms +step:981/1670 train_time:95841ms step_avg:97.70ms +step:982/1670 train_time:95939ms step_avg:97.70ms +step:983/1670 train_time:96037ms step_avg:97.70ms +step:984/1670 train_time:96134ms step_avg:97.70ms +step:985/1670 train_time:96231ms step_avg:97.70ms +step:986/1670 train_time:96328ms step_avg:97.70ms +step:987/1670 train_time:96425ms step_avg:97.70ms +step:988/1670 train_time:96523ms step_avg:97.69ms +step:989/1670 train_time:96620ms step_avg:97.69ms +step:990/1670 train_time:96718ms step_avg:97.69ms +step:991/1670 train_time:96816ms step_avg:97.69ms +step:992/1670 train_time:96913ms step_avg:97.69ms +step:993/1670 train_time:97009ms step_avg:97.69ms +step:994/1670 train_time:97106ms step_avg:97.69ms +step:995/1670 train_time:97203ms step_avg:97.69ms +step:996/1670 train_time:97300ms step_avg:97.69ms +step:997/1670 train_time:97398ms step_avg:97.69ms +step:998/1670 train_time:97497ms step_avg:97.69ms +step:999/1670 train_time:97594ms step_avg:97.69ms +step:1000/1670 train_time:97692ms step_avg:97.69ms +step:1000/1670 val_loss:3.4785 train_time:97788ms step_avg:97.79ms +step:1001/1670 train_time:97810ms step_avg:97.71ms +step:1002/1670 train_time:97893ms step_avg:97.70ms +step:1003/1670 train_time:97994ms step_avg:97.70ms +step:1004/1670 train_time:98093ms step_avg:97.70ms +step:1005/1670 train_time:98190ms step_avg:97.70ms +step:1006/1670 train_time:98288ms step_avg:97.70ms +step:1007/1670 train_time:98384ms step_avg:97.70ms +step:1008/1670 train_time:98480ms step_avg:97.70ms +step:1009/1670 train_time:98576ms step_avg:97.70ms +step:1010/1670 train_time:98672ms step_avg:97.70ms +step:1011/1670 train_time:98770ms step_avg:97.70ms +step:1012/1670 train_time:98870ms step_avg:97.70ms +step:1013/1670 train_time:98969ms step_avg:97.70ms +step:1014/1670 train_time:99067ms step_avg:97.70ms +step:1015/1670 train_time:99164ms step_avg:97.70ms +step:1016/1670 train_time:99261ms step_avg:97.70ms +step:1017/1670 train_time:99357ms step_avg:97.70ms +step:1018/1670 train_time:99454ms step_avg:97.70ms +step:1019/1670 train_time:99551ms step_avg:97.69ms +step:1020/1670 train_time:99648ms step_avg:97.69ms +step:1021/1670 train_time:99746ms step_avg:97.69ms +step:1022/1670 train_time:99844ms step_avg:97.69ms +step:1023/1670 train_time:99941ms step_avg:97.69ms +step:1024/1670 train_time:100039ms step_avg:97.69ms +step:1025/1670 train_time:100137ms step_avg:97.69ms +step:1026/1670 train_time:100234ms step_avg:97.69ms +step:1027/1670 train_time:100331ms step_avg:97.69ms +step:1028/1670 train_time:100428ms step_avg:97.69ms +step:1029/1670 train_time:100525ms step_avg:97.69ms +step:1030/1670 train_time:100621ms step_avg:97.69ms +step:1031/1670 train_time:100717ms step_avg:97.69ms +step:1032/1670 train_time:100816ms step_avg:97.69ms +step:1033/1670 train_time:100915ms step_avg:97.69ms +step:1034/1670 train_time:101013ms step_avg:97.69ms +step:1035/1670 train_time:101112ms step_avg:97.69ms +step:1036/1670 train_time:101209ms step_avg:97.69ms +step:1037/1670 train_time:101308ms step_avg:97.69ms +step:1038/1670 train_time:101404ms step_avg:97.69ms +step:1039/1670 train_time:101500ms step_avg:97.69ms +step:1040/1670 train_time:101597ms step_avg:97.69ms +step:1041/1670 train_time:101693ms step_avg:97.69ms +step:1042/1670 train_time:101791ms step_avg:97.69ms +step:1043/1670 train_time:101889ms 
step_avg:97.69ms +step:1044/1670 train_time:101988ms step_avg:97.69ms +step:1045/1670 train_time:102085ms step_avg:97.69ms +step:1046/1670 train_time:102182ms step_avg:97.69ms +step:1047/1670 train_time:102279ms step_avg:97.69ms +step:1048/1670 train_time:102377ms step_avg:97.69ms +step:1049/1670 train_time:102475ms step_avg:97.69ms +step:1050/1670 train_time:102573ms step_avg:97.69ms +step:1051/1670 train_time:102670ms step_avg:97.69ms +step:1052/1670 train_time:102767ms step_avg:97.69ms +step:1053/1670 train_time:102866ms step_avg:97.69ms +step:1054/1670 train_time:102963ms step_avg:97.69ms +step:1055/1670 train_time:103060ms step_avg:97.69ms +step:1056/1670 train_time:103158ms step_avg:97.69ms +step:1057/1670 train_time:103255ms step_avg:97.69ms +step:1058/1670 train_time:103352ms step_avg:97.69ms +step:1059/1670 train_time:103449ms step_avg:97.69ms +step:1060/1670 train_time:103547ms step_avg:97.69ms +step:1061/1670 train_time:103644ms step_avg:97.69ms +step:1062/1670 train_time:103896ms step_avg:97.83ms +step:1063/1670 train_time:104054ms step_avg:97.89ms +step:1064/1670 train_time:104149ms step_avg:97.88ms +step:1065/1670 train_time:104245ms step_avg:97.88ms +step:1066/1670 train_time:104341ms step_avg:97.88ms +step:1067/1670 train_time:104436ms step_avg:97.88ms +step:1068/1670 train_time:104532ms step_avg:97.88ms +step:1069/1670 train_time:104628ms step_avg:97.87ms +step:1070/1670 train_time:104724ms step_avg:97.87ms +step:1071/1670 train_time:104820ms step_avg:97.87ms +step:1072/1670 train_time:104920ms step_avg:97.87ms +step:1073/1670 train_time:105022ms step_avg:97.88ms +step:1074/1670 train_time:105120ms step_avg:97.88ms +step:1075/1670 train_time:105218ms step_avg:97.88ms +step:1076/1670 train_time:105316ms step_avg:97.88ms +step:1077/1670 train_time:105414ms step_avg:97.88ms +step:1078/1670 train_time:105510ms step_avg:97.88ms +step:1079/1670 train_time:105607ms step_avg:97.87ms +step:1080/1670 train_time:105704ms step_avg:97.87ms +step:1081/1670 train_time:105800ms step_avg:97.87ms +step:1082/1670 train_time:105897ms step_avg:97.87ms +step:1083/1670 train_time:105997ms step_avg:97.87ms +step:1084/1670 train_time:106097ms step_avg:97.88ms +step:1085/1670 train_time:106195ms step_avg:97.88ms +step:1086/1670 train_time:106293ms step_avg:97.88ms +step:1087/1670 train_time:106390ms step_avg:97.88ms +step:1088/1670 train_time:106487ms step_avg:97.87ms +step:1089/1670 train_time:106584ms step_avg:97.87ms +step:1090/1670 train_time:106680ms step_avg:97.87ms +step:1091/1670 train_time:106777ms step_avg:97.87ms +step:1092/1670 train_time:106874ms step_avg:97.87ms +step:1093/1670 train_time:106972ms step_avg:97.87ms +step:1094/1670 train_time:107071ms step_avg:97.87ms +step:1095/1670 train_time:107170ms step_avg:97.87ms +step:1096/1670 train_time:107267ms step_avg:97.87ms +step:1097/1670 train_time:107364ms step_avg:97.87ms +step:1098/1670 train_time:107460ms step_avg:97.87ms +step:1099/1670 train_time:107558ms step_avg:97.87ms +step:1100/1670 train_time:107655ms step_avg:97.87ms +step:1101/1670 train_time:107752ms step_avg:97.87ms +step:1102/1670 train_time:107850ms step_avg:97.87ms +step:1103/1670 train_time:107947ms step_avg:97.87ms +step:1104/1670 train_time:108044ms step_avg:97.87ms +step:1105/1670 train_time:108142ms step_avg:97.87ms +step:1106/1670 train_time:108241ms step_avg:97.87ms +step:1107/1670 train_time:108339ms step_avg:97.87ms +step:1108/1670 train_time:108436ms step_avg:97.87ms +step:1109/1670 train_time:108533ms step_avg:97.87ms +step:1110/1670 train_time:108631ms 
step_avg:97.87ms +step:1111/1670 train_time:108727ms step_avg:97.86ms +step:1112/1670 train_time:108825ms step_avg:97.86ms +step:1113/1670 train_time:108921ms step_avg:97.86ms +step:1114/1670 train_time:109019ms step_avg:97.86ms +step:1115/1670 train_time:109117ms step_avg:97.86ms +step:1116/1670 train_time:109216ms step_avg:97.86ms +step:1117/1670 train_time:109315ms step_avg:97.87ms +step:1118/1670 train_time:109413ms step_avg:97.87ms +step:1119/1670 train_time:109512ms step_avg:97.87ms +step:1120/1670 train_time:109610ms step_avg:97.87ms +step:1121/1670 train_time:109708ms step_avg:97.87ms +step:1122/1670 train_time:109805ms step_avg:97.87ms +step:1123/1670 train_time:109903ms step_avg:97.87ms +step:1124/1670 train_time:110001ms step_avg:97.87ms +step:1125/1670 train_time:110099ms step_avg:97.87ms +step:1125/1670 val_loss:3.4246 train_time:110197ms step_avg:97.95ms +step:1126/1670 train_time:110219ms step_avg:97.89ms +step:1127/1670 train_time:110310ms step_avg:97.88ms +step:1128/1670 train_time:110408ms step_avg:97.88ms +step:1129/1670 train_time:110506ms step_avg:97.88ms +step:1130/1670 train_time:110602ms step_avg:97.88ms +step:1131/1670 train_time:110698ms step_avg:97.88ms +step:1132/1670 train_time:110795ms step_avg:97.88ms +step:1133/1670 train_time:110892ms step_avg:97.87ms +step:1134/1670 train_time:110989ms step_avg:97.87ms +step:1135/1670 train_time:111087ms step_avg:97.87ms +step:1136/1670 train_time:111187ms step_avg:97.88ms +step:1137/1670 train_time:111288ms step_avg:97.88ms +step:1138/1670 train_time:111388ms step_avg:97.88ms +step:1139/1670 train_time:111487ms step_avg:97.88ms +step:1140/1670 train_time:111584ms step_avg:97.88ms +step:1141/1670 train_time:111681ms step_avg:97.88ms +step:1142/1670 train_time:111776ms step_avg:97.88ms +step:1143/1670 train_time:111873ms step_avg:97.88ms +step:1144/1670 train_time:111971ms step_avg:97.88ms +step:1145/1670 train_time:112070ms step_avg:97.88ms +step:1146/1670 train_time:112171ms step_avg:97.88ms +step:1147/1670 train_time:112271ms step_avg:97.88ms +step:1148/1670 train_time:112371ms step_avg:97.88ms +step:1149/1670 train_time:112471ms step_avg:97.89ms +step:1150/1670 train_time:112571ms step_avg:97.89ms +step:1151/1670 train_time:112670ms step_avg:97.89ms +step:1152/1670 train_time:112770ms step_avg:97.89ms +step:1153/1670 train_time:112867ms step_avg:97.89ms +step:1154/1670 train_time:112964ms step_avg:97.89ms +step:1155/1670 train_time:113061ms step_avg:97.89ms +step:1156/1670 train_time:113158ms step_avg:97.89ms +step:1157/1670 train_time:113256ms step_avg:97.89ms +step:1158/1670 train_time:113356ms step_avg:97.89ms +step:1159/1670 train_time:113456ms step_avg:97.89ms +step:1160/1670 train_time:113556ms step_avg:97.89ms +step:1161/1670 train_time:113656ms step_avg:97.89ms +step:1162/1670 train_time:113756ms step_avg:97.90ms +step:1163/1670 train_time:113854ms step_avg:97.90ms +step:1164/1670 train_time:113952ms step_avg:97.90ms +step:1165/1670 train_time:114050ms step_avg:97.90ms +step:1166/1670 train_time:114148ms step_avg:97.90ms +step:1167/1670 train_time:114246ms step_avg:97.90ms +step:1168/1670 train_time:114343ms step_avg:97.90ms +step:1169/1670 train_time:114440ms step_avg:97.90ms +step:1170/1670 train_time:114538ms step_avg:97.90ms +step:1171/1670 train_time:114637ms step_avg:97.90ms +step:1172/1670 train_time:114735ms step_avg:97.90ms +step:1173/1670 train_time:114834ms step_avg:97.90ms +step:1174/1670 train_time:114931ms step_avg:97.90ms +step:1175/1670 train_time:115030ms step_avg:97.90ms +step:1176/1670 
train_time:115128ms step_avg:97.90ms +step:1177/1670 train_time:115227ms step_avg:97.90ms +step:1178/1670 train_time:115325ms step_avg:97.90ms +step:1179/1670 train_time:115423ms step_avg:97.90ms +step:1180/1670 train_time:115520ms step_avg:97.90ms +step:1181/1670 train_time:115618ms step_avg:97.90ms +step:1182/1670 train_time:115715ms step_avg:97.90ms +step:1183/1670 train_time:115813ms step_avg:97.90ms +step:1184/1670 train_time:115911ms step_avg:97.90ms +step:1185/1670 train_time:116009ms step_avg:97.90ms +step:1186/1670 train_time:116106ms step_avg:97.90ms +step:1187/1670 train_time:116204ms step_avg:97.90ms +step:1188/1670 train_time:116301ms step_avg:97.90ms +step:1189/1670 train_time:116399ms step_avg:97.90ms +step:1190/1670 train_time:116497ms step_avg:97.90ms +step:1191/1670 train_time:116596ms step_avg:97.90ms +step:1192/1670 train_time:116694ms step_avg:97.90ms +step:1193/1670 train_time:116792ms step_avg:97.90ms +step:1194/1670 train_time:116890ms step_avg:97.90ms +step:1195/1670 train_time:116987ms step_avg:97.90ms +step:1196/1670 train_time:117084ms step_avg:97.90ms +step:1197/1670 train_time:117182ms step_avg:97.90ms +step:1198/1670 train_time:117280ms step_avg:97.90ms +step:1199/1670 train_time:117378ms step_avg:97.90ms +step:1200/1670 train_time:117476ms step_avg:97.90ms +step:1201/1670 train_time:117575ms step_avg:97.90ms +step:1202/1670 train_time:117673ms step_avg:97.90ms +step:1203/1670 train_time:117772ms step_avg:97.90ms +step:1204/1670 train_time:117869ms step_avg:97.90ms +step:1205/1670 train_time:117966ms step_avg:97.90ms +step:1206/1670 train_time:118064ms step_avg:97.90ms +step:1207/1670 train_time:118162ms step_avg:97.90ms +step:1208/1670 train_time:118260ms step_avg:97.90ms +step:1209/1670 train_time:118357ms step_avg:97.90ms +step:1210/1670 train_time:118455ms step_avg:97.90ms +step:1211/1670 train_time:118554ms step_avg:97.90ms +step:1212/1670 train_time:118652ms step_avg:97.90ms +step:1213/1670 train_time:118750ms step_avg:97.90ms +step:1214/1670 train_time:118847ms step_avg:97.90ms +step:1215/1670 train_time:118944ms step_avg:97.90ms +step:1216/1670 train_time:119041ms step_avg:97.90ms +step:1217/1670 train_time:119139ms step_avg:97.90ms +step:1218/1670 train_time:119236ms step_avg:97.90ms +step:1219/1670 train_time:119335ms step_avg:97.90ms +step:1220/1670 train_time:119434ms step_avg:97.90ms +step:1221/1670 train_time:119533ms step_avg:97.90ms +step:1222/1670 train_time:119631ms step_avg:97.90ms +step:1223/1670 train_time:119730ms step_avg:97.90ms +step:1224/1670 train_time:119828ms step_avg:97.90ms +step:1225/1670 train_time:119927ms step_avg:97.90ms +step:1226/1670 train_time:120025ms step_avg:97.90ms +step:1227/1670 train_time:120123ms step_avg:97.90ms +step:1228/1670 train_time:120220ms step_avg:97.90ms +step:1229/1670 train_time:120317ms step_avg:97.90ms +step:1230/1670 train_time:120414ms step_avg:97.90ms +step:1231/1670 train_time:120513ms step_avg:97.90ms +step:1232/1670 train_time:120613ms step_avg:97.90ms +step:1233/1670 train_time:120711ms step_avg:97.90ms +step:1234/1670 train_time:120809ms step_avg:97.90ms +step:1235/1670 train_time:120908ms step_avg:97.90ms +step:1236/1670 train_time:121006ms step_avg:97.90ms +step:1237/1670 train_time:121104ms step_avg:97.90ms +step:1238/1670 train_time:121202ms step_avg:97.90ms +step:1239/1670 train_time:121300ms step_avg:97.90ms +step:1240/1670 train_time:121397ms step_avg:97.90ms +step:1241/1670 train_time:121494ms step_avg:97.90ms +step:1242/1670 train_time:121592ms step_avg:97.90ms +step:1243/1670 
train_time:121690ms step_avg:97.90ms +step:1244/1670 train_time:121787ms step_avg:97.90ms +step:1245/1670 train_time:121885ms step_avg:97.90ms +step:1246/1670 train_time:121983ms step_avg:97.90ms +step:1247/1670 train_time:122081ms step_avg:97.90ms +step:1248/1670 train_time:122178ms step_avg:97.90ms +step:1249/1670 train_time:122276ms step_avg:97.90ms +step:1250/1670 train_time:122374ms step_avg:97.90ms +step:1250/1670 val_loss:3.3811 train_time:122471ms step_avg:97.98ms +step:1251/1670 train_time:122493ms step_avg:97.92ms +step:1252/1670 train_time:122579ms step_avg:97.91ms +step:1253/1670 train_time:122678ms step_avg:97.91ms +step:1254/1670 train_time:122776ms step_avg:97.91ms +step:1255/1670 train_time:122875ms step_avg:97.91ms +step:1256/1670 train_time:122971ms step_avg:97.91ms +step:1257/1670 train_time:123068ms step_avg:97.91ms +step:1258/1670 train_time:123165ms step_avg:97.91ms +step:1259/1670 train_time:123261ms step_avg:97.90ms +step:1260/1670 train_time:123358ms step_avg:97.90ms +step:1261/1670 train_time:123458ms step_avg:97.90ms +step:1262/1670 train_time:123560ms step_avg:97.91ms +step:1263/1670 train_time:123660ms step_avg:97.91ms +step:1264/1670 train_time:123758ms step_avg:97.91ms +step:1265/1670 train_time:123856ms step_avg:97.91ms +step:1266/1670 train_time:123955ms step_avg:97.91ms +step:1267/1670 train_time:124053ms step_avg:97.91ms +step:1268/1670 train_time:124151ms step_avg:97.91ms +step:1269/1670 train_time:124247ms step_avg:97.91ms +step:1270/1670 train_time:124344ms step_avg:97.91ms +step:1271/1670 train_time:124442ms step_avg:97.91ms +step:1272/1670 train_time:124541ms step_avg:97.91ms +step:1273/1670 train_time:124640ms step_avg:97.91ms +step:1274/1670 train_time:125029ms step_avg:98.14ms +step:1275/1670 train_time:125103ms step_avg:98.12ms +step:1276/1670 train_time:125199ms step_avg:98.12ms +step:1277/1670 train_time:125295ms step_avg:98.12ms +step:1278/1670 train_time:125392ms step_avg:98.12ms +step:1279/1670 train_time:125488ms step_avg:98.11ms +step:1280/1670 train_time:125585ms step_avg:98.11ms +step:1281/1670 train_time:125681ms step_avg:98.11ms +step:1282/1670 train_time:125779ms step_avg:98.11ms +step:1283/1670 train_time:125876ms step_avg:98.11ms +step:1284/1670 train_time:125982ms step_avg:98.12ms +step:1285/1670 train_time:126086ms step_avg:98.12ms +step:1286/1670 train_time:126184ms step_avg:98.12ms +step:1287/1670 train_time:126281ms step_avg:98.12ms +step:1288/1670 train_time:126379ms step_avg:98.12ms +step:1289/1670 train_time:126477ms step_avg:98.12ms +step:1290/1670 train_time:126575ms step_avg:98.12ms +step:1291/1670 train_time:126672ms step_avg:98.12ms +step:1292/1670 train_time:126769ms step_avg:98.12ms +step:1293/1670 train_time:126866ms step_avg:98.12ms +step:1294/1670 train_time:126966ms step_avg:98.12ms +step:1295/1670 train_time:127065ms step_avg:98.12ms +step:1296/1670 train_time:127163ms step_avg:98.12ms +step:1297/1670 train_time:127261ms step_avg:98.12ms +step:1298/1670 train_time:127359ms step_avg:98.12ms +step:1299/1670 train_time:127457ms step_avg:98.12ms +step:1300/1670 train_time:127555ms step_avg:98.12ms +step:1301/1670 train_time:127654ms step_avg:98.12ms +step:1302/1670 train_time:127750ms step_avg:98.12ms +step:1303/1670 train_time:127847ms step_avg:98.12ms +step:1304/1670 train_time:127947ms step_avg:98.12ms +step:1305/1670 train_time:128044ms step_avg:98.12ms +step:1306/1670 train_time:128143ms step_avg:98.12ms +step:1307/1670 train_time:128240ms step_avg:98.12ms +step:1308/1670 train_time:128338ms step_avg:98.12ms 
+step:1309/1670 train_time:128436ms step_avg:98.12ms +step:1310/1670 train_time:128535ms step_avg:98.12ms +step:1311/1670 train_time:128635ms step_avg:98.12ms +step:1312/1670 train_time:128732ms step_avg:98.12ms +step:1313/1670 train_time:128830ms step_avg:98.12ms +step:1314/1670 train_time:128930ms step_avg:98.12ms +step:1315/1670 train_time:129031ms step_avg:98.12ms +step:1316/1670 train_time:129130ms step_avg:98.12ms +step:1317/1670 train_time:129228ms step_avg:98.12ms +step:1318/1670 train_time:129327ms step_avg:98.12ms +step:1319/1670 train_time:129424ms step_avg:98.12ms +step:1320/1670 train_time:129522ms step_avg:98.12ms +step:1321/1670 train_time:129621ms step_avg:98.12ms +step:1322/1670 train_time:129719ms step_avg:98.12ms +step:1323/1670 train_time:129819ms step_avg:98.12ms +step:1324/1670 train_time:129918ms step_avg:98.13ms +step:1325/1670 train_time:130018ms step_avg:98.13ms +step:1326/1670 train_time:130117ms step_avg:98.13ms +step:1327/1670 train_time:130216ms step_avg:98.13ms +step:1328/1670 train_time:130316ms step_avg:98.13ms +step:1329/1670 train_time:130416ms step_avg:98.13ms +step:1330/1670 train_time:130516ms step_avg:98.13ms +step:1331/1670 train_time:130616ms step_avg:98.13ms +step:1332/1670 train_time:130714ms step_avg:98.13ms +step:1333/1670 train_time:130812ms step_avg:98.13ms +step:1334/1670 train_time:130910ms step_avg:98.13ms +step:1335/1670 train_time:131008ms step_avg:98.13ms +step:1336/1670 train_time:131107ms step_avg:98.13ms +step:1337/1670 train_time:131205ms step_avg:98.13ms +step:1338/1670 train_time:131303ms step_avg:98.13ms +step:1339/1670 train_time:131401ms step_avg:98.13ms +step:1340/1670 train_time:131500ms step_avg:98.13ms +step:1341/1670 train_time:131598ms step_avg:98.13ms +step:1342/1670 train_time:131695ms step_avg:98.13ms +step:1343/1670 train_time:131794ms step_avg:98.13ms +step:1344/1670 train_time:131892ms step_avg:98.13ms +step:1345/1670 train_time:131989ms step_avg:98.13ms +step:1346/1670 train_time:132087ms step_avg:98.13ms +step:1347/1670 train_time:132186ms step_avg:98.13ms +step:1348/1670 train_time:132283ms step_avg:98.13ms +step:1349/1670 train_time:132382ms step_avg:98.13ms +step:1350/1670 train_time:132480ms step_avg:98.13ms +step:1351/1670 train_time:132579ms step_avg:98.13ms +step:1352/1670 train_time:132677ms step_avg:98.13ms +step:1353/1670 train_time:132775ms step_avg:98.13ms +step:1354/1670 train_time:132874ms step_avg:98.13ms +step:1355/1670 train_time:132973ms step_avg:98.13ms +step:1356/1670 train_time:133072ms step_avg:98.14ms +step:1357/1670 train_time:133173ms step_avg:98.14ms +step:1358/1670 train_time:133272ms step_avg:98.14ms +step:1359/1670 train_time:133373ms step_avg:98.14ms +step:1360/1670 train_time:133472ms step_avg:98.14ms +step:1361/1670 train_time:133570ms step_avg:98.14ms +step:1362/1670 train_time:133668ms step_avg:98.14ms +step:1363/1670 train_time:133765ms step_avg:98.14ms +step:1364/1670 train_time:133863ms step_avg:98.14ms +step:1365/1670 train_time:133961ms step_avg:98.14ms +step:1366/1670 train_time:134060ms step_avg:98.14ms +step:1367/1670 train_time:134159ms step_avg:98.14ms +step:1368/1670 train_time:134259ms step_avg:98.14ms +step:1369/1670 train_time:134357ms step_avg:98.14ms +step:1370/1670 train_time:134457ms step_avg:98.14ms +step:1371/1670 train_time:134556ms step_avg:98.14ms +step:1372/1670 train_time:134655ms step_avg:98.14ms +step:1373/1670 train_time:134754ms step_avg:98.15ms +step:1374/1670 train_time:134852ms step_avg:98.15ms +step:1375/1670 train_time:134950ms step_avg:98.15ms 
+step:1375/1670 val_loss:3.3439 train_time:135046ms step_avg:98.22ms +step:1376/1670 train_time:135068ms step_avg:98.16ms +step:1377/1670 train_time:135154ms step_avg:98.15ms +step:1378/1670 train_time:135252ms step_avg:98.15ms +step:1379/1670 train_time:135350ms step_avg:98.15ms +step:1380/1670 train_time:135448ms step_avg:98.15ms +step:1381/1670 train_time:135545ms step_avg:98.15ms +step:1382/1670 train_time:135641ms step_avg:98.15ms +step:1383/1670 train_time:135740ms step_avg:98.15ms +step:1384/1670 train_time:135837ms step_avg:98.15ms +step:1385/1670 train_time:135934ms step_avg:98.15ms +step:1386/1670 train_time:136033ms step_avg:98.15ms +step:1387/1670 train_time:136134ms step_avg:98.15ms +step:1388/1670 train_time:136233ms step_avg:98.15ms +step:1389/1670 train_time:136331ms step_avg:98.15ms +step:1390/1670 train_time:136429ms step_avg:98.15ms +step:1391/1670 train_time:136527ms step_avg:98.15ms +step:1392/1670 train_time:136623ms step_avg:98.15ms +step:1393/1670 train_time:136720ms step_avg:98.15ms +step:1394/1670 train_time:136818ms step_avg:98.15ms +step:1395/1670 train_time:136916ms step_avg:98.15ms +step:1396/1670 train_time:137014ms step_avg:98.15ms +step:1397/1670 train_time:137113ms step_avg:98.15ms +step:1398/1670 train_time:137213ms step_avg:98.15ms +step:1399/1670 train_time:137312ms step_avg:98.15ms +step:1400/1670 train_time:137410ms step_avg:98.15ms +step:1401/1670 train_time:137507ms step_avg:98.15ms +step:1402/1670 train_time:137604ms step_avg:98.15ms +step:1403/1670 train_time:137701ms step_avg:98.15ms +step:1404/1670 train_time:137798ms step_avg:98.15ms +step:1405/1670 train_time:137895ms step_avg:98.15ms +step:1406/1670 train_time:137993ms step_avg:98.15ms +step:1407/1670 train_time:138092ms step_avg:98.15ms +step:1408/1670 train_time:138190ms step_avg:98.15ms +step:1409/1670 train_time:138288ms step_avg:98.15ms +step:1410/1670 train_time:138386ms step_avg:98.15ms +step:1411/1670 train_time:138485ms step_avg:98.15ms +step:1412/1670 train_time:138582ms step_avg:98.15ms +step:1413/1670 train_time:138680ms step_avg:98.15ms +step:1414/1670 train_time:138777ms step_avg:98.15ms +step:1415/1670 train_time:138875ms step_avg:98.15ms +step:1416/1670 train_time:138973ms step_avg:98.14ms +step:1417/1670 train_time:139071ms step_avg:98.14ms +step:1418/1670 train_time:139169ms step_avg:98.14ms +step:1419/1670 train_time:139267ms step_avg:98.14ms +step:1420/1670 train_time:139365ms step_avg:98.14ms +step:1421/1670 train_time:139464ms step_avg:98.14ms +step:1422/1670 train_time:139562ms step_avg:98.15ms +step:1423/1670 train_time:139660ms step_avg:98.15ms +step:1424/1670 train_time:139758ms step_avg:98.14ms +step:1425/1670 train_time:139856ms step_avg:98.14ms +step:1426/1670 train_time:139954ms step_avg:98.14ms +step:1427/1670 train_time:140052ms step_avg:98.14ms +step:1428/1670 train_time:140151ms step_avg:98.14ms +step:1429/1670 train_time:140250ms step_avg:98.15ms +step:1430/1670 train_time:140348ms step_avg:98.15ms +step:1431/1670 train_time:140446ms step_avg:98.15ms +step:1432/1670 train_time:140543ms step_avg:98.14ms +step:1433/1670 train_time:140641ms step_avg:98.14ms +step:1434/1670 train_time:140740ms step_avg:98.14ms +step:1435/1670 train_time:140839ms step_avg:98.15ms +step:1436/1670 train_time:140937ms step_avg:98.15ms +step:1437/1670 train_time:141036ms step_avg:98.15ms +step:1438/1670 train_time:141136ms step_avg:98.15ms +step:1439/1670 train_time:141235ms step_avg:98.15ms +step:1440/1670 train_time:141335ms step_avg:98.15ms +step:1441/1670 train_time:141434ms 
step_avg:98.15ms +step:1442/1670 train_time:141532ms step_avg:98.15ms +step:1443/1670 train_time:141630ms step_avg:98.15ms +step:1444/1670 train_time:141728ms step_avg:98.15ms +step:1445/1670 train_time:141828ms step_avg:98.15ms +step:1446/1670 train_time:141926ms step_avg:98.15ms +step:1447/1670 train_time:142026ms step_avg:98.15ms +step:1448/1670 train_time:142123ms step_avg:98.15ms +step:1449/1670 train_time:142223ms step_avg:98.15ms +step:1450/1670 train_time:142323ms step_avg:98.15ms +step:1451/1670 train_time:142423ms step_avg:98.15ms +step:1452/1670 train_time:142522ms step_avg:98.16ms +step:1453/1670 train_time:142621ms step_avg:98.16ms +step:1454/1670 train_time:142720ms step_avg:98.16ms +step:1455/1670 train_time:142820ms step_avg:98.16ms +step:1456/1670 train_time:142919ms step_avg:98.16ms +step:1457/1670 train_time:143017ms step_avg:98.16ms +step:1458/1670 train_time:143115ms step_avg:98.16ms +step:1459/1670 train_time:143213ms step_avg:98.16ms +step:1460/1670 train_time:143312ms step_avg:98.16ms +step:1461/1670 train_time:143409ms step_avg:98.16ms +step:1462/1670 train_time:143507ms step_avg:98.16ms +step:1463/1670 train_time:143604ms step_avg:98.16ms +step:1464/1670 train_time:143703ms step_avg:98.16ms +step:1465/1670 train_time:143802ms step_avg:98.16ms +step:1466/1670 train_time:143901ms step_avg:98.16ms +step:1467/1670 train_time:143999ms step_avg:98.16ms +step:1468/1670 train_time:144098ms step_avg:98.16ms +step:1469/1670 train_time:144196ms step_avg:98.16ms +step:1470/1670 train_time:144294ms step_avg:98.16ms +step:1471/1670 train_time:144392ms step_avg:98.16ms +step:1472/1670 train_time:144491ms step_avg:98.16ms +step:1473/1670 train_time:144588ms step_avg:98.16ms +step:1474/1670 train_time:144685ms step_avg:98.16ms +step:1475/1670 train_time:144783ms step_avg:98.16ms +step:1476/1670 train_time:144882ms step_avg:98.16ms +step:1477/1670 train_time:144980ms step_avg:98.16ms +step:1478/1670 train_time:145079ms step_avg:98.16ms +step:1479/1670 train_time:145177ms step_avg:98.16ms +step:1480/1670 train_time:145277ms step_avg:98.16ms +step:1481/1670 train_time:145377ms step_avg:98.16ms +step:1482/1670 train_time:145476ms step_avg:98.16ms +step:1483/1670 train_time:145574ms step_avg:98.16ms +step:1484/1670 train_time:145673ms step_avg:98.16ms +step:1485/1670 train_time:146044ms step_avg:98.35ms +step:1486/1670 train_time:146118ms step_avg:98.33ms +step:1487/1670 train_time:146214ms step_avg:98.33ms +step:1488/1670 train_time:146310ms step_avg:98.33ms +step:1489/1670 train_time:146406ms step_avg:98.33ms +step:1490/1670 train_time:146503ms step_avg:98.32ms +step:1491/1670 train_time:146600ms step_avg:98.32ms +step:1492/1670 train_time:146698ms step_avg:98.32ms +step:1493/1670 train_time:146795ms step_avg:98.32ms +step:1494/1670 train_time:146892ms step_avg:98.32ms +step:1495/1670 train_time:146996ms step_avg:98.32ms +step:1496/1670 train_time:147098ms step_avg:98.33ms +step:1497/1670 train_time:147197ms step_avg:98.33ms +step:1498/1670 train_time:147297ms step_avg:98.33ms +step:1499/1670 train_time:147397ms step_avg:98.33ms +step:1500/1670 train_time:147494ms step_avg:98.33ms +step:1500/1670 val_loss:3.3119 train_time:147591ms step_avg:98.39ms +step:1501/1670 train_time:147612ms step_avg:98.34ms +step:1502/1670 train_time:147699ms step_avg:98.33ms +step:1503/1670 train_time:147800ms step_avg:98.34ms +step:1504/1670 train_time:147898ms step_avg:98.34ms +step:1505/1670 train_time:147996ms step_avg:98.34ms +step:1506/1670 train_time:148092ms step_avg:98.33ms +step:1507/1670 
train_time:148189ms step_avg:98.33ms +step:1508/1670 train_time:148286ms step_avg:98.33ms +step:1509/1670 train_time:148384ms step_avg:98.33ms +step:1510/1670 train_time:148481ms step_avg:98.33ms +step:1511/1670 train_time:148579ms step_avg:98.33ms +step:1512/1670 train_time:148679ms step_avg:98.33ms +step:1513/1670 train_time:148779ms step_avg:98.33ms +step:1514/1670 train_time:148877ms step_avg:98.33ms +step:1515/1670 train_time:148975ms step_avg:98.33ms +step:1516/1670 train_time:149072ms step_avg:98.33ms +step:1517/1670 train_time:149170ms step_avg:98.33ms +step:1518/1670 train_time:149267ms step_avg:98.33ms +step:1519/1670 train_time:149365ms step_avg:98.33ms +step:1520/1670 train_time:149463ms step_avg:98.33ms +step:1521/1670 train_time:149561ms step_avg:98.33ms +step:1522/1670 train_time:149660ms step_avg:98.33ms +step:1523/1670 train_time:149759ms step_avg:98.33ms +step:1524/1670 train_time:149859ms step_avg:98.33ms +step:1525/1670 train_time:149957ms step_avg:98.33ms +step:1526/1670 train_time:150055ms step_avg:98.33ms +step:1527/1670 train_time:150153ms step_avg:98.33ms +step:1528/1670 train_time:150250ms step_avg:98.33ms +step:1529/1670 train_time:150348ms step_avg:98.33ms +step:1530/1670 train_time:150445ms step_avg:98.33ms +step:1531/1670 train_time:150544ms step_avg:98.33ms +step:1532/1670 train_time:150643ms step_avg:98.33ms +step:1533/1670 train_time:150742ms step_avg:98.33ms +step:1534/1670 train_time:150840ms step_avg:98.33ms +step:1535/1670 train_time:150939ms step_avg:98.33ms +step:1536/1670 train_time:151037ms step_avg:98.33ms +step:1537/1670 train_time:151134ms step_avg:98.33ms +step:1538/1670 train_time:151232ms step_avg:98.33ms +step:1539/1670 train_time:151331ms step_avg:98.33ms +step:1540/1670 train_time:151428ms step_avg:98.33ms +step:1541/1670 train_time:151526ms step_avg:98.33ms +step:1542/1670 train_time:151625ms step_avg:98.33ms +step:1543/1670 train_time:151725ms step_avg:98.33ms +step:1544/1670 train_time:151825ms step_avg:98.33ms +step:1545/1670 train_time:151925ms step_avg:98.33ms +step:1546/1670 train_time:152024ms step_avg:98.33ms +step:1547/1670 train_time:152123ms step_avg:98.33ms +step:1548/1670 train_time:152222ms step_avg:98.33ms +step:1549/1670 train_time:152321ms step_avg:98.33ms +step:1550/1670 train_time:152417ms step_avg:98.33ms +step:1551/1670 train_time:152515ms step_avg:98.33ms +step:1552/1670 train_time:152614ms step_avg:98.33ms +step:1553/1670 train_time:152712ms step_avg:98.33ms +step:1554/1670 train_time:152813ms step_avg:98.34ms +step:1555/1670 train_time:152913ms step_avg:98.34ms +step:1556/1670 train_time:153013ms step_avg:98.34ms +step:1557/1670 train_time:153113ms step_avg:98.34ms +step:1558/1670 train_time:153212ms step_avg:98.34ms +step:1559/1670 train_time:153313ms step_avg:98.34ms +step:1560/1670 train_time:153412ms step_avg:98.34ms +step:1561/1670 train_time:153510ms step_avg:98.34ms +step:1562/1670 train_time:153608ms step_avg:98.34ms +step:1563/1670 train_time:153707ms step_avg:98.34ms +step:1564/1670 train_time:153807ms step_avg:98.34ms +step:1565/1670 train_time:153906ms step_avg:98.34ms +step:1566/1670 train_time:154006ms step_avg:98.34ms +step:1567/1670 train_time:154106ms step_avg:98.34ms +step:1568/1670 train_time:154203ms step_avg:98.34ms +step:1569/1670 train_time:154301ms step_avg:98.34ms +step:1570/1670 train_time:154399ms step_avg:98.34ms +step:1571/1670 train_time:154496ms step_avg:98.34ms +step:1572/1670 train_time:154594ms step_avg:98.34ms +step:1573/1670 train_time:154692ms step_avg:98.34ms +step:1574/1670 
train_time:154791ms step_avg:98.34ms +step:1575/1670 train_time:154890ms step_avg:98.34ms +step:1576/1670 train_time:154989ms step_avg:98.34ms +step:1577/1670 train_time:155088ms step_avg:98.34ms +step:1578/1670 train_time:155187ms step_avg:98.34ms +step:1579/1670 train_time:155286ms step_avg:98.34ms +step:1580/1670 train_time:155385ms step_avg:98.34ms +step:1581/1670 train_time:155483ms step_avg:98.34ms +step:1582/1670 train_time:155581ms step_avg:98.34ms +step:1583/1670 train_time:155678ms step_avg:98.34ms +step:1584/1670 train_time:155776ms step_avg:98.34ms +step:1585/1670 train_time:155874ms step_avg:98.34ms +step:1586/1670 train_time:155973ms step_avg:98.34ms +step:1587/1670 train_time:156072ms step_avg:98.34ms +step:1588/1670 train_time:156173ms step_avg:98.35ms +step:1589/1670 train_time:156273ms step_avg:98.35ms +step:1590/1670 train_time:156372ms step_avg:98.35ms +step:1591/1670 train_time:156471ms step_avg:98.35ms +step:1592/1670 train_time:156570ms step_avg:98.35ms +step:1593/1670 train_time:156669ms step_avg:98.35ms +step:1594/1670 train_time:156767ms step_avg:98.35ms +step:1595/1670 train_time:156868ms step_avg:98.35ms +step:1596/1670 train_time:156967ms step_avg:98.35ms +step:1597/1670 train_time:157067ms step_avg:98.35ms +step:1598/1670 train_time:157166ms step_avg:98.35ms +step:1599/1670 train_time:157266ms step_avg:98.35ms +step:1600/1670 train_time:157364ms step_avg:98.35ms +step:1601/1670 train_time:157463ms step_avg:98.35ms +step:1602/1670 train_time:157562ms step_avg:98.35ms +step:1603/1670 train_time:157659ms step_avg:98.35ms +step:1604/1670 train_time:157757ms step_avg:98.35ms +step:1605/1670 train_time:157854ms step_avg:98.35ms +step:1606/1670 train_time:157952ms step_avg:98.35ms +step:1607/1670 train_time:158051ms step_avg:98.35ms +step:1608/1670 train_time:158150ms step_avg:98.35ms +step:1609/1670 train_time:158250ms step_avg:98.35ms +step:1610/1670 train_time:158349ms step_avg:98.35ms +step:1611/1670 train_time:158451ms step_avg:98.36ms +step:1612/1670 train_time:158550ms step_avg:98.36ms +step:1613/1670 train_time:158648ms step_avg:98.36ms +step:1614/1670 train_time:158747ms step_avg:98.36ms +step:1615/1670 train_time:158846ms step_avg:98.36ms +step:1616/1670 train_time:158945ms step_avg:98.36ms +step:1617/1670 train_time:159045ms step_avg:98.36ms +step:1618/1670 train_time:159143ms step_avg:98.36ms +step:1619/1670 train_time:159241ms step_avg:98.36ms +step:1620/1670 train_time:159338ms step_avg:98.36ms +step:1621/1670 train_time:159436ms step_avg:98.36ms +step:1622/1670 train_time:159535ms step_avg:98.36ms +step:1623/1670 train_time:159634ms step_avg:98.36ms +step:1624/1670 train_time:159733ms step_avg:98.36ms +step:1625/1670 train_time:159832ms step_avg:98.36ms +step:1625/1670 val_loss:3.2852 train_time:159928ms step_avg:98.42ms +step:1626/1670 train_time:159952ms step_avg:98.37ms +step:1627/1670 train_time:160035ms step_avg:98.36ms +step:1628/1670 train_time:160135ms step_avg:98.36ms +step:1629/1670 train_time:160233ms step_avg:98.36ms +step:1630/1670 train_time:160330ms step_avg:98.36ms +step:1631/1670 train_time:160428ms step_avg:98.36ms +step:1632/1670 train_time:160525ms step_avg:98.36ms +step:1633/1670 train_time:160622ms step_avg:98.36ms +step:1634/1670 train_time:160719ms step_avg:98.36ms +step:1635/1670 train_time:160817ms step_avg:98.36ms +step:1636/1670 train_time:160917ms step_avg:98.36ms +step:1637/1670 train_time:161017ms step_avg:98.36ms +step:1638/1670 train_time:161116ms step_avg:98.36ms +step:1639/1670 train_time:161215ms step_avg:98.36ms 
+step:1640/1670 train_time:161314ms step_avg:98.36ms +step:1641/1670 train_time:161412ms step_avg:98.36ms +step:1642/1670 train_time:161512ms step_avg:98.36ms +step:1643/1670 train_time:161610ms step_avg:98.36ms +step:1644/1670 train_time:161708ms step_avg:98.36ms +step:1645/1670 train_time:161805ms step_avg:98.36ms +step:1646/1670 train_time:161904ms step_avg:98.36ms +step:1647/1670 train_time:162003ms step_avg:98.36ms +step:1648/1670 train_time:162103ms step_avg:98.36ms +step:1649/1670 train_time:162202ms step_avg:98.36ms +step:1650/1670 train_time:162300ms step_avg:98.36ms +step:1651/1670 train_time:162398ms step_avg:98.36ms +step:1652/1670 train_time:162497ms step_avg:98.36ms +step:1653/1670 train_time:162594ms step_avg:98.36ms +step:1654/1670 train_time:162692ms step_avg:98.36ms +step:1655/1670 train_time:162791ms step_avg:98.36ms +step:1656/1670 train_time:162890ms step_avg:98.36ms +step:1657/1670 train_time:162990ms step_avg:98.36ms +step:1658/1670 train_time:163090ms step_avg:98.37ms +step:1659/1670 train_time:163190ms step_avg:98.37ms +step:1660/1670 train_time:163290ms step_avg:98.37ms +step:1661/1670 train_time:163391ms step_avg:98.37ms +step:1662/1670 train_time:163489ms step_avg:98.37ms +step:1663/1670 train_time:163587ms step_avg:98.37ms +step:1664/1670 train_time:163684ms step_avg:98.37ms +step:1665/1670 train_time:163781ms step_avg:98.37ms +step:1666/1670 train_time:163878ms step_avg:98.37ms +step:1667/1670 train_time:163977ms step_avg:98.37ms +step:1668/1670 train_time:164075ms step_avg:98.37ms +step:1669/1670 train_time:164174ms step_avg:98.37ms +step:1670/1670 train_time:164273ms step_avg:98.37ms +step:1670/1670 val_loss:3.2771 train_time:164370ms step_avg:98.43ms +peak memory allocated: 34000 MiB reserved: 49676 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_803c2d15-4adb-42d2-958b-0b712cd9d062.txt b/records/090525_SkipMLPBlocks/comparison_803c2d15-4adb-42d2-958b-0b712cd9d062.txt new file mode 100644 index 000000000..0aa7a41df --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_803c2d15-4adb-42d2-958b-0b712cd9d062.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + 
scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, 
a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we 
use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral 
norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
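+ # The update below is sharded across ranks: within each param group, the params are walked in slices of world_size, and rank r owns params[base_i + r].
+ # Gradients are averaged across ranks with asynchronous reduce_scatter, leaving each rank with the full averaged gradient of only the params it owns.
+ # Each rank then applies the momentum + Newton-Schulz update to just those params (e.g. 24 same-shape MLP matrices over 8 ranks = 3 per rank),
+ # and the updated params are redistributed to all ranks with asynchronous all_gather.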
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of index {idx}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each
sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1670 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if 
master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:30:52 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 130W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 44C P0 131W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 91422 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 91423 C /usr/bin/python3 610MiB | +| 0 N/A N/A 91424 C /usr/bin/python3 610MiB | +| 0 N/A N/A 91425 C /usr/bin/python3 610MiB | +| 0 N/A N/A 91426 C /usr/bin/python3 610MiB | +| 0 N/A N/A 91427 C /usr/bin/python3 610MiB | +| 0 N/A N/A 91428 C /usr/bin/python3 610MiB | +| 0 N/A N/A 91429 C /usr/bin/python3 610MiB | +| 1 N/A N/A 91423 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 91424 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 91425 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 91426 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 91427 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 91428 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 91429 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:382ms step_avg:381.61ms +step:2/1670 train_time:403ms step_avg:201.51ms +step:3/1670 train_time:475ms step_avg:158.40ms +step:4/1670 train_time:569ms step_avg:142.15ms +step:5/1670 train_time:663ms step_avg:132.68ms +step:6/1670 train_time:758ms step_avg:126.35ms +step:7/1670 train_time:853ms step_avg:121.83ms +step:8/1670 
train_time:948ms step_avg:118.50ms +step:9/1670 train_time:1043ms step_avg:115.90ms +step:10/1670 train_time:1138ms step_avg:113.82ms +step:11/1670 train_time:1233ms step_avg:112.09ms +step:12/1670 train_time:1332ms step_avg:111.01ms +step:13/1670 train_time:1431ms step_avg:110.08ms +step:14/1670 train_time:1528ms step_avg:109.16ms +step:15/1670 train_time:1625ms step_avg:108.33ms +step:16/1670 train_time:1721ms step_avg:107.58ms +step:17/1670 train_time:1817ms step_avg:106.86ms +step:18/1670 train_time:1912ms step_avg:106.23ms +step:19/1670 train_time:2007ms step_avg:105.63ms +step:20/1670 train_time:2102ms step_avg:105.11ms +step:21/1670 train_time:2198ms step_avg:104.66ms +step:22/1670 train_time:2294ms step_avg:104.27ms +step:23/1670 train_time:2390ms step_avg:103.90ms +step:24/1670 train_time:2487ms step_avg:103.61ms +step:25/1670 train_time:2584ms step_avg:103.35ms +step:26/1670 train_time:2681ms step_avg:103.10ms +step:27/1670 train_time:2777ms step_avg:102.84ms +step:28/1670 train_time:2872ms step_avg:102.56ms +step:29/1670 train_time:2967ms step_avg:102.31ms +step:30/1670 train_time:3063ms step_avg:102.11ms +step:31/1670 train_time:3159ms step_avg:101.89ms +step:32/1670 train_time:3255ms step_avg:101.71ms +step:33/1670 train_time:3350ms step_avg:101.53ms +step:34/1670 train_time:3447ms step_avg:101.37ms +step:35/1670 train_time:3543ms step_avg:101.24ms +step:36/1670 train_time:3640ms step_avg:101.12ms +step:37/1670 train_time:3736ms step_avg:100.97ms +step:38/1670 train_time:3832ms step_avg:100.84ms +step:39/1670 train_time:3927ms step_avg:100.70ms +step:40/1670 train_time:4024ms step_avg:100.60ms +step:41/1670 train_time:4121ms step_avg:100.52ms +step:42/1670 train_time:4217ms step_avg:100.39ms +step:43/1670 train_time:4311ms step_avg:100.26ms +step:44/1670 train_time:4408ms step_avg:100.17ms +step:45/1670 train_time:4505ms step_avg:100.10ms +step:46/1670 train_time:4601ms step_avg:100.02ms +step:47/1670 train_time:4697ms step_avg:99.95ms +step:48/1670 train_time:4793ms step_avg:99.86ms +step:49/1670 train_time:4889ms step_avg:99.77ms +step:50/1670 train_time:4986ms step_avg:99.71ms +step:51/1670 train_time:5081ms step_avg:99.63ms +step:52/1670 train_time:5177ms step_avg:99.55ms +step:53/1670 train_time:5273ms step_avg:99.48ms +step:54/1670 train_time:5368ms step_avg:99.41ms +step:55/1670 train_time:5464ms step_avg:99.35ms +step:56/1670 train_time:5561ms step_avg:99.31ms +step:57/1670 train_time:5657ms step_avg:99.24ms +step:58/1670 train_time:5753ms step_avg:99.18ms +step:59/1670 train_time:5849ms step_avg:99.13ms +step:60/1670 train_time:5945ms step_avg:99.08ms +step:61/1670 train_time:6041ms step_avg:99.03ms +step:62/1670 train_time:6137ms step_avg:98.98ms +step:63/1670 train_time:6233ms step_avg:98.93ms +step:64/1670 train_time:6328ms step_avg:98.88ms +step:65/1670 train_time:6425ms step_avg:98.85ms +step:66/1670 train_time:6521ms step_avg:98.80ms +step:67/1670 train_time:6616ms step_avg:98.75ms +step:68/1670 train_time:6712ms step_avg:98.70ms +step:69/1670 train_time:6808ms step_avg:98.66ms +step:70/1670 train_time:6905ms step_avg:98.64ms +step:71/1670 train_time:7001ms step_avg:98.61ms +step:72/1670 train_time:7097ms step_avg:98.56ms +step:73/1670 train_time:7192ms step_avg:98.53ms +step:74/1670 train_time:7288ms step_avg:98.48ms +step:75/1670 train_time:7384ms step_avg:98.46ms +step:76/1670 train_time:7481ms step_avg:98.43ms +step:77/1670 train_time:7577ms step_avg:98.40ms +step:78/1670 train_time:7672ms step_avg:98.36ms +step:79/1670 train_time:7768ms step_avg:98.32ms 
+step:80/1670 train_time:7864ms step_avg:98.30ms +step:81/1670 train_time:7961ms step_avg:98.29ms +step:82/1670 train_time:8058ms step_avg:98.26ms +step:83/1670 train_time:8154ms step_avg:98.24ms +step:84/1670 train_time:8249ms step_avg:98.20ms +step:85/1670 train_time:8345ms step_avg:98.18ms +step:86/1670 train_time:8441ms step_avg:98.15ms +step:87/1670 train_time:8537ms step_avg:98.13ms +step:88/1670 train_time:8633ms step_avg:98.10ms +step:89/1670 train_time:8730ms step_avg:98.09ms +step:90/1670 train_time:8825ms step_avg:98.06ms +step:91/1670 train_time:8922ms step_avg:98.05ms +step:92/1670 train_time:9018ms step_avg:98.02ms +step:93/1670 train_time:9114ms step_avg:98.00ms +step:94/1670 train_time:9211ms step_avg:97.98ms +step:95/1670 train_time:9307ms step_avg:97.97ms +step:96/1670 train_time:9404ms step_avg:97.95ms +step:97/1670 train_time:9500ms step_avg:97.93ms +step:98/1670 train_time:9596ms step_avg:97.92ms +step:99/1670 train_time:9691ms step_avg:97.89ms +step:100/1670 train_time:9787ms step_avg:97.87ms +step:101/1670 train_time:9883ms step_avg:97.85ms +step:102/1670 train_time:9979ms step_avg:97.83ms +step:103/1670 train_time:10075ms step_avg:97.81ms +step:104/1670 train_time:10170ms step_avg:97.79ms +step:105/1670 train_time:10267ms step_avg:97.78ms +step:106/1670 train_time:10363ms step_avg:97.77ms +step:107/1670 train_time:10460ms step_avg:97.75ms +step:108/1670 train_time:10555ms step_avg:97.73ms +step:109/1670 train_time:10650ms step_avg:97.71ms +step:110/1670 train_time:10745ms step_avg:97.69ms +step:111/1670 train_time:10841ms step_avg:97.66ms +step:112/1670 train_time:10937ms step_avg:97.65ms +step:113/1670 train_time:11032ms step_avg:97.63ms +step:114/1670 train_time:11129ms step_avg:97.62ms +step:115/1670 train_time:11225ms step_avg:97.61ms +step:116/1670 train_time:11321ms step_avg:97.59ms +step:117/1670 train_time:11417ms step_avg:97.58ms +step:118/1670 train_time:11513ms step_avg:97.57ms +step:119/1670 train_time:11608ms step_avg:97.55ms +step:120/1670 train_time:11704ms step_avg:97.53ms +step:121/1670 train_time:11800ms step_avg:97.52ms +step:122/1670 train_time:11895ms step_avg:97.50ms +step:123/1670 train_time:11991ms step_avg:97.49ms +step:124/1670 train_time:12087ms step_avg:97.48ms +step:125/1670 train_time:12183ms step_avg:97.47ms +step:125/1670 val_loss:4.3009 train_time:12278ms step_avg:98.22ms +step:126/1670 train_time:12303ms step_avg:97.64ms +step:127/1670 train_time:12385ms step_avg:97.52ms +step:128/1670 train_time:12488ms step_avg:97.56ms +step:129/1670 train_time:12585ms step_avg:97.55ms +step:130/1670 train_time:12680ms step_avg:97.53ms +step:131/1670 train_time:12775ms step_avg:97.52ms +step:132/1670 train_time:12870ms step_avg:97.50ms +step:133/1670 train_time:12964ms step_avg:97.48ms +step:134/1670 train_time:13059ms step_avg:97.46ms +step:135/1670 train_time:13154ms step_avg:97.44ms +step:136/1670 train_time:13249ms step_avg:97.42ms +step:137/1670 train_time:13347ms step_avg:97.42ms +step:138/1670 train_time:13444ms step_avg:97.42ms +step:139/1670 train_time:13541ms step_avg:97.42ms +step:140/1670 train_time:13637ms step_avg:97.41ms +step:141/1670 train_time:13733ms step_avg:97.40ms +step:142/1670 train_time:13829ms step_avg:97.38ms +step:143/1670 train_time:13924ms step_avg:97.37ms +step:144/1670 train_time:14019ms step_avg:97.35ms +step:145/1670 train_time:14114ms step_avg:97.34ms +step:146/1670 train_time:14210ms step_avg:97.33ms +step:147/1670 train_time:14306ms step_avg:97.32ms +step:148/1670 train_time:14404ms step_avg:97.32ms 
+step:149/1670 train_time:14500ms step_avg:97.32ms +step:150/1670 train_time:14596ms step_avg:97.31ms +step:151/1670 train_time:14692ms step_avg:97.30ms +step:152/1670 train_time:14788ms step_avg:97.29ms +step:153/1670 train_time:14884ms step_avg:97.28ms +step:154/1670 train_time:14980ms step_avg:97.27ms +step:155/1670 train_time:15075ms step_avg:97.26ms +step:156/1670 train_time:15170ms step_avg:97.25ms +step:157/1670 train_time:15266ms step_avg:97.24ms +step:158/1670 train_time:15362ms step_avg:97.23ms +step:159/1670 train_time:15458ms step_avg:97.22ms +step:160/1670 train_time:15555ms step_avg:97.22ms +step:161/1670 train_time:15652ms step_avg:97.21ms +step:162/1670 train_time:15748ms step_avg:97.21ms +step:163/1670 train_time:15844ms step_avg:97.20ms +step:164/1670 train_time:15940ms step_avg:97.20ms +step:165/1670 train_time:16035ms step_avg:97.18ms +step:166/1670 train_time:16130ms step_avg:97.17ms +step:167/1670 train_time:16226ms step_avg:97.16ms +step:168/1670 train_time:16321ms step_avg:97.15ms +step:169/1670 train_time:16417ms step_avg:97.14ms +step:170/1670 train_time:16513ms step_avg:97.14ms +step:171/1670 train_time:16609ms step_avg:97.13ms +step:172/1670 train_time:16705ms step_avg:97.12ms +step:173/1670 train_time:16801ms step_avg:97.11ms +step:174/1670 train_time:16897ms step_avg:97.11ms +step:175/1670 train_time:16992ms step_avg:97.10ms +step:176/1670 train_time:17088ms step_avg:97.09ms +step:177/1670 train_time:17184ms step_avg:97.09ms +step:178/1670 train_time:17279ms step_avg:97.08ms +step:179/1670 train_time:17375ms step_avg:97.07ms +step:180/1670 train_time:17471ms step_avg:97.06ms +step:181/1670 train_time:17568ms step_avg:97.06ms +step:182/1670 train_time:17663ms step_avg:97.05ms +step:183/1670 train_time:17758ms step_avg:97.04ms +step:184/1670 train_time:17854ms step_avg:97.03ms +step:185/1670 train_time:17950ms step_avg:97.03ms +step:186/1670 train_time:18045ms step_avg:97.02ms +step:187/1670 train_time:18141ms step_avg:97.01ms +step:188/1670 train_time:18236ms step_avg:97.00ms +step:189/1670 train_time:18332ms step_avg:97.00ms +step:190/1670 train_time:18428ms step_avg:96.99ms +step:191/1670 train_time:18524ms step_avg:96.98ms +step:192/1670 train_time:18619ms step_avg:96.98ms +step:193/1670 train_time:18715ms step_avg:96.97ms +step:194/1670 train_time:18811ms step_avg:96.96ms +step:195/1670 train_time:18906ms step_avg:96.96ms +step:196/1670 train_time:19002ms step_avg:96.95ms +step:197/1670 train_time:19097ms step_avg:96.94ms +step:198/1670 train_time:19192ms step_avg:96.93ms +step:199/1670 train_time:19288ms step_avg:96.92ms +step:200/1670 train_time:19384ms step_avg:96.92ms +step:201/1670 train_time:19479ms step_avg:96.91ms +step:202/1670 train_time:19575ms step_avg:96.91ms +step:203/1670 train_time:19671ms step_avg:96.90ms +step:204/1670 train_time:19767ms step_avg:96.90ms +step:205/1670 train_time:19862ms step_avg:96.89ms +step:206/1670 train_time:19957ms step_avg:96.88ms +step:207/1670 train_time:20053ms step_avg:96.87ms +step:208/1670 train_time:20148ms step_avg:96.87ms +step:209/1670 train_time:20243ms step_avg:96.86ms +step:210/1670 train_time:20338ms step_avg:96.85ms +step:211/1670 train_time:20434ms step_avg:96.84ms +step:212/1670 train_time:20529ms step_avg:96.84ms +step:213/1670 train_time:20838ms step_avg:97.83ms +step:214/1670 train_time:20943ms step_avg:97.86ms +step:215/1670 train_time:21037ms step_avg:97.84ms +step:216/1670 train_time:21132ms step_avg:97.83ms +step:217/1670 train_time:21226ms step_avg:97.82ms +step:218/1670 train_time:21320ms 
step_avg:97.80ms +step:219/1670 train_time:21415ms step_avg:97.78ms +step:220/1670 train_time:21510ms step_avg:97.77ms +step:221/1670 train_time:21605ms step_avg:97.76ms +step:222/1670 train_time:21699ms step_avg:97.74ms +step:223/1670 train_time:21796ms step_avg:97.74ms +step:224/1670 train_time:21896ms step_avg:97.75ms +step:225/1670 train_time:21993ms step_avg:97.75ms +step:226/1670 train_time:22089ms step_avg:97.74ms +step:227/1670 train_time:22185ms step_avg:97.73ms +step:228/1670 train_time:22280ms step_avg:97.72ms +step:229/1670 train_time:22374ms step_avg:97.70ms +step:230/1670 train_time:22469ms step_avg:97.69ms +step:231/1670 train_time:22563ms step_avg:97.68ms +step:232/1670 train_time:22658ms step_avg:97.66ms +step:233/1670 train_time:22753ms step_avg:97.65ms +step:234/1670 train_time:22850ms step_avg:97.65ms +step:235/1670 train_time:22948ms step_avg:97.65ms +step:236/1670 train_time:23044ms step_avg:97.64ms +step:237/1670 train_time:23139ms step_avg:97.63ms +step:238/1670 train_time:23235ms step_avg:97.63ms +step:239/1670 train_time:23331ms step_avg:97.62ms +step:240/1670 train_time:23426ms step_avg:97.61ms +step:241/1670 train_time:23522ms step_avg:97.60ms +step:242/1670 train_time:23617ms step_avg:97.59ms +step:243/1670 train_time:23712ms step_avg:97.58ms +step:244/1670 train_time:23808ms step_avg:97.57ms +step:245/1670 train_time:23905ms step_avg:97.57ms +step:246/1670 train_time:24000ms step_avg:97.56ms +step:247/1670 train_time:24097ms step_avg:97.56ms +step:248/1670 train_time:24192ms step_avg:97.55ms +step:249/1670 train_time:24288ms step_avg:97.54ms +step:250/1670 train_time:24384ms step_avg:97.54ms +step:250/1670 val_loss:3.9677 train_time:24478ms step_avg:97.91ms +step:251/1670 train_time:24500ms step_avg:97.61ms +step:252/1670 train_time:24581ms step_avg:97.54ms +step:253/1670 train_time:24680ms step_avg:97.55ms +step:254/1670 train_time:24776ms step_avg:97.54ms +step:255/1670 train_time:24871ms step_avg:97.53ms +step:256/1670 train_time:24966ms step_avg:97.52ms +step:257/1670 train_time:25061ms step_avg:97.51ms +step:258/1670 train_time:25155ms step_avg:97.50ms +step:259/1670 train_time:25250ms step_avg:97.49ms +step:260/1670 train_time:25345ms step_avg:97.48ms +step:261/1670 train_time:25441ms step_avg:97.48ms +step:262/1670 train_time:25540ms step_avg:97.48ms +step:263/1670 train_time:25637ms step_avg:97.48ms +step:264/1670 train_time:25733ms step_avg:97.47ms +step:265/1670 train_time:25828ms step_avg:97.46ms +step:266/1670 train_time:25923ms step_avg:97.45ms +step:267/1670 train_time:26019ms step_avg:97.45ms +step:268/1670 train_time:26114ms step_avg:97.44ms +step:269/1670 train_time:26209ms step_avg:97.43ms +step:270/1670 train_time:26304ms step_avg:97.42ms +step:271/1670 train_time:26399ms step_avg:97.41ms +step:272/1670 train_time:26495ms step_avg:97.41ms +step:273/1670 train_time:26591ms step_avg:97.40ms +step:274/1670 train_time:26688ms step_avg:97.40ms +step:275/1670 train_time:26784ms step_avg:97.40ms +step:276/1670 train_time:26881ms step_avg:97.39ms +step:277/1670 train_time:26977ms step_avg:97.39ms +step:278/1670 train_time:27072ms step_avg:97.38ms +step:279/1670 train_time:27168ms step_avg:97.38ms +step:280/1670 train_time:27264ms step_avg:97.37ms +step:281/1670 train_time:27358ms step_avg:97.36ms +step:282/1670 train_time:27454ms step_avg:97.35ms +step:283/1670 train_time:27549ms step_avg:97.35ms +step:284/1670 train_time:27645ms step_avg:97.34ms +step:285/1670 train_time:27742ms step_avg:97.34ms +step:286/1670 train_time:27838ms step_avg:97.34ms 
+step:287/1670 train_time:27934ms step_avg:97.33ms +step:288/1670 train_time:28030ms step_avg:97.32ms +step:289/1670 train_time:28125ms step_avg:97.32ms +step:290/1670 train_time:28221ms step_avg:97.31ms +step:291/1670 train_time:28317ms step_avg:97.31ms +step:292/1670 train_time:28412ms step_avg:97.30ms +step:293/1670 train_time:28508ms step_avg:97.30ms +step:294/1670 train_time:28603ms step_avg:97.29ms +step:295/1670 train_time:28699ms step_avg:97.29ms +step:296/1670 train_time:28794ms step_avg:97.28ms +step:297/1670 train_time:28890ms step_avg:97.27ms +step:298/1670 train_time:28986ms step_avg:97.27ms +step:299/1670 train_time:29083ms step_avg:97.27ms +step:300/1670 train_time:29179ms step_avg:97.26ms +step:301/1670 train_time:29274ms step_avg:97.26ms +step:302/1670 train_time:29369ms step_avg:97.25ms +step:303/1670 train_time:29465ms step_avg:97.24ms +step:304/1670 train_time:29561ms step_avg:97.24ms +step:305/1670 train_time:29656ms step_avg:97.23ms +step:306/1670 train_time:29752ms step_avg:97.23ms +step:307/1670 train_time:29848ms step_avg:97.22ms +step:308/1670 train_time:29943ms step_avg:97.22ms +step:309/1670 train_time:30039ms step_avg:97.21ms +step:310/1670 train_time:30134ms step_avg:97.21ms +step:311/1670 train_time:30229ms step_avg:97.20ms +step:312/1670 train_time:30325ms step_avg:97.20ms +step:313/1670 train_time:30421ms step_avg:97.19ms +step:314/1670 train_time:30517ms step_avg:97.19ms +step:315/1670 train_time:30613ms step_avg:97.18ms +step:316/1670 train_time:30708ms step_avg:97.18ms +step:317/1670 train_time:30804ms step_avg:97.17ms +step:318/1670 train_time:30900ms step_avg:97.17ms +step:319/1670 train_time:30996ms step_avg:97.17ms +step:320/1670 train_time:31091ms step_avg:97.16ms +step:321/1670 train_time:31186ms step_avg:97.15ms +step:322/1670 train_time:31282ms step_avg:97.15ms +step:323/1670 train_time:31378ms step_avg:97.15ms +step:324/1670 train_time:31474ms step_avg:97.14ms +step:325/1670 train_time:31569ms step_avg:97.14ms +step:326/1670 train_time:31665ms step_avg:97.13ms +step:327/1670 train_time:31762ms step_avg:97.13ms +step:328/1670 train_time:31857ms step_avg:97.13ms +step:329/1670 train_time:31954ms step_avg:97.12ms +step:330/1670 train_time:32048ms step_avg:97.12ms +step:331/1670 train_time:32144ms step_avg:97.11ms +step:332/1670 train_time:32240ms step_avg:97.11ms +step:333/1670 train_time:32337ms step_avg:97.11ms +step:334/1670 train_time:32432ms step_avg:97.10ms +step:335/1670 train_time:32528ms step_avg:97.10ms +step:336/1670 train_time:32623ms step_avg:97.09ms +step:337/1670 train_time:32719ms step_avg:97.09ms +step:338/1670 train_time:32816ms step_avg:97.09ms +step:339/1670 train_time:32912ms step_avg:97.09ms +step:340/1670 train_time:33008ms step_avg:97.08ms +step:341/1670 train_time:33103ms step_avg:97.08ms +step:342/1670 train_time:33199ms step_avg:97.07ms +step:343/1670 train_time:33294ms step_avg:97.07ms +step:344/1670 train_time:33390ms step_avg:97.06ms +step:345/1670 train_time:33486ms step_avg:97.06ms +step:346/1670 train_time:33581ms step_avg:97.06ms +step:347/1670 train_time:33677ms step_avg:97.05ms +step:348/1670 train_time:33772ms step_avg:97.05ms +step:349/1670 train_time:33868ms step_avg:97.04ms +step:350/1670 train_time:33964ms step_avg:97.04ms +step:351/1670 train_time:34061ms step_avg:97.04ms +step:352/1670 train_time:34156ms step_avg:97.04ms +step:353/1670 train_time:34252ms step_avg:97.03ms +step:354/1670 train_time:34347ms step_avg:97.03ms +step:355/1670 train_time:34443ms step_avg:97.02ms +step:356/1670 train_time:34538ms 
step_avg:97.02ms +step:357/1670 train_time:34633ms step_avg:97.01ms +step:358/1670 train_time:34729ms step_avg:97.01ms +step:359/1670 train_time:34825ms step_avg:97.00ms +step:360/1670 train_time:34920ms step_avg:97.00ms +step:361/1670 train_time:35016ms step_avg:97.00ms +step:362/1670 train_time:35111ms step_avg:96.99ms +step:363/1670 train_time:35207ms step_avg:96.99ms +step:364/1670 train_time:35303ms step_avg:96.99ms +step:365/1670 train_time:35399ms step_avg:96.98ms +step:366/1670 train_time:35495ms step_avg:96.98ms +step:367/1670 train_time:35590ms step_avg:96.97ms +step:368/1670 train_time:35686ms step_avg:96.97ms +step:369/1670 train_time:35781ms step_avg:96.97ms +step:370/1670 train_time:35877ms step_avg:96.96ms +step:371/1670 train_time:35972ms step_avg:96.96ms +step:372/1670 train_time:36068ms step_avg:96.96ms +step:373/1670 train_time:36164ms step_avg:96.96ms +step:374/1670 train_time:36260ms step_avg:96.95ms +step:375/1670 train_time:36356ms step_avg:96.95ms +step:375/1670 val_loss:3.8164 train_time:36451ms step_avg:97.20ms +step:376/1670 train_time:36472ms step_avg:97.00ms +step:377/1670 train_time:36563ms step_avg:96.99ms +step:378/1670 train_time:36663ms step_avg:96.99ms +step:379/1670 train_time:36759ms step_avg:96.99ms +step:380/1670 train_time:36854ms step_avg:96.98ms +step:381/1670 train_time:36948ms step_avg:96.98ms +step:382/1670 train_time:37043ms step_avg:96.97ms +step:383/1670 train_time:37137ms step_avg:96.96ms +step:384/1670 train_time:37233ms step_avg:96.96ms +step:385/1670 train_time:37327ms step_avg:96.95ms +step:386/1670 train_time:37423ms step_avg:96.95ms +step:387/1670 train_time:37520ms step_avg:96.95ms +step:388/1670 train_time:37620ms step_avg:96.96ms +step:389/1670 train_time:37717ms step_avg:96.96ms +step:390/1670 train_time:37813ms step_avg:96.96ms +step:391/1670 train_time:37910ms step_avg:96.96ms +step:392/1670 train_time:38005ms step_avg:96.95ms +step:393/1670 train_time:38101ms step_avg:96.95ms +step:394/1670 train_time:38196ms step_avg:96.94ms +step:395/1670 train_time:38291ms step_avg:96.94ms +step:396/1670 train_time:38386ms step_avg:96.93ms +step:397/1670 train_time:38482ms step_avg:96.93ms +step:398/1670 train_time:38579ms step_avg:96.93ms +step:399/1670 train_time:38676ms step_avg:96.93ms +step:400/1670 train_time:38773ms step_avg:96.93ms +step:401/1670 train_time:38869ms step_avg:96.93ms +step:402/1670 train_time:38965ms step_avg:96.93ms +step:403/1670 train_time:39060ms step_avg:96.92ms +step:404/1670 train_time:39156ms step_avg:96.92ms +step:405/1670 train_time:39251ms step_avg:96.92ms +step:406/1670 train_time:39346ms step_avg:96.91ms +step:407/1670 train_time:39441ms step_avg:96.91ms +step:408/1670 train_time:39537ms step_avg:96.91ms +step:409/1670 train_time:39634ms step_avg:96.91ms +step:410/1670 train_time:39730ms step_avg:96.90ms +step:411/1670 train_time:39826ms step_avg:96.90ms +step:412/1670 train_time:39922ms step_avg:96.90ms +step:413/1670 train_time:40018ms step_avg:96.90ms +step:414/1670 train_time:40114ms step_avg:96.89ms +step:415/1670 train_time:40210ms step_avg:96.89ms +step:416/1670 train_time:40306ms step_avg:96.89ms +step:417/1670 train_time:40401ms step_avg:96.88ms +step:418/1670 train_time:40497ms step_avg:96.88ms +step:419/1670 train_time:40593ms step_avg:96.88ms +step:420/1670 train_time:40689ms step_avg:96.88ms +step:421/1670 train_time:40785ms step_avg:96.88ms +step:422/1670 train_time:40881ms step_avg:96.87ms +step:423/1670 train_time:40976ms step_avg:96.87ms +step:424/1670 train_time:41072ms step_avg:96.87ms 
+step:425/1670 train_time:41348ms step_avg:97.29ms +step:426/1670 train_time:41559ms step_avg:97.56ms +step:427/1670 train_time:41653ms step_avg:97.55ms +step:428/1670 train_time:41748ms step_avg:97.54ms +step:429/1670 train_time:41842ms step_avg:97.53ms +step:430/1670 train_time:41937ms step_avg:97.53ms +step:431/1670 train_time:42032ms step_avg:97.52ms +step:432/1670 train_time:42126ms step_avg:97.51ms +step:433/1670 train_time:42221ms step_avg:97.51ms +step:434/1670 train_time:42315ms step_avg:97.50ms +step:435/1670 train_time:42413ms step_avg:97.50ms +step:436/1670 train_time:42516ms step_avg:97.51ms +step:437/1670 train_time:42615ms step_avg:97.52ms +step:438/1670 train_time:42712ms step_avg:97.52ms +step:439/1670 train_time:42809ms step_avg:97.51ms +step:440/1670 train_time:42904ms step_avg:97.51ms +step:441/1670 train_time:42999ms step_avg:97.50ms +step:442/1670 train_time:43094ms step_avg:97.50ms +step:443/1670 train_time:43189ms step_avg:97.49ms +step:444/1670 train_time:43284ms step_avg:97.49ms +step:445/1670 train_time:43379ms step_avg:97.48ms +step:446/1670 train_time:43476ms step_avg:97.48ms +step:447/1670 train_time:43573ms step_avg:97.48ms +step:448/1670 train_time:43671ms step_avg:97.48ms +step:449/1670 train_time:43767ms step_avg:97.48ms +step:450/1670 train_time:43862ms step_avg:97.47ms +step:451/1670 train_time:43957ms step_avg:97.47ms +step:452/1670 train_time:44053ms step_avg:97.46ms +step:453/1670 train_time:44148ms step_avg:97.46ms +step:454/1670 train_time:44243ms step_avg:97.45ms +step:455/1670 train_time:44338ms step_avg:97.45ms +step:456/1670 train_time:44434ms step_avg:97.44ms +step:457/1670 train_time:44532ms step_avg:97.44ms +step:458/1670 train_time:44628ms step_avg:97.44ms +step:459/1670 train_time:44724ms step_avg:97.44ms +step:460/1670 train_time:44820ms step_avg:97.43ms +step:461/1670 train_time:44916ms step_avg:97.43ms +step:462/1670 train_time:45012ms step_avg:97.43ms +step:463/1670 train_time:45108ms step_avg:97.42ms +step:464/1670 train_time:45203ms step_avg:97.42ms +step:465/1670 train_time:45298ms step_avg:97.41ms +step:466/1670 train_time:45393ms step_avg:97.41ms +step:467/1670 train_time:45489ms step_avg:97.41ms +step:468/1670 train_time:45586ms step_avg:97.41ms +step:469/1670 train_time:45682ms step_avg:97.40ms +step:470/1670 train_time:45777ms step_avg:97.40ms +step:471/1670 train_time:45873ms step_avg:97.39ms +step:472/1670 train_time:45970ms step_avg:97.39ms +step:473/1670 train_time:46066ms step_avg:97.39ms +step:474/1670 train_time:46161ms step_avg:97.39ms +step:475/1670 train_time:46256ms step_avg:97.38ms +step:476/1670 train_time:46353ms step_avg:97.38ms +step:477/1670 train_time:46449ms step_avg:97.38ms +step:478/1670 train_time:46544ms step_avg:97.37ms +step:479/1670 train_time:46640ms step_avg:97.37ms +step:480/1670 train_time:46736ms step_avg:97.37ms +step:481/1670 train_time:46832ms step_avg:97.36ms +step:482/1670 train_time:46928ms step_avg:97.36ms +step:483/1670 train_time:47025ms step_avg:97.36ms +step:484/1670 train_time:47119ms step_avg:97.35ms +step:485/1670 train_time:47215ms step_avg:97.35ms +step:486/1670 train_time:47311ms step_avg:97.35ms +step:487/1670 train_time:47407ms step_avg:97.34ms +step:488/1670 train_time:47502ms step_avg:97.34ms +step:489/1670 train_time:47597ms step_avg:97.34ms +step:490/1670 train_time:47694ms step_avg:97.33ms +step:491/1670 train_time:47790ms step_avg:97.33ms +step:492/1670 train_time:47886ms step_avg:97.33ms +step:493/1670 train_time:47981ms step_avg:97.33ms +step:494/1670 train_time:48077ms 
step_avg:97.32ms +step:495/1670 train_time:48173ms step_avg:97.32ms +step:496/1670 train_time:48269ms step_avg:97.32ms +step:497/1670 train_time:48365ms step_avg:97.31ms +step:498/1670 train_time:48460ms step_avg:97.31ms +step:499/1670 train_time:48556ms step_avg:97.31ms +step:500/1670 train_time:48652ms step_avg:97.30ms +step:500/1670 val_loss:3.7161 train_time:48748ms step_avg:97.50ms +step:501/1670 train_time:48770ms step_avg:97.35ms +step:502/1670 train_time:48851ms step_avg:97.31ms +step:503/1670 train_time:48950ms step_avg:97.32ms +step:504/1670 train_time:49047ms step_avg:97.32ms +step:505/1670 train_time:49143ms step_avg:97.31ms +step:506/1670 train_time:49238ms step_avg:97.31ms +step:507/1670 train_time:49333ms step_avg:97.30ms +step:508/1670 train_time:49428ms step_avg:97.30ms +step:509/1670 train_time:49525ms step_avg:97.30ms +step:510/1670 train_time:49620ms step_avg:97.29ms +step:511/1670 train_time:49716ms step_avg:97.29ms +step:512/1670 train_time:49813ms step_avg:97.29ms +step:513/1670 train_time:49910ms step_avg:97.29ms +step:514/1670 train_time:50007ms step_avg:97.29ms +step:515/1670 train_time:50104ms step_avg:97.29ms +step:516/1670 train_time:50200ms step_avg:97.29ms +step:517/1670 train_time:50296ms step_avg:97.28ms +step:518/1670 train_time:50390ms step_avg:97.28ms +step:519/1670 train_time:50485ms step_avg:97.27ms +step:520/1670 train_time:50580ms step_avg:97.27ms +step:521/1670 train_time:50676ms step_avg:97.27ms +step:522/1670 train_time:50772ms step_avg:97.26ms +step:523/1670 train_time:50869ms step_avg:97.26ms +step:524/1670 train_time:50966ms step_avg:97.26ms +step:525/1670 train_time:51062ms step_avg:97.26ms +step:526/1670 train_time:51157ms step_avg:97.26ms +step:527/1670 train_time:51253ms step_avg:97.25ms +step:528/1670 train_time:51348ms step_avg:97.25ms +step:529/1670 train_time:51444ms step_avg:97.25ms +step:530/1670 train_time:51539ms step_avg:97.24ms +step:531/1670 train_time:51635ms step_avg:97.24ms +step:532/1670 train_time:51730ms step_avg:97.24ms +step:533/1670 train_time:51826ms step_avg:97.23ms +step:534/1670 train_time:51923ms step_avg:97.23ms +step:535/1670 train_time:52019ms step_avg:97.23ms +step:536/1670 train_time:52116ms step_avg:97.23ms +step:537/1670 train_time:52211ms step_avg:97.23ms +step:538/1670 train_time:52307ms step_avg:97.22ms +step:539/1670 train_time:52402ms step_avg:97.22ms +step:540/1670 train_time:52498ms step_avg:97.22ms +step:541/1670 train_time:52593ms step_avg:97.21ms +step:542/1670 train_time:52688ms step_avg:97.21ms +step:543/1670 train_time:52785ms step_avg:97.21ms +step:544/1670 train_time:52881ms step_avg:97.21ms +step:545/1670 train_time:52978ms step_avg:97.21ms +step:546/1670 train_time:53075ms step_avg:97.21ms +step:547/1670 train_time:53170ms step_avg:97.20ms +step:548/1670 train_time:53266ms step_avg:97.20ms +step:549/1670 train_time:53361ms step_avg:97.20ms +step:550/1670 train_time:53457ms step_avg:97.19ms +step:551/1670 train_time:53552ms step_avg:97.19ms +step:552/1670 train_time:53647ms step_avg:97.19ms +step:553/1670 train_time:53743ms step_avg:97.19ms +step:554/1670 train_time:53839ms step_avg:97.18ms +step:555/1670 train_time:53934ms step_avg:97.18ms +step:556/1670 train_time:54031ms step_avg:97.18ms +step:557/1670 train_time:54127ms step_avg:97.18ms +step:558/1670 train_time:54224ms step_avg:97.18ms +step:559/1670 train_time:54321ms step_avg:97.18ms +step:560/1670 train_time:54418ms step_avg:97.18ms +step:561/1670 train_time:54515ms step_avg:97.18ms +step:562/1670 train_time:54612ms step_avg:97.17ms 
+step:563/1670 train_time:54709ms step_avg:97.17ms +step:564/1670 train_time:54807ms step_avg:97.17ms +step:565/1670 train_time:54905ms step_avg:97.18ms +step:566/1670 train_time:55004ms step_avg:97.18ms +step:567/1670 train_time:55102ms step_avg:97.18ms +step:568/1670 train_time:55200ms step_avg:97.18ms +step:569/1670 train_time:55297ms step_avg:97.18ms +step:570/1670 train_time:55393ms step_avg:97.18ms +step:571/1670 train_time:55490ms step_avg:97.18ms +step:572/1670 train_time:55587ms step_avg:97.18ms +step:573/1670 train_time:55683ms step_avg:97.18ms +step:574/1670 train_time:55781ms step_avg:97.18ms +step:575/1670 train_time:55880ms step_avg:97.18ms +step:576/1670 train_time:55977ms step_avg:97.18ms +step:577/1670 train_time:56074ms step_avg:97.18ms +step:578/1670 train_time:56171ms step_avg:97.18ms +step:579/1670 train_time:56268ms step_avg:97.18ms +step:580/1670 train_time:56367ms step_avg:97.18ms +step:581/1670 train_time:56465ms step_avg:97.19ms +step:582/1670 train_time:56563ms step_avg:97.19ms +step:583/1670 train_time:56659ms step_avg:97.19ms +step:584/1670 train_time:56756ms step_avg:97.19ms +step:585/1670 train_time:56852ms step_avg:97.18ms +step:586/1670 train_time:56950ms step_avg:97.18ms +step:587/1670 train_time:57047ms step_avg:97.18ms +step:588/1670 train_time:57146ms step_avg:97.19ms +step:589/1670 train_time:57244ms step_avg:97.19ms +step:590/1670 train_time:57341ms step_avg:97.19ms +step:591/1670 train_time:57438ms step_avg:97.19ms +step:592/1670 train_time:57535ms step_avg:97.19ms +step:593/1670 train_time:57631ms step_avg:97.19ms +step:594/1670 train_time:57730ms step_avg:97.19ms +step:595/1670 train_time:57827ms step_avg:97.19ms +step:596/1670 train_time:57925ms step_avg:97.19ms +step:597/1670 train_time:58023ms step_avg:97.19ms +step:598/1670 train_time:58121ms step_avg:97.19ms +step:599/1670 train_time:58219ms step_avg:97.19ms +step:600/1670 train_time:58316ms step_avg:97.19ms +step:601/1670 train_time:58413ms step_avg:97.19ms +step:602/1670 train_time:58509ms step_avg:97.19ms +step:603/1670 train_time:58606ms step_avg:97.19ms +step:604/1670 train_time:58704ms step_avg:97.19ms +step:605/1670 train_time:58801ms step_avg:97.19ms +step:606/1670 train_time:58898ms step_avg:97.19ms +step:607/1670 train_time:58995ms step_avg:97.19ms +step:608/1670 train_time:59092ms step_avg:97.19ms +step:609/1670 train_time:59191ms step_avg:97.19ms +step:610/1670 train_time:59288ms step_avg:97.19ms +step:611/1670 train_time:59385ms step_avg:97.19ms +step:612/1670 train_time:59483ms step_avg:97.19ms +step:613/1670 train_time:59580ms step_avg:97.19ms +step:614/1670 train_time:59677ms step_avg:97.19ms +step:615/1670 train_time:59774ms step_avg:97.19ms +step:616/1670 train_time:59870ms step_avg:97.19ms +step:617/1670 train_time:59968ms step_avg:97.19ms +step:618/1670 train_time:60065ms step_avg:97.19ms +step:619/1670 train_time:60163ms step_avg:97.19ms +step:620/1670 train_time:60260ms step_avg:97.19ms +step:621/1670 train_time:60357ms step_avg:97.19ms +step:622/1670 train_time:60453ms step_avg:97.19ms +step:623/1670 train_time:60551ms step_avg:97.19ms +step:624/1670 train_time:60649ms step_avg:97.19ms +step:625/1670 train_time:60747ms step_avg:97.19ms +step:625/1670 val_loss:3.6123 train_time:60843ms step_avg:97.35ms +step:626/1670 train_time:60865ms step_avg:97.23ms +step:627/1670 train_time:60947ms step_avg:97.20ms +step:628/1670 train_time:61043ms step_avg:97.20ms +step:629/1670 train_time:61139ms step_avg:97.20ms +step:630/1670 train_time:61236ms step_avg:97.20ms +step:631/1670 
train_time:61332ms step_avg:97.20ms +step:632/1670 train_time:61427ms step_avg:97.20ms +step:633/1670 train_time:61523ms step_avg:97.19ms +step:634/1670 train_time:61618ms step_avg:97.19ms +step:635/1670 train_time:61715ms step_avg:97.19ms +step:636/1670 train_time:61816ms step_avg:97.19ms +step:637/1670 train_time:61917ms step_avg:97.20ms +step:638/1670 train_time:62017ms step_avg:97.21ms +step:639/1670 train_time:62397ms step_avg:97.65ms +step:640/1670 train_time:62476ms step_avg:97.62ms +step:641/1670 train_time:62572ms step_avg:97.62ms +step:642/1670 train_time:62669ms step_avg:97.61ms +step:643/1670 train_time:62765ms step_avg:97.61ms +step:644/1670 train_time:62860ms step_avg:97.61ms +step:645/1670 train_time:62957ms step_avg:97.61ms +step:646/1670 train_time:63053ms step_avg:97.60ms +step:647/1670 train_time:63148ms step_avg:97.60ms +step:648/1670 train_time:63245ms step_avg:97.60ms +step:649/1670 train_time:63347ms step_avg:97.61ms +step:650/1670 train_time:63447ms step_avg:97.61ms +step:651/1670 train_time:63544ms step_avg:97.61ms +step:652/1670 train_time:63641ms step_avg:97.61ms +step:653/1670 train_time:63738ms step_avg:97.61ms +step:654/1670 train_time:63835ms step_avg:97.61ms +step:655/1670 train_time:63932ms step_avg:97.61ms +step:656/1670 train_time:64028ms step_avg:97.60ms +step:657/1670 train_time:64124ms step_avg:97.60ms +step:658/1670 train_time:64220ms step_avg:97.60ms +step:659/1670 train_time:64318ms step_avg:97.60ms +step:660/1670 train_time:64418ms step_avg:97.60ms +step:661/1670 train_time:64515ms step_avg:97.60ms +step:662/1670 train_time:64614ms step_avg:97.60ms +step:663/1670 train_time:64711ms step_avg:97.60ms +step:664/1670 train_time:64809ms step_avg:97.60ms +step:665/1670 train_time:64905ms step_avg:97.60ms +step:666/1670 train_time:65001ms step_avg:97.60ms +step:667/1670 train_time:65098ms step_avg:97.60ms +step:668/1670 train_time:65194ms step_avg:97.60ms +step:669/1670 train_time:65294ms step_avg:97.60ms +step:670/1670 train_time:65394ms step_avg:97.60ms +step:671/1670 train_time:65492ms step_avg:97.60ms +step:672/1670 train_time:65591ms step_avg:97.61ms +step:673/1670 train_time:65689ms step_avg:97.61ms +step:674/1670 train_time:65786ms step_avg:97.61ms +step:675/1670 train_time:65883ms step_avg:97.60ms +step:676/1670 train_time:65979ms step_avg:97.60ms +step:677/1670 train_time:66075ms step_avg:97.60ms +step:678/1670 train_time:66172ms step_avg:97.60ms +step:679/1670 train_time:66269ms step_avg:97.60ms +step:680/1670 train_time:66366ms step_avg:97.60ms +step:681/1670 train_time:66464ms step_avg:97.60ms +step:682/1670 train_time:66561ms step_avg:97.60ms +step:683/1670 train_time:66659ms step_avg:97.60ms +step:684/1670 train_time:66757ms step_avg:97.60ms +step:685/1670 train_time:66855ms step_avg:97.60ms +step:686/1670 train_time:66953ms step_avg:97.60ms +step:687/1670 train_time:67050ms step_avg:97.60ms +step:688/1670 train_time:67146ms step_avg:97.60ms +step:689/1670 train_time:67243ms step_avg:97.60ms +step:690/1670 train_time:67340ms step_avg:97.59ms +step:691/1670 train_time:67437ms step_avg:97.59ms +step:692/1670 train_time:67535ms step_avg:97.59ms +step:693/1670 train_time:67634ms step_avg:97.60ms +step:694/1670 train_time:67733ms step_avg:97.60ms +step:695/1670 train_time:67830ms step_avg:97.60ms +step:696/1670 train_time:67927ms step_avg:97.60ms +step:697/1670 train_time:68023ms step_avg:97.59ms +step:698/1670 train_time:68121ms step_avg:97.59ms +step:699/1670 train_time:68217ms step_avg:97.59ms +step:700/1670 train_time:68315ms step_avg:97.59ms 
+step:701/1670 train_time:68413ms step_avg:97.59ms +step:702/1670 train_time:68511ms step_avg:97.59ms +step:703/1670 train_time:68608ms step_avg:97.59ms +step:704/1670 train_time:68705ms step_avg:97.59ms +step:705/1670 train_time:68802ms step_avg:97.59ms +step:706/1670 train_time:68900ms step_avg:97.59ms +step:707/1670 train_time:68998ms step_avg:97.59ms +step:708/1670 train_time:69095ms step_avg:97.59ms +step:709/1670 train_time:69193ms step_avg:97.59ms +step:710/1670 train_time:69290ms step_avg:97.59ms +step:711/1670 train_time:69387ms step_avg:97.59ms +step:712/1670 train_time:69484ms step_avg:97.59ms +step:713/1670 train_time:69581ms step_avg:97.59ms +step:714/1670 train_time:69678ms step_avg:97.59ms +step:715/1670 train_time:69775ms step_avg:97.59ms +step:716/1670 train_time:69872ms step_avg:97.59ms +step:717/1670 train_time:69970ms step_avg:97.59ms +step:718/1670 train_time:70068ms step_avg:97.59ms +step:719/1670 train_time:70165ms step_avg:97.59ms +step:720/1670 train_time:70261ms step_avg:97.58ms +step:721/1670 train_time:70357ms step_avg:97.58ms +step:722/1670 train_time:70456ms step_avg:97.58ms +step:723/1670 train_time:70553ms step_avg:97.58ms +step:724/1670 train_time:70651ms step_avg:97.58ms +step:725/1670 train_time:70748ms step_avg:97.58ms +step:726/1670 train_time:70845ms step_avg:97.58ms +step:727/1670 train_time:70942ms step_avg:97.58ms +step:728/1670 train_time:71040ms step_avg:97.58ms +step:729/1670 train_time:71139ms step_avg:97.58ms +step:730/1670 train_time:71236ms step_avg:97.58ms +step:731/1670 train_time:71334ms step_avg:97.58ms +step:732/1670 train_time:71431ms step_avg:97.58ms +step:733/1670 train_time:71529ms step_avg:97.58ms +step:734/1670 train_time:71626ms step_avg:97.58ms +step:735/1670 train_time:71722ms step_avg:97.58ms +step:736/1670 train_time:71819ms step_avg:97.58ms +step:737/1670 train_time:71917ms step_avg:97.58ms +step:738/1670 train_time:72015ms step_avg:97.58ms +step:739/1670 train_time:72113ms step_avg:97.58ms +step:740/1670 train_time:72210ms step_avg:97.58ms +step:741/1670 train_time:72307ms step_avg:97.58ms +step:742/1670 train_time:72404ms step_avg:97.58ms +step:743/1670 train_time:72501ms step_avg:97.58ms +step:744/1670 train_time:72599ms step_avg:97.58ms +step:745/1670 train_time:72697ms step_avg:97.58ms +step:746/1670 train_time:72793ms step_avg:97.58ms +step:747/1670 train_time:72891ms step_avg:97.58ms +step:748/1670 train_time:72989ms step_avg:97.58ms +step:749/1670 train_time:73087ms step_avg:97.58ms +step:750/1670 train_time:73184ms step_avg:97.58ms +step:750/1670 val_loss:3.5623 train_time:73280ms step_avg:97.71ms +step:751/1670 train_time:73302ms step_avg:97.61ms +step:752/1670 train_time:73387ms step_avg:97.59ms +step:753/1670 train_time:73485ms step_avg:97.59ms +step:754/1670 train_time:73582ms step_avg:97.59ms +step:755/1670 train_time:73678ms step_avg:97.59ms +step:756/1670 train_time:73774ms step_avg:97.58ms +step:757/1670 train_time:73870ms step_avg:97.58ms +step:758/1670 train_time:73967ms step_avg:97.58ms +step:759/1670 train_time:74064ms step_avg:97.58ms +step:760/1670 train_time:74159ms step_avg:97.58ms +step:761/1670 train_time:74257ms step_avg:97.58ms +step:762/1670 train_time:74358ms step_avg:97.58ms +step:763/1670 train_time:74459ms step_avg:97.59ms +step:764/1670 train_time:74558ms step_avg:97.59ms +step:765/1670 train_time:74655ms step_avg:97.59ms +step:766/1670 train_time:74752ms step_avg:97.59ms +step:767/1670 train_time:74849ms step_avg:97.59ms +step:768/1670 train_time:74944ms step_avg:97.58ms +step:769/1670 
train_time:75040ms step_avg:97.58ms +step:770/1670 train_time:75136ms step_avg:97.58ms +step:771/1670 train_time:75234ms step_avg:97.58ms +step:772/1670 train_time:75333ms step_avg:97.58ms +step:773/1670 train_time:75434ms step_avg:97.59ms +step:774/1670 train_time:75533ms step_avg:97.59ms +step:775/1670 train_time:75632ms step_avg:97.59ms +step:776/1670 train_time:75730ms step_avg:97.59ms +step:777/1670 train_time:75826ms step_avg:97.59ms +step:778/1670 train_time:75923ms step_avg:97.59ms +step:779/1670 train_time:76018ms step_avg:97.58ms +step:780/1670 train_time:76115ms step_avg:97.58ms +step:781/1670 train_time:76213ms step_avg:97.58ms +step:782/1670 train_time:76311ms step_avg:97.58ms +step:783/1670 train_time:76411ms step_avg:97.59ms +step:784/1670 train_time:76511ms step_avg:97.59ms +step:785/1670 train_time:76610ms step_avg:97.59ms +step:786/1670 train_time:76709ms step_avg:97.59ms +step:787/1670 train_time:76805ms step_avg:97.59ms +step:788/1670 train_time:76901ms step_avg:97.59ms +step:789/1670 train_time:76997ms step_avg:97.59ms +step:790/1670 train_time:77094ms step_avg:97.59ms +step:791/1670 train_time:77192ms step_avg:97.59ms +step:792/1670 train_time:77291ms step_avg:97.59ms +step:793/1670 train_time:77390ms step_avg:97.59ms +step:794/1670 train_time:77489ms step_avg:97.59ms +step:795/1670 train_time:77587ms step_avg:97.59ms +step:796/1670 train_time:77685ms step_avg:97.59ms +step:797/1670 train_time:77781ms step_avg:97.59ms +step:798/1670 train_time:77878ms step_avg:97.59ms +step:799/1670 train_time:77974ms step_avg:97.59ms +step:800/1670 train_time:78070ms step_avg:97.59ms +step:801/1670 train_time:78167ms step_avg:97.59ms +step:802/1670 train_time:78265ms step_avg:97.59ms +step:803/1670 train_time:78363ms step_avg:97.59ms +step:804/1670 train_time:78459ms step_avg:97.59ms +step:805/1670 train_time:78557ms step_avg:97.59ms +step:806/1670 train_time:78655ms step_avg:97.59ms +step:807/1670 train_time:78752ms step_avg:97.59ms +step:808/1670 train_time:78851ms step_avg:97.59ms +step:809/1670 train_time:78949ms step_avg:97.59ms +step:810/1670 train_time:79045ms step_avg:97.59ms +step:811/1670 train_time:79142ms step_avg:97.59ms +step:812/1670 train_time:79238ms step_avg:97.58ms +step:813/1670 train_time:79335ms step_avg:97.58ms +step:814/1670 train_time:79434ms step_avg:97.58ms +step:815/1670 train_time:79533ms step_avg:97.59ms +step:816/1670 train_time:79632ms step_avg:97.59ms +step:817/1670 train_time:79729ms step_avg:97.59ms +step:818/1670 train_time:79826ms step_avg:97.59ms +step:819/1670 train_time:79924ms step_avg:97.59ms +step:820/1670 train_time:80021ms step_avg:97.59ms +step:821/1670 train_time:80117ms step_avg:97.58ms +step:822/1670 train_time:80214ms step_avg:97.58ms +step:823/1670 train_time:80311ms step_avg:97.58ms +step:824/1670 train_time:80408ms step_avg:97.58ms +step:825/1670 train_time:80507ms step_avg:97.58ms +step:826/1670 train_time:80604ms step_avg:97.58ms +step:827/1670 train_time:80701ms step_avg:97.58ms +step:828/1670 train_time:80798ms step_avg:97.58ms +step:829/1670 train_time:80897ms step_avg:97.58ms +step:830/1670 train_time:80994ms step_avg:97.58ms +step:831/1670 train_time:81092ms step_avg:97.58ms +step:832/1670 train_time:81189ms step_avg:97.58ms +step:833/1670 train_time:81286ms step_avg:97.58ms +step:834/1670 train_time:81382ms step_avg:97.58ms +step:835/1670 train_time:81479ms step_avg:97.58ms +step:836/1670 train_time:81577ms step_avg:97.58ms +step:837/1670 train_time:81674ms step_avg:97.58ms +step:838/1670 train_time:81772ms step_avg:97.58ms 
+step:839/1670 train_time:81870ms step_avg:97.58ms +step:840/1670 train_time:81969ms step_avg:97.58ms +step:841/1670 train_time:82068ms step_avg:97.58ms +step:842/1670 train_time:82165ms step_avg:97.58ms +step:843/1670 train_time:82262ms step_avg:97.58ms +step:844/1670 train_time:82358ms step_avg:97.58ms +step:845/1670 train_time:82455ms step_avg:97.58ms +step:846/1670 train_time:82553ms step_avg:97.58ms +step:847/1670 train_time:82650ms step_avg:97.58ms +step:848/1670 train_time:82747ms step_avg:97.58ms +step:849/1670 train_time:82844ms step_avg:97.58ms +step:850/1670 train_time:82941ms step_avg:97.58ms +step:851/1670 train_time:83209ms step_avg:97.78ms +step:852/1670 train_time:83379ms step_avg:97.86ms +step:853/1670 train_time:83473ms step_avg:97.86ms +step:854/1670 train_time:83570ms step_avg:97.86ms +step:855/1670 train_time:83666ms step_avg:97.86ms +step:856/1670 train_time:83762ms step_avg:97.85ms +step:857/1670 train_time:83858ms step_avg:97.85ms +step:858/1670 train_time:83955ms step_avg:97.85ms +step:859/1670 train_time:84051ms step_avg:97.85ms +step:860/1670 train_time:84148ms step_avg:97.85ms +step:861/1670 train_time:84245ms step_avg:97.85ms +step:862/1670 train_time:84351ms step_avg:97.85ms +step:863/1670 train_time:84450ms step_avg:97.86ms +step:864/1670 train_time:84548ms step_avg:97.86ms +step:865/1670 train_time:84644ms step_avg:97.85ms +step:866/1670 train_time:84740ms step_avg:97.85ms +step:867/1670 train_time:84836ms step_avg:97.85ms +step:868/1670 train_time:84932ms step_avg:97.85ms +step:869/1670 train_time:85029ms step_avg:97.85ms +step:870/1670 train_time:85125ms step_avg:97.85ms +step:871/1670 train_time:85222ms step_avg:97.84ms +step:872/1670 train_time:85321ms step_avg:97.84ms +step:873/1670 train_time:85420ms step_avg:97.85ms +step:874/1670 train_time:85518ms step_avg:97.85ms +step:875/1670 train_time:85617ms step_avg:97.85ms +step:875/1670 val_loss:3.5191 train_time:85714ms step_avg:97.96ms +step:876/1670 train_time:85735ms step_avg:97.87ms +step:877/1670 train_time:85818ms step_avg:97.85ms +step:878/1670 train_time:85917ms step_avg:97.86ms +step:879/1670 train_time:86014ms step_avg:97.85ms +step:880/1670 train_time:86111ms step_avg:97.85ms +step:881/1670 train_time:86207ms step_avg:97.85ms +step:882/1670 train_time:86303ms step_avg:97.85ms +step:883/1670 train_time:86399ms step_avg:97.85ms +step:884/1670 train_time:86496ms step_avg:97.85ms +step:885/1670 train_time:86592ms step_avg:97.84ms +step:886/1670 train_time:86690ms step_avg:97.84ms +step:887/1670 train_time:86792ms step_avg:97.85ms +step:888/1670 train_time:86893ms step_avg:97.85ms +step:889/1670 train_time:86992ms step_avg:97.85ms +step:890/1670 train_time:87090ms step_avg:97.85ms +step:891/1670 train_time:87186ms step_avg:97.85ms +step:892/1670 train_time:87284ms step_avg:97.85ms +step:893/1670 train_time:87382ms step_avg:97.85ms +step:894/1670 train_time:87479ms step_avg:97.85ms +step:895/1670 train_time:87576ms step_avg:97.85ms +step:896/1670 train_time:87672ms step_avg:97.85ms +step:897/1670 train_time:87770ms step_avg:97.85ms +step:898/1670 train_time:87870ms step_avg:97.85ms +step:899/1670 train_time:87968ms step_avg:97.85ms +step:900/1670 train_time:88066ms step_avg:97.85ms +step:901/1670 train_time:88163ms step_avg:97.85ms +step:902/1670 train_time:88259ms step_avg:97.85ms +step:903/1670 train_time:88356ms step_avg:97.85ms +step:904/1670 train_time:88452ms step_avg:97.85ms +step:905/1670 train_time:88550ms step_avg:97.85ms +step:906/1670 train_time:88647ms step_avg:97.84ms +step:907/1670 
train_time:88747ms step_avg:97.85ms +step:908/1670 train_time:88845ms step_avg:97.85ms +step:909/1670 train_time:88944ms step_avg:97.85ms +step:910/1670 train_time:89042ms step_avg:97.85ms +step:911/1670 train_time:89139ms step_avg:97.85ms +step:912/1670 train_time:89236ms step_avg:97.85ms +step:913/1670 train_time:89333ms step_avg:97.85ms +step:914/1670 train_time:89430ms step_avg:97.84ms +step:915/1670 train_time:89527ms step_avg:97.84ms +step:916/1670 train_time:89625ms step_avg:97.84ms +step:917/1670 train_time:89723ms step_avg:97.84ms +step:918/1670 train_time:89820ms step_avg:97.84ms +step:919/1670 train_time:89918ms step_avg:97.84ms +step:920/1670 train_time:90015ms step_avg:97.84ms +step:921/1670 train_time:90112ms step_avg:97.84ms +step:922/1670 train_time:90209ms step_avg:97.84ms +step:923/1670 train_time:90306ms step_avg:97.84ms +step:924/1670 train_time:90403ms step_avg:97.84ms +step:925/1670 train_time:90500ms step_avg:97.84ms +step:926/1670 train_time:90597ms step_avg:97.84ms +step:927/1670 train_time:90694ms step_avg:97.84ms +step:928/1670 train_time:90791ms step_avg:97.84ms +step:929/1670 train_time:90889ms step_avg:97.84ms +step:930/1670 train_time:90987ms step_avg:97.84ms +step:931/1670 train_time:91086ms step_avg:97.84ms +step:932/1670 train_time:91185ms step_avg:97.84ms +step:933/1670 train_time:91283ms step_avg:97.84ms +step:934/1670 train_time:91380ms step_avg:97.84ms +step:935/1670 train_time:91477ms step_avg:97.84ms +step:936/1670 train_time:91574ms step_avg:97.84ms +step:937/1670 train_time:91670ms step_avg:97.83ms +step:938/1670 train_time:91767ms step_avg:97.83ms +step:939/1670 train_time:91866ms step_avg:97.83ms +step:940/1670 train_time:91963ms step_avg:97.83ms +step:941/1670 train_time:92060ms step_avg:97.83ms +step:942/1670 train_time:92158ms step_avg:97.83ms +step:943/1670 train_time:92255ms step_avg:97.83ms +step:944/1670 train_time:92351ms step_avg:97.83ms +step:945/1670 train_time:92449ms step_avg:97.83ms +step:946/1670 train_time:92548ms step_avg:97.83ms +step:947/1670 train_time:92646ms step_avg:97.83ms +step:948/1670 train_time:92743ms step_avg:97.83ms +step:949/1670 train_time:92840ms step_avg:97.83ms +step:950/1670 train_time:92937ms step_avg:97.83ms +step:951/1670 train_time:93033ms step_avg:97.83ms +step:952/1670 train_time:93132ms step_avg:97.83ms +step:953/1670 train_time:93230ms step_avg:97.83ms +step:954/1670 train_time:93327ms step_avg:97.83ms +step:955/1670 train_time:93425ms step_avg:97.83ms +step:956/1670 train_time:93523ms step_avg:97.83ms +step:957/1670 train_time:93620ms step_avg:97.83ms +step:958/1670 train_time:93717ms step_avg:97.83ms +step:959/1670 train_time:93814ms step_avg:97.82ms +step:960/1670 train_time:93911ms step_avg:97.82ms +step:961/1670 train_time:94008ms step_avg:97.82ms +step:962/1670 train_time:94107ms step_avg:97.82ms +step:963/1670 train_time:94205ms step_avg:97.82ms +step:964/1670 train_time:94303ms step_avg:97.82ms +step:965/1670 train_time:94400ms step_avg:97.82ms +step:966/1670 train_time:94497ms step_avg:97.82ms +step:967/1670 train_time:94594ms step_avg:97.82ms +step:968/1670 train_time:94692ms step_avg:97.82ms +step:969/1670 train_time:94789ms step_avg:97.82ms +step:970/1670 train_time:94886ms step_avg:97.82ms +step:971/1670 train_time:94984ms step_avg:97.82ms +step:972/1670 train_time:95082ms step_avg:97.82ms +step:973/1670 train_time:95178ms step_avg:97.82ms +step:974/1670 train_time:95275ms step_avg:97.82ms +step:975/1670 train_time:95372ms step_avg:97.82ms +step:976/1670 train_time:95470ms step_avg:97.82ms 
+step:977/1670 train_time:95568ms step_avg:97.82ms +step:978/1670 train_time:95666ms step_avg:97.82ms +step:979/1670 train_time:95763ms step_avg:97.82ms +step:980/1670 train_time:95861ms step_avg:97.82ms +step:981/1670 train_time:95958ms step_avg:97.82ms +step:982/1670 train_time:96055ms step_avg:97.82ms +step:983/1670 train_time:96153ms step_avg:97.82ms +step:984/1670 train_time:96251ms step_avg:97.82ms +step:985/1670 train_time:96349ms step_avg:97.82ms +step:986/1670 train_time:96447ms step_avg:97.82ms +step:987/1670 train_time:96545ms step_avg:97.82ms +step:988/1670 train_time:96641ms step_avg:97.82ms +step:989/1670 train_time:96739ms step_avg:97.81ms +step:990/1670 train_time:96835ms step_avg:97.81ms +step:991/1670 train_time:96933ms step_avg:97.81ms +step:992/1670 train_time:97030ms step_avg:97.81ms +step:993/1670 train_time:97128ms step_avg:97.81ms +step:994/1670 train_time:97227ms step_avg:97.81ms +step:995/1670 train_time:97324ms step_avg:97.81ms +step:996/1670 train_time:97422ms step_avg:97.81ms +step:997/1670 train_time:97519ms step_avg:97.81ms +step:998/1670 train_time:97616ms step_avg:97.81ms +step:999/1670 train_time:97713ms step_avg:97.81ms +step:1000/1670 train_time:97811ms step_avg:97.81ms +step:1000/1670 val_loss:3.4757 train_time:97908ms step_avg:97.91ms +step:1001/1670 train_time:97929ms step_avg:97.83ms +step:1002/1670 train_time:98015ms step_avg:97.82ms +step:1003/1670 train_time:98114ms step_avg:97.82ms +step:1004/1670 train_time:98213ms step_avg:97.82ms +step:1005/1670 train_time:98309ms step_avg:97.82ms +step:1006/1670 train_time:98405ms step_avg:97.82ms +step:1007/1670 train_time:98500ms step_avg:97.82ms +step:1008/1670 train_time:98597ms step_avg:97.81ms +step:1009/1670 train_time:98693ms step_avg:97.81ms +step:1010/1670 train_time:98788ms step_avg:97.81ms +step:1011/1670 train_time:98886ms step_avg:97.81ms +step:1012/1670 train_time:98986ms step_avg:97.81ms +step:1013/1670 train_time:99085ms step_avg:97.81ms +step:1014/1670 train_time:99186ms step_avg:97.82ms +step:1015/1670 train_time:99283ms step_avg:97.82ms +step:1016/1670 train_time:99381ms step_avg:97.82ms +step:1017/1670 train_time:99478ms step_avg:97.81ms +step:1018/1670 train_time:99574ms step_avg:97.81ms +step:1019/1670 train_time:99670ms step_avg:97.81ms +step:1020/1670 train_time:99767ms step_avg:97.81ms +step:1021/1670 train_time:99864ms step_avg:97.81ms +step:1022/1670 train_time:99963ms step_avg:97.81ms +step:1023/1670 train_time:100062ms step_avg:97.81ms +step:1024/1670 train_time:100162ms step_avg:97.81ms +step:1025/1670 train_time:100260ms step_avg:97.81ms +step:1026/1670 train_time:100357ms step_avg:97.81ms +step:1027/1670 train_time:100455ms step_avg:97.81ms +step:1028/1670 train_time:100553ms step_avg:97.81ms +step:1029/1670 train_time:100649ms step_avg:97.81ms +step:1030/1670 train_time:100745ms step_avg:97.81ms +step:1031/1670 train_time:100842ms step_avg:97.81ms +step:1032/1670 train_time:100941ms step_avg:97.81ms +step:1033/1670 train_time:101039ms step_avg:97.81ms +step:1034/1670 train_time:101138ms step_avg:97.81ms +step:1035/1670 train_time:101236ms step_avg:97.81ms +step:1036/1670 train_time:101334ms step_avg:97.81ms +step:1037/1670 train_time:101430ms step_avg:97.81ms +step:1038/1670 train_time:101528ms step_avg:97.81ms +step:1039/1670 train_time:101625ms step_avg:97.81ms +step:1040/1670 train_time:101722ms step_avg:97.81ms +step:1041/1670 train_time:101819ms step_avg:97.81ms +step:1042/1670 train_time:101917ms step_avg:97.81ms +step:1043/1670 train_time:102015ms step_avg:97.81ms 
+step:1044/1670 train_time:102113ms step_avg:97.81ms +step:1045/1670 train_time:102211ms step_avg:97.81ms +step:1046/1670 train_time:102308ms step_avg:97.81ms +step:1047/1670 train_time:102697ms step_avg:98.09ms +step:1048/1670 train_time:102792ms step_avg:98.08ms +step:1049/1670 train_time:102888ms step_avg:98.08ms +step:1050/1670 train_time:102984ms step_avg:98.08ms +step:1051/1670 train_time:103080ms step_avg:98.08ms +step:1052/1670 train_time:103177ms step_avg:98.08ms +step:1053/1670 train_time:103273ms step_avg:98.08ms +step:1054/1670 train_time:103369ms step_avg:98.07ms +step:1055/1670 train_time:103464ms step_avg:98.07ms +step:1056/1670 train_time:103563ms step_avg:98.07ms +step:1057/1670 train_time:103666ms step_avg:98.08ms +step:1058/1670 train_time:103765ms step_avg:98.08ms +step:1059/1670 train_time:103863ms step_avg:98.08ms +step:1060/1670 train_time:103962ms step_avg:98.08ms +step:1061/1670 train_time:104059ms step_avg:98.08ms +step:1062/1670 train_time:104340ms step_avg:98.25ms +step:1063/1670 train_time:104413ms step_avg:98.23ms +step:1064/1670 train_time:104508ms step_avg:98.22ms +step:1065/1670 train_time:104605ms step_avg:98.22ms +step:1066/1670 train_time:104701ms step_avg:98.22ms +step:1067/1670 train_time:104797ms step_avg:98.22ms +step:1068/1670 train_time:104892ms step_avg:98.21ms +step:1069/1670 train_time:104989ms step_avg:98.21ms +step:1070/1670 train_time:105085ms step_avg:98.21ms +step:1071/1670 train_time:105181ms step_avg:98.21ms +step:1072/1670 train_time:105282ms step_avg:98.21ms +step:1073/1670 train_time:105383ms step_avg:98.21ms +step:1074/1670 train_time:105483ms step_avg:98.22ms +step:1075/1670 train_time:105580ms step_avg:98.21ms +step:1076/1670 train_time:105678ms step_avg:98.21ms +step:1077/1670 train_time:105775ms step_avg:98.21ms +step:1078/1670 train_time:105871ms step_avg:98.21ms +step:1079/1670 train_time:105968ms step_avg:98.21ms +step:1080/1670 train_time:106064ms step_avg:98.21ms +step:1081/1670 train_time:106161ms step_avg:98.21ms +step:1082/1670 train_time:106261ms step_avg:98.21ms +step:1083/1670 train_time:106362ms step_avg:98.21ms +step:1084/1670 train_time:106461ms step_avg:98.21ms +step:1085/1670 train_time:106559ms step_avg:98.21ms +step:1086/1670 train_time:106656ms step_avg:98.21ms +step:1087/1670 train_time:106753ms step_avg:98.21ms +step:1088/1670 train_time:106850ms step_avg:98.21ms +step:1089/1670 train_time:106946ms step_avg:98.21ms +step:1090/1670 train_time:107042ms step_avg:98.20ms +step:1091/1670 train_time:107140ms step_avg:98.20ms +step:1092/1670 train_time:107238ms step_avg:98.20ms +step:1093/1670 train_time:107337ms step_avg:98.20ms +step:1094/1670 train_time:107436ms step_avg:98.20ms +step:1095/1670 train_time:107534ms step_avg:98.21ms +step:1096/1670 train_time:107633ms step_avg:98.21ms +step:1097/1670 train_time:107730ms step_avg:98.20ms +step:1098/1670 train_time:107826ms step_avg:98.20ms +step:1099/1670 train_time:107923ms step_avg:98.20ms +step:1100/1670 train_time:108021ms step_avg:98.20ms +step:1101/1670 train_time:108119ms step_avg:98.20ms +step:1102/1670 train_time:108216ms step_avg:98.20ms +step:1103/1670 train_time:108313ms step_avg:98.20ms +step:1104/1670 train_time:108410ms step_avg:98.20ms +step:1105/1670 train_time:108507ms step_avg:98.20ms +step:1106/1670 train_time:108605ms step_avg:98.20ms +step:1107/1670 train_time:108703ms step_avg:98.20ms +step:1108/1670 train_time:108800ms step_avg:98.20ms +step:1109/1670 train_time:108899ms step_avg:98.20ms +step:1110/1670 train_time:108996ms step_avg:98.19ms 
+step:1111/1670 train_time:109093ms step_avg:98.19ms +step:1112/1670 train_time:109190ms step_avg:98.19ms +step:1113/1670 train_time:109287ms step_avg:98.19ms +step:1114/1670 train_time:109385ms step_avg:98.19ms +step:1115/1670 train_time:109483ms step_avg:98.19ms +step:1116/1670 train_time:109583ms step_avg:98.19ms +step:1117/1670 train_time:109682ms step_avg:98.19ms +step:1118/1670 train_time:109780ms step_avg:98.19ms +step:1119/1670 train_time:109879ms step_avg:98.19ms +step:1120/1670 train_time:109978ms step_avg:98.20ms +step:1121/1670 train_time:110077ms step_avg:98.20ms +step:1122/1670 train_time:110176ms step_avg:98.20ms +step:1123/1670 train_time:110274ms step_avg:98.20ms +step:1124/1670 train_time:110372ms step_avg:98.20ms +step:1125/1670 train_time:110471ms step_avg:98.20ms +step:1125/1670 val_loss:3.4234 train_time:110568ms step_avg:98.28ms +step:1126/1670 train_time:110591ms step_avg:98.22ms +step:1127/1670 train_time:110678ms step_avg:98.21ms +step:1128/1670 train_time:110775ms step_avg:98.20ms +step:1129/1670 train_time:110871ms step_avg:98.20ms +step:1130/1670 train_time:110968ms step_avg:98.20ms +step:1131/1670 train_time:111064ms step_avg:98.20ms +step:1132/1670 train_time:111161ms step_avg:98.20ms +step:1133/1670 train_time:111259ms step_avg:98.20ms +step:1134/1670 train_time:111356ms step_avg:98.20ms +step:1135/1670 train_time:111455ms step_avg:98.20ms +step:1136/1670 train_time:111558ms step_avg:98.20ms +step:1137/1670 train_time:111660ms step_avg:98.21ms +step:1138/1670 train_time:111759ms step_avg:98.21ms +step:1139/1670 train_time:111857ms step_avg:98.21ms +step:1140/1670 train_time:111955ms step_avg:98.21ms +step:1141/1670 train_time:112052ms step_avg:98.21ms +step:1142/1670 train_time:112150ms step_avg:98.20ms +step:1143/1670 train_time:112246ms step_avg:98.20ms +step:1144/1670 train_time:112343ms step_avg:98.20ms +step:1145/1670 train_time:112441ms step_avg:98.20ms +step:1146/1670 train_time:112541ms step_avg:98.20ms +step:1147/1670 train_time:112640ms step_avg:98.20ms +step:1148/1670 train_time:112741ms step_avg:98.21ms +step:1149/1670 train_time:112839ms step_avg:98.21ms +step:1150/1670 train_time:112938ms step_avg:98.21ms +step:1151/1670 train_time:113037ms step_avg:98.21ms +step:1152/1670 train_time:113136ms step_avg:98.21ms +step:1153/1670 train_time:113234ms step_avg:98.21ms +step:1154/1670 train_time:113331ms step_avg:98.21ms +step:1155/1670 train_time:113430ms step_avg:98.21ms +step:1156/1670 train_time:113529ms step_avg:98.21ms +step:1157/1670 train_time:113628ms step_avg:98.21ms +step:1158/1670 train_time:113727ms step_avg:98.21ms +step:1159/1670 train_time:113825ms step_avg:98.21ms +step:1160/1670 train_time:113923ms step_avg:98.21ms +step:1161/1670 train_time:114022ms step_avg:98.21ms +step:1162/1670 train_time:114120ms step_avg:98.21ms +step:1163/1670 train_time:114218ms step_avg:98.21ms +step:1164/1670 train_time:114316ms step_avg:98.21ms +step:1165/1670 train_time:114414ms step_avg:98.21ms +step:1166/1670 train_time:114514ms step_avg:98.21ms +step:1167/1670 train_time:114614ms step_avg:98.21ms +step:1168/1670 train_time:114714ms step_avg:98.21ms +step:1169/1670 train_time:114812ms step_avg:98.21ms +step:1170/1670 train_time:114911ms step_avg:98.21ms +step:1171/1670 train_time:115008ms step_avg:98.21ms +step:1172/1670 train_time:115105ms step_avg:98.21ms +step:1173/1670 train_time:115203ms step_avg:98.21ms +step:1174/1670 train_time:115300ms step_avg:98.21ms +step:1175/1670 train_time:115399ms step_avg:98.21ms +step:1176/1670 train_time:115498ms 
step_avg:98.21ms +step:1177/1670 train_time:115596ms step_avg:98.21ms +step:1178/1670 train_time:115696ms step_avg:98.21ms +step:1179/1670 train_time:115795ms step_avg:98.21ms +step:1180/1670 train_time:115894ms step_avg:98.22ms +step:1181/1670 train_time:115993ms step_avg:98.22ms +step:1182/1670 train_time:116093ms step_avg:98.22ms +step:1183/1670 train_time:116190ms step_avg:98.22ms +step:1184/1670 train_time:116287ms step_avg:98.22ms +step:1185/1670 train_time:116385ms step_avg:98.21ms +step:1186/1670 train_time:116482ms step_avg:98.21ms +step:1187/1670 train_time:116581ms step_avg:98.21ms +step:1188/1670 train_time:116680ms step_avg:98.22ms +step:1189/1670 train_time:116779ms step_avg:98.22ms +step:1190/1670 train_time:116878ms step_avg:98.22ms +step:1191/1670 train_time:116977ms step_avg:98.22ms +step:1192/1670 train_time:117077ms step_avg:98.22ms +step:1193/1670 train_time:117177ms step_avg:98.22ms +step:1194/1670 train_time:117275ms step_avg:98.22ms +step:1195/1670 train_time:117374ms step_avg:98.22ms +step:1196/1670 train_time:117472ms step_avg:98.22ms +step:1197/1670 train_time:117570ms step_avg:98.22ms +step:1198/1670 train_time:117668ms step_avg:98.22ms +step:1199/1670 train_time:117764ms step_avg:98.22ms +step:1200/1670 train_time:117861ms step_avg:98.22ms +step:1201/1670 train_time:117960ms step_avg:98.22ms +step:1202/1670 train_time:118059ms step_avg:98.22ms +step:1203/1670 train_time:118159ms step_avg:98.22ms +step:1204/1670 train_time:118256ms step_avg:98.22ms +step:1205/1670 train_time:118356ms step_avg:98.22ms +step:1206/1670 train_time:118454ms step_avg:98.22ms +step:1207/1670 train_time:118553ms step_avg:98.22ms +step:1208/1670 train_time:118651ms step_avg:98.22ms +step:1209/1670 train_time:118750ms step_avg:98.22ms +step:1210/1670 train_time:118848ms step_avg:98.22ms +step:1211/1670 train_time:118946ms step_avg:98.22ms +step:1212/1670 train_time:119044ms step_avg:98.22ms +step:1213/1670 train_time:119141ms step_avg:98.22ms +step:1214/1670 train_time:119239ms step_avg:98.22ms +step:1215/1670 train_time:119339ms step_avg:98.22ms +step:1216/1670 train_time:119439ms step_avg:98.22ms +step:1217/1670 train_time:119538ms step_avg:98.22ms +step:1218/1670 train_time:119638ms step_avg:98.23ms +step:1219/1670 train_time:119736ms step_avg:98.23ms +step:1220/1670 train_time:119836ms step_avg:98.23ms +step:1221/1670 train_time:119936ms step_avg:98.23ms +step:1222/1670 train_time:120036ms step_avg:98.23ms +step:1223/1670 train_time:120135ms step_avg:98.23ms +step:1224/1670 train_time:120233ms step_avg:98.23ms +step:1225/1670 train_time:120331ms step_avg:98.23ms +step:1226/1670 train_time:120429ms step_avg:98.23ms +step:1227/1670 train_time:120527ms step_avg:98.23ms +step:1228/1670 train_time:120624ms step_avg:98.23ms +step:1229/1670 train_time:120722ms step_avg:98.23ms +step:1230/1670 train_time:120820ms step_avg:98.23ms +step:1231/1670 train_time:120919ms step_avg:98.23ms +step:1232/1670 train_time:121018ms step_avg:98.23ms +step:1233/1670 train_time:121117ms step_avg:98.23ms +step:1234/1670 train_time:121215ms step_avg:98.23ms +step:1235/1670 train_time:121313ms step_avg:98.23ms +step:1236/1670 train_time:121412ms step_avg:98.23ms +step:1237/1670 train_time:121510ms step_avg:98.23ms +step:1238/1670 train_time:121608ms step_avg:98.23ms +step:1239/1670 train_time:121707ms step_avg:98.23ms +step:1240/1670 train_time:121804ms step_avg:98.23ms +step:1241/1670 train_time:121902ms step_avg:98.23ms +step:1242/1670 train_time:121999ms step_avg:98.23ms +step:1243/1670 train_time:122097ms 
step_avg:98.23ms +step:1244/1670 train_time:122196ms step_avg:98.23ms +step:1245/1670 train_time:122295ms step_avg:98.23ms +step:1246/1670 train_time:122393ms step_avg:98.23ms +step:1247/1670 train_time:122492ms step_avg:98.23ms +step:1248/1670 train_time:122590ms step_avg:98.23ms +step:1249/1670 train_time:122688ms step_avg:98.23ms +step:1250/1670 train_time:122785ms step_avg:98.23ms +step:1250/1670 val_loss:3.3809 train_time:122882ms step_avg:98.31ms +step:1251/1670 train_time:122905ms step_avg:98.25ms +step:1252/1670 train_time:122989ms step_avg:98.23ms +step:1253/1670 train_time:123088ms step_avg:98.23ms +step:1254/1670 train_time:123185ms step_avg:98.23ms +step:1255/1670 train_time:123283ms step_avg:98.23ms +step:1256/1670 train_time:123380ms step_avg:98.23ms +step:1257/1670 train_time:123477ms step_avg:98.23ms +step:1258/1670 train_time:123574ms step_avg:98.23ms +step:1259/1670 train_time:123671ms step_avg:98.23ms +step:1260/1670 train_time:123767ms step_avg:98.23ms +step:1261/1670 train_time:123866ms step_avg:98.23ms +step:1262/1670 train_time:123967ms step_avg:98.23ms +step:1263/1670 train_time:124066ms step_avg:98.23ms +step:1264/1670 train_time:124165ms step_avg:98.23ms +step:1265/1670 train_time:124263ms step_avg:98.23ms +step:1266/1670 train_time:124361ms step_avg:98.23ms +step:1267/1670 train_time:124459ms step_avg:98.23ms +step:1268/1670 train_time:124556ms step_avg:98.23ms +step:1269/1670 train_time:124653ms step_avg:98.23ms +step:1270/1670 train_time:124750ms step_avg:98.23ms +step:1271/1670 train_time:124848ms step_avg:98.23ms +step:1272/1670 train_time:124946ms step_avg:98.23ms +step:1273/1670 train_time:125045ms step_avg:98.23ms +step:1274/1670 train_time:125323ms step_avg:98.37ms +step:1275/1670 train_time:125511ms step_avg:98.44ms +step:1276/1670 train_time:125607ms step_avg:98.44ms +step:1277/1670 train_time:125704ms step_avg:98.44ms +step:1278/1670 train_time:125801ms step_avg:98.44ms +step:1279/1670 train_time:125898ms step_avg:98.43ms +step:1280/1670 train_time:125995ms step_avg:98.43ms +step:1281/1670 train_time:126091ms step_avg:98.43ms +step:1282/1670 train_time:126188ms step_avg:98.43ms +step:1283/1670 train_time:126284ms step_avg:98.43ms +step:1284/1670 train_time:126384ms step_avg:98.43ms +step:1285/1670 train_time:126488ms step_avg:98.43ms +step:1286/1670 train_time:126587ms step_avg:98.43ms +step:1287/1670 train_time:126685ms step_avg:98.43ms +step:1288/1670 train_time:126782ms step_avg:98.43ms +step:1289/1670 train_time:126880ms step_avg:98.43ms +step:1290/1670 train_time:126977ms step_avg:98.43ms +step:1291/1670 train_time:127075ms step_avg:98.43ms +step:1292/1670 train_time:127173ms step_avg:98.43ms +step:1293/1670 train_time:127269ms step_avg:98.43ms +step:1294/1670 train_time:127367ms step_avg:98.43ms +step:1295/1670 train_time:127466ms step_avg:98.43ms +step:1296/1670 train_time:127566ms step_avg:98.43ms +step:1297/1670 train_time:127664ms step_avg:98.43ms +step:1298/1670 train_time:127764ms step_avg:98.43ms +step:1299/1670 train_time:127862ms step_avg:98.43ms +step:1300/1670 train_time:127959ms step_avg:98.43ms +step:1301/1670 train_time:128058ms step_avg:98.43ms +step:1302/1670 train_time:128155ms step_avg:98.43ms +step:1303/1670 train_time:128252ms step_avg:98.43ms +step:1304/1670 train_time:128351ms step_avg:98.43ms +step:1305/1670 train_time:128450ms step_avg:98.43ms +step:1306/1670 train_time:128549ms step_avg:98.43ms +step:1307/1670 train_time:128647ms step_avg:98.43ms +step:1308/1670 train_time:128745ms step_avg:98.43ms +step:1309/1670 
train_time:128843ms step_avg:98.43ms +step:1310/1670 train_time:128940ms step_avg:98.43ms +step:1311/1670 train_time:129039ms step_avg:98.43ms +step:1312/1670 train_time:129137ms step_avg:98.43ms +step:1313/1670 train_time:129234ms step_avg:98.43ms +step:1314/1670 train_time:129332ms step_avg:98.43ms +step:1315/1670 train_time:129430ms step_avg:98.43ms +step:1316/1670 train_time:129528ms step_avg:98.43ms +step:1317/1670 train_time:129626ms step_avg:98.43ms +step:1318/1670 train_time:129725ms step_avg:98.43ms +step:1319/1670 train_time:129822ms step_avg:98.42ms +step:1320/1670 train_time:129921ms step_avg:98.43ms +step:1321/1670 train_time:130019ms step_avg:98.42ms +step:1322/1670 train_time:130116ms step_avg:98.42ms +step:1323/1670 train_time:130214ms step_avg:98.42ms +step:1324/1670 train_time:130313ms step_avg:98.42ms +step:1325/1670 train_time:130411ms step_avg:98.42ms +step:1326/1670 train_time:130509ms step_avg:98.42ms +step:1327/1670 train_time:130607ms step_avg:98.42ms +step:1328/1670 train_time:130705ms step_avg:98.42ms +step:1329/1670 train_time:130803ms step_avg:98.42ms +step:1330/1670 train_time:130901ms step_avg:98.42ms +step:1331/1670 train_time:130999ms step_avg:98.42ms +step:1332/1670 train_time:131098ms step_avg:98.42ms +step:1333/1670 train_time:131196ms step_avg:98.42ms +step:1334/1670 train_time:131294ms step_avg:98.42ms +step:1335/1670 train_time:131393ms step_avg:98.42ms +step:1336/1670 train_time:131494ms step_avg:98.42ms +step:1337/1670 train_time:131593ms step_avg:98.42ms +step:1338/1670 train_time:131692ms step_avg:98.42ms +step:1339/1670 train_time:131792ms step_avg:98.43ms +step:1340/1670 train_time:131891ms step_avg:98.43ms +step:1341/1670 train_time:131990ms step_avg:98.43ms +step:1342/1670 train_time:132089ms step_avg:98.43ms +step:1343/1670 train_time:132186ms step_avg:98.43ms +step:1344/1670 train_time:132284ms step_avg:98.43ms +step:1345/1670 train_time:132382ms step_avg:98.43ms +step:1346/1670 train_time:132482ms step_avg:98.43ms +step:1347/1670 train_time:132581ms step_avg:98.43ms +step:1348/1670 train_time:132679ms step_avg:98.43ms +step:1349/1670 train_time:132779ms step_avg:98.43ms +step:1350/1670 train_time:132880ms step_avg:98.43ms +step:1351/1670 train_time:132981ms step_avg:98.43ms +step:1352/1670 train_time:133080ms step_avg:98.43ms +step:1353/1670 train_time:133179ms step_avg:98.43ms +step:1354/1670 train_time:133277ms step_avg:98.43ms +step:1355/1670 train_time:133375ms step_avg:98.43ms +step:1356/1670 train_time:133473ms step_avg:98.43ms +step:1357/1670 train_time:133572ms step_avg:98.43ms +step:1358/1670 train_time:133669ms step_avg:98.43ms +step:1359/1670 train_time:133766ms step_avg:98.43ms +step:1360/1670 train_time:133864ms step_avg:98.43ms +step:1361/1670 train_time:133962ms step_avg:98.43ms +step:1362/1670 train_time:134060ms step_avg:98.43ms +step:1363/1670 train_time:134159ms step_avg:98.43ms +step:1364/1670 train_time:134257ms step_avg:98.43ms +step:1365/1670 train_time:134354ms step_avg:98.43ms +step:1366/1670 train_time:134452ms step_avg:98.43ms +step:1367/1670 train_time:134550ms step_avg:98.43ms +step:1368/1670 train_time:134648ms step_avg:98.43ms +step:1369/1670 train_time:134747ms step_avg:98.43ms +step:1370/1670 train_time:134845ms step_avg:98.43ms +step:1371/1670 train_time:134943ms step_avg:98.43ms +step:1372/1670 train_time:135042ms step_avg:98.43ms +step:1373/1670 train_time:135140ms step_avg:98.43ms +step:1374/1670 train_time:135238ms step_avg:98.43ms +step:1375/1670 train_time:135337ms step_avg:98.43ms +step:1375/1670 
val_loss:3.3427 train_time:135436ms step_avg:98.50ms +step:1376/1670 train_time:135458ms step_avg:98.44ms +step:1377/1670 train_time:135542ms step_avg:98.43ms +step:1378/1670 train_time:135644ms step_avg:98.44ms +step:1379/1670 train_time:135744ms step_avg:98.44ms +step:1380/1670 train_time:135842ms step_avg:98.44ms +step:1381/1670 train_time:135939ms step_avg:98.44ms +step:1382/1670 train_time:136035ms step_avg:98.43ms +step:1383/1670 train_time:136132ms step_avg:98.43ms +step:1384/1670 train_time:136229ms step_avg:98.43ms +step:1385/1670 train_time:136327ms step_avg:98.43ms +step:1386/1670 train_time:136426ms step_avg:98.43ms +step:1387/1670 train_time:136527ms step_avg:98.43ms +step:1388/1670 train_time:136629ms step_avg:98.44ms +step:1389/1670 train_time:136729ms step_avg:98.44ms +step:1390/1670 train_time:136827ms step_avg:98.44ms +step:1391/1670 train_time:136926ms step_avg:98.44ms +step:1392/1670 train_time:137023ms step_avg:98.44ms +step:1393/1670 train_time:137120ms step_avg:98.44ms +step:1394/1670 train_time:137217ms step_avg:98.43ms +step:1395/1670 train_time:137314ms step_avg:98.43ms +step:1396/1670 train_time:137411ms step_avg:98.43ms +step:1397/1670 train_time:137510ms step_avg:98.43ms +step:1398/1670 train_time:137610ms step_avg:98.43ms +step:1399/1670 train_time:137709ms step_avg:98.43ms +step:1400/1670 train_time:137810ms step_avg:98.44ms +step:1401/1670 train_time:137908ms step_avg:98.44ms +step:1402/1670 train_time:138006ms step_avg:98.44ms +step:1403/1670 train_time:138104ms step_avg:98.43ms +step:1404/1670 train_time:138202ms step_avg:98.43ms +step:1405/1670 train_time:138301ms step_avg:98.44ms +step:1406/1670 train_time:138400ms step_avg:98.44ms +step:1407/1670 train_time:138498ms step_avg:98.44ms +step:1408/1670 train_time:138597ms step_avg:98.44ms +step:1409/1670 train_time:138694ms step_avg:98.43ms +step:1410/1670 train_time:138793ms step_avg:98.43ms +step:1411/1670 train_time:138891ms step_avg:98.43ms +step:1412/1670 train_time:138989ms step_avg:98.43ms +step:1413/1670 train_time:139087ms step_avg:98.43ms +step:1414/1670 train_time:139185ms step_avg:98.43ms +step:1415/1670 train_time:139283ms step_avg:98.43ms +step:1416/1670 train_time:139382ms step_avg:98.43ms +step:1417/1670 train_time:139482ms step_avg:98.43ms +step:1418/1670 train_time:139580ms step_avg:98.43ms +step:1419/1670 train_time:139678ms step_avg:98.43ms +step:1420/1670 train_time:139776ms step_avg:98.43ms +step:1421/1670 train_time:139873ms step_avg:98.43ms +step:1422/1670 train_time:139971ms step_avg:98.43ms +step:1423/1670 train_time:140069ms step_avg:98.43ms +step:1424/1670 train_time:140167ms step_avg:98.43ms +step:1425/1670 train_time:140265ms step_avg:98.43ms +step:1426/1670 train_time:140364ms step_avg:98.43ms +step:1427/1670 train_time:140465ms step_avg:98.43ms +step:1428/1670 train_time:140565ms step_avg:98.43ms +step:1429/1670 train_time:140665ms step_avg:98.44ms +step:1430/1670 train_time:140764ms step_avg:98.44ms +step:1431/1670 train_time:140864ms step_avg:98.44ms +step:1432/1670 train_time:140964ms step_avg:98.44ms +step:1433/1670 train_time:141063ms step_avg:98.44ms +step:1434/1670 train_time:141160ms step_avg:98.44ms +step:1435/1670 train_time:141258ms step_avg:98.44ms +step:1436/1670 train_time:141355ms step_avg:98.44ms +step:1437/1670 train_time:141453ms step_avg:98.44ms +step:1438/1670 train_time:141552ms step_avg:98.44ms +step:1439/1670 train_time:141653ms step_avg:98.44ms +step:1440/1670 train_time:141750ms step_avg:98.44ms +step:1441/1670 train_time:141849ms step_avg:98.44ms 
+step:1442/1670 train_time:141948ms step_avg:98.44ms +step:1443/1670 train_time:142047ms step_avg:98.44ms +step:1444/1670 train_time:142146ms step_avg:98.44ms +step:1445/1670 train_time:142245ms step_avg:98.44ms +step:1446/1670 train_time:142343ms step_avg:98.44ms +step:1447/1670 train_time:142442ms step_avg:98.44ms +step:1448/1670 train_time:142541ms step_avg:98.44ms +step:1449/1670 train_time:142640ms step_avg:98.44ms +step:1450/1670 train_time:142739ms step_avg:98.44ms +step:1451/1670 train_time:142837ms step_avg:98.44ms +step:1452/1670 train_time:142934ms step_avg:98.44ms +step:1453/1670 train_time:143031ms step_avg:98.44ms +step:1454/1670 train_time:143129ms step_avg:98.44ms +step:1455/1670 train_time:143228ms step_avg:98.44ms +step:1456/1670 train_time:143327ms step_avg:98.44ms +step:1457/1670 train_time:143426ms step_avg:98.44ms +step:1458/1670 train_time:143524ms step_avg:98.44ms +step:1459/1670 train_time:143623ms step_avg:98.44ms +step:1460/1670 train_time:143722ms step_avg:98.44ms +step:1461/1670 train_time:143821ms step_avg:98.44ms +step:1462/1670 train_time:143920ms step_avg:98.44ms +step:1463/1670 train_time:144018ms step_avg:98.44ms +step:1464/1670 train_time:144115ms step_avg:98.44ms +step:1465/1670 train_time:144213ms step_avg:98.44ms +step:1466/1670 train_time:144310ms step_avg:98.44ms +step:1467/1670 train_time:144409ms step_avg:98.44ms +step:1468/1670 train_time:144508ms step_avg:98.44ms +step:1469/1670 train_time:144608ms step_avg:98.44ms +step:1470/1670 train_time:144707ms step_avg:98.44ms +step:1471/1670 train_time:144807ms step_avg:98.44ms +step:1472/1670 train_time:144907ms step_avg:98.44ms +step:1473/1670 train_time:145007ms step_avg:98.44ms +step:1474/1670 train_time:145106ms step_avg:98.44ms +step:1475/1670 train_time:145204ms step_avg:98.44ms +step:1476/1670 train_time:145302ms step_avg:98.44ms +step:1477/1670 train_time:145399ms step_avg:98.44ms +step:1478/1670 train_time:145497ms step_avg:98.44ms +step:1479/1670 train_time:145595ms step_avg:98.44ms +step:1480/1670 train_time:145693ms step_avg:98.44ms +step:1481/1670 train_time:145792ms step_avg:98.44ms +step:1482/1670 train_time:145892ms step_avg:98.44ms +step:1483/1670 train_time:145992ms step_avg:98.44ms +step:1484/1670 train_time:146092ms step_avg:98.44ms +step:1485/1670 train_time:146373ms step_avg:98.57ms +step:1486/1670 train_time:146575ms step_avg:98.64ms +step:1487/1670 train_time:146670ms step_avg:98.63ms +step:1488/1670 train_time:146766ms step_avg:98.63ms +step:1489/1670 train_time:146863ms step_avg:98.63ms +step:1490/1670 train_time:146960ms step_avg:98.63ms +step:1491/1670 train_time:147056ms step_avg:98.63ms +step:1492/1670 train_time:147153ms step_avg:98.63ms +step:1493/1670 train_time:147250ms step_avg:98.63ms +step:1494/1670 train_time:147348ms step_avg:98.63ms +step:1495/1670 train_time:147452ms step_avg:98.63ms +step:1496/1670 train_time:147554ms step_avg:98.63ms +step:1497/1670 train_time:147654ms step_avg:98.63ms +step:1498/1670 train_time:147753ms step_avg:98.63ms +step:1499/1670 train_time:147851ms step_avg:98.63ms +step:1500/1670 train_time:147949ms step_avg:98.63ms +step:1500/1670 val_loss:3.3106 train_time:148046ms step_avg:98.70ms +step:1501/1670 train_time:148068ms step_avg:98.65ms +step:1502/1670 train_time:148151ms step_avg:98.64ms +step:1503/1670 train_time:148256ms step_avg:98.64ms +step:1504/1670 train_time:148354ms step_avg:98.64ms +step:1505/1670 train_time:148451ms step_avg:98.64ms +step:1506/1670 train_time:148549ms step_avg:98.64ms +step:1507/1670 train_time:148646ms 
step_avg:98.64ms +step:1508/1670 train_time:148743ms step_avg:98.64ms +step:1509/1670 train_time:148839ms step_avg:98.63ms +step:1510/1670 train_time:148937ms step_avg:98.63ms +step:1511/1670 train_time:149036ms step_avg:98.63ms +step:1512/1670 train_time:149138ms step_avg:98.64ms +step:1513/1670 train_time:149237ms step_avg:98.64ms +step:1514/1670 train_time:149336ms step_avg:98.64ms +step:1515/1670 train_time:149433ms step_avg:98.64ms +step:1516/1670 train_time:149530ms step_avg:98.63ms +step:1517/1670 train_time:149628ms step_avg:98.63ms +step:1518/1670 train_time:149725ms step_avg:98.63ms +step:1519/1670 train_time:149823ms step_avg:98.63ms +step:1520/1670 train_time:149920ms step_avg:98.63ms +step:1521/1670 train_time:150019ms step_avg:98.63ms +step:1522/1670 train_time:150117ms step_avg:98.63ms +step:1523/1670 train_time:150217ms step_avg:98.63ms +step:1524/1670 train_time:150315ms step_avg:98.63ms +step:1525/1670 train_time:150413ms step_avg:98.63ms +step:1526/1670 train_time:150510ms step_avg:98.63ms +step:1527/1670 train_time:150608ms step_avg:98.63ms +step:1528/1670 train_time:150706ms step_avg:98.63ms +step:1529/1670 train_time:150804ms step_avg:98.63ms +step:1530/1670 train_time:150902ms step_avg:98.63ms +step:1531/1670 train_time:151000ms step_avg:98.63ms +step:1532/1670 train_time:151099ms step_avg:98.63ms +step:1533/1670 train_time:151198ms step_avg:98.63ms +step:1534/1670 train_time:151296ms step_avg:98.63ms +step:1535/1670 train_time:151394ms step_avg:98.63ms +step:1536/1670 train_time:151492ms step_avg:98.63ms +step:1537/1670 train_time:151590ms step_avg:98.63ms +step:1538/1670 train_time:151688ms step_avg:98.63ms +step:1539/1670 train_time:151786ms step_avg:98.63ms +step:1540/1670 train_time:151884ms step_avg:98.63ms +step:1541/1670 train_time:151982ms step_avg:98.63ms +step:1542/1670 train_time:152081ms step_avg:98.63ms +step:1543/1670 train_time:152181ms step_avg:98.63ms +step:1544/1670 train_time:152280ms step_avg:98.63ms +step:1545/1670 train_time:152379ms step_avg:98.63ms +step:1546/1670 train_time:152476ms step_avg:98.63ms +step:1547/1670 train_time:152574ms step_avg:98.63ms +step:1548/1670 train_time:152671ms step_avg:98.62ms +step:1549/1670 train_time:152769ms step_avg:98.62ms +step:1550/1670 train_time:152867ms step_avg:98.62ms +step:1551/1670 train_time:152966ms step_avg:98.62ms +step:1552/1670 train_time:153066ms step_avg:98.63ms +step:1553/1670 train_time:153166ms step_avg:98.63ms +step:1554/1670 train_time:153266ms step_avg:98.63ms +step:1555/1670 train_time:153366ms step_avg:98.63ms +step:1556/1670 train_time:153466ms step_avg:98.63ms +step:1557/1670 train_time:153565ms step_avg:98.63ms +step:1558/1670 train_time:153663ms step_avg:98.63ms +step:1559/1670 train_time:153761ms step_avg:98.63ms +step:1560/1670 train_time:153858ms step_avg:98.63ms +step:1561/1670 train_time:153955ms step_avg:98.63ms +step:1562/1670 train_time:154054ms step_avg:98.63ms +step:1563/1670 train_time:154153ms step_avg:98.63ms +step:1564/1670 train_time:154252ms step_avg:98.63ms +step:1565/1670 train_time:154351ms step_avg:98.63ms +step:1566/1670 train_time:154450ms step_avg:98.63ms +step:1567/1670 train_time:154549ms step_avg:98.63ms +step:1568/1670 train_time:154647ms step_avg:98.63ms +step:1569/1670 train_time:154746ms step_avg:98.63ms +step:1570/1670 train_time:154845ms step_avg:98.63ms +step:1571/1670 train_time:154944ms step_avg:98.63ms +step:1572/1670 train_time:155044ms step_avg:98.63ms +step:1573/1670 train_time:155141ms step_avg:98.63ms +step:1574/1670 train_time:155240ms 
step_avg:98.63ms +step:1575/1670 train_time:155338ms step_avg:98.63ms +step:1576/1670 train_time:155435ms step_avg:98.63ms +step:1577/1670 train_time:155533ms step_avg:98.63ms +step:1578/1670 train_time:155631ms step_avg:98.63ms +step:1579/1670 train_time:155731ms step_avg:98.63ms +step:1580/1670 train_time:155830ms step_avg:98.63ms +step:1581/1670 train_time:155929ms step_avg:98.63ms +step:1582/1670 train_time:156027ms step_avg:98.63ms +step:1583/1670 train_time:156127ms step_avg:98.63ms +step:1584/1670 train_time:156227ms step_avg:98.63ms +step:1585/1670 train_time:156325ms step_avg:98.63ms +step:1586/1670 train_time:156425ms step_avg:98.63ms +step:1587/1670 train_time:156523ms step_avg:98.63ms +step:1588/1670 train_time:156622ms step_avg:98.63ms +step:1589/1670 train_time:156719ms step_avg:98.63ms +step:1590/1670 train_time:156816ms step_avg:98.63ms +step:1591/1670 train_time:156914ms step_avg:98.63ms +step:1592/1670 train_time:157013ms step_avg:98.63ms +step:1593/1670 train_time:157112ms step_avg:98.63ms +step:1594/1670 train_time:157211ms step_avg:98.63ms +step:1595/1670 train_time:157310ms step_avg:98.63ms +step:1596/1670 train_time:157410ms step_avg:98.63ms +step:1597/1670 train_time:157508ms step_avg:98.63ms +step:1598/1670 train_time:157608ms step_avg:98.63ms +step:1599/1670 train_time:157708ms step_avg:98.63ms +step:1600/1670 train_time:157807ms step_avg:98.63ms +step:1601/1670 train_time:157905ms step_avg:98.63ms +step:1602/1670 train_time:158003ms step_avg:98.63ms +step:1603/1670 train_time:158101ms step_avg:98.63ms +step:1604/1670 train_time:158199ms step_avg:98.63ms +step:1605/1670 train_time:158296ms step_avg:98.63ms +step:1606/1670 train_time:158395ms step_avg:98.63ms +step:1607/1670 train_time:158494ms step_avg:98.63ms +step:1608/1670 train_time:158593ms step_avg:98.63ms +step:1609/1670 train_time:158691ms step_avg:98.63ms +step:1610/1670 train_time:158790ms step_avg:98.63ms +step:1611/1670 train_time:158890ms step_avg:98.63ms +step:1612/1670 train_time:158989ms step_avg:98.63ms +step:1613/1670 train_time:159088ms step_avg:98.63ms +step:1614/1670 train_time:159186ms step_avg:98.63ms +step:1615/1670 train_time:159285ms step_avg:98.63ms +step:1616/1670 train_time:159384ms step_avg:98.63ms +step:1617/1670 train_time:159484ms step_avg:98.63ms +step:1618/1670 train_time:159583ms step_avg:98.63ms +step:1619/1670 train_time:159681ms step_avg:98.63ms +step:1620/1670 train_time:159779ms step_avg:98.63ms +step:1621/1670 train_time:159877ms step_avg:98.63ms +step:1622/1670 train_time:159974ms step_avg:98.63ms +step:1623/1670 train_time:160074ms step_avg:98.63ms +step:1624/1670 train_time:160173ms step_avg:98.63ms +step:1625/1670 train_time:160273ms step_avg:98.63ms +step:1625/1670 val_loss:3.2843 train_time:160371ms step_avg:98.69ms +step:1626/1670 train_time:160393ms step_avg:98.64ms +step:1627/1670 train_time:160478ms step_avg:98.63ms +step:1628/1670 train_time:160582ms step_avg:98.64ms +step:1629/1670 train_time:160679ms step_avg:98.64ms +step:1630/1670 train_time:160778ms step_avg:98.64ms +step:1631/1670 train_time:160875ms step_avg:98.64ms +step:1632/1670 train_time:160972ms step_avg:98.64ms +step:1633/1670 train_time:161069ms step_avg:98.63ms +step:1634/1670 train_time:161166ms step_avg:98.63ms +step:1635/1670 train_time:161263ms step_avg:98.63ms +step:1636/1670 train_time:161361ms step_avg:98.63ms +step:1637/1670 train_time:161461ms step_avg:98.63ms +step:1638/1670 train_time:161562ms step_avg:98.63ms +step:1639/1670 train_time:161661ms step_avg:98.63ms +step:1640/1670 
train_time:161760ms step_avg:98.63ms +step:1641/1670 train_time:161858ms step_avg:98.63ms +step:1642/1670 train_time:161956ms step_avg:98.63ms +step:1643/1670 train_time:162054ms step_avg:98.63ms +step:1644/1670 train_time:162152ms step_avg:98.63ms +step:1645/1670 train_time:162250ms step_avg:98.63ms +step:1646/1670 train_time:162348ms step_avg:98.63ms +step:1647/1670 train_time:162447ms step_avg:98.63ms +step:1648/1670 train_time:162548ms step_avg:98.63ms +step:1649/1670 train_time:162648ms step_avg:98.63ms +step:1650/1670 train_time:162747ms step_avg:98.63ms +step:1651/1670 train_time:162846ms step_avg:98.63ms +step:1652/1670 train_time:162944ms step_avg:98.63ms +step:1653/1670 train_time:163040ms step_avg:98.63ms +step:1654/1670 train_time:163137ms step_avg:98.63ms +step:1655/1670 train_time:163235ms step_avg:98.63ms +step:1656/1670 train_time:163333ms step_avg:98.63ms +step:1657/1670 train_time:163433ms step_avg:98.63ms +step:1658/1670 train_time:163533ms step_avg:98.63ms +step:1659/1670 train_time:163634ms step_avg:98.63ms +step:1660/1670 train_time:163734ms step_avg:98.64ms +step:1661/1670 train_time:163835ms step_avg:98.64ms +step:1662/1670 train_time:163933ms step_avg:98.64ms +step:1663/1670 train_time:164032ms step_avg:98.64ms +step:1664/1670 train_time:164131ms step_avg:98.64ms +step:1665/1670 train_time:164228ms step_avg:98.64ms +step:1666/1670 train_time:164325ms step_avg:98.63ms +step:1667/1670 train_time:164423ms step_avg:98.63ms +step:1668/1670 train_time:164520ms step_avg:98.63ms +step:1669/1670 train_time:164619ms step_avg:98.63ms +step:1670/1670 train_time:164718ms step_avg:98.63ms +step:1670/1670 val_loss:3.2764 train_time:164817ms step_avg:98.69ms +peak memory allocated: 34000 MiB reserved: 49056 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_9a9ac5ac-514a-43e0-ab92-1319bf013a3b.txt b/records/090525_SkipMLPBlocks/comparison_9a9ac5ac-514a-43e0-ab92-1319bf013a3b.txt new file mode 100644 index 000000000..784030ba9 --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_9a9ac5ac-514a-43e0-ab92-1319bf013a3b.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, 
dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + 
c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels 
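+ # Both kernels exploit the same symmetry: A is symmetric here (it holds
+ # X @ X.mT produced by ns_line_1), so C = alpha * A @ A.mT + beta * A is
+ # symmetric as well. Blocks strictly on one side of the diagonal (selected
+ # by LOWER_UPPER) are skipped, and each computed block is stored twice,
+ # once at (m, n) and once transposed at (n, m).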
+ pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
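+ # Sharding scheme: within each same-shape parameter group, parameter
+ # base_i + rank is "owned" by this rank. Gradients are reduce-scattered
+ # (averaged) onto their owners, each owner applies weight decay,
+ # Nesterov-style momentum, and Newton-Schulz orthogonalization locally,
+ # and the updated parameters are all-gathered back to every rank. Both
+ # collectives are issued async so communication overlaps with compute.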
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of index {idx}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning of Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # last document was too long to account for _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * new_grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens // new_grad_accum_steps
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
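+
+# Sketch of how the generator above is driven (the loops below only use the next()
+# path; the .send() path exists for changing the batch shape mid-run):
+#   loader = distributed_data_generator(args.train_files, num_tokens, max_seq_len)
+#   inputs, targets, cum_seqlens = next(loader)                       # one micro-batch
+#   loader.send((new_num_tokens, new_max_seq_len, new_accum_steps))   # reconfigures and also yields a batch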
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable, then a linear cooldown from 1.0x down to 0.1x
+def get_lr(step: int):
+    x = step / args.num_iterations
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    return lr
+
+# attention window schedule: widen the block-mask window as training progresses
+def get_ws(step: int):
+    x = step / (1 + args.num_iterations)
+    assert 0 <= x < 1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return args.ws_schedule[ws_idx]
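+
+# Worked values for the two schedules above (derived from num_iterations=1670,
+# cooldown_frac=0.45, ws_schedule=(3, 7, 11); illustrative only):
+#   get_lr: steps 0..918 return 1.0, then the multiplier decays linearly,
+#           e.g. get_lr(1294) ~= 0.55 and get_lr(1669) ~= 0.10.
+#   get_ws: each third of training bumps the window, 3 -> 7 -> 11 blocks,
+#           i.e. long block masks of 384 -> 896 -> 1408 tokens at block_size=128.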
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 30
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+for step in range(warmup_steps):
+    inputs, targets, cum_seqlens = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, cum_seqlens, ws).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+#       Training and validation        #
+########################################
+
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+training_time_ms = 0
+# start the clock
+torch.cuda.synchronize()
+t0 = time.perf_counter()
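+
+# Per-step token accounting (derived from the hyperparameters above for the
+# 8-process run logged below; illustrative arithmetic): each optimizer step
+# consumes train_batch_size = 2048*24*8 = 393,216 tokens split across
+# grad_accum_steps micro-batches; with world_size=8 (grad_accum_steps=1) each
+# GPU sees 49,152 tokens per micro-batch, and each validation batch is
+# 4*64*1024*8 = 2,097,152 tokens.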
+# begin training
+train_steps = args.num_iterations
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+    ws = get_ws(step)
+
+    # --------------- VALIDATION SECTION -----------------
+    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        # stop the clock
+        torch.cuda.synchronize()
+        training_time_ms += 1000 * (time.perf_counter() - t0)
+        model.eval()
+        assert args.val_tokens % args.val_batch_size == 0
+        val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size
+        val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False)
+        val_loss = 0
+        with torch.no_grad():
+            for _ in range(val_steps):
+                inputs, targets, cum_seqlens = next(val_loader)
+                val_loss += model(inputs, targets, cum_seqlens, ws)
+        val_loss /= val_steps
+        del val_loader
+        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
+        print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True)
+        model.train()
+        # start the clock again
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    if last_step:
+        if master_process and args.save_checkpoint:
+            log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
+            os.makedirs(f"logs/{run_id}", exist_ok=True)
+            torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt")
+        # the last step only has the validation loop, so break to avoid training
+        break
+
+    # --------------- TRAINING SECTION -----------------
+    for _ in range(grad_accum_steps):
+        inputs, targets, cum_seqlens = next(train_loader)
+        model(inputs, targets, cum_seqlens, ws).backward()
+    # set optimization hyperparameters
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * get_lr(step)
+    for group in optimizer2.param_groups:
+        frac = min(step / 300, 1) # momentum warmup for muon: ramp 0.85 -> 0.95 over the first 300 steps
+        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+    # step the optimizers
+    for opt in optimizers:
+        opt.step()
+    # null the gradients
+    model.zero_grad(set_to_none=True)
+    # logging
+    approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0)
+    print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True)
+
+print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+dist.destroy_process_group()
+
+====================================================================================================
+Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]
+Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6
+Running Triton version 3.4.0
+Fri Sep  5 16:22:32 2025
++-----------------------------------------------------------------------------------------+
+| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 |
+|-----------------------------------------+------------------------+----------------------+
+| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
+| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
+| | | MIG M.
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 129W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 128W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 87344 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 87345 C /usr/bin/python3 610MiB | +| 0 N/A N/A 87346 C /usr/bin/python3 610MiB | +| 0 N/A N/A 87347 C /usr/bin/python3 610MiB | +| 0 N/A N/A 87348 C /usr/bin/python3 610MiB | +| 0 N/A N/A 87349 C /usr/bin/python3 610MiB | +| 0 N/A N/A 87350 C /usr/bin/python3 610MiB | +| 0 N/A N/A 87351 C /usr/bin/python3 610MiB | +| 1 N/A N/A 87345 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 87346 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 87347 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 87348 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 87349 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 87350 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 87351 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1670 train_time:354ms step_avg:354.08ms +step:2/1670 train_time:375ms step_avg:187.34ms +step:3/1670 train_time:448ms step_avg:149.22ms +step:4/1670 train_time:541ms step_avg:135.35ms +step:5/1670 train_time:636ms step_avg:127.26ms +step:6/1670 train_time:731ms step_avg:121.81ms +step:7/1670 train_time:826ms step_avg:117.95ms +step:8/1670 
train_time:921ms step_avg:115.12ms +step:9/1670 train_time:1016ms step_avg:112.91ms +step:10/1670 train_time:1112ms step_avg:111.19ms +step:11/1670 train_time:1207ms step_avg:109.77ms +step:12/1670 train_time:1305ms step_avg:108.78ms +step:13/1670 train_time:1404ms step_avg:107.98ms +step:14/1670 train_time:1499ms step_avg:107.10ms +step:15/1670 train_time:1595ms step_avg:106.36ms +step:16/1670 train_time:1691ms step_avg:105.69ms +step:17/1670 train_time:1787ms step_avg:105.14ms +step:18/1670 train_time:1883ms step_avg:104.61ms +step:19/1670 train_time:1979ms step_avg:104.15ms +step:20/1670 train_time:2074ms step_avg:103.69ms +step:21/1670 train_time:2169ms step_avg:103.30ms +step:22/1670 train_time:2266ms step_avg:102.99ms +step:23/1670 train_time:2363ms step_avg:102.74ms +step:24/1670 train_time:2459ms step_avg:102.44ms +step:25/1670 train_time:2555ms step_avg:102.19ms +step:26/1670 train_time:2651ms step_avg:101.96ms +step:27/1670 train_time:2747ms step_avg:101.74ms +step:28/1670 train_time:2843ms step_avg:101.55ms +step:29/1670 train_time:2939ms step_avg:101.33ms +step:30/1670 train_time:3034ms step_avg:101.12ms +step:31/1670 train_time:3130ms step_avg:100.97ms +step:32/1670 train_time:3226ms step_avg:100.81ms +step:33/1670 train_time:3322ms step_avg:100.67ms +step:34/1670 train_time:3418ms step_avg:100.54ms +step:35/1670 train_time:3514ms step_avg:100.41ms +step:36/1670 train_time:3611ms step_avg:100.30ms +step:37/1670 train_time:3707ms step_avg:100.20ms +step:38/1670 train_time:3803ms step_avg:100.09ms +step:39/1670 train_time:3899ms step_avg:99.98ms +step:40/1670 train_time:3995ms step_avg:99.86ms +step:41/1670 train_time:4090ms step_avg:99.75ms +step:42/1670 train_time:4185ms step_avg:99.65ms +step:43/1670 train_time:4282ms step_avg:99.58ms +step:44/1670 train_time:4378ms step_avg:99.50ms +step:45/1670 train_time:4474ms step_avg:99.42ms +step:46/1670 train_time:4572ms step_avg:99.39ms +step:47/1670 train_time:4668ms step_avg:99.32ms +step:48/1670 train_time:4765ms step_avg:99.28ms +step:49/1670 train_time:4861ms step_avg:99.21ms +step:50/1670 train_time:4957ms step_avg:99.14ms +step:51/1670 train_time:5053ms step_avg:99.08ms +step:52/1670 train_time:5149ms step_avg:99.02ms +step:53/1670 train_time:5245ms step_avg:98.97ms +step:54/1670 train_time:5341ms step_avg:98.91ms +step:55/1670 train_time:5437ms step_avg:98.86ms +step:56/1670 train_time:5534ms step_avg:98.82ms +step:57/1670 train_time:5630ms step_avg:98.77ms +step:58/1670 train_time:5726ms step_avg:98.73ms +step:59/1670 train_time:5823ms step_avg:98.69ms +step:60/1670 train_time:5918ms step_avg:98.64ms +step:61/1670 train_time:6014ms step_avg:98.60ms +step:62/1670 train_time:6110ms step_avg:98.55ms +step:63/1670 train_time:6207ms step_avg:98.52ms +step:64/1670 train_time:6304ms step_avg:98.50ms +step:65/1670 train_time:6399ms step_avg:98.45ms +step:66/1670 train_time:6495ms step_avg:98.41ms +step:67/1670 train_time:6592ms step_avg:98.38ms +step:68/1670 train_time:6687ms step_avg:98.34ms +step:69/1670 train_time:6784ms step_avg:98.32ms +step:70/1670 train_time:6880ms step_avg:98.29ms +step:71/1670 train_time:6976ms step_avg:98.25ms +step:72/1670 train_time:7072ms step_avg:98.22ms +step:73/1670 train_time:7167ms step_avg:98.18ms +step:74/1670 train_time:7264ms step_avg:98.16ms +step:75/1670 train_time:7360ms step_avg:98.14ms +step:76/1670 train_time:7456ms step_avg:98.11ms +step:77/1670 train_time:7552ms step_avg:98.08ms +step:78/1670 train_time:7648ms step_avg:98.05ms +step:79/1670 train_time:7745ms step_avg:98.03ms 
+step:80/1670 train_time:7840ms step_avg:98.00ms +step:81/1670 train_time:7935ms step_avg:97.97ms +step:82/1670 train_time:8032ms step_avg:97.95ms +step:83/1670 train_time:8128ms step_avg:97.93ms +step:84/1670 train_time:8224ms step_avg:97.91ms +step:85/1670 train_time:8321ms step_avg:97.90ms +step:86/1670 train_time:8416ms step_avg:97.87ms +step:87/1670 train_time:8513ms step_avg:97.85ms +step:88/1670 train_time:8610ms step_avg:97.84ms +step:89/1670 train_time:8705ms step_avg:97.81ms +step:90/1670 train_time:8801ms step_avg:97.79ms +step:91/1670 train_time:8897ms step_avg:97.77ms +step:92/1670 train_time:8993ms step_avg:97.75ms +step:93/1670 train_time:9088ms step_avg:97.73ms +step:94/1670 train_time:9184ms step_avg:97.71ms +step:95/1670 train_time:9280ms step_avg:97.69ms +step:96/1670 train_time:9376ms step_avg:97.66ms +step:97/1670 train_time:9472ms step_avg:97.65ms +step:98/1670 train_time:9568ms step_avg:97.63ms +step:99/1670 train_time:9664ms step_avg:97.62ms +step:100/1670 train_time:9760ms step_avg:97.60ms +step:101/1670 train_time:9857ms step_avg:97.59ms +step:102/1670 train_time:9952ms step_avg:97.57ms +step:103/1670 train_time:10048ms step_avg:97.55ms +step:104/1670 train_time:10143ms step_avg:97.53ms +step:105/1670 train_time:10238ms step_avg:97.51ms +step:106/1670 train_time:10334ms step_avg:97.49ms +step:107/1670 train_time:10431ms step_avg:97.49ms +step:108/1670 train_time:10527ms step_avg:97.47ms +step:109/1670 train_time:10624ms step_avg:97.47ms +step:110/1670 train_time:10719ms step_avg:97.45ms +step:111/1670 train_time:10815ms step_avg:97.43ms +step:112/1670 train_time:10912ms step_avg:97.42ms +step:113/1670 train_time:11008ms step_avg:97.41ms +step:114/1670 train_time:11105ms step_avg:97.41ms +step:115/1670 train_time:11201ms step_avg:97.40ms +step:116/1670 train_time:11296ms step_avg:97.38ms +step:117/1670 train_time:11393ms step_avg:97.37ms +step:118/1670 train_time:11489ms step_avg:97.37ms +step:119/1670 train_time:11585ms step_avg:97.35ms +step:120/1670 train_time:11681ms step_avg:97.34ms +step:121/1670 train_time:11776ms step_avg:97.32ms +step:122/1670 train_time:11872ms step_avg:97.31ms +step:123/1670 train_time:11968ms step_avg:97.30ms +step:124/1670 train_time:12064ms step_avg:97.29ms +step:125/1670 train_time:12160ms step_avg:97.28ms +step:125/1670 val_loss:4.3131 train_time:12254ms step_avg:98.03ms +step:126/1670 train_time:12277ms step_avg:97.44ms +step:127/1670 train_time:12363ms step_avg:97.34ms +step:128/1670 train_time:12465ms step_avg:97.38ms +step:129/1670 train_time:12561ms step_avg:97.37ms +step:130/1670 train_time:12656ms step_avg:97.36ms +step:131/1670 train_time:12751ms step_avg:97.33ms +step:132/1670 train_time:12845ms step_avg:97.31ms +step:133/1670 train_time:12940ms step_avg:97.29ms +step:134/1670 train_time:13035ms step_avg:97.28ms +step:135/1670 train_time:13130ms step_avg:97.26ms +step:136/1670 train_time:13225ms step_avg:97.24ms +step:137/1670 train_time:13321ms step_avg:97.24ms +step:138/1670 train_time:13419ms step_avg:97.24ms +step:139/1670 train_time:13516ms step_avg:97.24ms +step:140/1670 train_time:13614ms step_avg:97.24ms +step:141/1670 train_time:13710ms step_avg:97.23ms +step:142/1670 train_time:13804ms step_avg:97.21ms +step:143/1670 train_time:13899ms step_avg:97.20ms +step:144/1670 train_time:13994ms step_avg:97.18ms +step:145/1670 train_time:14088ms step_avg:97.16ms +step:146/1670 train_time:14183ms step_avg:97.15ms +step:147/1670 train_time:14280ms step_avg:97.14ms +step:148/1670 train_time:14375ms step_avg:97.13ms 
+step:149/1670 train_time:14472ms step_avg:97.13ms +step:150/1670 train_time:14569ms step_avg:97.13ms +step:151/1670 train_time:14666ms step_avg:97.12ms +step:152/1670 train_time:14761ms step_avg:97.11ms +step:153/1670 train_time:14856ms step_avg:97.10ms +step:154/1670 train_time:14952ms step_avg:97.09ms +step:155/1670 train_time:15047ms step_avg:97.08ms +step:156/1670 train_time:15142ms step_avg:97.07ms +step:157/1670 train_time:15237ms step_avg:97.05ms +step:158/1670 train_time:15333ms step_avg:97.05ms +step:159/1670 train_time:15429ms step_avg:97.04ms +step:160/1670 train_time:15525ms step_avg:97.03ms +step:161/1670 train_time:15621ms step_avg:97.02ms +step:162/1670 train_time:15716ms step_avg:97.01ms +step:163/1670 train_time:15811ms step_avg:97.00ms +step:164/1670 train_time:15907ms step_avg:96.99ms +step:165/1670 train_time:16002ms step_avg:96.98ms +step:166/1670 train_time:16097ms step_avg:96.97ms +step:167/1670 train_time:16193ms step_avg:96.96ms +step:168/1670 train_time:16288ms step_avg:96.95ms +step:169/1670 train_time:16384ms step_avg:96.95ms +step:170/1670 train_time:16480ms step_avg:96.94ms +step:171/1670 train_time:16576ms step_avg:96.93ms +step:172/1670 train_time:16672ms step_avg:96.93ms +step:173/1670 train_time:16768ms step_avg:96.93ms +step:174/1670 train_time:16864ms step_avg:96.92ms +step:175/1670 train_time:16959ms step_avg:96.91ms +step:176/1670 train_time:17054ms step_avg:96.90ms +step:177/1670 train_time:17149ms step_avg:96.89ms +step:178/1670 train_time:17244ms step_avg:96.88ms +step:179/1670 train_time:17340ms step_avg:96.87ms +step:180/1670 train_time:17435ms step_avg:96.86ms +step:181/1670 train_time:17531ms step_avg:96.86ms +step:182/1670 train_time:17628ms step_avg:96.86ms +step:183/1670 train_time:17724ms step_avg:96.85ms +step:184/1670 train_time:17819ms step_avg:96.84ms +step:185/1670 train_time:17914ms step_avg:96.83ms +step:186/1670 train_time:18010ms step_avg:96.83ms +step:187/1670 train_time:18106ms step_avg:96.82ms +step:188/1670 train_time:18201ms step_avg:96.82ms +step:189/1670 train_time:18298ms step_avg:96.81ms +step:190/1670 train_time:18393ms step_avg:96.81ms +step:191/1670 train_time:18488ms step_avg:96.80ms +step:192/1670 train_time:18584ms step_avg:96.79ms +step:193/1670 train_time:18679ms step_avg:96.78ms +step:194/1670 train_time:18776ms step_avg:96.78ms +step:195/1670 train_time:18872ms step_avg:96.78ms +step:196/1670 train_time:18968ms step_avg:96.78ms +step:197/1670 train_time:19063ms step_avg:96.76ms +step:198/1670 train_time:19158ms step_avg:96.76ms +step:199/1670 train_time:19253ms step_avg:96.75ms +step:200/1670 train_time:19348ms step_avg:96.74ms +step:201/1670 train_time:19444ms step_avg:96.73ms +step:202/1670 train_time:19539ms step_avg:96.73ms +step:203/1670 train_time:19635ms step_avg:96.72ms +step:204/1670 train_time:19731ms step_avg:96.72ms +step:205/1670 train_time:19827ms step_avg:96.72ms +step:206/1670 train_time:19922ms step_avg:96.71ms +step:207/1670 train_time:20018ms step_avg:96.71ms +step:208/1670 train_time:20114ms step_avg:96.70ms +step:209/1670 train_time:20209ms step_avg:96.70ms +step:210/1670 train_time:20305ms step_avg:96.69ms +step:211/1670 train_time:20401ms step_avg:96.69ms +step:212/1670 train_time:20496ms step_avg:96.68ms +step:213/1670 train_time:20771ms step_avg:97.51ms +step:214/1670 train_time:20917ms step_avg:97.74ms +step:215/1670 train_time:21011ms step_avg:97.73ms +step:216/1670 train_time:21106ms step_avg:97.71ms +step:217/1670 train_time:21201ms step_avg:97.70ms +step:218/1670 train_time:21296ms 
step_avg:97.69ms +step:219/1670 train_time:21390ms step_avg:97.67ms +step:220/1670 train_time:21485ms step_avg:97.66ms +step:221/1670 train_time:21580ms step_avg:97.65ms +step:222/1670 train_time:21675ms step_avg:97.63ms +step:223/1670 train_time:21776ms step_avg:97.65ms +step:224/1670 train_time:21875ms step_avg:97.66ms +step:225/1670 train_time:21972ms step_avg:97.65ms +step:226/1670 train_time:22068ms step_avg:97.65ms +step:227/1670 train_time:22163ms step_avg:97.64ms +step:228/1670 train_time:22259ms step_avg:97.63ms +step:229/1670 train_time:22354ms step_avg:97.61ms +step:230/1670 train_time:22448ms step_avg:97.60ms +step:231/1670 train_time:22544ms step_avg:97.59ms +step:232/1670 train_time:22638ms step_avg:97.58ms +step:233/1670 train_time:22734ms step_avg:97.57ms +step:234/1670 train_time:22831ms step_avg:97.57ms +step:235/1670 train_time:22927ms step_avg:97.56ms +step:236/1670 train_time:23023ms step_avg:97.56ms +step:237/1670 train_time:23119ms step_avg:97.55ms +step:238/1670 train_time:23214ms step_avg:97.54ms +step:239/1670 train_time:23309ms step_avg:97.53ms +step:240/1670 train_time:23405ms step_avg:97.52ms +step:241/1670 train_time:23500ms step_avg:97.51ms +step:242/1670 train_time:23595ms step_avg:97.50ms +step:243/1670 train_time:23690ms step_avg:97.49ms +step:244/1670 train_time:23786ms step_avg:97.48ms +step:245/1670 train_time:23882ms step_avg:97.48ms +step:246/1670 train_time:23978ms step_avg:97.47ms +step:247/1670 train_time:24075ms step_avg:97.47ms +step:248/1670 train_time:24171ms step_avg:97.46ms +step:249/1670 train_time:24266ms step_avg:97.46ms +step:250/1670 train_time:24361ms step_avg:97.44ms +step:250/1670 val_loss:3.9721 train_time:24455ms step_avg:97.82ms +step:251/1670 train_time:24478ms step_avg:97.52ms +step:252/1670 train_time:24558ms step_avg:97.45ms +step:253/1670 train_time:24657ms step_avg:97.46ms +step:254/1670 train_time:24753ms step_avg:97.45ms +step:255/1670 train_time:24849ms step_avg:97.45ms +step:256/1670 train_time:24943ms step_avg:97.43ms +step:257/1670 train_time:25037ms step_avg:97.42ms +step:258/1670 train_time:25132ms step_avg:97.41ms +step:259/1670 train_time:25226ms step_avg:97.40ms +step:260/1670 train_time:25322ms step_avg:97.39ms +step:261/1670 train_time:25417ms step_avg:97.38ms +step:262/1670 train_time:25513ms step_avg:97.38ms +step:263/1670 train_time:25614ms step_avg:97.39ms +step:264/1670 train_time:25711ms step_avg:97.39ms +step:265/1670 train_time:25808ms step_avg:97.39ms +step:266/1670 train_time:25903ms step_avg:97.38ms +step:267/1670 train_time:25998ms step_avg:97.37ms +step:268/1670 train_time:26093ms step_avg:97.36ms +step:269/1670 train_time:26188ms step_avg:97.35ms +step:270/1670 train_time:26283ms step_avg:97.34ms +step:271/1670 train_time:26378ms step_avg:97.34ms +step:272/1670 train_time:26474ms step_avg:97.33ms +step:273/1670 train_time:26570ms step_avg:97.33ms +step:274/1670 train_time:26667ms step_avg:97.32ms +step:275/1670 train_time:26763ms step_avg:97.32ms +step:276/1670 train_time:26859ms step_avg:97.31ms +step:277/1670 train_time:26954ms step_avg:97.31ms +step:278/1670 train_time:27050ms step_avg:97.30ms +step:279/1670 train_time:27146ms step_avg:97.30ms +step:280/1670 train_time:27241ms step_avg:97.29ms +step:281/1670 train_time:27336ms step_avg:97.28ms +step:282/1670 train_time:27431ms step_avg:97.27ms +step:283/1670 train_time:27528ms step_avg:97.27ms +step:284/1670 train_time:27623ms step_avg:97.26ms +step:285/1670 train_time:27719ms step_avg:97.26ms +step:286/1670 train_time:27816ms step_avg:97.26ms 
+step:287/1670 train_time:27912ms step_avg:97.25ms +step:288/1670 train_time:28007ms step_avg:97.25ms +step:289/1670 train_time:28103ms step_avg:97.24ms +step:290/1670 train_time:28198ms step_avg:97.23ms +step:291/1670 train_time:28293ms step_avg:97.23ms +step:292/1670 train_time:28389ms step_avg:97.22ms +step:293/1670 train_time:28485ms step_avg:97.22ms +step:294/1670 train_time:28581ms step_avg:97.22ms +step:295/1670 train_time:28677ms step_avg:97.21ms +step:296/1670 train_time:28773ms step_avg:97.21ms +step:297/1670 train_time:28870ms step_avg:97.20ms +step:298/1670 train_time:28966ms step_avg:97.20ms +step:299/1670 train_time:29061ms step_avg:97.20ms +step:300/1670 train_time:29157ms step_avg:97.19ms +step:301/1670 train_time:29252ms step_avg:97.18ms +step:302/1670 train_time:29347ms step_avg:97.18ms +step:303/1670 train_time:29443ms step_avg:97.17ms +step:304/1670 train_time:29539ms step_avg:97.17ms +step:305/1670 train_time:29636ms step_avg:97.17ms +step:306/1670 train_time:29731ms step_avg:97.16ms +step:307/1670 train_time:29827ms step_avg:97.16ms +step:308/1670 train_time:29922ms step_avg:97.15ms +step:309/1670 train_time:30017ms step_avg:97.14ms +step:310/1670 train_time:30113ms step_avg:97.14ms +step:311/1670 train_time:30209ms step_avg:97.14ms +step:312/1670 train_time:30304ms step_avg:97.13ms +step:313/1670 train_time:30400ms step_avg:97.12ms +step:314/1670 train_time:30495ms step_avg:97.12ms +step:315/1670 train_time:30592ms step_avg:97.12ms +step:316/1670 train_time:30688ms step_avg:97.11ms +step:317/1670 train_time:30784ms step_avg:97.11ms +step:318/1670 train_time:30880ms step_avg:97.11ms +step:319/1670 train_time:30976ms step_avg:97.10ms +step:320/1670 train_time:31071ms step_avg:97.10ms +step:321/1670 train_time:31167ms step_avg:97.09ms +step:322/1670 train_time:31263ms step_avg:97.09ms +step:323/1670 train_time:31359ms step_avg:97.09ms +step:324/1670 train_time:31454ms step_avg:97.08ms +step:325/1670 train_time:31550ms step_avg:97.08ms +step:326/1670 train_time:31646ms step_avg:97.07ms +step:327/1670 train_time:31742ms step_avg:97.07ms +step:328/1670 train_time:31837ms step_avg:97.07ms +step:329/1670 train_time:31933ms step_avg:97.06ms +step:330/1670 train_time:32028ms step_avg:97.06ms +step:331/1670 train_time:32124ms step_avg:97.05ms +step:332/1670 train_time:32219ms step_avg:97.05ms +step:333/1670 train_time:32316ms step_avg:97.04ms +step:334/1670 train_time:32412ms step_avg:97.04ms +step:335/1670 train_time:32509ms step_avg:97.04ms +step:336/1670 train_time:32604ms step_avg:97.04ms +step:337/1670 train_time:32700ms step_avg:97.03ms +step:338/1670 train_time:32795ms step_avg:97.03ms +step:339/1670 train_time:32892ms step_avg:97.03ms +step:340/1670 train_time:32988ms step_avg:97.02ms +step:341/1670 train_time:33083ms step_avg:97.02ms +step:342/1670 train_time:33178ms step_avg:97.01ms +step:343/1670 train_time:33274ms step_avg:97.01ms +step:344/1670 train_time:33369ms step_avg:97.00ms +step:345/1670 train_time:33465ms step_avg:97.00ms +step:346/1670 train_time:33561ms step_avg:97.00ms +step:347/1670 train_time:33656ms step_avg:96.99ms +step:348/1670 train_time:33752ms step_avg:96.99ms +step:349/1670 train_time:33849ms step_avg:96.99ms +step:350/1670 train_time:33946ms step_avg:96.99ms +step:351/1670 train_time:34041ms step_avg:96.98ms +step:352/1670 train_time:34136ms step_avg:96.98ms +step:353/1670 train_time:34232ms step_avg:96.97ms +step:354/1670 train_time:34327ms step_avg:96.97ms +step:355/1670 train_time:34423ms step_avg:96.96ms +step:356/1670 train_time:34518ms 
step_avg:96.96ms +step:357/1670 train_time:34614ms step_avg:96.96ms +step:358/1670 train_time:34710ms step_avg:96.95ms +step:359/1670 train_time:34806ms step_avg:96.95ms +step:360/1670 train_time:34902ms step_avg:96.95ms +step:361/1670 train_time:34997ms step_avg:96.95ms +step:362/1670 train_time:35093ms step_avg:96.94ms +step:363/1670 train_time:35189ms step_avg:96.94ms +step:364/1670 train_time:35285ms step_avg:96.94ms +step:365/1670 train_time:35380ms step_avg:96.93ms +step:366/1670 train_time:35475ms step_avg:96.93ms +step:367/1670 train_time:35572ms step_avg:96.93ms +step:368/1670 train_time:35667ms step_avg:96.92ms +step:369/1670 train_time:35763ms step_avg:96.92ms +step:370/1670 train_time:35859ms step_avg:96.92ms +step:371/1670 train_time:35955ms step_avg:96.91ms +step:372/1670 train_time:36051ms step_avg:96.91ms +step:373/1670 train_time:36146ms step_avg:96.91ms +step:374/1670 train_time:36242ms step_avg:96.90ms +step:375/1670 train_time:36338ms step_avg:96.90ms +step:375/1670 val_loss:3.8167 train_time:36432ms step_avg:97.15ms +step:376/1670 train_time:36455ms step_avg:96.95ms +step:377/1670 train_time:36534ms step_avg:96.91ms +step:378/1670 train_time:36633ms step_avg:96.91ms +step:379/1670 train_time:36729ms step_avg:96.91ms +step:380/1670 train_time:36824ms step_avg:96.91ms +step:381/1670 train_time:36919ms step_avg:96.90ms +step:382/1670 train_time:37014ms step_avg:96.90ms +step:383/1670 train_time:37109ms step_avg:96.89ms +step:384/1670 train_time:37204ms step_avg:96.89ms +step:385/1670 train_time:37299ms step_avg:96.88ms +step:386/1670 train_time:37394ms step_avg:96.88ms +step:387/1670 train_time:37491ms step_avg:96.88ms +step:388/1670 train_time:37588ms step_avg:96.88ms +step:389/1670 train_time:37685ms step_avg:96.88ms +step:390/1670 train_time:37781ms step_avg:96.88ms +step:391/1670 train_time:37876ms step_avg:96.87ms +step:392/1670 train_time:37972ms step_avg:96.87ms +step:393/1670 train_time:38067ms step_avg:96.86ms +step:394/1670 train_time:38162ms step_avg:96.86ms +step:395/1670 train_time:38257ms step_avg:96.85ms +step:396/1670 train_time:38352ms step_avg:96.85ms +step:397/1670 train_time:38448ms step_avg:96.85ms +step:398/1670 train_time:38545ms step_avg:96.85ms +step:399/1670 train_time:38642ms step_avg:96.85ms +step:400/1670 train_time:38739ms step_avg:96.85ms +step:401/1670 train_time:38835ms step_avg:96.84ms +step:402/1670 train_time:38930ms step_avg:96.84ms +step:403/1670 train_time:39025ms step_avg:96.84ms +step:404/1670 train_time:39121ms step_avg:96.83ms +step:405/1670 train_time:39216ms step_avg:96.83ms +step:406/1670 train_time:39311ms step_avg:96.83ms +step:407/1670 train_time:39407ms step_avg:96.82ms +step:408/1670 train_time:39503ms step_avg:96.82ms +step:409/1670 train_time:39599ms step_avg:96.82ms +step:410/1670 train_time:39696ms step_avg:96.82ms +step:411/1670 train_time:39790ms step_avg:96.81ms +step:412/1670 train_time:39886ms step_avg:96.81ms +step:413/1670 train_time:39982ms step_avg:96.81ms +step:414/1670 train_time:40079ms step_avg:96.81ms +step:415/1670 train_time:40175ms step_avg:96.81ms +step:416/1670 train_time:40270ms step_avg:96.80ms +step:417/1670 train_time:40365ms step_avg:96.80ms +step:418/1670 train_time:40461ms step_avg:96.80ms +step:419/1670 train_time:40558ms step_avg:96.80ms +step:420/1670 train_time:40654ms step_avg:96.80ms +step:421/1670 train_time:40749ms step_avg:96.79ms +step:422/1670 train_time:40846ms step_avg:96.79ms +step:423/1670 train_time:40941ms step_avg:96.79ms +step:424/1670 train_time:41037ms step_avg:96.78ms 
+step:425/1670 train_time:41303ms step_avg:97.18ms +step:426/1670 train_time:41427ms step_avg:97.25ms +step:427/1670 train_time:41521ms step_avg:97.24ms +step:428/1670 train_time:41615ms step_avg:97.23ms +step:429/1670 train_time:41709ms step_avg:97.22ms +step:430/1670 train_time:41804ms step_avg:97.22ms +step:431/1670 train_time:41900ms step_avg:97.21ms +step:432/1670 train_time:41994ms step_avg:97.21ms +step:433/1670 train_time:42089ms step_avg:97.20ms +step:434/1670 train_time:42184ms step_avg:97.20ms +step:435/1670 train_time:42279ms step_avg:97.19ms +step:436/1670 train_time:42378ms step_avg:97.20ms +step:437/1670 train_time:42476ms step_avg:97.20ms +step:438/1670 train_time:42573ms step_avg:97.20ms +step:439/1670 train_time:42668ms step_avg:97.19ms +step:440/1670 train_time:42763ms step_avg:97.19ms +step:441/1670 train_time:42859ms step_avg:97.19ms +step:442/1670 train_time:42954ms step_avg:97.18ms +step:443/1670 train_time:43049ms step_avg:97.18ms +step:444/1670 train_time:43144ms step_avg:97.17ms +step:445/1670 train_time:43240ms step_avg:97.17ms +step:446/1670 train_time:43336ms step_avg:97.17ms +step:447/1670 train_time:43433ms step_avg:97.16ms +step:448/1670 train_time:43529ms step_avg:97.16ms +step:449/1670 train_time:43625ms step_avg:97.16ms +step:450/1670 train_time:43721ms step_avg:97.16ms +step:451/1670 train_time:43817ms step_avg:97.16ms +step:452/1670 train_time:43913ms step_avg:97.15ms +step:453/1670 train_time:44008ms step_avg:97.15ms +step:454/1670 train_time:44103ms step_avg:97.14ms +step:455/1670 train_time:44199ms step_avg:97.14ms +step:456/1670 train_time:44295ms step_avg:97.14ms +step:457/1670 train_time:44390ms step_avg:97.13ms +step:458/1670 train_time:44486ms step_avg:97.13ms +step:459/1670 train_time:44583ms step_avg:97.13ms +step:460/1670 train_time:44679ms step_avg:97.13ms +step:461/1670 train_time:44775ms step_avg:97.13ms +step:462/1670 train_time:44871ms step_avg:97.12ms +step:463/1670 train_time:44966ms step_avg:97.12ms +step:464/1670 train_time:45061ms step_avg:97.11ms +step:465/1670 train_time:45156ms step_avg:97.11ms +step:466/1670 train_time:45252ms step_avg:97.11ms +step:467/1670 train_time:45348ms step_avg:97.10ms +step:468/1670 train_time:45444ms step_avg:97.10ms +step:469/1670 train_time:45540ms step_avg:97.10ms +step:470/1670 train_time:45635ms step_avg:97.10ms +step:471/1670 train_time:45730ms step_avg:97.09ms +step:472/1670 train_time:45826ms step_avg:97.09ms +step:473/1670 train_time:45922ms step_avg:97.09ms +step:474/1670 train_time:46018ms step_avg:97.08ms +step:475/1670 train_time:46114ms step_avg:97.08ms +step:476/1670 train_time:46210ms step_avg:97.08ms +step:477/1670 train_time:46305ms step_avg:97.08ms +step:478/1670 train_time:46401ms step_avg:97.07ms +step:479/1670 train_time:46497ms step_avg:97.07ms +step:480/1670 train_time:46592ms step_avg:97.07ms +step:481/1670 train_time:46688ms step_avg:97.06ms +step:482/1670 train_time:46784ms step_avg:97.06ms +step:483/1670 train_time:46881ms step_avg:97.06ms +step:484/1670 train_time:46976ms step_avg:97.06ms +step:485/1670 train_time:47072ms step_avg:97.05ms +step:486/1670 train_time:47167ms step_avg:97.05ms +step:487/1670 train_time:47263ms step_avg:97.05ms +step:488/1670 train_time:47359ms step_avg:97.05ms +step:489/1670 train_time:47455ms step_avg:97.05ms +step:490/1670 train_time:47550ms step_avg:97.04ms +step:491/1670 train_time:47646ms step_avg:97.04ms +step:492/1670 train_time:47743ms step_avg:97.04ms +step:493/1670 train_time:47839ms step_avg:97.04ms +step:494/1670 train_time:47935ms 
step_avg:97.03ms +step:495/1670 train_time:48030ms step_avg:97.03ms +step:496/1670 train_time:48126ms step_avg:97.03ms +step:497/1670 train_time:48222ms step_avg:97.03ms +step:498/1670 train_time:48318ms step_avg:97.02ms +step:499/1670 train_time:48414ms step_avg:97.02ms +step:500/1670 train_time:48510ms step_avg:97.02ms +step:500/1670 val_loss:3.7126 train_time:48604ms step_avg:97.21ms +step:501/1670 train_time:48627ms step_avg:97.06ms +step:502/1670 train_time:48708ms step_avg:97.03ms +step:503/1670 train_time:48807ms step_avg:97.03ms +step:504/1670 train_time:48903ms step_avg:97.03ms +step:505/1670 train_time:49000ms step_avg:97.03ms +step:506/1670 train_time:49096ms step_avg:97.03ms +step:507/1670 train_time:49191ms step_avg:97.02ms +step:508/1670 train_time:49285ms step_avg:97.02ms +step:509/1670 train_time:49381ms step_avg:97.02ms +step:510/1670 train_time:49476ms step_avg:97.01ms +step:511/1670 train_time:49572ms step_avg:97.01ms +step:512/1670 train_time:49669ms step_avg:97.01ms +step:513/1670 train_time:49766ms step_avg:97.01ms +step:514/1670 train_time:49862ms step_avg:97.01ms +step:515/1670 train_time:49960ms step_avg:97.01ms +step:516/1670 train_time:50056ms step_avg:97.01ms +step:517/1670 train_time:50151ms step_avg:97.00ms +step:518/1670 train_time:50246ms step_avg:97.00ms +step:519/1670 train_time:50341ms step_avg:97.00ms +step:520/1670 train_time:50436ms step_avg:96.99ms +step:521/1670 train_time:50531ms step_avg:96.99ms +step:522/1670 train_time:50627ms step_avg:96.99ms +step:523/1670 train_time:50723ms step_avg:96.99ms +step:524/1670 train_time:50820ms step_avg:96.99ms +step:525/1670 train_time:50917ms step_avg:96.99ms +step:526/1670 train_time:51014ms step_avg:96.98ms +step:527/1670 train_time:51109ms step_avg:96.98ms +step:528/1670 train_time:51205ms step_avg:96.98ms +step:529/1670 train_time:51300ms step_avg:96.98ms +step:530/1670 train_time:51395ms step_avg:96.97ms +step:531/1670 train_time:51490ms step_avg:96.97ms +step:532/1670 train_time:51586ms step_avg:96.97ms +step:533/1670 train_time:51682ms step_avg:96.96ms +step:534/1670 train_time:51778ms step_avg:96.96ms +step:535/1670 train_time:51876ms step_avg:96.96ms +step:536/1670 train_time:51972ms step_avg:96.96ms +step:537/1670 train_time:52068ms step_avg:96.96ms +step:538/1670 train_time:52164ms step_avg:96.96ms +step:539/1670 train_time:52260ms step_avg:96.96ms +step:540/1670 train_time:52356ms step_avg:96.96ms +step:541/1670 train_time:52452ms step_avg:96.95ms +step:542/1670 train_time:52547ms step_avg:96.95ms +step:543/1670 train_time:52642ms step_avg:96.95ms +step:544/1670 train_time:52738ms step_avg:96.94ms +step:545/1670 train_time:52834ms step_avg:96.94ms +step:546/1670 train_time:52930ms step_avg:96.94ms +step:547/1670 train_time:53025ms step_avg:96.94ms +step:548/1670 train_time:53121ms step_avg:96.94ms +step:549/1670 train_time:53217ms step_avg:96.94ms +step:550/1670 train_time:53313ms step_avg:96.93ms +step:551/1670 train_time:53409ms step_avg:96.93ms +step:552/1670 train_time:53505ms step_avg:96.93ms +step:553/1670 train_time:53600ms step_avg:96.93ms +step:554/1670 train_time:53697ms step_avg:96.93ms +step:555/1670 train_time:53793ms step_avg:96.92ms +step:556/1670 train_time:53889ms step_avg:96.92ms +step:557/1670 train_time:53985ms step_avg:96.92ms +step:558/1670 train_time:54081ms step_avg:96.92ms +step:559/1670 train_time:54178ms step_avg:96.92ms +step:560/1670 train_time:54276ms step_avg:96.92ms +step:561/1670 train_time:54373ms step_avg:96.92ms +step:562/1670 train_time:54470ms step_avg:96.92ms 
+step:563/1670 train_time:54566ms step_avg:96.92ms +step:564/1670 train_time:54663ms step_avg:96.92ms +step:565/1670 train_time:54761ms step_avg:96.92ms +step:566/1670 train_time:54859ms step_avg:96.92ms +step:567/1670 train_time:54958ms step_avg:96.93ms +step:568/1670 train_time:55056ms step_avg:96.93ms +step:569/1670 train_time:55154ms step_avg:96.93ms +step:570/1670 train_time:55251ms step_avg:96.93ms +step:571/1670 train_time:55348ms step_avg:96.93ms +step:572/1670 train_time:55445ms step_avg:96.93ms +step:573/1670 train_time:55542ms step_avg:96.93ms +step:574/1670 train_time:55640ms step_avg:96.93ms +step:575/1670 train_time:55737ms step_avg:96.93ms +step:576/1670 train_time:55836ms step_avg:96.94ms +step:577/1670 train_time:55933ms step_avg:96.94ms +step:578/1670 train_time:56030ms step_avg:96.94ms +step:579/1670 train_time:56127ms step_avg:96.94ms +step:580/1670 train_time:56224ms step_avg:96.94ms +step:581/1670 train_time:56321ms step_avg:96.94ms +step:582/1670 train_time:56419ms step_avg:96.94ms +step:583/1670 train_time:56516ms step_avg:96.94ms +step:584/1670 train_time:56613ms step_avg:96.94ms +step:585/1670 train_time:56710ms step_avg:96.94ms +step:586/1670 train_time:56807ms step_avg:96.94ms +step:587/1670 train_time:56903ms step_avg:96.94ms +step:588/1670 train_time:57002ms step_avg:96.94ms +step:589/1670 train_time:57099ms step_avg:96.94ms +step:590/1670 train_time:57197ms step_avg:96.94ms +step:591/1670 train_time:57294ms step_avg:96.94ms +step:592/1670 train_time:57392ms step_avg:96.95ms +step:593/1670 train_time:57489ms step_avg:96.95ms +step:594/1670 train_time:57585ms step_avg:96.94ms +step:595/1670 train_time:57682ms step_avg:96.94ms +step:596/1670 train_time:57780ms step_avg:96.95ms +step:597/1670 train_time:57879ms step_avg:96.95ms +step:598/1670 train_time:57977ms step_avg:96.95ms +step:599/1670 train_time:58074ms step_avg:96.95ms +step:600/1670 train_time:58171ms step_avg:96.95ms +step:601/1670 train_time:58268ms step_avg:96.95ms +step:602/1670 train_time:58365ms step_avg:96.95ms +step:603/1670 train_time:58463ms step_avg:96.95ms +step:604/1670 train_time:58561ms step_avg:96.96ms +step:605/1670 train_time:58658ms step_avg:96.96ms +step:606/1670 train_time:58755ms step_avg:96.96ms +step:607/1670 train_time:58853ms step_avg:96.96ms +step:608/1670 train_time:58949ms step_avg:96.96ms +step:609/1670 train_time:59046ms step_avg:96.96ms +step:610/1670 train_time:59143ms step_avg:96.96ms +step:611/1670 train_time:59240ms step_avg:96.96ms +step:612/1670 train_time:59338ms step_avg:96.96ms +step:613/1670 train_time:59436ms step_avg:96.96ms +step:614/1670 train_time:59533ms step_avg:96.96ms +step:615/1670 train_time:59630ms step_avg:96.96ms +step:616/1670 train_time:59726ms step_avg:96.96ms +step:617/1670 train_time:59823ms step_avg:96.96ms +step:618/1670 train_time:59921ms step_avg:96.96ms +step:619/1670 train_time:60020ms step_avg:96.96ms +step:620/1670 train_time:60117ms step_avg:96.96ms +step:621/1670 train_time:60215ms step_avg:96.96ms +step:622/1670 train_time:60312ms step_avg:96.97ms +step:623/1670 train_time:60409ms step_avg:96.96ms +step:624/1670 train_time:60506ms step_avg:96.96ms +step:625/1670 train_time:60604ms step_avg:96.97ms +step:625/1670 val_loss:3.6154 train_time:60700ms step_avg:97.12ms +step:626/1670 train_time:60724ms step_avg:97.00ms +step:627/1670 train_time:60813ms step_avg:96.99ms +step:628/1670 train_time:60912ms step_avg:96.99ms +step:629/1670 train_time:61008ms step_avg:96.99ms +step:630/1670 train_time:61104ms step_avg:96.99ms +step:631/1670 
train_time:61200ms step_avg:96.99ms +step:632/1670 train_time:61296ms step_avg:96.99ms +step:633/1670 train_time:61391ms step_avg:96.98ms +step:634/1670 train_time:61487ms step_avg:96.98ms +step:635/1670 train_time:61583ms step_avg:96.98ms +step:636/1670 train_time:61683ms step_avg:96.99ms +step:637/1670 train_time:61785ms step_avg:96.99ms +step:638/1670 train_time:61885ms step_avg:97.00ms +step:639/1670 train_time:62253ms step_avg:97.42ms +step:640/1670 train_time:62343ms step_avg:97.41ms +step:641/1670 train_time:62440ms step_avg:97.41ms +step:642/1670 train_time:62536ms step_avg:97.41ms +step:643/1670 train_time:62632ms step_avg:97.41ms +step:644/1670 train_time:62728ms step_avg:97.40ms +step:645/1670 train_time:62824ms step_avg:97.40ms +step:646/1670 train_time:62920ms step_avg:97.40ms +step:647/1670 train_time:63017ms step_avg:97.40ms +step:648/1670 train_time:63114ms step_avg:97.40ms +step:649/1670 train_time:63218ms step_avg:97.41ms +step:650/1670 train_time:63317ms step_avg:97.41ms +step:651/1670 train_time:63416ms step_avg:97.41ms +step:652/1670 train_time:63512ms step_avg:97.41ms +step:653/1670 train_time:63609ms step_avg:97.41ms +step:654/1670 train_time:63706ms step_avg:97.41ms +step:655/1670 train_time:63802ms step_avg:97.41ms +step:656/1670 train_time:63898ms step_avg:97.41ms +step:657/1670 train_time:63994ms step_avg:97.40ms +step:658/1670 train_time:64091ms step_avg:97.40ms +step:659/1670 train_time:64188ms step_avg:97.40ms +step:660/1670 train_time:64288ms step_avg:97.41ms +step:661/1670 train_time:64387ms step_avg:97.41ms +step:662/1670 train_time:64485ms step_avg:97.41ms +step:663/1670 train_time:64583ms step_avg:97.41ms +step:664/1670 train_time:64680ms step_avg:97.41ms +step:665/1670 train_time:64778ms step_avg:97.41ms +step:666/1670 train_time:64874ms step_avg:97.41ms +step:667/1670 train_time:64970ms step_avg:97.41ms +step:668/1670 train_time:65067ms step_avg:97.41ms +step:669/1670 train_time:65164ms step_avg:97.41ms +step:670/1670 train_time:65263ms step_avg:97.41ms +step:671/1670 train_time:65363ms step_avg:97.41ms +step:672/1670 train_time:65461ms step_avg:97.41ms +step:673/1670 train_time:65559ms step_avg:97.41ms +step:674/1670 train_time:65656ms step_avg:97.41ms +step:675/1670 train_time:65753ms step_avg:97.41ms +step:676/1670 train_time:65849ms step_avg:97.41ms +step:677/1670 train_time:65946ms step_avg:97.41ms +step:678/1670 train_time:66044ms step_avg:97.41ms +step:679/1670 train_time:66141ms step_avg:97.41ms +step:680/1670 train_time:66238ms step_avg:97.41ms +step:681/1670 train_time:66336ms step_avg:97.41ms +step:682/1670 train_time:66434ms step_avg:97.41ms +step:683/1670 train_time:66531ms step_avg:97.41ms +step:684/1670 train_time:66628ms step_avg:97.41ms +step:685/1670 train_time:66726ms step_avg:97.41ms +step:686/1670 train_time:66822ms step_avg:97.41ms +step:687/1670 train_time:66919ms step_avg:97.41ms +step:688/1670 train_time:67017ms step_avg:97.41ms +step:689/1670 train_time:67114ms step_avg:97.41ms +step:690/1670 train_time:67211ms step_avg:97.41ms +step:691/1670 train_time:67308ms step_avg:97.41ms +step:692/1670 train_time:67407ms step_avg:97.41ms +step:693/1670 train_time:67505ms step_avg:97.41ms +step:694/1670 train_time:67603ms step_avg:97.41ms +step:695/1670 train_time:67702ms step_avg:97.41ms +step:696/1670 train_time:67799ms step_avg:97.41ms +step:697/1670 train_time:67896ms step_avg:97.41ms +step:698/1670 train_time:67992ms step_avg:97.41ms +step:699/1670 train_time:68088ms step_avg:97.41ms +step:700/1670 train_time:68186ms step_avg:97.41ms 
+step:701/1670 train_time:68283ms step_avg:97.41ms +step:702/1670 train_time:68382ms step_avg:97.41ms +step:703/1670 train_time:68480ms step_avg:97.41ms +step:704/1670 train_time:68578ms step_avg:97.41ms +step:705/1670 train_time:68676ms step_avg:97.41ms +step:706/1670 train_time:68773ms step_avg:97.41ms +step:707/1670 train_time:68870ms step_avg:97.41ms +step:708/1670 train_time:68967ms step_avg:97.41ms +step:709/1670 train_time:69064ms step_avg:97.41ms +step:710/1670 train_time:69161ms step_avg:97.41ms +step:711/1670 train_time:69259ms step_avg:97.41ms +step:712/1670 train_time:69356ms step_avg:97.41ms +step:713/1670 train_time:69453ms step_avg:97.41ms +step:714/1670 train_time:69550ms step_avg:97.41ms +step:715/1670 train_time:69648ms step_avg:97.41ms +step:716/1670 train_time:69746ms step_avg:97.41ms +step:717/1670 train_time:69844ms step_avg:97.41ms +step:718/1670 train_time:69941ms step_avg:97.41ms +step:719/1670 train_time:70039ms step_avg:97.41ms +step:720/1670 train_time:70136ms step_avg:97.41ms +step:721/1670 train_time:70233ms step_avg:97.41ms +step:722/1670 train_time:70330ms step_avg:97.41ms +step:723/1670 train_time:70428ms step_avg:97.41ms +step:724/1670 train_time:70525ms step_avg:97.41ms +step:725/1670 train_time:70624ms step_avg:97.41ms +step:726/1670 train_time:70721ms step_avg:97.41ms +step:727/1670 train_time:70818ms step_avg:97.41ms +step:728/1670 train_time:70916ms step_avg:97.41ms +step:729/1670 train_time:71013ms step_avg:97.41ms +step:730/1670 train_time:71110ms step_avg:97.41ms +step:731/1670 train_time:71208ms step_avg:97.41ms +step:732/1670 train_time:71306ms step_avg:97.41ms +step:733/1670 train_time:71403ms step_avg:97.41ms +step:734/1670 train_time:71500ms step_avg:97.41ms +step:735/1670 train_time:71599ms step_avg:97.41ms +step:736/1670 train_time:71696ms step_avg:97.41ms +step:737/1670 train_time:71794ms step_avg:97.41ms +step:738/1670 train_time:71890ms step_avg:97.41ms +step:739/1670 train_time:71987ms step_avg:97.41ms +step:740/1670 train_time:72085ms step_avg:97.41ms +step:741/1670 train_time:72183ms step_avg:97.41ms +step:742/1670 train_time:72281ms step_avg:97.41ms +step:743/1670 train_time:72379ms step_avg:97.41ms +step:744/1670 train_time:72476ms step_avg:97.41ms +step:745/1670 train_time:72573ms step_avg:97.41ms +step:746/1670 train_time:72670ms step_avg:97.41ms +step:747/1670 train_time:72768ms step_avg:97.41ms +step:748/1670 train_time:72864ms step_avg:97.41ms +step:749/1670 train_time:72963ms step_avg:97.41ms +step:750/1670 train_time:73061ms step_avg:97.41ms +step:750/1670 val_loss:3.5609 train_time:73158ms step_avg:97.54ms +step:751/1670 train_time:73181ms step_avg:97.44ms +step:752/1670 train_time:73262ms step_avg:97.42ms +step:753/1670 train_time:73361ms step_avg:97.43ms +step:754/1670 train_time:73460ms step_avg:97.43ms +step:755/1670 train_time:73557ms step_avg:97.43ms +step:756/1670 train_time:73653ms step_avg:97.42ms +step:757/1670 train_time:73749ms step_avg:97.42ms +step:758/1670 train_time:73846ms step_avg:97.42ms +step:759/1670 train_time:73942ms step_avg:97.42ms +step:760/1670 train_time:74038ms step_avg:97.42ms +step:761/1670 train_time:74137ms step_avg:97.42ms +step:762/1670 train_time:74237ms step_avg:97.42ms +step:763/1670 train_time:74337ms step_avg:97.43ms +step:764/1670 train_time:74435ms step_avg:97.43ms +step:765/1670 train_time:74533ms step_avg:97.43ms +step:766/1670 train_time:74631ms step_avg:97.43ms +step:767/1670 train_time:74728ms step_avg:97.43ms +step:768/1670 train_time:74825ms step_avg:97.43ms +step:769/1670 
train_time:74922ms step_avg:97.43ms +step:770/1670 train_time:75018ms step_avg:97.43ms +step:771/1670 train_time:75116ms step_avg:97.43ms +step:772/1670 train_time:75214ms step_avg:97.43ms +step:773/1670 train_time:75313ms step_avg:97.43ms +step:774/1670 train_time:75412ms step_avg:97.43ms +step:775/1670 train_time:75510ms step_avg:97.43ms +step:776/1670 train_time:75607ms step_avg:97.43ms +step:777/1670 train_time:75704ms step_avg:97.43ms +step:778/1670 train_time:75801ms step_avg:97.43ms +step:779/1670 train_time:75897ms step_avg:97.43ms +step:780/1670 train_time:75995ms step_avg:97.43ms +step:781/1670 train_time:76091ms step_avg:97.43ms +step:782/1670 train_time:76189ms step_avg:97.43ms +step:783/1670 train_time:76288ms step_avg:97.43ms +step:784/1670 train_time:76386ms step_avg:97.43ms +step:785/1670 train_time:76483ms step_avg:97.43ms +step:786/1670 train_time:76581ms step_avg:97.43ms +step:787/1670 train_time:76679ms step_avg:97.43ms +step:788/1670 train_time:76776ms step_avg:97.43ms +step:789/1670 train_time:76874ms step_avg:97.43ms +step:790/1670 train_time:76972ms step_avg:97.43ms +step:791/1670 train_time:77069ms step_avg:97.43ms +step:792/1670 train_time:77167ms step_avg:97.43ms +step:793/1670 train_time:77267ms step_avg:97.44ms +step:794/1670 train_time:77364ms step_avg:97.44ms +step:795/1670 train_time:77462ms step_avg:97.44ms +step:796/1670 train_time:77558ms step_avg:97.44ms +step:797/1670 train_time:77656ms step_avg:97.43ms +step:798/1670 train_time:77752ms step_avg:97.43ms +step:799/1670 train_time:77849ms step_avg:97.43ms +step:800/1670 train_time:77946ms step_avg:97.43ms +step:801/1670 train_time:78043ms step_avg:97.43ms +step:802/1670 train_time:78140ms step_avg:97.43ms +step:803/1670 train_time:78238ms step_avg:97.43ms +step:804/1670 train_time:78336ms step_avg:97.43ms +step:805/1670 train_time:78436ms step_avg:97.44ms +step:806/1670 train_time:78535ms step_avg:97.44ms +step:807/1670 train_time:78633ms step_avg:97.44ms +step:808/1670 train_time:78730ms step_avg:97.44ms +step:809/1670 train_time:78827ms step_avg:97.44ms +step:810/1670 train_time:78923ms step_avg:97.44ms +step:811/1670 train_time:79020ms step_avg:97.44ms +step:812/1670 train_time:79118ms step_avg:97.44ms +step:813/1670 train_time:79216ms step_avg:97.44ms +step:814/1670 train_time:79314ms step_avg:97.44ms +step:815/1670 train_time:79412ms step_avg:97.44ms +step:816/1670 train_time:79510ms step_avg:97.44ms +step:817/1670 train_time:79608ms step_avg:97.44ms +step:818/1670 train_time:79705ms step_avg:97.44ms +step:819/1670 train_time:79802ms step_avg:97.44ms +step:820/1670 train_time:79899ms step_avg:97.44ms +step:821/1670 train_time:79996ms step_avg:97.44ms +step:822/1670 train_time:80093ms step_avg:97.44ms +step:823/1670 train_time:80192ms step_avg:97.44ms +step:824/1670 train_time:80291ms step_avg:97.44ms +step:825/1670 train_time:80388ms step_avg:97.44ms +step:826/1670 train_time:80486ms step_avg:97.44ms +step:827/1670 train_time:80583ms step_avg:97.44ms +step:828/1670 train_time:80680ms step_avg:97.44ms +step:829/1670 train_time:80777ms step_avg:97.44ms +step:830/1670 train_time:80875ms step_avg:97.44ms +step:831/1670 train_time:80973ms step_avg:97.44ms +step:832/1670 train_time:81071ms step_avg:97.44ms +step:833/1670 train_time:81168ms step_avg:97.44ms +step:834/1670 train_time:81265ms step_avg:97.44ms +step:835/1670 train_time:81363ms step_avg:97.44ms +step:836/1670 train_time:81460ms step_avg:97.44ms +step:837/1670 train_time:81558ms step_avg:97.44ms +step:838/1670 train_time:81655ms step_avg:97.44ms 
+step:839/1670 train_time:81753ms step_avg:97.44ms +step:840/1670 train_time:81851ms step_avg:97.44ms +step:841/1670 train_time:81949ms step_avg:97.44ms +step:842/1670 train_time:82045ms step_avg:97.44ms +step:843/1670 train_time:82142ms step_avg:97.44ms +step:844/1670 train_time:82239ms step_avg:97.44ms +step:845/1670 train_time:82335ms step_avg:97.44ms +step:846/1670 train_time:82434ms step_avg:97.44ms +step:847/1670 train_time:82534ms step_avg:97.44ms +step:848/1670 train_time:82631ms step_avg:97.44ms +step:849/1670 train_time:82729ms step_avg:97.44ms +step:850/1670 train_time:82826ms step_avg:97.44ms +step:851/1670 train_time:83088ms step_avg:97.64ms +step:852/1670 train_time:83261ms step_avg:97.72ms +step:853/1670 train_time:83356ms step_avg:97.72ms +step:854/1670 train_time:83453ms step_avg:97.72ms +step:855/1670 train_time:83550ms step_avg:97.72ms +step:856/1670 train_time:83646ms step_avg:97.72ms +step:857/1670 train_time:83742ms step_avg:97.71ms +step:858/1670 train_time:83838ms step_avg:97.71ms +step:859/1670 train_time:83935ms step_avg:97.71ms +step:860/1670 train_time:84031ms step_avg:97.71ms +step:861/1670 train_time:84135ms step_avg:97.72ms +step:862/1670 train_time:84238ms step_avg:97.72ms +step:863/1670 train_time:84337ms step_avg:97.73ms +step:864/1670 train_time:84435ms step_avg:97.73ms +step:865/1670 train_time:84532ms step_avg:97.73ms +step:866/1670 train_time:84630ms step_avg:97.73ms +step:867/1670 train_time:84727ms step_avg:97.72ms +step:868/1670 train_time:84823ms step_avg:97.72ms +step:869/1670 train_time:84920ms step_avg:97.72ms +step:870/1670 train_time:85016ms step_avg:97.72ms +step:871/1670 train_time:85114ms step_avg:97.72ms +step:872/1670 train_time:85215ms step_avg:97.72ms +step:873/1670 train_time:85314ms step_avg:97.73ms +step:874/1670 train_time:85413ms step_avg:97.73ms +step:875/1670 train_time:85511ms step_avg:97.73ms +step:875/1670 val_loss:3.5199 train_time:85608ms step_avg:97.84ms +step:876/1670 train_time:85630ms step_avg:97.75ms +step:877/1670 train_time:85714ms step_avg:97.74ms +step:878/1670 train_time:85817ms step_avg:97.74ms +step:879/1670 train_time:85915ms step_avg:97.74ms +step:880/1670 train_time:86012ms step_avg:97.74ms +step:881/1670 train_time:86108ms step_avg:97.74ms +step:882/1670 train_time:86205ms step_avg:97.74ms +step:883/1670 train_time:86301ms step_avg:97.74ms +step:884/1670 train_time:86398ms step_avg:97.73ms +step:885/1670 train_time:86495ms step_avg:97.73ms +step:886/1670 train_time:86594ms step_avg:97.74ms +step:887/1670 train_time:86695ms step_avg:97.74ms +step:888/1670 train_time:86796ms step_avg:97.74ms +step:889/1670 train_time:86894ms step_avg:97.74ms +step:890/1670 train_time:86992ms step_avg:97.74ms +step:891/1670 train_time:87088ms step_avg:97.74ms +step:892/1670 train_time:87184ms step_avg:97.74ms +step:893/1670 train_time:87281ms step_avg:97.74ms +step:894/1670 train_time:87377ms step_avg:97.74ms +step:895/1670 train_time:87474ms step_avg:97.74ms +step:896/1670 train_time:87572ms step_avg:97.74ms +step:897/1670 train_time:87671ms step_avg:97.74ms +step:898/1670 train_time:87769ms step_avg:97.74ms +step:899/1670 train_time:87867ms step_avg:97.74ms +step:900/1670 train_time:87964ms step_avg:97.74ms +step:901/1670 train_time:88061ms step_avg:97.74ms +step:902/1670 train_time:88158ms step_avg:97.74ms +step:903/1670 train_time:88255ms step_avg:97.74ms +step:904/1670 train_time:88352ms step_avg:97.73ms +step:905/1670 train_time:88449ms step_avg:97.73ms +step:906/1670 train_time:88546ms step_avg:97.73ms +step:907/1670 
train_time:88644ms step_avg:97.73ms +step:908/1670 train_time:88742ms step_avg:97.73ms +step:909/1670 train_time:88841ms step_avg:97.73ms +step:910/1670 train_time:88940ms step_avg:97.74ms +step:911/1670 train_time:89038ms step_avg:97.74ms +step:912/1670 train_time:89136ms step_avg:97.74ms +step:913/1670 train_time:89234ms step_avg:97.74ms +step:914/1670 train_time:89330ms step_avg:97.74ms +step:915/1670 train_time:89427ms step_avg:97.73ms +step:916/1670 train_time:89524ms step_avg:97.73ms +step:917/1670 train_time:89621ms step_avg:97.73ms +step:918/1670 train_time:89719ms step_avg:97.73ms +step:919/1670 train_time:89817ms step_avg:97.73ms +step:920/1670 train_time:89915ms step_avg:97.73ms +step:921/1670 train_time:90014ms step_avg:97.74ms +step:922/1670 train_time:90112ms step_avg:97.73ms +step:923/1670 train_time:90209ms step_avg:97.73ms +step:924/1670 train_time:90306ms step_avg:97.73ms +step:925/1670 train_time:90403ms step_avg:97.73ms +step:926/1670 train_time:90500ms step_avg:97.73ms +step:927/1670 train_time:90597ms step_avg:97.73ms +step:928/1670 train_time:90694ms step_avg:97.73ms +step:929/1670 train_time:90793ms step_avg:97.73ms +step:930/1670 train_time:90892ms step_avg:97.73ms +step:931/1670 train_time:90989ms step_avg:97.73ms +step:932/1670 train_time:91087ms step_avg:97.73ms +step:933/1670 train_time:91185ms step_avg:97.73ms +step:934/1670 train_time:91282ms step_avg:97.73ms +step:935/1670 train_time:91380ms step_avg:97.73ms +step:936/1670 train_time:91476ms step_avg:97.73ms +step:937/1670 train_time:91574ms step_avg:97.73ms +step:938/1670 train_time:91671ms step_avg:97.73ms +step:939/1670 train_time:91770ms step_avg:97.73ms +step:940/1670 train_time:91868ms step_avg:97.73ms +step:941/1670 train_time:91965ms step_avg:97.73ms +step:942/1670 train_time:92062ms step_avg:97.73ms +step:943/1670 train_time:92159ms step_avg:97.73ms +step:944/1670 train_time:92257ms step_avg:97.73ms +step:945/1670 train_time:92355ms step_avg:97.73ms +step:946/1670 train_time:92452ms step_avg:97.73ms +step:947/1670 train_time:92549ms step_avg:97.73ms +step:948/1670 train_time:92646ms step_avg:97.73ms +step:949/1670 train_time:92743ms step_avg:97.73ms +step:950/1670 train_time:92841ms step_avg:97.73ms +step:951/1670 train_time:92939ms step_avg:97.73ms +step:952/1670 train_time:93037ms step_avg:97.73ms +step:953/1670 train_time:93136ms step_avg:97.73ms +step:954/1670 train_time:93233ms step_avg:97.73ms +step:955/1670 train_time:93331ms step_avg:97.73ms +step:956/1670 train_time:93428ms step_avg:97.73ms +step:957/1670 train_time:93524ms step_avg:97.73ms +step:958/1670 train_time:93621ms step_avg:97.73ms +step:959/1670 train_time:93718ms step_avg:97.72ms +step:960/1670 train_time:93816ms step_avg:97.72ms +step:961/1670 train_time:93914ms step_avg:97.73ms +step:962/1670 train_time:94013ms step_avg:97.73ms +step:963/1670 train_time:94112ms step_avg:97.73ms +step:964/1670 train_time:94210ms step_avg:97.73ms +step:965/1670 train_time:94308ms step_avg:97.73ms +step:966/1670 train_time:94406ms step_avg:97.73ms +step:967/1670 train_time:94502ms step_avg:97.73ms +step:968/1670 train_time:94598ms step_avg:97.73ms +step:969/1670 train_time:94696ms step_avg:97.73ms +step:970/1670 train_time:94794ms step_avg:97.73ms +step:971/1670 train_time:94892ms step_avg:97.73ms +step:972/1670 train_time:94989ms step_avg:97.73ms +step:973/1670 train_time:95087ms step_avg:97.73ms +step:974/1670 train_time:95184ms step_avg:97.73ms +step:975/1670 train_time:95282ms step_avg:97.72ms +step:976/1670 train_time:95378ms step_avg:97.72ms 
+step:977/1670 train_time:95476ms step_avg:97.72ms +step:978/1670 train_time:95574ms step_avg:97.72ms +step:979/1670 train_time:95672ms step_avg:97.72ms +step:980/1670 train_time:95769ms step_avg:97.72ms +step:981/1670 train_time:95867ms step_avg:97.72ms +step:982/1670 train_time:95965ms step_avg:97.72ms +step:983/1670 train_time:96063ms step_avg:97.72ms +step:984/1670 train_time:96160ms step_avg:97.72ms +step:985/1670 train_time:96258ms step_avg:97.72ms +step:986/1670 train_time:96355ms step_avg:97.72ms +step:987/1670 train_time:96452ms step_avg:97.72ms +step:988/1670 train_time:96550ms step_avg:97.72ms +step:989/1670 train_time:96647ms step_avg:97.72ms +step:990/1670 train_time:96744ms step_avg:97.72ms +step:991/1670 train_time:96842ms step_avg:97.72ms +step:992/1670 train_time:96939ms step_avg:97.72ms +step:993/1670 train_time:97036ms step_avg:97.72ms +step:994/1670 train_time:97135ms step_avg:97.72ms +step:995/1670 train_time:97233ms step_avg:97.72ms +step:996/1670 train_time:97331ms step_avg:97.72ms +step:997/1670 train_time:97428ms step_avg:97.72ms +step:998/1670 train_time:97524ms step_avg:97.72ms +step:999/1670 train_time:97622ms step_avg:97.72ms +step:1000/1670 train_time:97719ms step_avg:97.72ms +step:1000/1670 val_loss:3.4777 train_time:97816ms step_avg:97.82ms +step:1001/1670 train_time:97839ms step_avg:97.74ms +step:1002/1670 train_time:97921ms step_avg:97.73ms +step:1003/1670 train_time:98021ms step_avg:97.73ms +step:1004/1670 train_time:98120ms step_avg:97.73ms +step:1005/1670 train_time:98217ms step_avg:97.73ms +step:1006/1670 train_time:98313ms step_avg:97.73ms +step:1007/1670 train_time:98409ms step_avg:97.73ms +step:1008/1670 train_time:98506ms step_avg:97.72ms +step:1009/1670 train_time:98602ms step_avg:97.72ms +step:1010/1670 train_time:98699ms step_avg:97.72ms +step:1011/1670 train_time:98798ms step_avg:97.72ms +step:1012/1670 train_time:98898ms step_avg:97.72ms +step:1013/1670 train_time:98998ms step_avg:97.73ms +step:1014/1670 train_time:99097ms step_avg:97.73ms +step:1015/1670 train_time:99194ms step_avg:97.73ms +step:1016/1670 train_time:99292ms step_avg:97.73ms +step:1017/1670 train_time:99388ms step_avg:97.73ms +step:1018/1670 train_time:99485ms step_avg:97.73ms +step:1019/1670 train_time:99582ms step_avg:97.72ms +step:1020/1670 train_time:99678ms step_avg:97.72ms +step:1021/1670 train_time:99776ms step_avg:97.72ms +step:1022/1670 train_time:99873ms step_avg:97.72ms +step:1023/1670 train_time:99972ms step_avg:97.72ms +step:1024/1670 train_time:100070ms step_avg:97.72ms +step:1025/1670 train_time:100168ms step_avg:97.72ms +step:1026/1670 train_time:100266ms step_avg:97.73ms +step:1027/1670 train_time:100364ms step_avg:97.73ms +step:1028/1670 train_time:100461ms step_avg:97.72ms +step:1029/1670 train_time:100557ms step_avg:97.72ms +step:1030/1670 train_time:100654ms step_avg:97.72ms +step:1031/1670 train_time:100751ms step_avg:97.72ms +step:1032/1670 train_time:100850ms step_avg:97.72ms +step:1033/1670 train_time:100950ms step_avg:97.72ms +step:1034/1670 train_time:101049ms step_avg:97.73ms +step:1035/1670 train_time:101147ms step_avg:97.73ms +step:1036/1670 train_time:101245ms step_avg:97.73ms +step:1037/1670 train_time:101343ms step_avg:97.73ms +step:1038/1670 train_time:101439ms step_avg:97.73ms +step:1039/1670 train_time:101536ms step_avg:97.72ms +step:1040/1670 train_time:101632ms step_avg:97.72ms +step:1041/1670 train_time:101730ms step_avg:97.72ms +step:1042/1670 train_time:101828ms step_avg:97.72ms +step:1043/1670 train_time:101927ms step_avg:97.72ms 
+step:1044/1670 train_time:102026ms step_avg:97.73ms +step:1045/1670 train_time:102123ms step_avg:97.73ms +step:1046/1670 train_time:102222ms step_avg:97.73ms +step:1047/1670 train_time:102319ms step_avg:97.73ms +step:1048/1670 train_time:102417ms step_avg:97.73ms +step:1049/1670 train_time:102514ms step_avg:97.73ms +step:1050/1670 train_time:102611ms step_avg:97.72ms +step:1051/1670 train_time:102708ms step_avg:97.72ms +step:1052/1670 train_time:102806ms step_avg:97.72ms +step:1053/1670 train_time:103228ms step_avg:98.03ms +step:1054/1670 train_time:103323ms step_avg:98.03ms +step:1055/1670 train_time:103419ms step_avg:98.03ms +step:1056/1670 train_time:103515ms step_avg:98.03ms +step:1057/1670 train_time:103610ms step_avg:98.02ms +step:1058/1670 train_time:103707ms step_avg:98.02ms +step:1059/1670 train_time:103803ms step_avg:98.02ms +step:1060/1670 train_time:103900ms step_avg:98.02ms +step:1061/1670 train_time:103997ms step_avg:98.02ms +step:1062/1670 train_time:104337ms step_avg:98.25ms +step:1063/1670 train_time:104437ms step_avg:98.25ms +step:1064/1670 train_time:104532ms step_avg:98.24ms +step:1065/1670 train_time:104629ms step_avg:98.24ms +step:1066/1670 train_time:104725ms step_avg:98.24ms +step:1067/1670 train_time:104821ms step_avg:98.24ms +step:1068/1670 train_time:104917ms step_avg:98.24ms +step:1069/1670 train_time:105014ms step_avg:98.24ms +step:1070/1670 train_time:105110ms step_avg:98.23ms +step:1071/1670 train_time:105207ms step_avg:98.23ms +step:1072/1670 train_time:105308ms step_avg:98.23ms +step:1073/1670 train_time:105410ms step_avg:98.24ms +step:1074/1670 train_time:105510ms step_avg:98.24ms +step:1075/1670 train_time:105608ms step_avg:98.24ms +step:1076/1670 train_time:105705ms step_avg:98.24ms +step:1077/1670 train_time:105802ms step_avg:98.24ms +step:1078/1670 train_time:105899ms step_avg:98.24ms +step:1079/1670 train_time:105995ms step_avg:98.23ms +step:1080/1670 train_time:106092ms step_avg:98.23ms +step:1081/1670 train_time:106188ms step_avg:98.23ms +step:1082/1670 train_time:106286ms step_avg:98.23ms +step:1083/1670 train_time:106387ms step_avg:98.23ms +step:1084/1670 train_time:106486ms step_avg:98.23ms +step:1085/1670 train_time:106585ms step_avg:98.24ms +step:1086/1670 train_time:106682ms step_avg:98.23ms +step:1087/1670 train_time:106779ms step_avg:98.23ms +step:1088/1670 train_time:106876ms step_avg:98.23ms +step:1089/1670 train_time:106972ms step_avg:98.23ms +step:1090/1670 train_time:107069ms step_avg:98.23ms +step:1091/1670 train_time:107166ms step_avg:98.23ms +step:1092/1670 train_time:107263ms step_avg:98.23ms +step:1093/1670 train_time:107363ms step_avg:98.23ms +step:1094/1670 train_time:107461ms step_avg:98.23ms +step:1095/1670 train_time:107560ms step_avg:98.23ms +step:1096/1670 train_time:107658ms step_avg:98.23ms +step:1097/1670 train_time:107755ms step_avg:98.23ms +step:1098/1670 train_time:107851ms step_avg:98.23ms +step:1099/1670 train_time:107948ms step_avg:98.22ms +step:1100/1670 train_time:108045ms step_avg:98.22ms +step:1101/1670 train_time:108143ms step_avg:98.22ms +step:1102/1670 train_time:108240ms step_avg:98.22ms +step:1103/1670 train_time:108336ms step_avg:98.22ms +step:1104/1670 train_time:108434ms step_avg:98.22ms +step:1105/1670 train_time:108531ms step_avg:98.22ms +step:1106/1670 train_time:108629ms step_avg:98.22ms +step:1107/1670 train_time:108727ms step_avg:98.22ms +step:1108/1670 train_time:108825ms step_avg:98.22ms +step:1109/1670 train_time:108924ms step_avg:98.22ms +step:1110/1670 train_time:109021ms step_avg:98.22ms 
+step:1111/1670 train_time:109117ms step_avg:98.22ms +step:1112/1670 train_time:109215ms step_avg:98.21ms +step:1113/1670 train_time:109311ms step_avg:98.21ms +step:1114/1670 train_time:109409ms step_avg:98.21ms +step:1115/1670 train_time:109507ms step_avg:98.21ms +step:1116/1670 train_time:109607ms step_avg:98.21ms +step:1117/1670 train_time:109705ms step_avg:98.21ms +step:1118/1670 train_time:109804ms step_avg:98.21ms +step:1119/1670 train_time:109904ms step_avg:98.22ms +step:1120/1670 train_time:110002ms step_avg:98.22ms +step:1121/1670 train_time:110101ms step_avg:98.22ms +step:1122/1670 train_time:110200ms step_avg:98.22ms +step:1123/1670 train_time:110299ms step_avg:98.22ms +step:1124/1670 train_time:110398ms step_avg:98.22ms +step:1125/1670 train_time:110497ms step_avg:98.22ms +step:1125/1670 val_loss:3.4234 train_time:110594ms step_avg:98.31ms +step:1126/1670 train_time:110618ms step_avg:98.24ms +step:1127/1670 train_time:110705ms step_avg:98.23ms +step:1128/1670 train_time:110804ms step_avg:98.23ms +step:1129/1670 train_time:110901ms step_avg:98.23ms +step:1130/1670 train_time:110998ms step_avg:98.23ms +step:1131/1670 train_time:111095ms step_avg:98.23ms +step:1132/1670 train_time:111192ms step_avg:98.23ms +step:1133/1670 train_time:111289ms step_avg:98.22ms +step:1134/1670 train_time:111386ms step_avg:98.22ms +step:1135/1670 train_time:111485ms step_avg:98.22ms +step:1136/1670 train_time:111587ms step_avg:98.23ms +step:1137/1670 train_time:111689ms step_avg:98.23ms +step:1138/1670 train_time:111788ms step_avg:98.23ms +step:1139/1670 train_time:111887ms step_avg:98.23ms +step:1140/1670 train_time:111986ms step_avg:98.23ms +step:1141/1670 train_time:112084ms step_avg:98.23ms +step:1142/1670 train_time:112181ms step_avg:98.23ms +step:1143/1670 train_time:112279ms step_avg:98.23ms +step:1144/1670 train_time:112376ms step_avg:98.23ms +step:1145/1670 train_time:112476ms step_avg:98.23ms +step:1146/1670 train_time:112576ms step_avg:98.23ms +step:1147/1670 train_time:112676ms step_avg:98.24ms +step:1148/1670 train_time:112776ms step_avg:98.24ms +step:1149/1670 train_time:112874ms step_avg:98.24ms +step:1150/1670 train_time:112972ms step_avg:98.24ms +step:1151/1670 train_time:113069ms step_avg:98.24ms +step:1152/1670 train_time:113166ms step_avg:98.23ms +step:1153/1670 train_time:113264ms step_avg:98.23ms +step:1154/1670 train_time:113362ms step_avg:98.23ms +step:1155/1670 train_time:113460ms step_avg:98.23ms +step:1156/1670 train_time:113562ms step_avg:98.24ms +step:1157/1670 train_time:113662ms step_avg:98.24ms +step:1158/1670 train_time:113763ms step_avg:98.24ms +step:1159/1670 train_time:113863ms step_avg:98.24ms +step:1160/1670 train_time:113961ms step_avg:98.24ms +step:1161/1670 train_time:114060ms step_avg:98.24ms +step:1162/1670 train_time:114158ms step_avg:98.24ms +step:1163/1670 train_time:114256ms step_avg:98.24ms +step:1164/1670 train_time:114353ms step_avg:98.24ms +step:1165/1670 train_time:114449ms step_avg:98.24ms +step:1166/1670 train_time:114548ms step_avg:98.24ms +step:1167/1670 train_time:114646ms step_avg:98.24ms +step:1168/1670 train_time:114747ms step_avg:98.24ms +step:1169/1670 train_time:114846ms step_avg:98.24ms +step:1170/1670 train_time:114945ms step_avg:98.24ms +step:1171/1670 train_time:115044ms step_avg:98.24ms +step:1172/1670 train_time:115143ms step_avg:98.25ms +step:1173/1670 train_time:115242ms step_avg:98.25ms +step:1174/1670 train_time:115341ms step_avg:98.25ms +step:1175/1670 train_time:115441ms step_avg:98.25ms +step:1176/1670 train_time:115540ms 
step_avg:98.25ms +step:1177/1670 train_time:115638ms step_avg:98.25ms +step:1178/1670 train_time:115736ms step_avg:98.25ms +step:1179/1670 train_time:115834ms step_avg:98.25ms +step:1180/1670 train_time:115933ms step_avg:98.25ms +step:1181/1670 train_time:116031ms step_avg:98.25ms +step:1182/1670 train_time:116129ms step_avg:98.25ms +step:1183/1670 train_time:116227ms step_avg:98.25ms +step:1184/1670 train_time:116325ms step_avg:98.25ms +step:1185/1670 train_time:116423ms step_avg:98.25ms +step:1186/1670 train_time:116523ms step_avg:98.25ms +step:1187/1670 train_time:116621ms step_avg:98.25ms +step:1188/1670 train_time:116720ms step_avg:98.25ms +step:1189/1670 train_time:116819ms step_avg:98.25ms +step:1190/1670 train_time:116920ms step_avg:98.25ms +step:1191/1670 train_time:117021ms step_avg:98.25ms +step:1192/1670 train_time:117120ms step_avg:98.26ms +step:1193/1670 train_time:117219ms step_avg:98.26ms +step:1194/1670 train_time:117317ms step_avg:98.26ms +step:1195/1670 train_time:117416ms step_avg:98.26ms +step:1196/1670 train_time:117514ms step_avg:98.26ms +step:1197/1670 train_time:117611ms step_avg:98.25ms +step:1198/1670 train_time:117708ms step_avg:98.25ms +step:1199/1670 train_time:117807ms step_avg:98.25ms +step:1200/1670 train_time:117904ms step_avg:98.25ms +step:1201/1670 train_time:118004ms step_avg:98.25ms +step:1202/1670 train_time:118102ms step_avg:98.25ms +step:1203/1670 train_time:118201ms step_avg:98.26ms +step:1204/1670 train_time:118299ms step_avg:98.25ms +step:1205/1670 train_time:118397ms step_avg:98.25ms +step:1206/1670 train_time:118496ms step_avg:98.26ms +step:1207/1670 train_time:118594ms step_avg:98.26ms +step:1208/1670 train_time:118691ms step_avg:98.25ms +step:1209/1670 train_time:118789ms step_avg:98.25ms +step:1210/1670 train_time:118887ms step_avg:98.25ms +step:1211/1670 train_time:118984ms step_avg:98.25ms +step:1212/1670 train_time:119083ms step_avg:98.25ms +step:1213/1670 train_time:119182ms step_avg:98.25ms +step:1214/1670 train_time:119282ms step_avg:98.26ms +step:1215/1670 train_time:119381ms step_avg:98.26ms +step:1216/1670 train_time:119481ms step_avg:98.26ms +step:1217/1670 train_time:119580ms step_avg:98.26ms +step:1218/1670 train_time:119681ms step_avg:98.26ms +step:1219/1670 train_time:119779ms step_avg:98.26ms +step:1220/1670 train_time:119878ms step_avg:98.26ms +step:1221/1670 train_time:119975ms step_avg:98.26ms +step:1222/1670 train_time:120072ms step_avg:98.26ms +step:1223/1670 train_time:120170ms step_avg:98.26ms +step:1224/1670 train_time:120268ms step_avg:98.26ms +step:1225/1670 train_time:120366ms step_avg:98.26ms +step:1226/1670 train_time:120465ms step_avg:98.26ms +step:1227/1670 train_time:120563ms step_avg:98.26ms +step:1228/1670 train_time:120663ms step_avg:98.26ms +step:1229/1670 train_time:120763ms step_avg:98.26ms +step:1230/1670 train_time:120861ms step_avg:98.26ms +step:1231/1670 train_time:120960ms step_avg:98.26ms +step:1232/1670 train_time:121058ms step_avg:98.26ms +step:1233/1670 train_time:121156ms step_avg:98.26ms +step:1234/1670 train_time:121255ms step_avg:98.26ms +step:1235/1670 train_time:121353ms step_avg:98.26ms +step:1236/1670 train_time:121450ms step_avg:98.26ms +step:1237/1670 train_time:121548ms step_avg:98.26ms +step:1238/1670 train_time:121646ms step_avg:98.26ms +step:1239/1670 train_time:121745ms step_avg:98.26ms +step:1240/1670 train_time:121843ms step_avg:98.26ms +step:1241/1670 train_time:121941ms step_avg:98.26ms +step:1242/1670 train_time:122041ms step_avg:98.26ms +step:1243/1670 train_time:122140ms 
step_avg:98.26ms +step:1244/1670 train_time:122238ms step_avg:98.26ms +step:1245/1670 train_time:122336ms step_avg:98.26ms +step:1246/1670 train_time:122434ms step_avg:98.26ms +step:1247/1670 train_time:122532ms step_avg:98.26ms +step:1248/1670 train_time:122629ms step_avg:98.26ms +step:1249/1670 train_time:122727ms step_avg:98.26ms +step:1250/1670 train_time:122824ms step_avg:98.26ms +step:1250/1670 val_loss:3.3805 train_time:122921ms step_avg:98.34ms +step:1251/1670 train_time:122944ms step_avg:98.28ms +step:1252/1670 train_time:123027ms step_avg:98.26ms +step:1253/1670 train_time:123127ms step_avg:98.27ms +step:1254/1670 train_time:123226ms step_avg:98.27ms +step:1255/1670 train_time:123322ms step_avg:98.26ms +step:1256/1670 train_time:123419ms step_avg:98.26ms +step:1257/1670 train_time:123516ms step_avg:98.26ms +step:1258/1670 train_time:123613ms step_avg:98.26ms +step:1259/1670 train_time:123710ms step_avg:98.26ms +step:1260/1670 train_time:123807ms step_avg:98.26ms +step:1261/1670 train_time:123908ms step_avg:98.26ms +step:1262/1670 train_time:124010ms step_avg:98.26ms +step:1263/1670 train_time:124111ms step_avg:98.27ms +step:1264/1670 train_time:124209ms step_avg:98.27ms +step:1265/1670 train_time:124308ms step_avg:98.27ms +step:1266/1670 train_time:124407ms step_avg:98.27ms +step:1267/1670 train_time:124505ms step_avg:98.27ms +step:1268/1670 train_time:124602ms step_avg:98.27ms +step:1269/1670 train_time:124698ms step_avg:98.27ms +step:1270/1670 train_time:124796ms step_avg:98.26ms +step:1271/1670 train_time:124894ms step_avg:98.26ms +step:1272/1670 train_time:124994ms step_avg:98.27ms +step:1273/1670 train_time:125094ms step_avg:98.27ms +step:1274/1670 train_time:125352ms step_avg:98.39ms +step:1275/1670 train_time:125536ms step_avg:98.46ms +step:1276/1670 train_time:125634ms step_avg:98.46ms +step:1277/1670 train_time:125731ms step_avg:98.46ms +step:1278/1670 train_time:125828ms step_avg:98.46ms +step:1279/1670 train_time:125925ms step_avg:98.46ms +step:1280/1670 train_time:126022ms step_avg:98.45ms +step:1281/1670 train_time:126119ms step_avg:98.45ms +step:1282/1670 train_time:126216ms step_avg:98.45ms +step:1283/1670 train_time:126314ms step_avg:98.45ms +step:1284/1670 train_time:126419ms step_avg:98.46ms +step:1285/1670 train_time:126520ms step_avg:98.46ms +step:1286/1670 train_time:126620ms step_avg:98.46ms +step:1287/1670 train_time:126719ms step_avg:98.46ms +step:1288/1670 train_time:126817ms step_avg:98.46ms +step:1289/1670 train_time:126914ms step_avg:98.46ms +step:1290/1670 train_time:127011ms step_avg:98.46ms +step:1291/1670 train_time:127109ms step_avg:98.46ms +step:1292/1670 train_time:127206ms step_avg:98.46ms +step:1293/1670 train_time:127304ms step_avg:98.46ms +step:1294/1670 train_time:127404ms step_avg:98.46ms +step:1295/1670 train_time:127504ms step_avg:98.46ms +step:1296/1670 train_time:127604ms step_avg:98.46ms +step:1297/1670 train_time:127703ms step_avg:98.46ms +step:1298/1670 train_time:127801ms step_avg:98.46ms +step:1299/1670 train_time:127899ms step_avg:98.46ms +step:1300/1670 train_time:127996ms step_avg:98.46ms +step:1301/1670 train_time:128094ms step_avg:98.46ms +step:1302/1670 train_time:128191ms step_avg:98.46ms +step:1303/1670 train_time:128289ms step_avg:98.46ms +step:1304/1670 train_time:128388ms step_avg:98.46ms +step:1305/1670 train_time:128488ms step_avg:98.46ms +step:1306/1670 train_time:128589ms step_avg:98.46ms +step:1307/1670 train_time:128690ms step_avg:98.46ms +step:1308/1670 train_time:128787ms step_avg:98.46ms +step:1309/1670 
train_time:128886ms step_avg:98.46ms +step:1310/1670 train_time:128986ms step_avg:98.46ms +step:1311/1670 train_time:129086ms step_avg:98.46ms +step:1312/1670 train_time:129183ms step_avg:98.46ms +step:1313/1670 train_time:129280ms step_avg:98.46ms +step:1314/1670 train_time:129378ms step_avg:98.46ms +step:1315/1670 train_time:129475ms step_avg:98.46ms +step:1316/1670 train_time:129575ms step_avg:98.46ms +step:1317/1670 train_time:129674ms step_avg:98.46ms +step:1318/1670 train_time:129774ms step_avg:98.46ms +step:1319/1670 train_time:129874ms step_avg:98.46ms +step:1320/1670 train_time:129973ms step_avg:98.46ms +step:1321/1670 train_time:130072ms step_avg:98.46ms +step:1322/1670 train_time:130170ms step_avg:98.46ms +step:1323/1670 train_time:130268ms step_avg:98.46ms +step:1324/1670 train_time:130366ms step_avg:98.46ms +step:1325/1670 train_time:130465ms step_avg:98.46ms +step:1326/1670 train_time:130563ms step_avg:98.46ms +step:1327/1670 train_time:130661ms step_avg:98.46ms +step:1328/1670 train_time:130759ms step_avg:98.46ms +step:1329/1670 train_time:130857ms step_avg:98.46ms +step:1330/1670 train_time:130957ms step_avg:98.46ms +step:1331/1670 train_time:131055ms step_avg:98.46ms +step:1332/1670 train_time:131154ms step_avg:98.46ms +step:1333/1670 train_time:131253ms step_avg:98.46ms +step:1334/1670 train_time:131351ms step_avg:98.46ms +step:1335/1670 train_time:131449ms step_avg:98.46ms +step:1336/1670 train_time:131547ms step_avg:98.46ms +step:1337/1670 train_time:131646ms step_avg:98.46ms +step:1338/1670 train_time:131746ms step_avg:98.46ms +step:1339/1670 train_time:131847ms step_avg:98.47ms +step:1340/1670 train_time:131947ms step_avg:98.47ms +step:1341/1670 train_time:132047ms step_avg:98.47ms +step:1342/1670 train_time:132145ms step_avg:98.47ms +step:1343/1670 train_time:132244ms step_avg:98.47ms +step:1344/1670 train_time:132341ms step_avg:98.47ms +step:1345/1670 train_time:132439ms step_avg:98.47ms +step:1346/1670 train_time:132536ms step_avg:98.47ms +step:1347/1670 train_time:132635ms step_avg:98.47ms +step:1348/1670 train_time:132733ms step_avg:98.47ms +step:1349/1670 train_time:132833ms step_avg:98.47ms +step:1350/1670 train_time:132932ms step_avg:98.47ms +step:1351/1670 train_time:133031ms step_avg:98.47ms +step:1352/1670 train_time:133129ms step_avg:98.47ms +step:1353/1670 train_time:133229ms step_avg:98.47ms +step:1354/1670 train_time:133327ms step_avg:98.47ms +step:1355/1670 train_time:133426ms step_avg:98.47ms +step:1356/1670 train_time:133524ms step_avg:98.47ms +step:1357/1670 train_time:133623ms step_avg:98.47ms +step:1358/1670 train_time:133721ms step_avg:98.47ms +step:1359/1670 train_time:133820ms step_avg:98.47ms +step:1360/1670 train_time:133918ms step_avg:98.47ms +step:1361/1670 train_time:134016ms step_avg:98.47ms +step:1362/1670 train_time:134115ms step_avg:98.47ms +step:1363/1670 train_time:134213ms step_avg:98.47ms +step:1364/1670 train_time:134311ms step_avg:98.47ms +step:1365/1670 train_time:134409ms step_avg:98.47ms +step:1366/1670 train_time:134508ms step_avg:98.47ms +step:1367/1670 train_time:134606ms step_avg:98.47ms +step:1368/1670 train_time:134705ms step_avg:98.47ms +step:1369/1670 train_time:134803ms step_avg:98.47ms +step:1370/1670 train_time:134903ms step_avg:98.47ms +step:1371/1670 train_time:135001ms step_avg:98.47ms +step:1372/1670 train_time:135100ms step_avg:98.47ms +step:1373/1670 train_time:135199ms step_avg:98.47ms +step:1374/1670 train_time:135297ms step_avg:98.47ms +step:1375/1670 train_time:135396ms step_avg:98.47ms +step:1375/1670 
val_loss:3.3434 train_time:135493ms step_avg:98.54ms +step:1376/1670 train_time:135517ms step_avg:98.49ms +step:1377/1670 train_time:135602ms step_avg:98.48ms +step:1378/1670 train_time:135701ms step_avg:98.48ms +step:1379/1670 train_time:135799ms step_avg:98.48ms +step:1380/1670 train_time:135897ms step_avg:98.48ms +step:1381/1670 train_time:135993ms step_avg:98.47ms +step:1382/1670 train_time:136091ms step_avg:98.47ms +step:1383/1670 train_time:136189ms step_avg:98.47ms +step:1384/1670 train_time:136286ms step_avg:98.47ms +step:1385/1670 train_time:136383ms step_avg:98.47ms +step:1386/1670 train_time:136484ms step_avg:98.47ms +step:1387/1670 train_time:136584ms step_avg:98.47ms +step:1388/1670 train_time:136686ms step_avg:98.48ms +step:1389/1670 train_time:136784ms step_avg:98.48ms +step:1390/1670 train_time:136882ms step_avg:98.48ms +step:1391/1670 train_time:136981ms step_avg:98.48ms +step:1392/1670 train_time:137079ms step_avg:98.48ms +step:1393/1670 train_time:137176ms step_avg:98.48ms +step:1394/1670 train_time:137273ms step_avg:98.47ms +step:1395/1670 train_time:137372ms step_avg:98.47ms +step:1396/1670 train_time:137472ms step_avg:98.48ms +step:1397/1670 train_time:137571ms step_avg:98.48ms +step:1398/1670 train_time:137672ms step_avg:98.48ms +step:1399/1670 train_time:137771ms step_avg:98.48ms +step:1400/1670 train_time:137870ms step_avg:98.48ms +step:1401/1670 train_time:137968ms step_avg:98.48ms +step:1402/1670 train_time:138067ms step_avg:98.48ms +step:1403/1670 train_time:138166ms step_avg:98.48ms +step:1404/1670 train_time:138264ms step_avg:98.48ms +step:1405/1670 train_time:138362ms step_avg:98.48ms +step:1406/1670 train_time:138461ms step_avg:98.48ms +step:1407/1670 train_time:138561ms step_avg:98.48ms +step:1408/1670 train_time:138660ms step_avg:98.48ms +step:1409/1670 train_time:138760ms step_avg:98.48ms +step:1410/1670 train_time:138858ms step_avg:98.48ms +step:1411/1670 train_time:138957ms step_avg:98.48ms +step:1412/1670 train_time:139056ms step_avg:98.48ms +step:1413/1670 train_time:139155ms step_avg:98.48ms +step:1414/1670 train_time:139254ms step_avg:98.48ms +step:1415/1670 train_time:139353ms step_avg:98.48ms +step:1416/1670 train_time:139451ms step_avg:98.48ms +step:1417/1670 train_time:139550ms step_avg:98.48ms +step:1418/1670 train_time:139649ms step_avg:98.48ms +step:1419/1670 train_time:139747ms step_avg:98.48ms +step:1420/1670 train_time:139845ms step_avg:98.48ms +step:1421/1670 train_time:139943ms step_avg:98.48ms +step:1422/1670 train_time:140042ms step_avg:98.48ms +step:1423/1670 train_time:140141ms step_avg:98.48ms +step:1424/1670 train_time:140239ms step_avg:98.48ms +step:1425/1670 train_time:140337ms step_avg:98.48ms +step:1426/1670 train_time:140436ms step_avg:98.48ms +step:1427/1670 train_time:140536ms step_avg:98.48ms +step:1428/1670 train_time:140634ms step_avg:98.48ms +step:1429/1670 train_time:140734ms step_avg:98.48ms +step:1430/1670 train_time:140834ms step_avg:98.49ms +step:1431/1670 train_time:140934ms step_avg:98.49ms +step:1432/1670 train_time:141032ms step_avg:98.49ms +step:1433/1670 train_time:141131ms step_avg:98.49ms +step:1434/1670 train_time:141230ms step_avg:98.49ms +step:1435/1670 train_time:141327ms step_avg:98.49ms +step:1436/1670 train_time:141425ms step_avg:98.49ms +step:1437/1670 train_time:141522ms step_avg:98.48ms +step:1438/1670 train_time:141620ms step_avg:98.48ms +step:1439/1670 train_time:141718ms step_avg:98.48ms +step:1440/1670 train_time:141817ms step_avg:98.48ms +step:1441/1670 train_time:141917ms step_avg:98.49ms 
+step:1442/1670 train_time:142017ms step_avg:98.49ms +step:1443/1670 train_time:142115ms step_avg:98.49ms +step:1444/1670 train_time:142215ms step_avg:98.49ms +step:1445/1670 train_time:142316ms step_avg:98.49ms +step:1446/1670 train_time:142415ms step_avg:98.49ms +step:1447/1670 train_time:142514ms step_avg:98.49ms +step:1448/1670 train_time:142613ms step_avg:98.49ms +step:1449/1670 train_time:142712ms step_avg:98.49ms +step:1450/1670 train_time:142810ms step_avg:98.49ms +step:1451/1670 train_time:142907ms step_avg:98.49ms +step:1452/1670 train_time:143007ms step_avg:98.49ms +step:1453/1670 train_time:143106ms step_avg:98.49ms +step:1454/1670 train_time:143203ms step_avg:98.49ms +step:1455/1670 train_time:143302ms step_avg:98.49ms +step:1456/1670 train_time:143401ms step_avg:98.49ms +step:1457/1670 train_time:143500ms step_avg:98.49ms +step:1458/1670 train_time:143598ms step_avg:98.49ms +step:1459/1670 train_time:143698ms step_avg:98.49ms +step:1460/1670 train_time:143797ms step_avg:98.49ms +step:1461/1670 train_time:143895ms step_avg:98.49ms +step:1462/1670 train_time:143994ms step_avg:98.49ms +step:1463/1670 train_time:144094ms step_avg:98.49ms +step:1464/1670 train_time:144193ms step_avg:98.49ms +step:1465/1670 train_time:144293ms step_avg:98.49ms +step:1466/1670 train_time:144392ms step_avg:98.49ms +step:1467/1670 train_time:144490ms step_avg:98.49ms +step:1468/1670 train_time:144588ms step_avg:98.49ms +step:1469/1670 train_time:144686ms step_avg:98.49ms +step:1470/1670 train_time:144784ms step_avg:98.49ms +step:1471/1670 train_time:144882ms step_avg:98.49ms +step:1472/1670 train_time:144980ms step_avg:98.49ms +step:1473/1670 train_time:145079ms step_avg:98.49ms +step:1474/1670 train_time:145179ms step_avg:98.49ms +step:1475/1670 train_time:145278ms step_avg:98.49ms +step:1476/1670 train_time:145378ms step_avg:98.49ms +step:1477/1670 train_time:145477ms step_avg:98.49ms +step:1478/1670 train_time:145576ms step_avg:98.50ms +step:1479/1670 train_time:145675ms step_avg:98.50ms +step:1480/1670 train_time:145774ms step_avg:98.50ms +step:1481/1670 train_time:145876ms step_avg:98.50ms +step:1482/1670 train_time:145975ms step_avg:98.50ms +step:1483/1670 train_time:146074ms step_avg:98.50ms +step:1484/1670 train_time:146172ms step_avg:98.50ms +step:1485/1670 train_time:146500ms step_avg:98.65ms +step:1486/1670 train_time:146574ms step_avg:98.64ms +step:1487/1670 train_time:146670ms step_avg:98.63ms +step:1488/1670 train_time:146766ms step_avg:98.63ms +step:1489/1670 train_time:146863ms step_avg:98.63ms +step:1490/1670 train_time:146961ms step_avg:98.63ms +step:1491/1670 train_time:147058ms step_avg:98.63ms +step:1492/1670 train_time:147156ms step_avg:98.63ms +step:1493/1670 train_time:147253ms step_avg:98.63ms +step:1494/1670 train_time:147351ms step_avg:98.63ms +step:1495/1670 train_time:147451ms step_avg:98.63ms +step:1496/1670 train_time:147554ms step_avg:98.63ms +step:1497/1670 train_time:147655ms step_avg:98.63ms +step:1498/1670 train_time:147753ms step_avg:98.63ms +step:1499/1670 train_time:147852ms step_avg:98.63ms +step:1500/1670 train_time:147950ms step_avg:98.63ms +step:1500/1670 val_loss:3.3113 train_time:148046ms step_avg:98.70ms +step:1501/1670 train_time:148069ms step_avg:98.65ms +step:1502/1670 train_time:148151ms step_avg:98.64ms +step:1503/1670 train_time:148251ms step_avg:98.64ms +step:1504/1670 train_time:148348ms step_avg:98.64ms +step:1505/1670 train_time:148445ms step_avg:98.63ms +step:1506/1670 train_time:148541ms step_avg:98.63ms +step:1507/1670 train_time:148638ms 
step_avg:98.63ms +step:1508/1670 train_time:148736ms step_avg:98.63ms +step:1509/1670 train_time:148833ms step_avg:98.63ms +step:1510/1670 train_time:148931ms step_avg:98.63ms +step:1511/1670 train_time:149031ms step_avg:98.63ms +step:1512/1670 train_time:149132ms step_avg:98.63ms +step:1513/1670 train_time:149232ms step_avg:98.63ms +step:1514/1670 train_time:149331ms step_avg:98.63ms +step:1515/1670 train_time:149430ms step_avg:98.63ms +step:1516/1670 train_time:149528ms step_avg:98.63ms +step:1517/1670 train_time:149625ms step_avg:98.63ms +step:1518/1670 train_time:149722ms step_avg:98.63ms +step:1519/1670 train_time:149820ms step_avg:98.63ms +step:1520/1670 train_time:149918ms step_avg:98.63ms +step:1521/1670 train_time:150018ms step_avg:98.63ms +step:1522/1670 train_time:150118ms step_avg:98.63ms +step:1523/1670 train_time:150218ms step_avg:98.63ms +step:1524/1670 train_time:150318ms step_avg:98.63ms +step:1525/1670 train_time:150419ms step_avg:98.64ms +step:1526/1670 train_time:150518ms step_avg:98.64ms +step:1527/1670 train_time:150618ms step_avg:98.64ms +step:1528/1670 train_time:150716ms step_avg:98.64ms +step:1529/1670 train_time:150813ms step_avg:98.64ms +step:1530/1670 train_time:150910ms step_avg:98.63ms +step:1531/1670 train_time:151008ms step_avg:98.63ms +step:1532/1670 train_time:151105ms step_avg:98.63ms +step:1533/1670 train_time:151204ms step_avg:98.63ms +step:1534/1670 train_time:151303ms step_avg:98.63ms +step:1535/1670 train_time:151401ms step_avg:98.63ms +step:1536/1670 train_time:151500ms step_avg:98.63ms +step:1537/1670 train_time:151599ms step_avg:98.63ms +step:1538/1670 train_time:151697ms step_avg:98.63ms +step:1539/1670 train_time:151795ms step_avg:98.63ms +step:1540/1670 train_time:151894ms step_avg:98.63ms +step:1541/1670 train_time:151994ms step_avg:98.63ms +step:1542/1670 train_time:152094ms step_avg:98.63ms +step:1543/1670 train_time:152194ms step_avg:98.64ms +step:1544/1670 train_time:152294ms step_avg:98.64ms +step:1545/1670 train_time:152394ms step_avg:98.64ms +step:1546/1670 train_time:152491ms step_avg:98.64ms +step:1547/1670 train_time:152589ms step_avg:98.64ms +step:1548/1670 train_time:152687ms step_avg:98.64ms +step:1549/1670 train_time:152785ms step_avg:98.63ms +step:1550/1670 train_time:152882ms step_avg:98.63ms +step:1551/1670 train_time:152981ms step_avg:98.63ms +step:1552/1670 train_time:153080ms step_avg:98.63ms +step:1553/1670 train_time:153179ms step_avg:98.63ms +step:1554/1670 train_time:153278ms step_avg:98.63ms +step:1555/1670 train_time:153378ms step_avg:98.64ms +step:1556/1670 train_time:153477ms step_avg:98.64ms +step:1557/1670 train_time:153576ms step_avg:98.64ms +step:1558/1670 train_time:153674ms step_avg:98.64ms +step:1559/1670 train_time:153772ms step_avg:98.64ms +step:1560/1670 train_time:153871ms step_avg:98.64ms +step:1561/1670 train_time:153968ms step_avg:98.63ms +step:1562/1670 train_time:154066ms step_avg:98.63ms +step:1563/1670 train_time:154164ms step_avg:98.63ms +step:1564/1670 train_time:154262ms step_avg:98.63ms +step:1565/1670 train_time:154362ms step_avg:98.63ms +step:1566/1670 train_time:154461ms step_avg:98.63ms +step:1567/1670 train_time:154559ms step_avg:98.63ms +step:1568/1670 train_time:154658ms step_avg:98.63ms +step:1569/1670 train_time:154757ms step_avg:98.63ms +step:1570/1670 train_time:154855ms step_avg:98.63ms +step:1571/1670 train_time:154953ms step_avg:98.63ms +step:1572/1670 train_time:155053ms step_avg:98.63ms +step:1573/1670 train_time:155151ms step_avg:98.63ms +step:1574/1670 train_time:155250ms 
step_avg:98.63ms +step:1575/1670 train_time:155350ms step_avg:98.63ms +step:1576/1670 train_time:155447ms step_avg:98.63ms +step:1577/1670 train_time:155545ms step_avg:98.63ms +step:1578/1670 train_time:155643ms step_avg:98.63ms +step:1579/1670 train_time:155742ms step_avg:98.63ms +step:1580/1670 train_time:155841ms step_avg:98.63ms +step:1581/1670 train_time:155941ms step_avg:98.63ms +step:1582/1670 train_time:156039ms step_avg:98.63ms +step:1583/1670 train_time:156138ms step_avg:98.63ms +step:1584/1670 train_time:156236ms step_avg:98.63ms +step:1585/1670 train_time:156335ms step_avg:98.63ms +step:1586/1670 train_time:156436ms step_avg:98.64ms +step:1587/1670 train_time:156536ms step_avg:98.64ms +step:1588/1670 train_time:156635ms step_avg:98.64ms +step:1589/1670 train_time:156735ms step_avg:98.64ms +step:1590/1670 train_time:156833ms step_avg:98.64ms +step:1591/1670 train_time:156932ms step_avg:98.64ms +step:1592/1670 train_time:157030ms step_avg:98.64ms +step:1593/1670 train_time:157129ms step_avg:98.64ms +step:1594/1670 train_time:157226ms step_avg:98.64ms +step:1595/1670 train_time:157323ms step_avg:98.64ms +step:1596/1670 train_time:157421ms step_avg:98.63ms +step:1597/1670 train_time:157520ms step_avg:98.63ms +step:1598/1670 train_time:157619ms step_avg:98.64ms +step:1599/1670 train_time:157719ms step_avg:98.64ms +step:1600/1670 train_time:157816ms step_avg:98.64ms +step:1601/1670 train_time:157917ms step_avg:98.64ms +step:1602/1670 train_time:158016ms step_avg:98.64ms +step:1603/1670 train_time:158115ms step_avg:98.64ms +step:1604/1670 train_time:158214ms step_avg:98.64ms +step:1605/1670 train_time:158315ms step_avg:98.64ms +step:1606/1670 train_time:158415ms step_avg:98.64ms +step:1607/1670 train_time:158514ms step_avg:98.64ms +step:1608/1670 train_time:158614ms step_avg:98.64ms +step:1609/1670 train_time:158712ms step_avg:98.64ms +step:1610/1670 train_time:158810ms step_avg:98.64ms +step:1611/1670 train_time:158909ms step_avg:98.64ms +step:1612/1670 train_time:159006ms step_avg:98.64ms +step:1613/1670 train_time:159104ms step_avg:98.64ms +step:1614/1670 train_time:159202ms step_avg:98.64ms +step:1615/1670 train_time:159300ms step_avg:98.64ms +step:1616/1670 train_time:159399ms step_avg:98.64ms +step:1617/1670 train_time:159498ms step_avg:98.64ms +step:1618/1670 train_time:159598ms step_avg:98.64ms +step:1619/1670 train_time:159698ms step_avg:98.64ms +step:1620/1670 train_time:159798ms step_avg:98.64ms +step:1621/1670 train_time:159897ms step_avg:98.64ms +step:1622/1670 train_time:159996ms step_avg:98.64ms +step:1623/1670 train_time:160095ms step_avg:98.64ms +step:1624/1670 train_time:160196ms step_avg:98.64ms +step:1625/1670 train_time:160296ms step_avg:98.64ms +step:1625/1670 val_loss:3.2842 train_time:160393ms step_avg:98.70ms +step:1626/1670 train_time:160416ms step_avg:98.66ms +step:1627/1670 train_time:160497ms step_avg:98.65ms +step:1628/1670 train_time:160596ms step_avg:98.65ms +step:1629/1670 train_time:160694ms step_avg:98.65ms +step:1630/1670 train_time:160792ms step_avg:98.65ms +step:1631/1670 train_time:160890ms step_avg:98.64ms +step:1632/1670 train_time:160987ms step_avg:98.64ms +step:1633/1670 train_time:161084ms step_avg:98.64ms +step:1634/1670 train_time:161182ms step_avg:98.64ms +step:1635/1670 train_time:161279ms step_avg:98.64ms +step:1636/1670 train_time:161378ms step_avg:98.64ms +step:1637/1670 train_time:161478ms step_avg:98.64ms +step:1638/1670 train_time:161577ms step_avg:98.64ms +step:1639/1670 train_time:161676ms step_avg:98.64ms +step:1640/1670 
train_time:161774ms step_avg:98.64ms +step:1641/1670 train_time:161872ms step_avg:98.64ms +step:1642/1670 train_time:161970ms step_avg:98.64ms +step:1643/1670 train_time:162068ms step_avg:98.64ms +step:1644/1670 train_time:162166ms step_avg:98.64ms +step:1645/1670 train_time:162265ms step_avg:98.64ms +step:1646/1670 train_time:162364ms step_avg:98.64ms +step:1647/1670 train_time:162464ms step_avg:98.64ms +step:1648/1670 train_time:162565ms step_avg:98.64ms +step:1649/1670 train_time:162666ms step_avg:98.65ms +step:1650/1670 train_time:162767ms step_avg:98.65ms +step:1651/1670 train_time:162865ms step_avg:98.65ms +step:1652/1670 train_time:162964ms step_avg:98.65ms +step:1653/1670 train_time:163063ms step_avg:98.65ms +step:1654/1670 train_time:163160ms step_avg:98.65ms +step:1655/1670 train_time:163257ms step_avg:98.64ms +step:1656/1670 train_time:163354ms step_avg:98.64ms +step:1657/1670 train_time:163453ms step_avg:98.64ms +step:1658/1670 train_time:163553ms step_avg:98.64ms +step:1659/1670 train_time:163653ms step_avg:98.65ms +step:1660/1670 train_time:163752ms step_avg:98.65ms +step:1661/1670 train_time:163850ms step_avg:98.65ms +step:1662/1670 train_time:163949ms step_avg:98.65ms +step:1663/1670 train_time:164047ms step_avg:98.65ms +step:1664/1670 train_time:164146ms step_avg:98.65ms +step:1665/1670 train_time:164244ms step_avg:98.64ms +step:1666/1670 train_time:164341ms step_avg:98.64ms +step:1667/1670 train_time:164440ms step_avg:98.64ms +step:1668/1670 train_time:164539ms step_avg:98.64ms +step:1669/1670 train_time:164639ms step_avg:98.65ms +step:1670/1670 train_time:164737ms step_avg:98.64ms +step:1670/1670 val_loss:3.2768 train_time:164833ms step_avg:98.70ms +peak memory allocated: 34073 MiB reserved: 49756 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_adcc39f4-c919-420a-bd94-9d0035f0038c.txt b/records/090525_SkipMLPBlocks/comparison_adcc39f4-c919-420a-bd94-9d0035f0038c.txt new file mode 100644 index 000000000..58bbe8537 --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_adcc39f4-c919-420a-bd94-9d0035f0038c.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, 
dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + 
c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels 
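+ # As in ns_line_1_kernel, the output is symmetric whenever A is (which holds at the
+ # call site, since A comes from ns_line_1's X @ X.T): blocks strictly on one side of
+ # the diagonal are skipped, and each computed block is also stored mirrored across
+ # the diagonal, roughly halving the matmul work.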
+ pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
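+ # Sharding scheme: within each (same-shape) param group, parameter base_i + rank is
+ # owned by this rank. Gradients are reduce-scattered so each rank averages only the
+ # params it owns, the momentum + Newton-Schulz update runs locally on those shards,
+ # and the updated parameters are all-gathered back to every rank; the async futures
+ # let communication overlap with the orthogonalization compute.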
+
+# -----------------------------------------------------------------------------
+# Muon optimizer
+
+class Muon(torch.optim.Optimizer):
+    """
+    Muon - MomentUm Orthogonalized by Newton-schulz
+
+    https://kellerjordan.github.io/posts/muon/
+
+    Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
+    processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
+    matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
+    the advantage that it can be stably run in bfloat16 on the GPU.
+
+    Warning: This optimizer should not be used for the embedding layer, the final fully connected layer,
+    or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW).
+    """
+    def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95):
+        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
+        params = list(params)
+        sizes = {p.shape for p in params}
+        # create one buffer per unique parameter-size
+        param_groups = []
+        for size in sizes:
+            group_params = [p for p in params if p.shape == size]
+            param_groups.append(dict(params=group_params))
+        super().__init__(param_groups, defaults)
+
+    @torch.no_grad()
+    def step(self):
+        # Efficient systems-wise implementation of step developed by @YouJiacheng,
+        # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad,
+        # @ryanyang0, and @vagrawal.
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+        reduce_scatter_futures: list[torch.Future] = []
+        all_gather_futures: list[torch.Future] = []
+        for group in self.param_groups:
+            params: list[Tensor] = group["params"]
+            grad = torch.empty_like(params[-1])
+            grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size
+            for base_i in range(0, len(params), world_size):
+                if base_i + rank < len(params):
+                    grad = params[base_i + rank].grad
+                # This gives strange dynamo warnings
+                reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future())
+
+        idx = 0
+        for group in self.param_groups:
+            params: list[Tensor] = group["params"]
+            params_pad = params + [torch.empty_like(params[-1])] * world_size
+            momentum = group["momentum"]
+            for base_i in range(0, len(params), world_size):
+                reduce_scatter_futures[idx].wait()
+                if base_i + rank < len(params):
+                    p = params[base_i + rank]
+                    grad = p.grad
+                    eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0)
+                    eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0)
+                    state = self.state[p]
+                    if len(state) == 0:
+                        state["momentum_buffer"] = torch.zeros_like(grad)
+                    momentum_buffer = state["momentum_buffer"]
+                    p.mul_(1 - eff_weight_decay)
+                    momentum_buffer.lerp_(grad, 1 - momentum)
+                    grad = grad.lerp_(momentum_buffer, momentum)
+                    v = newton_schulz_triton(grad)
+                    p.add_(other=v, alpha=-eff_lr)
+                idx += 1
+                all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future())
+        torch.futures.collect_all(all_gather_futures).wait()
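+
+# For readability, a minimal single-GPU sketch of the update that Muon.step()
+# implements with sharding and async collectives (illustrative only; the name
+# `muon_step_reference` and the plain loop are not part of the training path,
+# and per-parameter lr_mul / wd_mul multipliers are omitted):
+def muon_step_reference(params, state, lr=0.02, wd=0.01, momentum=0.95):
+    for p in params:
+        buf = state.setdefault(p, torch.zeros_like(p))
+        p.mul_(1 - lr * wd)              # decoupled weight decay, scaled by lr
+        buf.lerp_(p.grad, 1 - momentum)  # SGD-momentum accumulation
+        g = p.grad.lerp(buf, momentum)   # Nesterov-style blend of grad and buffer
+        v = newton_schulz_triton(g)      # orthogonalize the 2D update
+        p.add_(v, alpha=-lr * max(1, p.size(-2) / p.size(-1)) ** 0.5)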
+
+class DistAdam(torch.optim.Optimizer):
+    def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01):
+        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+        params = list(params)
+        sizes = {p.shape for p in params}
+        # create one buffer per unique parameter-size
+        param_groups = []
+        for size in sizes:
+            group_params = [p for p in params if p.shape == size]
+            param_groups.append(dict(params=group_params))
+        super().__init__(param_groups, defaults)
+        # DistributedAdam implementation by @vagrawal
+
+    @torch.compile
+    @torch.no_grad()
+    def step(self):
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+        reduce_scatter_futures: list[torch.Future] = []
+        all_gather_futures: list[torch.Future] = []
+        grad_slices = []
+        for group in self.param_groups:
+            params: list[Tensor] = group["params"]
+            for base_i in range(len(params)):
+                grad = params[base_i].grad
+                rank_size = grad.shape[0] // world_size
+                grad_slice = torch.empty_like(grad[:rank_size])
+                reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future())
+                grad_slices.append(grad_slice)
+
+        idx = 0
+        for group in self.param_groups:
+            beta1, beta2 = group['betas']
+            eps = group['eps']
+            wd = group['weight_decay']
+            params = group['params']
+            for base in range(len(params)):
+                reduce_scatter_futures[idx].wait()
+                p = params[base]
+                rank_size = p.shape[0] // world_size
+                p_slice = p[rank * rank_size:(rank + 1) * rank_size]
+                lr = group['lr'] * getattr(p, "lr_mul", 1.0)
+                state = self.state[p]
+                g_slice = grad_slices[idx]
+                # State init
+                if not state:
+                    state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device)
+                    state['exp_avg'] = torch.zeros_like(p_slice)
+                    state['exp_avg_sq'] = torch.zeros_like(p_slice)
+                exp_avg = state['exp_avg']
+                exp_avg_sq = state['exp_avg_sq']
+                state['step'] += 1
+                t = state['step']
+                # weight decay
+                if wd != 0:
+                    eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0)
+                    p_slice.mul_(1 - eff_weight_decay)
+                # update running averages
+                exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1)
+                exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2)
+                # bias corrections
+                bias1 = 1 - beta1 ** t
+                bias2 = 1 - beta2 ** t
+                # compute step
+                denom = exp_avg_sq.sqrt().add_(eps)
+                step_size = lr * (torch.sqrt(bias2) / bias1)
+                update = exp_avg.div(denom).mul_(step_size)
+                p_slice.add_(other=update, alpha=-1.0)
+                idx += 1
+                all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future())
+        torch.futures.collect_all(all_gather_futures).wait()
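+
+# The sharding scheme above gives each rank an equal slice of dim 0 of every
+# parameter, so the Adam state exists only for that slice. A minimal sketch of
+# the collective round trip (illustrative helper, unused; assumes dist is
+# initialized and p.shape[0] % world_size == 0):
+def _dist_adam_round_trip_sketch(p: Tensor):
+    world_size = dist.get_world_size()
+    rank = dist.get_rank()
+    rank_size = p.shape[0] // world_size
+    grad_slice = torch.empty_like(p.grad[:rank_size])
+    dist.reduce_scatter_tensor(grad_slice, p.grad, op=dist.ReduceOp.AVG)  # average grads, keep own shard
+    p_slice = p[rank * rank_size:(rank + 1) * rank_size]
+    p_slice.add_(grad_slice, alpha=-1e-3)  # stand-in for the Adam update
+    dist.all_gather_into_tensor(p, p_slice)  # rebuild the full replicated parameter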
+
+# -----------------------------------------------------------------------------
+# PyTorch nn.Module definitions for the model
+
+def norm(x: Tensor):
+    return F.rms_norm(x, (x.size(-1),))
+
+class CastedLinear(nn.Linear):
+    def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0):
+        super().__init__(in_features, out_features, bias=False)
+        self.use_fp8 = use_fp8
+        self.x_s = x_s
+        self.w_s = w_s
+        self.grad_s = grad_s
+
+    def reset_parameters(self) -> None:
+        std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3)
+        bound = (3 ** 0.5) * std
+        with torch.no_grad():
+            self.weight.uniform_(-bound, bound)
+
+    def forward(self, x: Tensor):
+        if self.use_fp8 and self.training:
+            _x = x.flatten(0, -2)
+            out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0]
+            return out.reshape(*x.shape[:-1], -1)
+        else:
+            return F.linear(x, self.weight.type_as(x))
+
+class Rotary(nn.Module):
+    def __init__(self, dim: int, max_seq_len: int):
+        super().__init__()
+        # half-truncate RoPE by @YouJiacheng (w/ base freq tuning)
+        angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32)
+        angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)])
+        t = torch.arange(max_seq_len, dtype=torch.float32)
+        theta = torch.einsum("i,j -> ij", t, angular_freq)
+        self.cos = nn.Buffer(theta.cos(), persistent=False)
+        self.sin = nn.Buffer(theta.sin(), persistent=False)
+
+    def forward(self, x_BTHD: Tensor):
+        assert self.cos.size(0) >= x_BTHD.size(-3)
+        cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :]
+        x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1)
+        y1 = x1 * cos + x2 * sin
+        y2 = x1 * (-sin) + x2 * cos
+        return torch.cat((y1, y2), 3).type_as(x_BTHD)
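+
+# In the half-truncated RoPE above, the second half of `angular_freq` is zero,
+# so cos = 1 and sin = 0 for those feature pairs and they pass through the
+# rotation unchanged. A small check of that property (illustrative, unused):
+def _rope_passthrough_check():
+    rotary = Rotary(dim=128, max_seq_len=64)
+    x = torch.randn(1, 64, 1, 128)
+    y = rotary(x)
+    # dims 32:64 pair with dims 96:128 at zero frequency -> both unrotated
+    print(torch.allclose(y[..., 32:64], x[..., 32:64]),
+          torch.allclose(y[..., 96:128], x[..., 96:128]))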
+
+class CausalSelfAttention(nn.Module):
+    def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128):
+        super().__init__()
+        self.num_heads = num_heads
+        self.head_dim = head_dim
+        hdim = num_heads * head_dim
+        assert hdim == dim, "num_heads * head_dim must equal model_dim"
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng
+        # https://x.com/hi_tysam/status/1879699187107033311
+        self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim))
+        with torch.no_grad():
+            self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights
+            self.qkvo_w[3].zero_() # init output weights to zero
+        self.rotary = Rotary(head_dim, max_seq_len)
+        # scale the attention logits by a given constant, instead of the default head_dim**-0.5, by @leloykun
+        # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
+        self.attn_scale = 0.12
+
+        # sparse gated attention to enable context based no-op by @classiclarryd
+        self.attn_gate_dim = 12
+        self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads)
+        self.attn_gate.weight.detach().zero_()
+
+    def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int):
+        B, T = x.size(0), x.size(1) # batch size, sequence length
+        assert B == 1, "varlen sequences require B == 1"
+        assert T % 16 == 0
+
+        q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2)
+        q, k = norm(q), norm(k) # QK norm @Grad62304977
+        q, k = self.rotary(q), self.rotary(k)
+        if ve is not None:
+            v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977
+        else: # skip mid-layers token value embeddings by @YouJiacheng
+            v = lambdas[0] * v
+
+        max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size))
+
+        # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng
+        y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len,
+                                   causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0))
+        y = y.view(B, T, self.num_heads, self.head_dim)
+        y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1)
+        y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side
+        y = F.linear(y, self.qkvo_w[3].type_as(y))
+        return y
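+
+# The attention gate is zero-initialized, so each head starts at a gate value
+# of sigmoid(0) = 0.5 and can learn, per token, to effectively mute itself
+# from the first attn_gate_dim channels of the residual stream (the "context
+# based no-op"). A shape-level sketch of the gating (illustrative, unused;
+# constants match the 768-dim, 6-head model):
+def _attn_gate_sketch():
+    B, T, H, D = 1, 16, 6, 128
+    y = torch.randn(B, T, H, D)          # per-head attention outputs
+    gate_logits = torch.zeros(B, T, H)   # attn_gate(x[..., :12]) at init
+    return y * torch.sigmoid(gate_logits).view(B, T, H, 1)  # == 0.5 * y at init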
+
+class MLP(nn.Module):
+    def __init__(self, dim: int):
+        super().__init__()
+        hdim = 4 * dim
+        # make both matrices have the same shape because optimizer sorts params by shape
+        # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size
+        self.c_fc = nn.Parameter(torch.empty(dim, hdim))
+        self.c_proj = nn.Parameter(torch.empty(dim, hdim))
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        with torch.no_grad():
+            self.c_fc.uniform_(-bound, bound)
+            self.c_proj.zero_() # zero init suggested by @Grad62304977
+
+    def forward(self, x: Tensor):
+        x = F.linear(x, self.c_fc.T.type_as(x))
+        x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
+        x = F.linear(x, self.c_proj.type_as(x))
+        return x
+
+class Block(nn.Module):
+    def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int):
+        super().__init__()
+        # skip attention of blocks.7 (the 8th layer) by @YouJiacheng
+        self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None
+        self.mlp = MLP(dim)
+
+    def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor,
+                seqlens: Tensor, bm_size: int):
+        x = lambdas[0] * x + lambdas[1] * x0
+        if self.attn is not None:
+            x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size)
+        x = x + self.mlp(norm(x))
+        return x
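+
+# Because c_fc is stored transposed, both MLP matrices share the shape
+# (dim, 4*dim); Muon buckets parameters by shape, so all 24 MLP matrices land
+# in one group that shards evenly over 8 GPUs. A quick check (illustrative,
+# unused):
+def _mlp_shape_check():
+    mlp = MLP(768)
+    assert mlp.c_fc.shape == mlp.c_proj.shape == torch.Size([768, 3072])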
+
+# -----------------------------------------------------------------------------
+# The main model
+
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+class GPT(nn.Module):
+    def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int):
+        super().__init__()
+        vocab_size = next_multiple_of_n(vocab_size, n=128)
+        self.embed = nn.Embedding(vocab_size, model_dim)
+        # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897
+        # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78
+        self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)])
+        self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)])
+        # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency.
+        # suggested to me by @Grad62304977. this originates from Karpathy's experiments.
+        use_fp8 = not os.environ.get("DISABLE_FP8", False)
+        self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448)
+        self.lm_head.weight.detach().zero_() # @Grad62304977
+        # Add learnable skip connection weights for decoder layers
+        assert num_layers % 2 == 0
+        pad = (-num_layers * 5) % dist.get_world_size()
+        self.scalars = nn.Parameter(torch.cat([
+            torch.ones(num_layers), # skip_weights
+            *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas
+            *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas
+            torch.ones(pad),
+        ]))
+        # set learning rates
+        for param in self.embed.parameters():
+            param.lr_mul = 75.
+        for param in self.value_embeds.parameters():
+            param.lr_mul = 75.
+        self.lm_head.weight.lr_mul = 1.0
+        self.scalars.lr_mul = 5.0
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int):
+        assert input_seq.ndim == 1
+
+        ve = [value_embed(input_seq) for value_embed in self.value_embeds]
+        # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
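+
+# Layout of the packed `scalars` parameter for num_layers=12, as sliced in
+# forward() above (reference notes mirroring the cat() in __init__):
+#   scalars[ 0:12] skip_weights (only the first 6 are used, one per layer of the decoder half)
+#   scalars[12:36] per-block (lambda_x, lambda_x0) pairs, init (1.0, 0.0)
+#   scalars[36:60] per-block SA (lambda_v, lambda_ve) pairs, init (0.5, 0.5)
+#   scalars[60:64] padding so the parameter length divides the 8-GPU world size
+# The softcap maps logits smoothly into (0, 30); a quick check of the sigmoid
+# form against the tanh form (illustrative helper, unused):
+def _softcap_identity_check():
+    x = torch.linspace(-100.0, 100.0, steps=9)
+    assert torch.allclose(30 * torch.sigmoid(x / 7.5), 15 * (torch.tanh(x / 15) + 1), atol=1e-5)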
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
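+
+# The .bin shard layout assumed by _load_data_shard: a 256-int32 header
+# (magic, version, token count), then the tokens as raw uint16. A toy writer
+# for that format (illustrative only; `write_toy_shard` is not part of this run):
+def write_toy_shard(path: str, token_ids: list[int]):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520        # magic number checked by the loader
+    header[1] = 1               # version
+    header[2] = len(token_ids)  # number of tokens
+    with open(path, "wb") as f:
+        f.write(header.tobytes())
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())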
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # last document was too long to account for _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr(step: int):
+    x = step / args.num_iterations
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    return lr
+
+def get_ws(step: int):
+    x = step / (1 + args.num_iterations)
+    assert 0 <= x < 1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return args.ws_schedule[ws_idx]
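+
+# Worked examples of the two schedules above (reference values only, computed
+# from num_iterations=1670, cooldown_frac=0.45, ws_schedule=(3, 7, 11)):
+#   get_lr(0)    = 1.0, and stays 1.0 until x >= 0.55, i.e. step 919
+#   get_lr(1252) = 0.1 + 0.9 * (1 - 1252/1670) / 0.45 ~= 0.60 (linear cooldown to 0.1)
+#   get_ws steps through 3 -> 7 -> 11 at thirds of training (switching at
+#   steps 557 and 1114), widening the attention window as the run progresses.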
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 30
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+for step in range(warmup_steps):
+    inputs, targets, cum_seqlens = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, cum_seqlens, ws).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+#        Training and validation       #
+########################################
+
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+training_time_ms = 0
+# start the clock
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+# begin training
+train_steps = args.num_iterations
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+    ws = get_ws(step)
+
+    # --------------- VALIDATION SECTION -----------------
+    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        # stop the clock
+        torch.cuda.synchronize()
+        training_time_ms += 1000 * (time.perf_counter() - t0)
+        model.eval()
+        assert args.val_tokens % args.val_batch_size == 0
+        val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size
+        val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False)
+        val_loss = 0
+        with torch.no_grad():
+            for _ in range(val_steps):
+                inputs, targets, cum_seqlens = next(val_loader)
+                val_loss += model(inputs, targets, cum_seqlens, ws)
+        val_loss /= val_steps
+        del val_loader
+        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
+        print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True)
+        model.train()
+        # start the clock again
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    if last_step:
+        if master_process and args.save_checkpoint:
+            log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
+            os.makedirs(f"logs/{run_id}", exist_ok=True)
+            torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt")
+        # the last step only has the validation loop, so break to avoid training
+        break
+
+    # --------------- TRAINING SECTION -----------------
+    for _ in range(grad_accum_steps):
+        inputs, targets, cum_seqlens = next(train_loader)
+        model(inputs, targets, cum_seqlens, ws).backward()
+    # set optimization hyperparameters
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * get_lr(step)
+    for group in optimizer2.param_groups:
+        frac = min(step / 300, 1) # momentum warmup for muon
+        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+    # step the optimizers
+    for opt in optimizers:
+        opt.step()
+    # null the gradients
+    model.zero_grad(set_to_none=True)
+    # logging
+    approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0)
+    print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True)
+
+print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+dist.destroy_process_group()
+
+====================================================================================================
+Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]
+Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6
+Running Triton version 3.4.0
+Fri Sep 5 15:49:16 2025
++-----------------------------------------------------------------------------------------+
+| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 |
+|-----------------------------------------+------------------------+----------------------+
+| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
+| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
+| | | MIG M.
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 129W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 74357 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 74358 C /usr/bin/python3 610MiB | +| 0 N/A N/A 74359 C /usr/bin/python3 610MiB | +| 0 N/A N/A 74360 C /usr/bin/python3 610MiB | +| 0 N/A N/A 74361 C /usr/bin/python3 610MiB | +| 0 N/A N/A 74362 C /usr/bin/python3 610MiB | +| 0 N/A N/A 74363 C /usr/bin/python3 610MiB | +| 0 N/A N/A 74364 C /usr/bin/python3 610MiB | +| 1 N/A N/A 74358 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 74359 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 74360 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 74361 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 74362 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 74363 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 74364 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.07ms +step:1/1670 train_time:380ms step_avg:380.20ms +step:2/1670 train_time:402ms step_avg:201.05ms +step:3/1670 train_time:475ms step_avg:158.24ms +step:4/1670 train_time:568ms step_avg:142.12ms +step:5/1670 train_time:663ms step_avg:132.63ms +step:6/1670 train_time:759ms step_avg:126.42ms +step:7/1670 train_time:854ms step_avg:122.05ms +step:8/1670 
train_time:949ms step_avg:118.60ms +step:9/1670 train_time:1044ms step_avg:116.01ms +step:10/1670 train_time:1140ms step_avg:114.00ms +step:11/1670 train_time:1235ms step_avg:112.28ms +step:12/1670 train_time:1332ms step_avg:111.00ms +step:13/1670 train_time:1429ms step_avg:109.94ms +step:14/1670 train_time:1526ms step_avg:108.99ms +step:15/1670 train_time:1622ms step_avg:108.16ms +step:16/1670 train_time:1718ms step_avg:107.39ms +step:17/1670 train_time:1814ms step_avg:106.72ms +step:18/1670 train_time:1909ms step_avg:106.08ms +step:19/1670 train_time:2004ms step_avg:105.49ms +step:20/1670 train_time:2100ms step_avg:105.00ms +step:21/1670 train_time:2196ms step_avg:104.56ms +step:22/1670 train_time:2292ms step_avg:104.17ms +step:23/1670 train_time:2388ms step_avg:103.84ms +step:24/1670 train_time:2485ms step_avg:103.52ms +step:25/1670 train_time:2582ms step_avg:103.27ms +step:26/1670 train_time:2679ms step_avg:103.04ms +step:27/1670 train_time:2776ms step_avg:102.82ms +step:28/1670 train_time:2871ms step_avg:102.55ms +step:29/1670 train_time:2967ms step_avg:102.30ms +step:30/1670 train_time:3062ms step_avg:102.07ms +step:31/1670 train_time:3158ms step_avg:101.86ms +step:32/1670 train_time:3254ms step_avg:101.68ms +step:33/1670 train_time:3350ms step_avg:101.52ms +step:34/1670 train_time:3446ms step_avg:101.36ms +step:35/1670 train_time:3543ms step_avg:101.21ms +step:36/1670 train_time:3640ms step_avg:101.10ms +step:37/1670 train_time:3736ms step_avg:100.97ms +step:38/1670 train_time:3832ms step_avg:100.84ms +step:39/1670 train_time:3927ms step_avg:100.70ms +step:40/1670 train_time:4022ms step_avg:100.56ms +step:41/1670 train_time:4118ms step_avg:100.44ms +step:42/1670 train_time:4214ms step_avg:100.32ms +step:43/1670 train_time:4309ms step_avg:100.21ms +step:44/1670 train_time:4405ms step_avg:100.12ms +step:45/1670 train_time:4501ms step_avg:100.03ms +step:46/1670 train_time:4599ms step_avg:99.98ms +step:47/1670 train_time:4695ms step_avg:99.90ms +step:48/1670 train_time:4792ms step_avg:99.84ms +step:49/1670 train_time:4888ms step_avg:99.75ms +step:50/1670 train_time:4984ms step_avg:99.68ms +step:51/1670 train_time:5079ms step_avg:99.60ms +step:52/1670 train_time:5175ms step_avg:99.53ms +step:53/1670 train_time:5271ms step_avg:99.45ms +step:54/1670 train_time:5366ms step_avg:99.37ms +step:55/1670 train_time:5462ms step_avg:99.32ms +step:56/1670 train_time:5560ms step_avg:99.29ms +step:57/1670 train_time:5656ms step_avg:99.23ms +step:58/1670 train_time:5753ms step_avg:99.20ms +step:59/1670 train_time:5849ms step_avg:99.14ms +step:60/1670 train_time:5945ms step_avg:99.08ms +step:61/1670 train_time:6041ms step_avg:99.03ms +step:62/1670 train_time:6137ms step_avg:98.99ms +step:63/1670 train_time:6234ms step_avg:98.95ms +step:64/1670 train_time:6329ms step_avg:98.89ms +step:65/1670 train_time:6424ms step_avg:98.84ms +step:66/1670 train_time:6522ms step_avg:98.82ms +step:67/1670 train_time:6618ms step_avg:98.78ms +step:68/1670 train_time:6716ms step_avg:98.77ms +step:69/1670 train_time:6813ms step_avg:98.74ms +step:70/1670 train_time:6907ms step_avg:98.68ms +step:71/1670 train_time:7002ms step_avg:98.63ms +step:72/1670 train_time:7098ms step_avg:98.59ms +step:73/1670 train_time:7195ms step_avg:98.56ms +step:74/1670 train_time:7291ms step_avg:98.53ms +step:75/1670 train_time:7386ms step_avg:98.48ms +step:76/1670 train_time:7482ms step_avg:98.45ms +step:77/1670 train_time:7578ms step_avg:98.41ms +step:78/1670 train_time:7674ms step_avg:98.38ms +step:79/1670 train_time:7769ms step_avg:98.34ms 
+step:80/1670 train_time:7865ms step_avg:98.31ms +step:81/1670 train_time:7962ms step_avg:98.30ms +step:82/1670 train_time:8059ms step_avg:98.28ms +step:83/1670 train_time:8155ms step_avg:98.25ms +step:84/1670 train_time:8250ms step_avg:98.21ms +step:85/1670 train_time:8345ms step_avg:98.18ms +step:86/1670 train_time:8442ms step_avg:98.16ms +step:87/1670 train_time:8537ms step_avg:98.13ms +step:88/1670 train_time:8633ms step_avg:98.11ms +step:89/1670 train_time:8729ms step_avg:98.07ms +step:90/1670 train_time:8824ms step_avg:98.05ms +step:91/1670 train_time:8922ms step_avg:98.04ms +step:92/1670 train_time:9018ms step_avg:98.02ms +step:93/1670 train_time:9115ms step_avg:98.01ms +step:94/1670 train_time:9209ms step_avg:97.97ms +step:95/1670 train_time:9305ms step_avg:97.95ms +step:96/1670 train_time:9401ms step_avg:97.92ms +step:97/1670 train_time:9496ms step_avg:97.90ms +step:98/1670 train_time:9593ms step_avg:97.88ms +step:99/1670 train_time:9688ms step_avg:97.86ms +step:100/1670 train_time:9784ms step_avg:97.84ms +step:101/1670 train_time:9881ms step_avg:97.83ms +step:102/1670 train_time:9977ms step_avg:97.81ms +step:103/1670 train_time:10072ms step_avg:97.79ms +step:104/1670 train_time:10168ms step_avg:97.77ms +step:105/1670 train_time:10263ms step_avg:97.74ms +step:106/1670 train_time:10359ms step_avg:97.73ms +step:107/1670 train_time:10455ms step_avg:97.71ms +step:108/1670 train_time:10551ms step_avg:97.69ms +step:109/1670 train_time:10646ms step_avg:97.67ms +step:110/1670 train_time:10741ms step_avg:97.65ms +step:111/1670 train_time:10837ms step_avg:97.63ms +step:112/1670 train_time:10932ms step_avg:97.61ms +step:113/1670 train_time:11028ms step_avg:97.60ms +step:114/1670 train_time:11124ms step_avg:97.58ms +step:115/1670 train_time:11220ms step_avg:97.57ms +step:116/1670 train_time:11317ms step_avg:97.56ms +step:117/1670 train_time:11412ms step_avg:97.54ms +step:118/1670 train_time:11508ms step_avg:97.52ms +step:119/1670 train_time:11604ms step_avg:97.51ms +step:120/1670 train_time:11700ms step_avg:97.50ms +step:121/1670 train_time:11796ms step_avg:97.49ms +step:122/1670 train_time:11891ms step_avg:97.47ms +step:123/1670 train_time:11987ms step_avg:97.45ms +step:124/1670 train_time:12083ms step_avg:97.44ms +step:125/1670 train_time:12179ms step_avg:97.44ms +step:125/1670 val_loss:4.2999 train_time:12274ms step_avg:98.19ms +step:126/1670 train_time:12298ms step_avg:97.60ms +step:127/1670 train_time:12382ms step_avg:97.50ms +step:128/1670 train_time:12484ms step_avg:97.53ms +step:129/1670 train_time:12582ms step_avg:97.53ms +step:130/1670 train_time:12677ms step_avg:97.52ms +step:131/1670 train_time:12772ms step_avg:97.50ms +step:132/1670 train_time:12868ms step_avg:97.48ms +step:133/1670 train_time:12962ms step_avg:97.46ms +step:134/1670 train_time:13057ms step_avg:97.44ms +step:135/1670 train_time:13152ms step_avg:97.42ms +step:136/1670 train_time:13247ms step_avg:97.40ms +step:137/1670 train_time:13344ms step_avg:97.40ms +step:138/1670 train_time:13442ms step_avg:97.41ms +step:139/1670 train_time:13541ms step_avg:97.42ms +step:140/1670 train_time:13638ms step_avg:97.41ms +step:141/1670 train_time:13734ms step_avg:97.40ms +step:142/1670 train_time:13829ms step_avg:97.39ms +step:143/1670 train_time:13924ms step_avg:97.37ms +step:144/1670 train_time:14019ms step_avg:97.35ms +step:145/1670 train_time:14114ms step_avg:97.34ms +step:146/1670 train_time:14210ms step_avg:97.33ms +step:147/1670 train_time:14305ms step_avg:97.31ms +step:148/1670 train_time:14402ms step_avg:97.31ms 
+step:149/1670 train_time:14500ms step_avg:97.31ms +step:150/1670 train_time:14597ms step_avg:97.31ms +step:151/1670 train_time:14693ms step_avg:97.30ms +step:152/1670 train_time:14788ms step_avg:97.29ms +step:153/1670 train_time:14884ms step_avg:97.28ms +step:154/1670 train_time:14979ms step_avg:97.27ms +step:155/1670 train_time:15074ms step_avg:97.25ms +step:156/1670 train_time:15170ms step_avg:97.25ms +step:157/1670 train_time:15265ms step_avg:97.23ms +step:158/1670 train_time:15361ms step_avg:97.22ms +step:159/1670 train_time:15458ms step_avg:97.22ms +step:160/1670 train_time:15555ms step_avg:97.22ms +step:161/1670 train_time:15651ms step_avg:97.21ms +step:162/1670 train_time:15746ms step_avg:97.20ms +step:163/1670 train_time:15841ms step_avg:97.19ms +step:164/1670 train_time:15938ms step_avg:97.18ms +step:165/1670 train_time:16032ms step_avg:97.16ms +step:166/1670 train_time:16127ms step_avg:97.15ms +step:167/1670 train_time:16223ms step_avg:97.14ms +step:168/1670 train_time:16319ms step_avg:97.14ms +step:169/1670 train_time:16415ms step_avg:97.13ms +step:170/1670 train_time:16512ms step_avg:97.13ms +step:171/1670 train_time:16608ms step_avg:97.12ms +step:172/1670 train_time:16704ms step_avg:97.12ms +step:173/1670 train_time:16800ms step_avg:97.11ms +step:174/1670 train_time:16897ms step_avg:97.11ms +step:175/1670 train_time:16993ms step_avg:97.10ms +step:176/1670 train_time:17087ms step_avg:97.09ms +step:177/1670 train_time:17183ms step_avg:97.08ms +step:178/1670 train_time:17279ms step_avg:97.07ms +step:179/1670 train_time:17375ms step_avg:97.07ms +step:180/1670 train_time:17471ms step_avg:97.06ms +step:181/1670 train_time:17565ms step_avg:97.05ms +step:182/1670 train_time:17662ms step_avg:97.04ms +step:183/1670 train_time:17758ms step_avg:97.04ms +step:184/1670 train_time:17854ms step_avg:97.03ms +step:185/1670 train_time:17949ms step_avg:97.02ms +step:186/1670 train_time:18044ms step_avg:97.01ms +step:187/1670 train_time:18140ms step_avg:97.00ms +step:188/1670 train_time:18235ms step_avg:97.00ms +step:189/1670 train_time:18330ms step_avg:96.99ms +step:190/1670 train_time:18425ms step_avg:96.98ms +step:191/1670 train_time:18521ms step_avg:96.97ms +step:192/1670 train_time:18618ms step_avg:96.97ms +step:193/1670 train_time:18715ms step_avg:96.97ms +step:194/1670 train_time:18811ms step_avg:96.97ms +step:195/1670 train_time:18907ms step_avg:96.96ms +step:196/1670 train_time:19003ms step_avg:96.95ms +step:197/1670 train_time:19099ms step_avg:96.95ms +step:198/1670 train_time:19195ms step_avg:96.94ms +step:199/1670 train_time:19290ms step_avg:96.94ms +step:200/1670 train_time:19385ms step_avg:96.93ms +step:201/1670 train_time:19481ms step_avg:96.92ms +step:202/1670 train_time:19578ms step_avg:96.92ms +step:203/1670 train_time:19674ms step_avg:96.92ms +step:204/1670 train_time:19769ms step_avg:96.91ms +step:205/1670 train_time:19865ms step_avg:96.90ms +step:206/1670 train_time:19961ms step_avg:96.90ms +step:207/1670 train_time:20057ms step_avg:96.89ms +step:208/1670 train_time:20153ms step_avg:96.89ms +step:209/1670 train_time:20248ms step_avg:96.88ms +step:210/1670 train_time:20343ms step_avg:96.87ms +step:211/1670 train_time:20439ms step_avg:96.87ms +step:212/1670 train_time:20535ms step_avg:96.86ms +step:213/1670 train_time:20810ms step_avg:97.70ms +step:214/1670 train_time:20968ms step_avg:97.98ms +step:215/1670 train_time:21062ms step_avg:97.96ms +step:216/1670 train_time:21157ms step_avg:97.95ms +step:217/1670 train_time:21252ms step_avg:97.93ms +step:218/1670 train_time:21346ms 
step_avg:97.92ms +step:219/1670 train_time:21441ms step_avg:97.90ms +step:220/1670 train_time:21537ms step_avg:97.89ms +step:221/1670 train_time:21631ms step_avg:97.88ms +step:222/1670 train_time:21725ms step_avg:97.86ms +step:223/1670 train_time:21823ms step_avg:97.86ms +step:224/1670 train_time:21922ms step_avg:97.87ms +step:225/1670 train_time:22021ms step_avg:97.87ms +step:226/1670 train_time:22116ms step_avg:97.86ms +step:227/1670 train_time:22211ms step_avg:97.85ms +step:228/1670 train_time:22307ms step_avg:97.84ms +step:229/1670 train_time:22401ms step_avg:97.82ms +step:230/1670 train_time:22496ms step_avg:97.81ms +step:231/1670 train_time:22592ms step_avg:97.80ms +step:232/1670 train_time:22687ms step_avg:97.79ms +step:233/1670 train_time:22782ms step_avg:97.78ms +step:234/1670 train_time:22880ms step_avg:97.78ms +step:235/1670 train_time:22977ms step_avg:97.78ms +step:236/1670 train_time:23074ms step_avg:97.77ms +step:237/1670 train_time:23170ms step_avg:97.76ms +step:238/1670 train_time:23265ms step_avg:97.75ms +step:239/1670 train_time:23360ms step_avg:97.74ms +step:240/1670 train_time:23455ms step_avg:97.73ms +step:241/1670 train_time:23550ms step_avg:97.72ms +step:242/1670 train_time:23645ms step_avg:97.71ms +step:243/1670 train_time:23740ms step_avg:97.69ms +step:244/1670 train_time:23836ms step_avg:97.69ms +step:245/1670 train_time:23932ms step_avg:97.68ms +step:246/1670 train_time:24028ms step_avg:97.67ms +step:247/1670 train_time:24124ms step_avg:97.67ms +step:248/1670 train_time:24219ms step_avg:97.66ms +step:249/1670 train_time:24316ms step_avg:97.65ms +step:250/1670 train_time:24411ms step_avg:97.64ms +step:250/1670 val_loss:3.9672 train_time:24505ms step_avg:98.02ms +step:251/1670 train_time:24529ms step_avg:97.73ms +step:252/1670 train_time:24607ms step_avg:97.65ms +step:253/1670 train_time:24706ms step_avg:97.65ms +step:254/1670 train_time:24801ms step_avg:97.64ms +step:255/1670 train_time:24896ms step_avg:97.63ms +step:256/1670 train_time:24991ms step_avg:97.62ms +step:257/1670 train_time:25086ms step_avg:97.61ms +step:258/1670 train_time:25180ms step_avg:97.60ms +step:259/1670 train_time:25275ms step_avg:97.59ms +step:260/1670 train_time:25370ms step_avg:97.58ms +step:261/1670 train_time:25465ms step_avg:97.57ms +step:262/1670 train_time:25563ms step_avg:97.57ms +step:263/1670 train_time:25659ms step_avg:97.56ms +step:264/1670 train_time:25757ms step_avg:97.56ms +step:265/1670 train_time:25853ms step_avg:97.56ms +step:266/1670 train_time:25948ms step_avg:97.55ms +step:267/1670 train_time:26043ms step_avg:97.54ms +step:268/1670 train_time:26139ms step_avg:97.53ms +step:269/1670 train_time:26234ms step_avg:97.52ms +step:270/1670 train_time:26329ms step_avg:97.51ms +step:271/1670 train_time:26424ms step_avg:97.51ms +step:272/1670 train_time:26520ms step_avg:97.50ms +step:273/1670 train_time:26616ms step_avg:97.50ms +step:274/1670 train_time:26714ms step_avg:97.50ms +step:275/1670 train_time:26810ms step_avg:97.49ms +step:276/1670 train_time:26905ms step_avg:97.48ms +step:277/1670 train_time:27002ms step_avg:97.48ms +step:278/1670 train_time:27095ms step_avg:97.47ms +step:279/1670 train_time:27191ms step_avg:97.46ms +step:280/1670 train_time:27286ms step_avg:97.45ms +step:281/1670 train_time:27382ms step_avg:97.44ms +step:282/1670 train_time:27477ms step_avg:97.44ms +step:283/1670 train_time:27573ms step_avg:97.43ms +step:284/1670 train_time:27669ms step_avg:97.43ms +step:285/1670 train_time:27765ms step_avg:97.42ms +step:286/1670 train_time:27861ms step_avg:97.41ms 
+step:287/1670 train_time:27956ms step_avg:97.41ms +step:288/1670 train_time:28053ms step_avg:97.40ms +step:289/1670 train_time:28148ms step_avg:97.40ms +step:290/1670 train_time:28242ms step_avg:97.39ms +step:291/1670 train_time:28338ms step_avg:97.38ms +step:292/1670 train_time:28434ms step_avg:97.38ms +step:293/1670 train_time:28530ms step_avg:97.37ms +step:294/1670 train_time:28625ms step_avg:97.36ms +step:295/1670 train_time:28720ms step_avg:97.36ms +step:296/1670 train_time:28815ms step_avg:97.35ms +step:297/1670 train_time:28912ms step_avg:97.35ms +step:298/1670 train_time:29007ms step_avg:97.34ms +step:299/1670 train_time:29102ms step_avg:97.33ms +step:300/1670 train_time:29197ms step_avg:97.32ms +step:301/1670 train_time:29293ms step_avg:97.32ms +step:302/1670 train_time:29389ms step_avg:97.31ms +step:303/1670 train_time:29484ms step_avg:97.31ms +step:304/1670 train_time:29580ms step_avg:97.30ms +step:305/1670 train_time:29676ms step_avg:97.30ms +step:306/1670 train_time:29773ms step_avg:97.30ms +step:307/1670 train_time:29868ms step_avg:97.29ms +step:308/1670 train_time:29963ms step_avg:97.28ms +step:309/1670 train_time:30059ms step_avg:97.28ms +step:310/1670 train_time:30155ms step_avg:97.27ms +step:311/1670 train_time:30251ms step_avg:97.27ms +step:312/1670 train_time:30346ms step_avg:97.26ms +step:313/1670 train_time:30441ms step_avg:97.26ms +step:314/1670 train_time:30536ms step_avg:97.25ms +step:315/1670 train_time:30633ms step_avg:97.25ms +step:316/1670 train_time:30729ms step_avg:97.24ms +step:317/1670 train_time:30824ms step_avg:97.24ms +step:318/1670 train_time:30920ms step_avg:97.23ms +step:319/1670 train_time:31016ms step_avg:97.23ms +step:320/1670 train_time:31112ms step_avg:97.22ms +step:321/1670 train_time:31208ms step_avg:97.22ms +step:322/1670 train_time:31303ms step_avg:97.22ms +step:323/1670 train_time:31399ms step_avg:97.21ms +step:324/1670 train_time:31494ms step_avg:97.20ms +step:325/1670 train_time:31590ms step_avg:97.20ms +step:326/1670 train_time:31685ms step_avg:97.19ms +step:327/1670 train_time:31780ms step_avg:97.19ms +step:328/1670 train_time:31875ms step_avg:97.18ms +step:329/1670 train_time:31971ms step_avg:97.18ms +step:330/1670 train_time:32067ms step_avg:97.17ms +step:331/1670 train_time:32163ms step_avg:97.17ms +step:332/1670 train_time:32258ms step_avg:97.16ms +step:333/1670 train_time:32354ms step_avg:97.16ms +step:334/1670 train_time:32450ms step_avg:97.16ms +step:335/1670 train_time:32546ms step_avg:97.15ms +step:336/1670 train_time:32642ms step_avg:97.15ms +step:337/1670 train_time:32738ms step_avg:97.15ms +step:338/1670 train_time:32834ms step_avg:97.14ms +step:339/1670 train_time:32929ms step_avg:97.14ms +step:340/1670 train_time:33025ms step_avg:97.13ms +step:341/1670 train_time:33120ms step_avg:97.13ms +step:342/1670 train_time:33216ms step_avg:97.12ms +step:343/1670 train_time:33312ms step_avg:97.12ms +step:344/1670 train_time:33409ms step_avg:97.12ms +step:345/1670 train_time:33504ms step_avg:97.11ms +step:346/1670 train_time:33600ms step_avg:97.11ms +step:347/1670 train_time:33696ms step_avg:97.11ms +step:348/1670 train_time:33793ms step_avg:97.11ms +step:349/1670 train_time:33889ms step_avg:97.10ms +step:350/1670 train_time:33985ms step_avg:97.10ms +step:351/1670 train_time:34080ms step_avg:97.09ms +step:352/1670 train_time:34176ms step_avg:97.09ms +step:353/1670 train_time:34272ms step_avg:97.09ms +step:354/1670 train_time:34368ms step_avg:97.08ms +step:355/1670 train_time:34463ms step_avg:97.08ms +step:356/1670 train_time:34558ms 
step_avg:97.07ms +step:357/1670 train_time:34654ms step_avg:97.07ms +step:358/1670 train_time:34750ms step_avg:97.07ms +step:359/1670 train_time:34846ms step_avg:97.06ms +step:360/1670 train_time:34941ms step_avg:97.06ms +step:361/1670 train_time:35037ms step_avg:97.05ms +step:362/1670 train_time:35132ms step_avg:97.05ms +step:363/1670 train_time:35228ms step_avg:97.05ms +step:364/1670 train_time:35324ms step_avg:97.04ms +step:365/1670 train_time:35419ms step_avg:97.04ms +step:366/1670 train_time:35515ms step_avg:97.04ms +step:367/1670 train_time:35611ms step_avg:97.03ms +step:368/1670 train_time:35707ms step_avg:97.03ms +step:369/1670 train_time:35803ms step_avg:97.03ms +step:370/1670 train_time:35899ms step_avg:97.02ms +step:371/1670 train_time:35995ms step_avg:97.02ms +step:372/1670 train_time:36092ms step_avg:97.02ms +step:373/1670 train_time:36188ms step_avg:97.02ms +step:374/1670 train_time:36284ms step_avg:97.02ms +step:375/1670 train_time:36379ms step_avg:97.01ms +step:375/1670 val_loss:3.8161 train_time:36473ms step_avg:97.26ms +step:376/1670 train_time:36499ms step_avg:97.07ms +step:377/1670 train_time:36577ms step_avg:97.02ms +step:378/1670 train_time:36675ms step_avg:97.02ms +step:379/1670 train_time:36771ms step_avg:97.02ms +step:380/1670 train_time:36866ms step_avg:97.01ms +step:381/1670 train_time:36961ms step_avg:97.01ms +step:382/1670 train_time:37055ms step_avg:97.00ms +step:383/1670 train_time:37149ms step_avg:97.00ms +step:384/1670 train_time:37245ms step_avg:96.99ms +step:385/1670 train_time:37340ms step_avg:96.99ms +step:386/1670 train_time:37437ms step_avg:96.99ms +step:387/1670 train_time:37535ms step_avg:96.99ms +step:388/1670 train_time:37632ms step_avg:96.99ms +step:389/1670 train_time:37728ms step_avg:96.99ms +step:390/1670 train_time:37824ms step_avg:96.98ms +step:391/1670 train_time:37919ms step_avg:96.98ms +step:392/1670 train_time:38015ms step_avg:96.98ms +step:393/1670 train_time:38109ms step_avg:96.97ms +step:394/1670 train_time:38205ms step_avg:96.97ms +step:395/1670 train_time:38300ms step_avg:96.96ms +step:396/1670 train_time:38394ms step_avg:96.96ms +step:397/1670 train_time:38490ms step_avg:96.95ms +step:398/1670 train_time:38587ms step_avg:96.95ms +step:399/1670 train_time:38684ms step_avg:96.95ms +step:400/1670 train_time:38780ms step_avg:96.95ms +step:401/1670 train_time:38877ms step_avg:96.95ms +step:402/1670 train_time:38972ms step_avg:96.94ms +step:403/1670 train_time:39067ms step_avg:96.94ms +step:404/1670 train_time:39162ms step_avg:96.94ms +step:405/1670 train_time:39257ms step_avg:96.93ms +step:406/1670 train_time:39352ms step_avg:96.93ms +step:407/1670 train_time:39447ms step_avg:96.92ms +step:408/1670 train_time:39544ms step_avg:96.92ms +step:409/1670 train_time:39640ms step_avg:96.92ms +step:410/1670 train_time:39737ms step_avg:96.92ms +step:411/1670 train_time:39832ms step_avg:96.92ms +step:412/1670 train_time:39928ms step_avg:96.91ms +step:413/1670 train_time:40024ms step_avg:96.91ms +step:414/1670 train_time:40120ms step_avg:96.91ms +step:415/1670 train_time:40215ms step_avg:96.90ms +step:416/1670 train_time:40310ms step_avg:96.90ms +step:417/1670 train_time:40405ms step_avg:96.89ms +step:418/1670 train_time:40501ms step_avg:96.89ms +step:419/1670 train_time:40596ms step_avg:96.89ms +step:420/1670 train_time:40691ms step_avg:96.88ms +step:421/1670 train_time:40787ms step_avg:96.88ms +step:422/1670 train_time:40884ms step_avg:96.88ms +step:423/1670 train_time:40980ms step_avg:96.88ms +step:424/1670 train_time:41076ms step_avg:96.88ms 
+step:425/1670 train_time:41338ms step_avg:97.27ms +step:426/1670 train_time:41461ms step_avg:97.33ms +step:427/1670 train_time:41556ms step_avg:97.32ms +step:428/1670 train_time:41650ms step_avg:97.31ms +step:429/1670 train_time:41745ms step_avg:97.31ms +step:430/1670 train_time:41839ms step_avg:97.30ms +step:431/1670 train_time:41934ms step_avg:97.29ms +step:432/1670 train_time:42029ms step_avg:97.29ms +step:433/1670 train_time:42124ms step_avg:97.28ms +step:434/1670 train_time:42219ms step_avg:97.28ms +step:435/1670 train_time:42319ms step_avg:97.29ms +step:436/1670 train_time:42417ms step_avg:97.29ms +step:437/1670 train_time:42514ms step_avg:97.29ms +step:438/1670 train_time:42610ms step_avg:97.28ms +step:439/1670 train_time:42706ms step_avg:97.28ms +step:440/1670 train_time:42802ms step_avg:97.28ms +step:441/1670 train_time:42897ms step_avg:97.27ms +step:442/1670 train_time:42992ms step_avg:97.27ms +step:443/1670 train_time:43087ms step_avg:97.26ms +step:444/1670 train_time:43182ms step_avg:97.26ms +step:445/1670 train_time:43278ms step_avg:97.25ms +step:446/1670 train_time:43374ms step_avg:97.25ms +step:447/1670 train_time:43471ms step_avg:97.25ms +step:448/1670 train_time:43567ms step_avg:97.25ms +step:449/1670 train_time:43664ms step_avg:97.25ms +step:450/1670 train_time:43759ms step_avg:97.24ms +step:451/1670 train_time:43855ms step_avg:97.24ms +step:452/1670 train_time:43950ms step_avg:97.23ms +step:453/1670 train_time:44044ms step_avg:97.23ms +step:454/1670 train_time:44139ms step_avg:97.22ms +step:455/1670 train_time:44234ms step_avg:97.22ms +step:456/1670 train_time:44330ms step_avg:97.22ms +step:457/1670 train_time:44427ms step_avg:97.21ms +step:458/1670 train_time:44524ms step_avg:97.21ms +step:459/1670 train_time:44621ms step_avg:97.21ms +step:460/1670 train_time:44716ms step_avg:97.21ms +step:461/1670 train_time:44811ms step_avg:97.20ms +step:462/1670 train_time:44906ms step_avg:97.20ms +step:463/1670 train_time:45002ms step_avg:97.20ms +step:464/1670 train_time:45097ms step_avg:97.19ms +step:465/1670 train_time:45192ms step_avg:97.19ms +step:466/1670 train_time:45288ms step_avg:97.18ms +step:467/1670 train_time:45385ms step_avg:97.19ms +step:468/1670 train_time:45483ms step_avg:97.19ms +step:469/1670 train_time:45579ms step_avg:97.18ms +step:470/1670 train_time:45676ms step_avg:97.18ms +step:471/1670 train_time:45771ms step_avg:97.18ms +step:472/1670 train_time:45867ms step_avg:97.18ms +step:473/1670 train_time:45963ms step_avg:97.17ms +step:474/1670 train_time:46058ms step_avg:97.17ms +step:475/1670 train_time:46153ms step_avg:97.16ms +step:476/1670 train_time:46249ms step_avg:97.16ms +step:477/1670 train_time:46345ms step_avg:97.16ms +step:478/1670 train_time:46441ms step_avg:97.16ms +step:479/1670 train_time:46536ms step_avg:97.15ms +step:480/1670 train_time:46632ms step_avg:97.15ms +step:481/1670 train_time:46728ms step_avg:97.15ms +step:482/1670 train_time:46825ms step_avg:97.15ms +step:483/1670 train_time:46921ms step_avg:97.15ms +step:484/1670 train_time:47017ms step_avg:97.14ms +step:485/1670 train_time:47112ms step_avg:97.14ms +step:486/1670 train_time:47207ms step_avg:97.13ms +step:487/1670 train_time:47303ms step_avg:97.13ms +step:488/1670 train_time:47399ms step_avg:97.13ms +step:489/1670 train_time:47494ms step_avg:97.12ms +step:490/1670 train_time:47590ms step_avg:97.12ms +step:491/1670 train_time:47687ms step_avg:97.12ms +step:492/1670 train_time:47783ms step_avg:97.12ms +step:493/1670 train_time:47879ms step_avg:97.12ms +step:494/1670 train_time:47975ms 
step_avg:97.11ms +step:495/1670 train_time:48070ms step_avg:97.11ms +step:496/1670 train_time:48166ms step_avg:97.11ms +step:497/1670 train_time:48261ms step_avg:97.10ms +step:498/1670 train_time:48357ms step_avg:97.10ms +step:499/1670 train_time:48452ms step_avg:97.10ms +step:500/1670 train_time:48549ms step_avg:97.10ms +step:500/1670 val_loss:3.7116 train_time:48644ms step_avg:97.29ms +step:501/1670 train_time:48668ms step_avg:97.14ms +step:502/1670 train_time:48747ms step_avg:97.10ms +step:503/1670 train_time:48846ms step_avg:97.11ms +step:504/1670 train_time:48942ms step_avg:97.11ms +step:505/1670 train_time:49037ms step_avg:97.10ms +step:506/1670 train_time:49131ms step_avg:97.10ms +step:507/1670 train_time:49226ms step_avg:97.09ms +step:508/1670 train_time:49320ms step_avg:97.09ms +step:509/1670 train_time:49415ms step_avg:97.08ms +step:510/1670 train_time:49510ms step_avg:97.08ms +step:511/1670 train_time:49606ms step_avg:97.08ms +step:512/1670 train_time:49703ms step_avg:97.08ms +step:513/1670 train_time:49801ms step_avg:97.08ms +step:514/1670 train_time:49897ms step_avg:97.08ms +step:515/1670 train_time:49994ms step_avg:97.08ms +step:516/1670 train_time:50089ms step_avg:97.07ms +step:517/1670 train_time:50184ms step_avg:97.07ms +step:518/1670 train_time:50279ms step_avg:97.06ms +step:519/1670 train_time:50374ms step_avg:97.06ms +step:520/1670 train_time:50469ms step_avg:97.06ms +step:521/1670 train_time:50565ms step_avg:97.05ms +step:522/1670 train_time:50660ms step_avg:97.05ms +step:523/1670 train_time:50757ms step_avg:97.05ms +step:524/1670 train_time:50855ms step_avg:97.05ms +step:525/1670 train_time:50951ms step_avg:97.05ms +step:526/1670 train_time:51046ms step_avg:97.05ms +step:527/1670 train_time:51142ms step_avg:97.04ms +step:528/1670 train_time:51237ms step_avg:97.04ms +step:529/1670 train_time:51332ms step_avg:97.04ms +step:530/1670 train_time:51428ms step_avg:97.03ms +step:531/1670 train_time:51523ms step_avg:97.03ms +step:532/1670 train_time:51619ms step_avg:97.03ms +step:533/1670 train_time:51716ms step_avg:97.03ms +step:534/1670 train_time:51812ms step_avg:97.03ms +step:535/1670 train_time:51908ms step_avg:97.03ms +step:536/1670 train_time:52004ms step_avg:97.02ms +step:537/1670 train_time:52100ms step_avg:97.02ms +step:538/1670 train_time:52196ms step_avg:97.02ms +step:539/1670 train_time:52291ms step_avg:97.01ms +step:540/1670 train_time:52386ms step_avg:97.01ms +step:541/1670 train_time:52482ms step_avg:97.01ms +step:542/1670 train_time:52578ms step_avg:97.01ms +step:543/1670 train_time:52674ms step_avg:97.01ms +step:544/1670 train_time:52771ms step_avg:97.00ms +step:545/1670 train_time:52866ms step_avg:97.00ms +step:546/1670 train_time:52963ms step_avg:97.00ms +step:547/1670 train_time:53059ms step_avg:97.00ms +step:548/1670 train_time:53155ms step_avg:97.00ms +step:549/1670 train_time:53251ms step_avg:97.00ms +step:550/1670 train_time:53345ms step_avg:96.99ms +step:551/1670 train_time:53441ms step_avg:96.99ms +step:552/1670 train_time:53537ms step_avg:96.99ms +step:553/1670 train_time:53633ms step_avg:96.99ms +step:554/1670 train_time:53730ms step_avg:96.99ms +step:555/1670 train_time:53826ms step_avg:96.98ms +step:556/1670 train_time:53921ms step_avg:96.98ms +step:557/1670 train_time:54016ms step_avg:96.98ms +step:558/1670 train_time:54113ms step_avg:96.98ms +step:559/1670 train_time:54210ms step_avg:96.98ms +step:560/1670 train_time:54306ms step_avg:96.98ms +step:561/1670 train_time:54402ms step_avg:96.97ms +step:562/1670 train_time:54499ms step_avg:96.97ms 
+step:563/1670 train_time:54597ms step_avg:96.98ms +step:564/1670 train_time:54695ms step_avg:96.98ms +step:565/1670 train_time:54794ms step_avg:96.98ms +step:566/1670 train_time:54891ms step_avg:96.98ms +step:567/1670 train_time:54988ms step_avg:96.98ms +step:568/1670 train_time:55085ms step_avg:96.98ms +step:569/1670 train_time:55181ms step_avg:96.98ms +step:570/1670 train_time:55279ms step_avg:96.98ms +step:571/1670 train_time:55377ms step_avg:96.98ms +step:572/1670 train_time:55473ms step_avg:96.98ms +step:573/1670 train_time:55570ms step_avg:96.98ms +step:574/1670 train_time:55667ms step_avg:96.98ms +step:575/1670 train_time:55764ms step_avg:96.98ms +step:576/1670 train_time:55861ms step_avg:96.98ms +step:577/1670 train_time:55959ms step_avg:96.98ms +step:578/1670 train_time:56057ms step_avg:96.99ms +step:579/1670 train_time:56155ms step_avg:96.99ms +step:580/1670 train_time:56253ms step_avg:96.99ms +step:581/1670 train_time:56350ms step_avg:96.99ms +step:582/1670 train_time:56446ms step_avg:96.99ms +step:583/1670 train_time:56543ms step_avg:96.99ms +step:584/1670 train_time:56640ms step_avg:96.99ms +step:585/1670 train_time:56738ms step_avg:96.99ms +step:586/1670 train_time:56835ms step_avg:96.99ms +step:587/1670 train_time:56934ms step_avg:96.99ms +step:588/1670 train_time:57032ms step_avg:96.99ms +step:589/1670 train_time:57129ms step_avg:96.99ms +step:590/1670 train_time:57225ms step_avg:96.99ms +step:591/1670 train_time:57322ms step_avg:96.99ms +step:592/1670 train_time:57419ms step_avg:96.99ms +step:593/1670 train_time:57518ms step_avg:96.99ms +step:594/1670 train_time:57617ms step_avg:97.00ms +step:595/1670 train_time:57714ms step_avg:97.00ms +step:596/1670 train_time:57812ms step_avg:97.00ms +step:597/1670 train_time:57909ms step_avg:97.00ms +step:598/1670 train_time:58006ms step_avg:97.00ms +step:599/1670 train_time:58103ms step_avg:97.00ms +step:600/1670 train_time:58200ms step_avg:97.00ms +step:601/1670 train_time:58298ms step_avg:97.00ms +step:602/1670 train_time:58395ms step_avg:97.00ms +step:603/1670 train_time:58493ms step_avg:97.00ms +step:604/1670 train_time:58590ms step_avg:97.00ms +step:605/1670 train_time:58687ms step_avg:97.00ms +step:606/1670 train_time:58783ms step_avg:97.00ms +step:607/1670 train_time:58881ms step_avg:97.00ms +step:608/1670 train_time:58979ms step_avg:97.00ms +step:609/1670 train_time:59077ms step_avg:97.01ms +step:610/1670 train_time:59175ms step_avg:97.01ms +step:611/1670 train_time:59272ms step_avg:97.01ms +step:612/1670 train_time:59368ms step_avg:97.01ms +step:613/1670 train_time:59465ms step_avg:97.01ms +step:614/1670 train_time:59563ms step_avg:97.01ms +step:615/1670 train_time:59660ms step_avg:97.01ms +step:616/1670 train_time:59757ms step_avg:97.01ms +step:617/1670 train_time:59856ms step_avg:97.01ms +step:618/1670 train_time:59953ms step_avg:97.01ms +step:619/1670 train_time:60051ms step_avg:97.01ms +step:620/1670 train_time:60149ms step_avg:97.01ms +step:621/1670 train_time:60246ms step_avg:97.02ms +step:622/1670 train_time:60342ms step_avg:97.01ms +step:623/1670 train_time:60439ms step_avg:97.01ms +step:624/1670 train_time:60538ms step_avg:97.02ms +step:625/1670 train_time:60635ms step_avg:97.02ms +step:625/1670 val_loss:3.6117 train_time:60731ms step_avg:97.17ms +step:626/1670 train_time:60755ms step_avg:97.05ms +step:627/1670 train_time:60839ms step_avg:97.03ms +step:628/1670 train_time:60939ms step_avg:97.04ms +step:629/1670 train_time:61035ms step_avg:97.04ms +step:630/1670 train_time:61131ms step_avg:97.03ms +step:631/1670 
train_time:61227ms step_avg:97.03ms +step:632/1670 train_time:61323ms step_avg:97.03ms +step:633/1670 train_time:61419ms step_avg:97.03ms +step:634/1670 train_time:61515ms step_avg:97.03ms +step:635/1670 train_time:61611ms step_avg:97.02ms +step:636/1670 train_time:61710ms step_avg:97.03ms +step:637/1670 train_time:61808ms step_avg:97.03ms +step:638/1670 train_time:61906ms step_avg:97.03ms +step:639/1670 train_time:62281ms step_avg:97.47ms +step:640/1670 train_time:62353ms step_avg:97.43ms +step:641/1670 train_time:62449ms step_avg:97.42ms +step:642/1670 train_time:62544ms step_avg:97.42ms +step:643/1670 train_time:62641ms step_avg:97.42ms +step:644/1670 train_time:62737ms step_avg:97.42ms +step:645/1670 train_time:62833ms step_avg:97.42ms +step:646/1670 train_time:62929ms step_avg:97.41ms +step:647/1670 train_time:63024ms step_avg:97.41ms +step:648/1670 train_time:63121ms step_avg:97.41ms +step:649/1670 train_time:63222ms step_avg:97.41ms +step:650/1670 train_time:63323ms step_avg:97.42ms +step:651/1670 train_time:63422ms step_avg:97.42ms +step:652/1670 train_time:63520ms step_avg:97.42ms +step:653/1670 train_time:63617ms step_avg:97.42ms +step:654/1670 train_time:63713ms step_avg:97.42ms +step:655/1670 train_time:63808ms step_avg:97.42ms +step:656/1670 train_time:63904ms step_avg:97.41ms +step:657/1670 train_time:64000ms step_avg:97.41ms +step:658/1670 train_time:64098ms step_avg:97.41ms +step:659/1670 train_time:64196ms step_avg:97.41ms +step:660/1670 train_time:64296ms step_avg:97.42ms +step:661/1670 train_time:64395ms step_avg:97.42ms +step:662/1670 train_time:64493ms step_avg:97.42ms +step:663/1670 train_time:64590ms step_avg:97.42ms +step:664/1670 train_time:64687ms step_avg:97.42ms +step:665/1670 train_time:64783ms step_avg:97.42ms +step:666/1670 train_time:64879ms step_avg:97.42ms +step:667/1670 train_time:64976ms step_avg:97.42ms +step:668/1670 train_time:65074ms step_avg:97.42ms +step:669/1670 train_time:65171ms step_avg:97.41ms +step:670/1670 train_time:65268ms step_avg:97.41ms +step:671/1670 train_time:65365ms step_avg:97.41ms +step:672/1670 train_time:65463ms step_avg:97.41ms +step:673/1670 train_time:65560ms step_avg:97.41ms +step:674/1670 train_time:65658ms step_avg:97.42ms +step:675/1670 train_time:65756ms step_avg:97.42ms +step:676/1670 train_time:65853ms step_avg:97.42ms +step:677/1670 train_time:65949ms step_avg:97.41ms +step:678/1670 train_time:66045ms step_avg:97.41ms +step:679/1670 train_time:66142ms step_avg:97.41ms +step:680/1670 train_time:66240ms step_avg:97.41ms +step:681/1670 train_time:66338ms step_avg:97.41ms +step:682/1670 train_time:66436ms step_avg:97.41ms +step:683/1670 train_time:66534ms step_avg:97.41ms +step:684/1670 train_time:66632ms step_avg:97.41ms +step:685/1670 train_time:66729ms step_avg:97.41ms +step:686/1670 train_time:66826ms step_avg:97.41ms +step:687/1670 train_time:66923ms step_avg:97.41ms +step:688/1670 train_time:67019ms step_avg:97.41ms +step:689/1670 train_time:67117ms step_avg:97.41ms +step:690/1670 train_time:67214ms step_avg:97.41ms +step:691/1670 train_time:67311ms step_avg:97.41ms +step:692/1670 train_time:67409ms step_avg:97.41ms +step:693/1670 train_time:67505ms step_avg:97.41ms +step:694/1670 train_time:67602ms step_avg:97.41ms +step:695/1670 train_time:67699ms step_avg:97.41ms +step:696/1670 train_time:67797ms step_avg:97.41ms +step:697/1670 train_time:67895ms step_avg:97.41ms +step:698/1670 train_time:67993ms step_avg:97.41ms +step:699/1670 train_time:68089ms step_avg:97.41ms +step:700/1670 train_time:68186ms step_avg:97.41ms 
+step:701/1670 train_time:68282ms step_avg:97.41ms +step:702/1670 train_time:68383ms step_avg:97.41ms +step:703/1670 train_time:68479ms step_avg:97.41ms +step:704/1670 train_time:68578ms step_avg:97.41ms +step:705/1670 train_time:68675ms step_avg:97.41ms +step:706/1670 train_time:68772ms step_avg:97.41ms +step:707/1670 train_time:68869ms step_avg:97.41ms +step:708/1670 train_time:68965ms step_avg:97.41ms +step:709/1670 train_time:69062ms step_avg:97.41ms +step:710/1670 train_time:69159ms step_avg:97.41ms +step:711/1670 train_time:69257ms step_avg:97.41ms +step:712/1670 train_time:69354ms step_avg:97.41ms +step:713/1670 train_time:69451ms step_avg:97.41ms +step:714/1670 train_time:69547ms step_avg:97.41ms +step:715/1670 train_time:69644ms step_avg:97.40ms +step:716/1670 train_time:69741ms step_avg:97.40ms +step:717/1670 train_time:69839ms step_avg:97.40ms +step:718/1670 train_time:69937ms step_avg:97.40ms +step:719/1670 train_time:70034ms step_avg:97.40ms +step:720/1670 train_time:70131ms step_avg:97.40ms +step:721/1670 train_time:70228ms step_avg:97.40ms +step:722/1670 train_time:70324ms step_avg:97.40ms +step:723/1670 train_time:70421ms step_avg:97.40ms +step:724/1670 train_time:70518ms step_avg:97.40ms +step:725/1670 train_time:70616ms step_avg:97.40ms +step:726/1670 train_time:70714ms step_avg:97.40ms +step:727/1670 train_time:70811ms step_avg:97.40ms +step:728/1670 train_time:70908ms step_avg:97.40ms +step:729/1670 train_time:71005ms step_avg:97.40ms +step:730/1670 train_time:71102ms step_avg:97.40ms +step:731/1670 train_time:71200ms step_avg:97.40ms +step:732/1670 train_time:71297ms step_avg:97.40ms +step:733/1670 train_time:71395ms step_avg:97.40ms +step:734/1670 train_time:71492ms step_avg:97.40ms +step:735/1670 train_time:71589ms step_avg:97.40ms +step:736/1670 train_time:71686ms step_avg:97.40ms +step:737/1670 train_time:71783ms step_avg:97.40ms +step:738/1670 train_time:71880ms step_avg:97.40ms +step:739/1670 train_time:71977ms step_avg:97.40ms +step:740/1670 train_time:72076ms step_avg:97.40ms +step:741/1670 train_time:72172ms step_avg:97.40ms +step:742/1670 train_time:72269ms step_avg:97.40ms +step:743/1670 train_time:72366ms step_avg:97.40ms +step:744/1670 train_time:72463ms step_avg:97.40ms +step:745/1670 train_time:72560ms step_avg:97.40ms +step:746/1670 train_time:72658ms step_avg:97.40ms +step:747/1670 train_time:72757ms step_avg:97.40ms +step:748/1670 train_time:72853ms step_avg:97.40ms +step:749/1670 train_time:72950ms step_avg:97.40ms +step:750/1670 train_time:73046ms step_avg:97.39ms +step:750/1670 val_loss:3.5598 train_time:73142ms step_avg:97.52ms +step:751/1670 train_time:73166ms step_avg:97.42ms +step:752/1670 train_time:73249ms step_avg:97.41ms +step:753/1670 train_time:73351ms step_avg:97.41ms +step:754/1670 train_time:73448ms step_avg:97.41ms +step:755/1670 train_time:73544ms step_avg:97.41ms +step:756/1670 train_time:73640ms step_avg:97.41ms +step:757/1670 train_time:73737ms step_avg:97.41ms +step:758/1670 train_time:73833ms step_avg:97.41ms +step:759/1670 train_time:73930ms step_avg:97.40ms +step:760/1670 train_time:74025ms step_avg:97.40ms +step:761/1670 train_time:74122ms step_avg:97.40ms +step:762/1670 train_time:74221ms step_avg:97.40ms +step:763/1670 train_time:74320ms step_avg:97.41ms +step:764/1670 train_time:74420ms step_avg:97.41ms +step:765/1670 train_time:74517ms step_avg:97.41ms +step:766/1670 train_time:74614ms step_avg:97.41ms +step:767/1670 train_time:74711ms step_avg:97.41ms +step:768/1670 train_time:74807ms step_avg:97.41ms +step:769/1670 
train_time:74903ms step_avg:97.40ms +step:770/1670 train_time:75000ms step_avg:97.40ms +step:771/1670 train_time:75096ms step_avg:97.40ms +step:772/1670 train_time:75195ms step_avg:97.40ms +step:773/1670 train_time:75295ms step_avg:97.41ms +step:774/1670 train_time:75395ms step_avg:97.41ms +step:775/1670 train_time:75495ms step_avg:97.41ms +step:776/1670 train_time:75593ms step_avg:97.41ms +step:777/1670 train_time:75691ms step_avg:97.41ms +step:778/1670 train_time:75788ms step_avg:97.41ms +step:779/1670 train_time:75884ms step_avg:97.41ms +step:780/1670 train_time:75980ms step_avg:97.41ms +step:781/1670 train_time:76077ms step_avg:97.41ms +step:782/1670 train_time:76175ms step_avg:97.41ms +step:783/1670 train_time:76273ms step_avg:97.41ms +step:784/1670 train_time:76371ms step_avg:97.41ms +step:785/1670 train_time:76469ms step_avg:97.41ms +step:786/1670 train_time:76566ms step_avg:97.41ms +step:787/1670 train_time:76663ms step_avg:97.41ms +step:788/1670 train_time:76760ms step_avg:97.41ms +step:789/1670 train_time:76857ms step_avg:97.41ms +step:790/1670 train_time:76955ms step_avg:97.41ms +step:791/1670 train_time:77053ms step_avg:97.41ms +step:792/1670 train_time:77151ms step_avg:97.41ms +step:793/1670 train_time:77248ms step_avg:97.41ms +step:794/1670 train_time:77344ms step_avg:97.41ms +step:795/1670 train_time:77442ms step_avg:97.41ms +step:796/1670 train_time:77539ms step_avg:97.41ms +step:797/1670 train_time:77637ms step_avg:97.41ms +step:798/1670 train_time:77735ms step_avg:97.41ms +step:799/1670 train_time:77832ms step_avg:97.41ms +step:800/1670 train_time:77929ms step_avg:97.41ms +step:801/1670 train_time:78025ms step_avg:97.41ms +step:802/1670 train_time:78122ms step_avg:97.41ms +step:803/1670 train_time:78219ms step_avg:97.41ms +step:804/1670 train_time:78317ms step_avg:97.41ms +step:805/1670 train_time:78416ms step_avg:97.41ms +step:806/1670 train_time:78514ms step_avg:97.41ms +step:807/1670 train_time:78612ms step_avg:97.41ms +step:808/1670 train_time:78709ms step_avg:97.41ms +step:809/1670 train_time:78805ms step_avg:97.41ms +step:810/1670 train_time:78902ms step_avg:97.41ms +step:811/1670 train_time:78999ms step_avg:97.41ms +step:812/1670 train_time:79097ms step_avg:97.41ms +step:813/1670 train_time:79194ms step_avg:97.41ms +step:814/1670 train_time:79292ms step_avg:97.41ms +step:815/1670 train_time:79389ms step_avg:97.41ms +step:816/1670 train_time:79487ms step_avg:97.41ms +step:817/1670 train_time:79583ms step_avg:97.41ms +step:818/1670 train_time:79680ms step_avg:97.41ms +step:819/1670 train_time:79778ms step_avg:97.41ms +step:820/1670 train_time:79875ms step_avg:97.41ms +step:821/1670 train_time:79973ms step_avg:97.41ms +step:822/1670 train_time:80070ms step_avg:97.41ms +step:823/1670 train_time:80167ms step_avg:97.41ms +step:824/1670 train_time:80265ms step_avg:97.41ms +step:825/1670 train_time:80362ms step_avg:97.41ms +step:826/1670 train_time:80459ms step_avg:97.41ms +step:827/1670 train_time:80556ms step_avg:97.41ms +step:828/1670 train_time:80655ms step_avg:97.41ms +step:829/1670 train_time:80754ms step_avg:97.41ms +step:830/1670 train_time:80852ms step_avg:97.41ms +step:831/1670 train_time:80949ms step_avg:97.41ms +step:832/1670 train_time:81046ms step_avg:97.41ms +step:833/1670 train_time:81142ms step_avg:97.41ms +step:834/1670 train_time:81239ms step_avg:97.41ms +step:835/1670 train_time:81337ms step_avg:97.41ms +step:836/1670 train_time:81434ms step_avg:97.41ms +step:837/1670 train_time:81532ms step_avg:97.41ms +step:838/1670 train_time:81629ms step_avg:97.41ms 
+step:839/1670 train_time:81726ms step_avg:97.41ms +step:840/1670 train_time:81823ms step_avg:97.41ms +step:841/1670 train_time:81920ms step_avg:97.41ms +step:842/1670 train_time:82019ms step_avg:97.41ms +step:843/1670 train_time:82116ms step_avg:97.41ms +step:844/1670 train_time:82214ms step_avg:97.41ms +step:845/1670 train_time:82310ms step_avg:97.41ms +step:846/1670 train_time:82407ms step_avg:97.41ms +step:847/1670 train_time:82505ms step_avg:97.41ms +step:848/1670 train_time:82601ms step_avg:97.41ms +step:849/1670 train_time:82699ms step_avg:97.41ms +step:850/1670 train_time:82796ms step_avg:97.41ms +step:851/1670 train_time:83066ms step_avg:97.61ms +step:852/1670 train_time:83206ms step_avg:97.66ms +step:853/1670 train_time:83302ms step_avg:97.66ms +step:854/1670 train_time:83398ms step_avg:97.66ms +step:855/1670 train_time:83495ms step_avg:97.65ms +step:856/1670 train_time:83591ms step_avg:97.65ms +step:857/1670 train_time:83688ms step_avg:97.65ms +step:858/1670 train_time:83783ms step_avg:97.65ms +step:859/1670 train_time:83880ms step_avg:97.65ms +step:860/1670 train_time:83976ms step_avg:97.65ms +step:861/1670 train_time:84078ms step_avg:97.65ms +step:862/1670 train_time:84183ms step_avg:97.66ms +step:863/1670 train_time:84281ms step_avg:97.66ms +step:864/1670 train_time:84378ms step_avg:97.66ms +step:865/1670 train_time:84475ms step_avg:97.66ms +step:866/1670 train_time:84572ms step_avg:97.66ms +step:867/1670 train_time:84668ms step_avg:97.66ms +step:868/1670 train_time:84764ms step_avg:97.65ms +step:869/1670 train_time:84859ms step_avg:97.65ms +step:870/1670 train_time:84956ms step_avg:97.65ms +step:871/1670 train_time:85055ms step_avg:97.65ms +step:872/1670 train_time:85158ms step_avg:97.66ms +step:873/1670 train_time:85258ms step_avg:97.66ms +step:874/1670 train_time:85357ms step_avg:97.66ms +step:875/1670 train_time:85455ms step_avg:97.66ms +step:875/1670 val_loss:3.5197 train_time:85551ms step_avg:97.77ms +step:876/1670 train_time:85574ms step_avg:97.69ms +step:877/1670 train_time:85657ms step_avg:97.67ms +step:878/1670 train_time:85754ms step_avg:97.67ms +step:879/1670 train_time:85853ms step_avg:97.67ms +step:880/1670 train_time:85950ms step_avg:97.67ms +step:881/1670 train_time:86046ms step_avg:97.67ms +step:882/1670 train_time:86142ms step_avg:97.67ms +step:883/1670 train_time:86238ms step_avg:97.66ms +step:884/1670 train_time:86335ms step_avg:97.66ms +step:885/1670 train_time:86431ms step_avg:97.66ms +step:886/1670 train_time:86532ms step_avg:97.67ms +step:887/1670 train_time:86632ms step_avg:97.67ms +step:888/1670 train_time:86730ms step_avg:97.67ms +step:889/1670 train_time:86828ms step_avg:97.67ms +step:890/1670 train_time:86926ms step_avg:97.67ms +step:891/1670 train_time:87023ms step_avg:97.67ms +step:892/1670 train_time:87119ms step_avg:97.67ms +step:893/1670 train_time:87215ms step_avg:97.67ms +step:894/1670 train_time:87312ms step_avg:97.66ms +step:895/1670 train_time:87409ms step_avg:97.66ms +step:896/1670 train_time:87508ms step_avg:97.66ms +step:897/1670 train_time:87608ms step_avg:97.67ms +step:898/1670 train_time:87707ms step_avg:97.67ms +step:899/1670 train_time:87804ms step_avg:97.67ms +step:900/1670 train_time:87902ms step_avg:97.67ms +step:901/1670 train_time:87999ms step_avg:97.67ms +step:902/1670 train_time:88095ms step_avg:97.67ms +step:903/1670 train_time:88192ms step_avg:97.67ms +step:904/1670 train_time:88289ms step_avg:97.66ms +step:905/1670 train_time:88386ms step_avg:97.66ms +step:906/1670 train_time:88482ms step_avg:97.66ms +step:907/1670 
train_time:88580ms step_avg:97.66ms +step:908/1670 train_time:88677ms step_avg:97.66ms +step:909/1670 train_time:88774ms step_avg:97.66ms +step:910/1670 train_time:88872ms step_avg:97.66ms +step:911/1670 train_time:88970ms step_avg:97.66ms +step:912/1670 train_time:89068ms step_avg:97.66ms +step:913/1670 train_time:89166ms step_avg:97.66ms +step:914/1670 train_time:89262ms step_avg:97.66ms +step:915/1670 train_time:89359ms step_avg:97.66ms +step:916/1670 train_time:89455ms step_avg:97.66ms +step:917/1670 train_time:89552ms step_avg:97.66ms +step:918/1670 train_time:89650ms step_avg:97.66ms +step:919/1670 train_time:89748ms step_avg:97.66ms +step:920/1670 train_time:89846ms step_avg:97.66ms +step:921/1670 train_time:89944ms step_avg:97.66ms +step:922/1670 train_time:90041ms step_avg:97.66ms +step:923/1670 train_time:90137ms step_avg:97.66ms +step:924/1670 train_time:90234ms step_avg:97.66ms +step:925/1670 train_time:90331ms step_avg:97.65ms +step:926/1670 train_time:90428ms step_avg:97.65ms +step:927/1670 train_time:90526ms step_avg:97.65ms +step:928/1670 train_time:90624ms step_avg:97.66ms +step:929/1670 train_time:90721ms step_avg:97.65ms +step:930/1670 train_time:90819ms step_avg:97.65ms +step:931/1670 train_time:90915ms step_avg:97.65ms +step:932/1670 train_time:91013ms step_avg:97.65ms +step:933/1670 train_time:91110ms step_avg:97.65ms +step:934/1670 train_time:91208ms step_avg:97.65ms +step:935/1670 train_time:91306ms step_avg:97.65ms +step:936/1670 train_time:91404ms step_avg:97.65ms +step:937/1670 train_time:91502ms step_avg:97.65ms +step:938/1670 train_time:91598ms step_avg:97.65ms +step:939/1670 train_time:91695ms step_avg:97.65ms +step:940/1670 train_time:91793ms step_avg:97.65ms +step:941/1670 train_time:91891ms step_avg:97.65ms +step:942/1670 train_time:91988ms step_avg:97.65ms +step:943/1670 train_time:92085ms step_avg:97.65ms +step:944/1670 train_time:92182ms step_avg:97.65ms +step:945/1670 train_time:92279ms step_avg:97.65ms +step:946/1670 train_time:92375ms step_avg:97.65ms +step:947/1670 train_time:92473ms step_avg:97.65ms +step:948/1670 train_time:92570ms step_avg:97.65ms +step:949/1670 train_time:92669ms step_avg:97.65ms +step:950/1670 train_time:92766ms step_avg:97.65ms +step:951/1670 train_time:92864ms step_avg:97.65ms +step:952/1670 train_time:92962ms step_avg:97.65ms +step:953/1670 train_time:93058ms step_avg:97.65ms +step:954/1670 train_time:93155ms step_avg:97.65ms +step:955/1670 train_time:93253ms step_avg:97.65ms +step:956/1670 train_time:93350ms step_avg:97.65ms +step:957/1670 train_time:93448ms step_avg:97.65ms +step:958/1670 train_time:93545ms step_avg:97.65ms +step:959/1670 train_time:93643ms step_avg:97.65ms +step:960/1670 train_time:93740ms step_avg:97.65ms +step:961/1670 train_time:93836ms step_avg:97.64ms +step:962/1670 train_time:93933ms step_avg:97.64ms +step:963/1670 train_time:94030ms step_avg:97.64ms +step:964/1670 train_time:94128ms step_avg:97.64ms +step:965/1670 train_time:94226ms step_avg:97.64ms +step:966/1670 train_time:94324ms step_avg:97.64ms +step:967/1670 train_time:94421ms step_avg:97.64ms +step:968/1670 train_time:94517ms step_avg:97.64ms +step:969/1670 train_time:94614ms step_avg:97.64ms +step:970/1670 train_time:94713ms step_avg:97.64ms +step:971/1670 train_time:94810ms step_avg:97.64ms +step:972/1670 train_time:94907ms step_avg:97.64ms +step:973/1670 train_time:95004ms step_avg:97.64ms +step:974/1670 train_time:95102ms step_avg:97.64ms +step:975/1670 train_time:95199ms step_avg:97.64ms +step:976/1670 train_time:95296ms step_avg:97.64ms 
+step:977/1670 train_time:95393ms step_avg:97.64ms +step:978/1670 train_time:95490ms step_avg:97.64ms +step:979/1670 train_time:95588ms step_avg:97.64ms +step:980/1670 train_time:95687ms step_avg:97.64ms +step:981/1670 train_time:95785ms step_avg:97.64ms +step:982/1670 train_time:95882ms step_avg:97.64ms +step:983/1670 train_time:95979ms step_avg:97.64ms +step:984/1670 train_time:96076ms step_avg:97.64ms +step:985/1670 train_time:96173ms step_avg:97.64ms +step:986/1670 train_time:96270ms step_avg:97.64ms +step:987/1670 train_time:96368ms step_avg:97.64ms +step:988/1670 train_time:96465ms step_avg:97.64ms +step:989/1670 train_time:96564ms step_avg:97.64ms +step:990/1670 train_time:96661ms step_avg:97.64ms +step:991/1670 train_time:96758ms step_avg:97.64ms +step:992/1670 train_time:96854ms step_avg:97.64ms +step:993/1670 train_time:96953ms step_avg:97.64ms +step:994/1670 train_time:97051ms step_avg:97.64ms +step:995/1670 train_time:97148ms step_avg:97.64ms +step:996/1670 train_time:97246ms step_avg:97.64ms +step:997/1670 train_time:97343ms step_avg:97.64ms +step:998/1670 train_time:97439ms step_avg:97.63ms +step:999/1670 train_time:97536ms step_avg:97.63ms +step:1000/1670 train_time:97633ms step_avg:97.63ms +step:1000/1670 val_loss:3.4772 train_time:97729ms step_avg:97.73ms +step:1001/1670 train_time:97752ms step_avg:97.65ms +step:1002/1670 train_time:97833ms step_avg:97.64ms +step:1003/1670 train_time:97935ms step_avg:97.64ms +step:1004/1670 train_time:98032ms step_avg:97.64ms +step:1005/1670 train_time:98128ms step_avg:97.64ms +step:1006/1670 train_time:98224ms step_avg:97.64ms +step:1007/1670 train_time:98321ms step_avg:97.64ms +step:1008/1670 train_time:98417ms step_avg:97.64ms +step:1009/1670 train_time:98514ms step_avg:97.63ms +step:1010/1670 train_time:98609ms step_avg:97.63ms +step:1011/1670 train_time:98707ms step_avg:97.63ms +step:1012/1670 train_time:98806ms step_avg:97.63ms +step:1013/1670 train_time:98905ms step_avg:97.64ms +step:1014/1670 train_time:99003ms step_avg:97.64ms +step:1015/1670 train_time:99102ms step_avg:97.64ms +step:1016/1670 train_time:99199ms step_avg:97.64ms +step:1017/1670 train_time:99295ms step_avg:97.64ms +step:1018/1670 train_time:99391ms step_avg:97.63ms +step:1019/1670 train_time:99488ms step_avg:97.63ms +step:1020/1670 train_time:99584ms step_avg:97.63ms +step:1021/1670 train_time:99683ms step_avg:97.63ms +step:1022/1670 train_time:99781ms step_avg:97.63ms +step:1023/1670 train_time:99882ms step_avg:97.64ms +step:1024/1670 train_time:99981ms step_avg:97.64ms +step:1025/1670 train_time:100079ms step_avg:97.64ms +step:1026/1670 train_time:100176ms step_avg:97.64ms +step:1027/1670 train_time:100274ms step_avg:97.64ms +step:1028/1670 train_time:100371ms step_avg:97.64ms +step:1029/1670 train_time:100467ms step_avg:97.64ms +step:1030/1670 train_time:100564ms step_avg:97.63ms +step:1031/1670 train_time:100661ms step_avg:97.63ms +step:1032/1670 train_time:100760ms step_avg:97.64ms +step:1033/1670 train_time:100859ms step_avg:97.64ms +step:1034/1670 train_time:100957ms step_avg:97.64ms +step:1035/1670 train_time:101055ms step_avg:97.64ms +step:1036/1670 train_time:101153ms step_avg:97.64ms +step:1037/1670 train_time:101249ms step_avg:97.64ms +step:1038/1670 train_time:101346ms step_avg:97.64ms +step:1039/1670 train_time:101443ms step_avg:97.63ms +step:1040/1670 train_time:101540ms step_avg:97.63ms +step:1041/1670 train_time:101639ms step_avg:97.64ms +step:1042/1670 train_time:101736ms step_avg:97.64ms +step:1043/1670 train_time:101835ms step_avg:97.64ms 
+step:1044/1670 train_time:101933ms step_avg:97.64ms +step:1045/1670 train_time:102030ms step_avg:97.64ms +step:1046/1670 train_time:102128ms step_avg:97.64ms +step:1047/1670 train_time:102225ms step_avg:97.64ms +step:1048/1670 train_time:102323ms step_avg:97.64ms +step:1049/1670 train_time:102421ms step_avg:97.64ms +step:1050/1670 train_time:102518ms step_avg:97.64ms +step:1051/1670 train_time:102616ms step_avg:97.64ms +step:1052/1670 train_time:102713ms step_avg:97.64ms +step:1053/1670 train_time:102810ms step_avg:97.63ms +step:1054/1670 train_time:102907ms step_avg:97.63ms +step:1055/1670 train_time:103005ms step_avg:97.63ms +step:1056/1670 train_time:103103ms step_avg:97.64ms +step:1057/1670 train_time:103202ms step_avg:97.64ms +step:1058/1670 train_time:103299ms step_avg:97.64ms +step:1059/1670 train_time:103396ms step_avg:97.64ms +step:1060/1670 train_time:103494ms step_avg:97.64ms +step:1061/1670 train_time:103592ms step_avg:97.64ms +step:1062/1670 train_time:103856ms step_avg:97.79ms +step:1063/1670 train_time:104047ms step_avg:97.88ms +step:1064/1670 train_time:104142ms step_avg:97.88ms +step:1065/1670 train_time:104238ms step_avg:97.88ms +step:1066/1670 train_time:104334ms step_avg:97.87ms +step:1067/1670 train_time:104431ms step_avg:97.87ms +step:1068/1670 train_time:104526ms step_avg:97.87ms +step:1069/1670 train_time:104623ms step_avg:97.87ms +step:1070/1670 train_time:104720ms step_avg:97.87ms +step:1071/1670 train_time:104817ms step_avg:97.87ms +step:1072/1670 train_time:104917ms step_avg:97.87ms +step:1073/1670 train_time:105021ms step_avg:97.88ms +step:1074/1670 train_time:105122ms step_avg:97.88ms +step:1075/1670 train_time:105219ms step_avg:97.88ms +step:1076/1670 train_time:105316ms step_avg:97.88ms +step:1077/1670 train_time:105414ms step_avg:97.88ms +step:1078/1670 train_time:105512ms step_avg:97.88ms +step:1079/1670 train_time:105609ms step_avg:97.88ms +step:1080/1670 train_time:105705ms step_avg:97.87ms +step:1081/1670 train_time:105801ms step_avg:97.87ms +step:1082/1670 train_time:105898ms step_avg:97.87ms +step:1083/1670 train_time:105998ms step_avg:97.87ms +step:1084/1670 train_time:106095ms step_avg:97.87ms +step:1085/1670 train_time:106194ms step_avg:97.87ms +step:1086/1670 train_time:106290ms step_avg:97.87ms +step:1087/1670 train_time:106387ms step_avg:97.87ms +step:1088/1670 train_time:106484ms step_avg:97.87ms +step:1089/1670 train_time:106581ms step_avg:97.87ms +step:1090/1670 train_time:106678ms step_avg:97.87ms +step:1091/1670 train_time:106776ms step_avg:97.87ms +step:1092/1670 train_time:106874ms step_avg:97.87ms +step:1093/1670 train_time:106971ms step_avg:97.87ms +step:1094/1670 train_time:107068ms step_avg:97.87ms +step:1095/1670 train_time:107166ms step_avg:97.87ms +step:1096/1670 train_time:107264ms step_avg:97.87ms +step:1097/1670 train_time:107363ms step_avg:97.87ms +step:1098/1670 train_time:107460ms step_avg:97.87ms +step:1099/1670 train_time:107558ms step_avg:97.87ms +step:1100/1670 train_time:107655ms step_avg:97.87ms +step:1101/1670 train_time:107752ms step_avg:97.87ms +step:1102/1670 train_time:107848ms step_avg:97.87ms +step:1103/1670 train_time:107945ms step_avg:97.87ms +step:1104/1670 train_time:108043ms step_avg:97.86ms +step:1105/1670 train_time:108142ms step_avg:97.87ms +step:1106/1670 train_time:108240ms step_avg:97.87ms +step:1107/1670 train_time:108338ms step_avg:97.87ms +step:1108/1670 train_time:108435ms step_avg:97.87ms +step:1109/1670 train_time:108532ms step_avg:97.86ms +step:1110/1670 train_time:108629ms step_avg:97.86ms 
+step:1111/1670 train_time:108726ms step_avg:97.86ms +step:1112/1670 train_time:108823ms step_avg:97.86ms +step:1113/1670 train_time:108921ms step_avg:97.86ms +step:1114/1670 train_time:109019ms step_avg:97.86ms +step:1115/1670 train_time:109118ms step_avg:97.86ms +step:1116/1670 train_time:109216ms step_avg:97.86ms +step:1117/1670 train_time:109314ms step_avg:97.86ms +step:1118/1670 train_time:109412ms step_avg:97.86ms +step:1119/1670 train_time:109509ms step_avg:97.86ms +step:1120/1670 train_time:109607ms step_avg:97.86ms +step:1121/1670 train_time:109705ms step_avg:97.86ms +step:1122/1670 train_time:109802ms step_avg:97.86ms +step:1123/1670 train_time:109901ms step_avg:97.86ms +step:1124/1670 train_time:110000ms step_avg:97.87ms +step:1125/1670 train_time:110100ms step_avg:97.87ms +step:1125/1670 val_loss:3.4233 train_time:110198ms step_avg:97.95ms +step:1126/1670 train_time:110221ms step_avg:97.89ms +step:1127/1670 train_time:110303ms step_avg:97.87ms +step:1128/1670 train_time:110402ms step_avg:97.87ms +step:1129/1670 train_time:110499ms step_avg:97.87ms +step:1130/1670 train_time:110595ms step_avg:97.87ms +step:1131/1670 train_time:110691ms step_avg:97.87ms +step:1132/1670 train_time:110788ms step_avg:97.87ms +step:1133/1670 train_time:110885ms step_avg:97.87ms +step:1134/1670 train_time:110983ms step_avg:97.87ms +step:1135/1670 train_time:111081ms step_avg:97.87ms +step:1136/1670 train_time:111184ms step_avg:97.87ms +step:1137/1670 train_time:111288ms step_avg:97.88ms +step:1138/1670 train_time:111387ms step_avg:97.88ms +step:1139/1670 train_time:111486ms step_avg:97.88ms +step:1140/1670 train_time:111585ms step_avg:97.88ms +step:1141/1670 train_time:111683ms step_avg:97.88ms +step:1142/1670 train_time:111780ms step_avg:97.88ms +step:1143/1670 train_time:111876ms step_avg:97.88ms +step:1144/1670 train_time:111974ms step_avg:97.88ms +step:1145/1670 train_time:112071ms step_avg:97.88ms +step:1146/1670 train_time:112170ms step_avg:97.88ms +step:1147/1670 train_time:112268ms step_avg:97.88ms +step:1148/1670 train_time:112368ms step_avg:97.88ms +step:1149/1670 train_time:112466ms step_avg:97.88ms +step:1150/1670 train_time:112564ms step_avg:97.88ms +step:1151/1670 train_time:112664ms step_avg:97.88ms +step:1152/1670 train_time:112762ms step_avg:97.88ms +step:1153/1670 train_time:112860ms step_avg:97.88ms +step:1154/1670 train_time:112956ms step_avg:97.88ms +step:1155/1670 train_time:113054ms step_avg:97.88ms +step:1156/1670 train_time:113153ms step_avg:97.88ms +step:1157/1670 train_time:113251ms step_avg:97.88ms +step:1158/1670 train_time:113349ms step_avg:97.88ms +step:1159/1670 train_time:113448ms step_avg:97.88ms +step:1160/1670 train_time:113546ms step_avg:97.88ms +step:1161/1670 train_time:113644ms step_avg:97.88ms +step:1162/1670 train_time:113741ms step_avg:97.88ms +step:1163/1670 train_time:113840ms step_avg:97.88ms +step:1164/1670 train_time:113938ms step_avg:97.88ms +step:1165/1670 train_time:114037ms step_avg:97.89ms +step:1166/1670 train_time:114136ms step_avg:97.89ms +step:1167/1670 train_time:114236ms step_avg:97.89ms +step:1168/1670 train_time:114335ms step_avg:97.89ms +step:1169/1670 train_time:114434ms step_avg:97.89ms +step:1170/1670 train_time:114531ms step_avg:97.89ms +step:1171/1670 train_time:114629ms step_avg:97.89ms +step:1172/1670 train_time:114727ms step_avg:97.89ms +step:1173/1670 train_time:114825ms step_avg:97.89ms +step:1174/1670 train_time:114923ms step_avg:97.89ms +step:1175/1670 train_time:115022ms step_avg:97.89ms +step:1176/1670 train_time:115123ms 
step_avg:97.89ms +step:1177/1670 train_time:115222ms step_avg:97.89ms +step:1178/1670 train_time:115319ms step_avg:97.89ms +step:1179/1670 train_time:115418ms step_avg:97.90ms +step:1180/1670 train_time:115517ms step_avg:97.90ms +step:1181/1670 train_time:115614ms step_avg:97.89ms +step:1182/1670 train_time:115710ms step_avg:97.89ms +step:1183/1670 train_time:115808ms step_avg:97.89ms +step:1184/1670 train_time:115906ms step_avg:97.89ms +step:1185/1670 train_time:116005ms step_avg:97.89ms +step:1186/1670 train_time:116104ms step_avg:97.90ms +step:1187/1670 train_time:116202ms step_avg:97.90ms +step:1188/1670 train_time:116301ms step_avg:97.90ms +step:1189/1670 train_time:116400ms step_avg:97.90ms +step:1190/1670 train_time:116498ms step_avg:97.90ms +step:1191/1670 train_time:116597ms step_avg:97.90ms +step:1192/1670 train_time:116695ms step_avg:97.90ms +step:1193/1670 train_time:116792ms step_avg:97.90ms +step:1194/1670 train_time:116888ms step_avg:97.90ms +step:1195/1670 train_time:116986ms step_avg:97.90ms +step:1196/1670 train_time:117085ms step_avg:97.90ms +step:1197/1670 train_time:117183ms step_avg:97.90ms +step:1198/1670 train_time:117283ms step_avg:97.90ms +step:1199/1670 train_time:117382ms step_avg:97.90ms +step:1200/1670 train_time:117482ms step_avg:97.90ms +step:1201/1670 train_time:117581ms step_avg:97.90ms +step:1202/1670 train_time:117680ms step_avg:97.90ms +step:1203/1670 train_time:117779ms step_avg:97.90ms +step:1204/1670 train_time:117877ms step_avg:97.90ms +step:1205/1670 train_time:117974ms step_avg:97.90ms +step:1206/1670 train_time:118071ms step_avg:97.90ms +step:1207/1670 train_time:118168ms step_avg:97.90ms +step:1208/1670 train_time:118267ms step_avg:97.90ms +step:1209/1670 train_time:118366ms step_avg:97.90ms +step:1210/1670 train_time:118465ms step_avg:97.90ms +step:1211/1670 train_time:118565ms step_avg:97.91ms +step:1212/1670 train_time:118665ms step_avg:97.91ms +step:1213/1670 train_time:118765ms step_avg:97.91ms +step:1214/1670 train_time:118865ms step_avg:97.91ms +step:1215/1670 train_time:118965ms step_avg:97.91ms +step:1216/1670 train_time:119064ms step_avg:97.91ms +step:1217/1670 train_time:119162ms step_avg:97.91ms +step:1218/1670 train_time:119260ms step_avg:97.91ms +step:1219/1670 train_time:119358ms step_avg:97.91ms +step:1220/1670 train_time:119456ms step_avg:97.92ms +step:1221/1670 train_time:119553ms step_avg:97.91ms +step:1222/1670 train_time:119651ms step_avg:97.91ms +step:1223/1670 train_time:119749ms step_avg:97.91ms +step:1224/1670 train_time:119848ms step_avg:97.91ms +step:1225/1670 train_time:119946ms step_avg:97.91ms +step:1226/1670 train_time:120044ms step_avg:97.92ms +step:1227/1670 train_time:120142ms step_avg:97.92ms +step:1228/1670 train_time:120240ms step_avg:97.92ms +step:1229/1670 train_time:120338ms step_avg:97.92ms +step:1230/1670 train_time:120437ms step_avg:97.92ms +step:1231/1670 train_time:120535ms step_avg:97.92ms +step:1232/1670 train_time:120633ms step_avg:97.92ms +step:1233/1670 train_time:120730ms step_avg:97.92ms +step:1234/1670 train_time:120827ms step_avg:97.92ms +step:1235/1670 train_time:120925ms step_avg:97.92ms +step:1236/1670 train_time:121023ms step_avg:97.92ms +step:1237/1670 train_time:121122ms step_avg:97.92ms +step:1238/1670 train_time:121219ms step_avg:97.92ms +step:1239/1670 train_time:121317ms step_avg:97.92ms +step:1240/1670 train_time:121416ms step_avg:97.92ms +step:1241/1670 train_time:121514ms step_avg:97.92ms +step:1242/1670 train_time:121611ms step_avg:97.92ms +step:1243/1670 train_time:121708ms 
step_avg:97.91ms +step:1244/1670 train_time:121805ms step_avg:97.91ms +step:1245/1670 train_time:121904ms step_avg:97.91ms +step:1246/1670 train_time:122002ms step_avg:97.92ms +step:1247/1670 train_time:122100ms step_avg:97.92ms +step:1248/1670 train_time:122198ms step_avg:97.92ms +step:1249/1670 train_time:122297ms step_avg:97.92ms +step:1250/1670 train_time:122395ms step_avg:97.92ms +step:1250/1670 val_loss:3.3801 train_time:122492ms step_avg:97.99ms +step:1251/1670 train_time:122515ms step_avg:97.93ms +step:1252/1670 train_time:122598ms step_avg:97.92ms +step:1253/1670 train_time:122696ms step_avg:97.92ms +step:1254/1670 train_time:122794ms step_avg:97.92ms +step:1255/1670 train_time:122892ms step_avg:97.92ms +step:1256/1670 train_time:122989ms step_avg:97.92ms +step:1257/1670 train_time:123087ms step_avg:97.92ms +step:1258/1670 train_time:123184ms step_avg:97.92ms +step:1259/1670 train_time:123282ms step_avg:97.92ms +step:1260/1670 train_time:123378ms step_avg:97.92ms +step:1261/1670 train_time:123478ms step_avg:97.92ms +step:1262/1670 train_time:123578ms step_avg:97.92ms +step:1263/1670 train_time:123676ms step_avg:97.92ms +step:1264/1670 train_time:123775ms step_avg:97.92ms +step:1265/1670 train_time:123874ms step_avg:97.92ms +step:1266/1670 train_time:123971ms step_avg:97.92ms +step:1267/1670 train_time:124068ms step_avg:97.92ms +step:1268/1670 train_time:124166ms step_avg:97.92ms +step:1269/1670 train_time:124263ms step_avg:97.92ms +step:1270/1670 train_time:124360ms step_avg:97.92ms +step:1271/1670 train_time:124458ms step_avg:97.92ms +step:1272/1670 train_time:124557ms step_avg:97.92ms +step:1273/1670 train_time:124655ms step_avg:97.92ms +step:1274/1670 train_time:125017ms step_avg:98.13ms +step:1275/1670 train_time:125117ms step_avg:98.13ms +step:1276/1670 train_time:125214ms step_avg:98.13ms +step:1277/1670 train_time:125310ms step_avg:98.13ms +step:1278/1670 train_time:125408ms step_avg:98.13ms +step:1279/1670 train_time:125505ms step_avg:98.13ms +step:1280/1670 train_time:125602ms step_avg:98.13ms +step:1281/1670 train_time:125698ms step_avg:98.12ms +step:1282/1670 train_time:125795ms step_avg:98.12ms +step:1283/1670 train_time:125892ms step_avg:98.12ms +step:1284/1670 train_time:125998ms step_avg:98.13ms +step:1285/1670 train_time:126098ms step_avg:98.13ms +step:1286/1670 train_time:126196ms step_avg:98.13ms +step:1287/1670 train_time:126293ms step_avg:98.13ms +step:1288/1670 train_time:126390ms step_avg:98.13ms +step:1289/1670 train_time:126487ms step_avg:98.13ms +step:1290/1670 train_time:126585ms step_avg:98.13ms +step:1291/1670 train_time:126683ms step_avg:98.13ms +step:1292/1670 train_time:126780ms step_avg:98.13ms +step:1293/1670 train_time:126878ms step_avg:98.13ms +step:1294/1670 train_time:126977ms step_avg:98.13ms +step:1295/1670 train_time:127077ms step_avg:98.13ms +step:1296/1670 train_time:127175ms step_avg:98.13ms +step:1297/1670 train_time:127273ms step_avg:98.13ms +step:1298/1670 train_time:127371ms step_avg:98.13ms +step:1299/1670 train_time:127469ms step_avg:98.13ms +step:1300/1670 train_time:127566ms step_avg:98.13ms +step:1301/1670 train_time:127664ms step_avg:98.13ms +step:1302/1670 train_time:127761ms step_avg:98.13ms +step:1303/1670 train_time:127859ms step_avg:98.13ms +step:1304/1670 train_time:127956ms step_avg:98.13ms +step:1305/1670 train_time:128055ms step_avg:98.13ms +step:1306/1670 train_time:128154ms step_avg:98.13ms +step:1307/1670 train_time:128253ms step_avg:98.13ms +step:1308/1670 train_time:128350ms step_avg:98.13ms +step:1309/1670 
train_time:128448ms step_avg:98.13ms +step:1310/1670 train_time:128547ms step_avg:98.13ms +step:1311/1670 train_time:128644ms step_avg:98.13ms +step:1312/1670 train_time:128742ms step_avg:98.13ms +step:1313/1670 train_time:128839ms step_avg:98.13ms +step:1314/1670 train_time:128937ms step_avg:98.13ms +step:1315/1670 train_time:129035ms step_avg:98.13ms +step:1316/1670 train_time:129133ms step_avg:98.13ms +step:1317/1670 train_time:129230ms step_avg:98.12ms +step:1318/1670 train_time:129329ms step_avg:98.13ms +step:1319/1670 train_time:129427ms step_avg:98.13ms +step:1320/1670 train_time:129525ms step_avg:98.13ms +step:1321/1670 train_time:129623ms step_avg:98.12ms +step:1322/1670 train_time:129721ms step_avg:98.12ms +step:1323/1670 train_time:129818ms step_avg:98.12ms +step:1324/1670 train_time:129916ms step_avg:98.12ms +step:1325/1670 train_time:130015ms step_avg:98.12ms +step:1326/1670 train_time:130114ms step_avg:98.13ms +step:1327/1670 train_time:130212ms step_avg:98.12ms +step:1328/1670 train_time:130310ms step_avg:98.12ms +step:1329/1670 train_time:130407ms step_avg:98.12ms +step:1330/1670 train_time:130506ms step_avg:98.12ms +step:1331/1670 train_time:130604ms step_avg:98.12ms +step:1332/1670 train_time:130701ms step_avg:98.12ms +step:1333/1670 train_time:130799ms step_avg:98.12ms +step:1334/1670 train_time:130899ms step_avg:98.12ms +step:1335/1670 train_time:130997ms step_avg:98.13ms +step:1336/1670 train_time:131095ms step_avg:98.12ms +step:1337/1670 train_time:131192ms step_avg:98.12ms +step:1338/1670 train_time:131290ms step_avg:98.12ms +step:1339/1670 train_time:131388ms step_avg:98.12ms +step:1340/1670 train_time:131486ms step_avg:98.12ms +step:1341/1670 train_time:131583ms step_avg:98.12ms +step:1342/1670 train_time:131681ms step_avg:98.12ms +step:1343/1670 train_time:131780ms step_avg:98.12ms +step:1344/1670 train_time:131878ms step_avg:98.12ms +step:1345/1670 train_time:131975ms step_avg:98.12ms +step:1346/1670 train_time:132073ms step_avg:98.12ms +step:1347/1670 train_time:132171ms step_avg:98.12ms +step:1348/1670 train_time:132269ms step_avg:98.12ms +step:1349/1670 train_time:132367ms step_avg:98.12ms +step:1350/1670 train_time:132465ms step_avg:98.12ms +step:1351/1670 train_time:132564ms step_avg:98.12ms +step:1352/1670 train_time:132661ms step_avg:98.12ms +step:1353/1670 train_time:132760ms step_avg:98.12ms +step:1354/1670 train_time:132858ms step_avg:98.12ms +step:1355/1670 train_time:132956ms step_avg:98.12ms +step:1356/1670 train_time:133054ms step_avg:98.12ms +step:1357/1670 train_time:133152ms step_avg:98.12ms +step:1358/1670 train_time:133249ms step_avg:98.12ms +step:1359/1670 train_time:133347ms step_avg:98.12ms +step:1360/1670 train_time:133444ms step_avg:98.12ms +step:1361/1670 train_time:133542ms step_avg:98.12ms +step:1362/1670 train_time:133639ms step_avg:98.12ms +step:1363/1670 train_time:133737ms step_avg:98.12ms +step:1364/1670 train_time:133835ms step_avg:98.12ms +step:1365/1670 train_time:133934ms step_avg:98.12ms +step:1366/1670 train_time:134032ms step_avg:98.12ms +step:1367/1670 train_time:134130ms step_avg:98.12ms +step:1368/1670 train_time:134229ms step_avg:98.12ms +step:1369/1670 train_time:134328ms step_avg:98.12ms +step:1370/1670 train_time:134427ms step_avg:98.12ms +step:1371/1670 train_time:134525ms step_avg:98.12ms +step:1372/1670 train_time:134622ms step_avg:98.12ms +step:1373/1670 train_time:134721ms step_avg:98.12ms +step:1374/1670 train_time:134820ms step_avg:98.12ms +step:1375/1670 train_time:134917ms step_avg:98.12ms +step:1375/1670 
val_loss:3.3425 train_time:135014ms step_avg:98.19ms +step:1376/1670 train_time:135036ms step_avg:98.14ms +step:1377/1670 train_time:135122ms step_avg:98.13ms +step:1378/1670 train_time:135224ms step_avg:98.13ms +step:1379/1670 train_time:135323ms step_avg:98.13ms +step:1380/1670 train_time:135421ms step_avg:98.13ms +step:1381/1670 train_time:135518ms step_avg:98.13ms +step:1382/1670 train_time:135615ms step_avg:98.13ms +step:1383/1670 train_time:135712ms step_avg:98.13ms +step:1384/1670 train_time:135809ms step_avg:98.13ms +step:1385/1670 train_time:135906ms step_avg:98.13ms +step:1386/1670 train_time:136006ms step_avg:98.13ms +step:1387/1670 train_time:136108ms step_avg:98.13ms +step:1388/1670 train_time:136208ms step_avg:98.13ms +step:1389/1670 train_time:136307ms step_avg:98.13ms +step:1390/1670 train_time:136406ms step_avg:98.13ms +step:1391/1670 train_time:136503ms step_avg:98.13ms +step:1392/1670 train_time:136602ms step_avg:98.13ms +step:1393/1670 train_time:136699ms step_avg:98.13ms +step:1394/1670 train_time:136796ms step_avg:98.13ms +step:1395/1670 train_time:136894ms step_avg:98.13ms +step:1396/1670 train_time:136992ms step_avg:98.13ms +step:1397/1670 train_time:137090ms step_avg:98.13ms +step:1398/1670 train_time:137189ms step_avg:98.13ms +step:1399/1670 train_time:137287ms step_avg:98.13ms +step:1400/1670 train_time:137386ms step_avg:98.13ms +step:1401/1670 train_time:137484ms step_avg:98.13ms +step:1402/1670 train_time:137581ms step_avg:98.13ms +step:1403/1670 train_time:137679ms step_avg:98.13ms +step:1404/1670 train_time:137777ms step_avg:98.13ms +step:1405/1670 train_time:137874ms step_avg:98.13ms +step:1406/1670 train_time:137973ms step_avg:98.13ms +step:1407/1670 train_time:138071ms step_avg:98.13ms +step:1408/1670 train_time:138169ms step_avg:98.13ms +step:1409/1670 train_time:138267ms step_avg:98.13ms +step:1410/1670 train_time:138367ms step_avg:98.13ms +step:1411/1670 train_time:138465ms step_avg:98.13ms +step:1412/1670 train_time:138563ms step_avg:98.13ms +step:1413/1670 train_time:138661ms step_avg:98.13ms +step:1414/1670 train_time:138759ms step_avg:98.13ms +step:1415/1670 train_time:138858ms step_avg:98.13ms +step:1416/1670 train_time:138956ms step_avg:98.13ms +step:1417/1670 train_time:139055ms step_avg:98.13ms +step:1418/1670 train_time:139152ms step_avg:98.13ms +step:1419/1670 train_time:139251ms step_avg:98.13ms +step:1420/1670 train_time:139349ms step_avg:98.13ms +step:1421/1670 train_time:139446ms step_avg:98.13ms +step:1422/1670 train_time:139544ms step_avg:98.13ms +step:1423/1670 train_time:139642ms step_avg:98.13ms +step:1424/1670 train_time:139740ms step_avg:98.13ms +step:1425/1670 train_time:139838ms step_avg:98.13ms +step:1426/1670 train_time:139936ms step_avg:98.13ms +step:1427/1670 train_time:140035ms step_avg:98.13ms +step:1428/1670 train_time:140133ms step_avg:98.13ms +step:1429/1670 train_time:140231ms step_avg:98.13ms +step:1430/1670 train_time:140330ms step_avg:98.13ms +step:1431/1670 train_time:140427ms step_avg:98.13ms +step:1432/1670 train_time:140525ms step_avg:98.13ms +step:1433/1670 train_time:140623ms step_avg:98.13ms +step:1434/1670 train_time:140720ms step_avg:98.13ms +step:1435/1670 train_time:140819ms step_avg:98.13ms +step:1436/1670 train_time:140917ms step_avg:98.13ms +step:1437/1670 train_time:141015ms step_avg:98.13ms +step:1438/1670 train_time:141114ms step_avg:98.13ms +step:1439/1670 train_time:141213ms step_avg:98.13ms +step:1440/1670 train_time:141312ms step_avg:98.13ms +step:1441/1670 train_time:141409ms step_avg:98.13ms 
+step:1442/1670 train_time:141506ms step_avg:98.13ms +step:1443/1670 train_time:141604ms step_avg:98.13ms +step:1444/1670 train_time:141702ms step_avg:98.13ms +step:1445/1670 train_time:141800ms step_avg:98.13ms +step:1446/1670 train_time:141899ms step_avg:98.13ms +step:1447/1670 train_time:141998ms step_avg:98.13ms +step:1448/1670 train_time:142097ms step_avg:98.13ms +step:1449/1670 train_time:142197ms step_avg:98.13ms +step:1450/1670 train_time:142296ms step_avg:98.14ms +step:1451/1670 train_time:142394ms step_avg:98.14ms +step:1452/1670 train_time:142493ms step_avg:98.14ms +step:1453/1670 train_time:142590ms step_avg:98.13ms +step:1454/1670 train_time:142688ms step_avg:98.13ms +step:1455/1670 train_time:142786ms step_avg:98.13ms +step:1456/1670 train_time:142885ms step_avg:98.14ms +step:1457/1670 train_time:142984ms step_avg:98.14ms +step:1458/1670 train_time:143085ms step_avg:98.14ms +step:1459/1670 train_time:143184ms step_avg:98.14ms +step:1460/1670 train_time:143284ms step_avg:98.14ms +step:1461/1670 train_time:143384ms step_avg:98.14ms +step:1462/1670 train_time:143483ms step_avg:98.14ms +step:1463/1670 train_time:143581ms step_avg:98.14ms +step:1464/1670 train_time:143680ms step_avg:98.14ms +step:1465/1670 train_time:143778ms step_avg:98.14ms +step:1466/1670 train_time:143875ms step_avg:98.14ms +step:1467/1670 train_time:143972ms step_avg:98.14ms +step:1468/1670 train_time:144070ms step_avg:98.14ms +step:1469/1670 train_time:144168ms step_avg:98.14ms +step:1470/1670 train_time:144266ms step_avg:98.14ms +step:1471/1670 train_time:144365ms step_avg:98.14ms +step:1472/1670 train_time:144465ms step_avg:98.14ms +step:1473/1670 train_time:144565ms step_avg:98.14ms +step:1474/1670 train_time:144662ms step_avg:98.14ms +step:1475/1670 train_time:144760ms step_avg:98.14ms +step:1476/1670 train_time:144859ms step_avg:98.14ms +step:1477/1670 train_time:144957ms step_avg:98.14ms +step:1478/1670 train_time:145055ms step_avg:98.14ms +step:1479/1670 train_time:145153ms step_avg:98.14ms +step:1480/1670 train_time:145251ms step_avg:98.14ms +step:1481/1670 train_time:145349ms step_avg:98.14ms +step:1482/1670 train_time:145447ms step_avg:98.14ms +step:1483/1670 train_time:145545ms step_avg:98.14ms +step:1484/1670 train_time:145642ms step_avg:98.14ms +step:1485/1670 train_time:145908ms step_avg:98.25ms +step:1486/1670 train_time:146091ms step_avg:98.31ms +step:1487/1670 train_time:146187ms step_avg:98.31ms +step:1488/1670 train_time:146284ms step_avg:98.31ms +step:1489/1670 train_time:146381ms step_avg:98.31ms +step:1490/1670 train_time:146479ms step_avg:98.31ms +step:1491/1670 train_time:146576ms step_avg:98.31ms +step:1492/1670 train_time:146673ms step_avg:98.31ms +step:1493/1670 train_time:146769ms step_avg:98.30ms +step:1494/1670 train_time:146867ms step_avg:98.30ms +step:1495/1670 train_time:146973ms step_avg:98.31ms +step:1496/1670 train_time:147073ms step_avg:98.31ms +step:1497/1670 train_time:147170ms step_avg:98.31ms +step:1498/1670 train_time:147267ms step_avg:98.31ms +step:1499/1670 train_time:147365ms step_avg:98.31ms +step:1500/1670 train_time:147462ms step_avg:98.31ms +step:1500/1670 val_loss:3.3106 train_time:147559ms step_avg:98.37ms +step:1501/1670 train_time:147582ms step_avg:98.32ms +step:1502/1670 train_time:147665ms step_avg:98.31ms +step:1503/1670 train_time:147766ms step_avg:98.31ms +step:1504/1670 train_time:147865ms step_avg:98.31ms +step:1505/1670 train_time:147962ms step_avg:98.31ms +step:1506/1670 train_time:148059ms step_avg:98.31ms +step:1507/1670 train_time:148155ms 
step_avg:98.31ms +step:1508/1670 train_time:148252ms step_avg:98.31ms +step:1509/1670 train_time:148349ms step_avg:98.31ms +step:1510/1670 train_time:148447ms step_avg:98.31ms +step:1511/1670 train_time:148547ms step_avg:98.31ms +step:1512/1670 train_time:148648ms step_avg:98.31ms +step:1513/1670 train_time:148748ms step_avg:98.31ms +step:1514/1670 train_time:148847ms step_avg:98.31ms +step:1515/1670 train_time:148945ms step_avg:98.31ms +step:1516/1670 train_time:149044ms step_avg:98.31ms +step:1517/1670 train_time:149141ms step_avg:98.31ms +step:1518/1670 train_time:149238ms step_avg:98.31ms +step:1519/1670 train_time:149335ms step_avg:98.31ms +step:1520/1670 train_time:149432ms step_avg:98.31ms +step:1521/1670 train_time:149530ms step_avg:98.31ms +step:1522/1670 train_time:149629ms step_avg:98.31ms +step:1523/1670 train_time:149729ms step_avg:98.31ms +step:1524/1670 train_time:149828ms step_avg:98.31ms +step:1525/1670 train_time:149927ms step_avg:98.31ms +step:1526/1670 train_time:150025ms step_avg:98.31ms +step:1527/1670 train_time:150124ms step_avg:98.31ms +step:1528/1670 train_time:150221ms step_avg:98.31ms +step:1529/1670 train_time:150319ms step_avg:98.31ms +step:1530/1670 train_time:150416ms step_avg:98.31ms +step:1531/1670 train_time:150513ms step_avg:98.31ms +step:1532/1670 train_time:150611ms step_avg:98.31ms +step:1533/1670 train_time:150710ms step_avg:98.31ms +step:1534/1670 train_time:150810ms step_avg:98.31ms +step:1535/1670 train_time:150908ms step_avg:98.31ms +step:1536/1670 train_time:151006ms step_avg:98.31ms +step:1537/1670 train_time:151104ms step_avg:98.31ms +step:1538/1670 train_time:151204ms step_avg:98.31ms +step:1539/1670 train_time:151302ms step_avg:98.31ms +step:1540/1670 train_time:151400ms step_avg:98.31ms +step:1541/1670 train_time:151499ms step_avg:98.31ms +step:1542/1670 train_time:151597ms step_avg:98.31ms +step:1543/1670 train_time:151695ms step_avg:98.31ms +step:1544/1670 train_time:151792ms step_avg:98.31ms +step:1545/1670 train_time:151889ms step_avg:98.31ms +step:1546/1670 train_time:151987ms step_avg:98.31ms +step:1547/1670 train_time:152085ms step_avg:98.31ms +step:1548/1670 train_time:152184ms step_avg:98.31ms +step:1549/1670 train_time:152282ms step_avg:98.31ms +step:1550/1670 train_time:152380ms step_avg:98.31ms +step:1551/1670 train_time:152478ms step_avg:98.31ms +step:1552/1670 train_time:152577ms step_avg:98.31ms +step:1553/1670 train_time:152674ms step_avg:98.31ms +step:1554/1670 train_time:152772ms step_avg:98.31ms +step:1555/1670 train_time:152870ms step_avg:98.31ms +step:1556/1670 train_time:152968ms step_avg:98.31ms +step:1557/1670 train_time:153066ms step_avg:98.31ms +step:1558/1670 train_time:153165ms step_avg:98.31ms +step:1559/1670 train_time:153264ms step_avg:98.31ms +step:1560/1670 train_time:153362ms step_avg:98.31ms +step:1561/1670 train_time:153461ms step_avg:98.31ms +step:1562/1670 train_time:153560ms step_avg:98.31ms +step:1563/1670 train_time:153660ms step_avg:98.31ms +step:1564/1670 train_time:153760ms step_avg:98.31ms +step:1565/1670 train_time:153858ms step_avg:98.31ms +step:1566/1670 train_time:153956ms step_avg:98.31ms +step:1567/1670 train_time:154052ms step_avg:98.31ms +step:1568/1670 train_time:154149ms step_avg:98.31ms +step:1569/1670 train_time:154247ms step_avg:98.31ms +step:1570/1670 train_time:154345ms step_avg:98.31ms +step:1571/1670 train_time:154443ms step_avg:98.31ms +step:1572/1670 train_time:154542ms step_avg:98.31ms +step:1573/1670 train_time:154642ms step_avg:98.31ms +step:1574/1670 train_time:154742ms 
step_avg:98.31ms +step:1575/1670 train_time:154840ms step_avg:98.31ms +step:1576/1670 train_time:154939ms step_avg:98.31ms +step:1577/1670 train_time:155037ms step_avg:98.31ms +step:1578/1670 train_time:155134ms step_avg:98.31ms +step:1579/1670 train_time:155231ms step_avg:98.31ms +step:1580/1670 train_time:155329ms step_avg:98.31ms +step:1581/1670 train_time:155428ms step_avg:98.31ms +step:1582/1670 train_time:155527ms step_avg:98.31ms +step:1583/1670 train_time:155627ms step_avg:98.31ms +step:1584/1670 train_time:155727ms step_avg:98.31ms +step:1585/1670 train_time:155827ms step_avg:98.31ms +step:1586/1670 train_time:155926ms step_avg:98.31ms +step:1587/1670 train_time:156025ms step_avg:98.31ms +step:1588/1670 train_time:156126ms step_avg:98.32ms +step:1589/1670 train_time:156225ms step_avg:98.32ms +step:1590/1670 train_time:156322ms step_avg:98.32ms +step:1591/1670 train_time:156421ms step_avg:98.32ms +step:1592/1670 train_time:156519ms step_avg:98.32ms +step:1593/1670 train_time:156617ms step_avg:98.32ms +step:1594/1670 train_time:156715ms step_avg:98.32ms +step:1595/1670 train_time:156813ms step_avg:98.32ms +step:1596/1670 train_time:156910ms step_avg:98.31ms +step:1597/1670 train_time:157009ms step_avg:98.32ms +step:1598/1670 train_time:157108ms step_avg:98.32ms +step:1599/1670 train_time:157207ms step_avg:98.32ms +step:1600/1670 train_time:157305ms step_avg:98.32ms +step:1601/1670 train_time:157404ms step_avg:98.32ms +step:1602/1670 train_time:157503ms step_avg:98.32ms +step:1603/1670 train_time:157601ms step_avg:98.32ms +step:1604/1670 train_time:157699ms step_avg:98.32ms +step:1605/1670 train_time:157798ms step_avg:98.32ms +step:1606/1670 train_time:157895ms step_avg:98.32ms +step:1607/1670 train_time:157993ms step_avg:98.32ms +step:1608/1670 train_time:158090ms step_avg:98.31ms +step:1609/1670 train_time:158188ms step_avg:98.31ms +step:1610/1670 train_time:158287ms step_avg:98.32ms +step:1611/1670 train_time:158386ms step_avg:98.32ms +step:1612/1670 train_time:158485ms step_avg:98.32ms +step:1613/1670 train_time:158583ms step_avg:98.32ms +step:1614/1670 train_time:158683ms step_avg:98.32ms +step:1615/1670 train_time:158783ms step_avg:98.32ms +step:1616/1670 train_time:158883ms step_avg:98.32ms +step:1617/1670 train_time:158983ms step_avg:98.32ms +step:1618/1670 train_time:159082ms step_avg:98.32ms +step:1619/1670 train_time:159181ms step_avg:98.32ms +step:1620/1670 train_time:159279ms step_avg:98.32ms +step:1621/1670 train_time:159376ms step_avg:98.32ms +step:1622/1670 train_time:159474ms step_avg:98.32ms +step:1623/1670 train_time:159571ms step_avg:98.32ms +step:1624/1670 train_time:159670ms step_avg:98.32ms +step:1625/1670 train_time:159769ms step_avg:98.32ms +step:1625/1670 val_loss:3.2839 train_time:159868ms step_avg:98.38ms +step:1626/1670 train_time:159893ms step_avg:98.33ms +step:1627/1670 train_time:159977ms step_avg:98.33ms +step:1628/1670 train_time:160077ms step_avg:98.33ms +step:1629/1670 train_time:160177ms step_avg:98.33ms +step:1630/1670 train_time:160275ms step_avg:98.33ms +step:1631/1670 train_time:160372ms step_avg:98.33ms +step:1632/1670 train_time:160468ms step_avg:98.33ms +step:1633/1670 train_time:160564ms step_avg:98.32ms +step:1634/1670 train_time:160661ms step_avg:98.32ms +step:1635/1670 train_time:160759ms step_avg:98.32ms +step:1636/1670 train_time:160858ms step_avg:98.32ms +step:1637/1670 train_time:160960ms step_avg:98.33ms +step:1638/1670 train_time:161060ms step_avg:98.33ms +step:1639/1670 train_time:161159ms step_avg:98.33ms +step:1640/1670 
train_time:161257ms step_avg:98.33ms +step:1641/1670 train_time:161354ms step_avg:98.33ms +step:1642/1670 train_time:161451ms step_avg:98.33ms +step:1643/1670 train_time:161549ms step_avg:98.33ms +step:1644/1670 train_time:161647ms step_avg:98.33ms +step:1645/1670 train_time:161744ms step_avg:98.32ms +step:1646/1670 train_time:161841ms step_avg:98.32ms +step:1647/1670 train_time:161940ms step_avg:98.32ms +step:1648/1670 train_time:162041ms step_avg:98.33ms +step:1649/1670 train_time:162139ms step_avg:98.33ms +step:1650/1670 train_time:162237ms step_avg:98.33ms +step:1651/1670 train_time:162336ms step_avg:98.33ms +step:1652/1670 train_time:162433ms step_avg:98.32ms +step:1653/1670 train_time:162531ms step_avg:98.32ms +step:1654/1670 train_time:162628ms step_avg:98.32ms +step:1655/1670 train_time:162726ms step_avg:98.32ms +step:1656/1670 train_time:162824ms step_avg:98.32ms +step:1657/1670 train_time:162923ms step_avg:98.32ms +step:1658/1670 train_time:163021ms step_avg:98.32ms +step:1659/1670 train_time:163119ms step_avg:98.32ms +step:1660/1670 train_time:163217ms step_avg:98.32ms +step:1661/1670 train_time:163315ms step_avg:98.32ms +step:1662/1670 train_time:163414ms step_avg:98.32ms +step:1663/1670 train_time:163513ms step_avg:98.32ms +step:1664/1670 train_time:163612ms step_avg:98.32ms +step:1665/1670 train_time:163711ms step_avg:98.32ms +step:1666/1670 train_time:163809ms step_avg:98.32ms +step:1667/1670 train_time:163908ms step_avg:98.33ms +step:1668/1670 train_time:164007ms step_avg:98.33ms +step:1669/1670 train_time:164106ms step_avg:98.33ms +step:1670/1670 train_time:164204ms step_avg:98.33ms +step:1670/1670 val_loss:3.2760 train_time:164301ms step_avg:98.38ms +peak memory allocated: 34000 MiB reserved: 49796 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_c753588f-47c7-4107-9087-3c5da90cc0f4.txt b/records/090525_SkipMLPBlocks/comparison_c753588f-47c7-4107-9087-3c5da90cc0f4.txt new file mode 100644 index 000000000..c6951e93c --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_c753588f-47c7-4107-9087-3c5da90cc0f4.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, 
dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + 
c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels 
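+    # Reference semantics (rough sketch, illustration only): per the ns_line_2 docstring
+    # below, this kernel fuses the second Newton-Schulz line into a single pass. In plain
+    # PyTorch, for a square symmetric A, the same result would be roughly:
+    #     alpha * (A @ A.mT) + beta * A
+    # Fusing the beta * A add here saves a separate elementwise pass, and because A (and
+    # hence the output) is symmetric, each program computes one block of C and stores it
+    # together with its mirror across the diagonal.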
+ pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
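+        # Rough sketch of the sharding scheme (illustration, not a drop-in implementation):
+        # within each group of same-shaped parameters, rank r owns params[base_i + r].
+        # For each owned parameter p with rank-averaged gradient g, the update is roughly:
+        #     buf = momentum * buf + (1 - momentum) * g                       # momentum_buffer.lerp_
+        #     u   = newton_schulz_triton((1 - momentum) * g + momentum * buf) # nesterov-style blend
+        #     p   = (1 - lr * wd) * p - lr * max(1, rows / cols) ** 0.5 * u
+        # Gradients reach the owner via reduce_scatter (AVG) and updated weights are
+        # redistributed to all ranks via all_gather, with both phases overlapped as futures.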
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+        assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each
sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if 
master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:23:20 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 43C P0 129W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 43C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 33C P0 120W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 41C P0 122W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 34C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 42C P0 129W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 33C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 56619 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 56620 C /usr/bin/python3 610MiB | +| 0 N/A N/A 56621 C /usr/bin/python3 610MiB | +| 0 N/A N/A 56622 C /usr/bin/python3 610MiB | +| 0 N/A N/A 56623 C /usr/bin/python3 610MiB | +| 0 N/A N/A 56624 C /usr/bin/python3 610MiB | +| 0 N/A N/A 56625 C /usr/bin/python3 610MiB | +| 0 N/A N/A 56626 C /usr/bin/python3 610MiB | +| 1 N/A N/A 56620 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 56621 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 56622 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 56623 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 56624 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 56625 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 56626 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1670 train_time:377ms step_avg:376.56ms +step:2/1670 train_time:397ms step_avg:198.70ms +step:3/1670 train_time:470ms step_avg:156.65ms +step:4/1670 train_time:563ms step_avg:140.83ms +step:5/1670 train_time:658ms step_avg:131.53ms +step:6/1670 train_time:752ms step_avg:125.40ms +step:7/1670 train_time:847ms step_avg:121.00ms +step:8/1670 
train_time:942ms step_avg:117.72ms +step:9/1670 train_time:1194ms step_avg:132.64ms +step:10/1670 train_time:1267ms step_avg:126.71ms +step:11/1670 train_time:1361ms step_avg:123.70ms +step:12/1670 train_time:1455ms step_avg:121.28ms +step:13/1670 train_time:1550ms step_avg:119.25ms +step:14/1670 train_time:1645ms step_avg:117.49ms +step:15/1670 train_time:1740ms step_avg:115.98ms +step:16/1670 train_time:1835ms step_avg:114.69ms +step:17/1670 train_time:1930ms step_avg:113.55ms +step:18/1670 train_time:2025ms step_avg:112.49ms +step:19/1670 train_time:2121ms step_avg:111.62ms +step:20/1670 train_time:2222ms step_avg:111.12ms +step:21/1670 train_time:2321ms step_avg:110.54ms +step:22/1670 train_time:2418ms step_avg:109.92ms +step:23/1670 train_time:2515ms step_avg:109.35ms +step:24/1670 train_time:2611ms step_avg:108.77ms +step:25/1670 train_time:2705ms step_avg:108.20ms +step:26/1670 train_time:2800ms step_avg:107.71ms +step:27/1670 train_time:2896ms step_avg:107.27ms +step:28/1670 train_time:2991ms step_avg:106.81ms +step:29/1670 train_time:3086ms step_avg:106.41ms +step:30/1670 train_time:3182ms step_avg:106.07ms +step:31/1670 train_time:3279ms step_avg:105.77ms +step:32/1670 train_time:3376ms step_avg:105.49ms +step:33/1670 train_time:3473ms step_avg:105.23ms +step:34/1670 train_time:3568ms step_avg:104.95ms +step:35/1670 train_time:3663ms step_avg:104.67ms +step:36/1670 train_time:3759ms step_avg:104.42ms +step:37/1670 train_time:3855ms step_avg:104.18ms +step:38/1670 train_time:3949ms step_avg:103.93ms +step:39/1670 train_time:4045ms step_avg:103.71ms +step:40/1670 train_time:4140ms step_avg:103.51ms +step:41/1670 train_time:4238ms step_avg:103.36ms +step:42/1670 train_time:4332ms step_avg:103.15ms +step:43/1670 train_time:4428ms step_avg:102.98ms +step:44/1670 train_time:4524ms step_avg:102.82ms +step:45/1670 train_time:4620ms step_avg:102.67ms +step:46/1670 train_time:4717ms step_avg:102.54ms +step:47/1670 train_time:4813ms step_avg:102.41ms +step:48/1670 train_time:4909ms step_avg:102.27ms +step:49/1670 train_time:5004ms step_avg:102.12ms +step:50/1670 train_time:5100ms step_avg:102.00ms +step:51/1670 train_time:5196ms step_avg:101.88ms +step:52/1670 train_time:5291ms step_avg:101.75ms +step:53/1670 train_time:5387ms step_avg:101.65ms +step:54/1670 train_time:5483ms step_avg:101.55ms +step:55/1670 train_time:5579ms step_avg:101.44ms +step:56/1670 train_time:5675ms step_avg:101.34ms +step:57/1670 train_time:5771ms step_avg:101.24ms +step:58/1670 train_time:5866ms step_avg:101.14ms +step:59/1670 train_time:5961ms step_avg:101.04ms +step:60/1670 train_time:6057ms step_avg:100.96ms +step:61/1670 train_time:6153ms step_avg:100.87ms +step:62/1670 train_time:6249ms step_avg:100.78ms +step:63/1670 train_time:6344ms step_avg:100.70ms +step:64/1670 train_time:6439ms step_avg:100.61ms +step:65/1670 train_time:6535ms step_avg:100.54ms +step:66/1670 train_time:6631ms step_avg:100.47ms +step:67/1670 train_time:6727ms step_avg:100.40ms +step:68/1670 train_time:6823ms step_avg:100.34ms +step:69/1670 train_time:6918ms step_avg:100.26ms +step:70/1670 train_time:7015ms step_avg:100.21ms +step:71/1670 train_time:7111ms step_avg:100.15ms +step:72/1670 train_time:7206ms step_avg:100.08ms +step:73/1670 train_time:7301ms step_avg:100.02ms +step:74/1670 train_time:7398ms step_avg:99.97ms +step:75/1670 train_time:7494ms step_avg:99.92ms +step:76/1670 train_time:7590ms step_avg:99.87ms +step:77/1670 train_time:7685ms step_avg:99.81ms +step:78/1670 train_time:7781ms step_avg:99.76ms +step:79/1670 
train_time:7877ms step_avg:99.71ms +step:80/1670 train_time:7973ms step_avg:99.66ms +step:81/1670 train_time:8068ms step_avg:99.61ms +step:82/1670 train_time:8163ms step_avg:99.55ms +step:83/1670 train_time:8259ms step_avg:99.51ms +step:84/1670 train_time:8355ms step_avg:99.47ms +step:85/1670 train_time:8451ms step_avg:99.43ms +step:86/1670 train_time:8546ms step_avg:99.37ms +step:87/1670 train_time:8642ms step_avg:99.33ms +step:88/1670 train_time:8737ms step_avg:99.29ms +step:89/1670 train_time:8833ms step_avg:99.25ms +step:90/1670 train_time:8928ms step_avg:99.21ms +step:91/1670 train_time:9024ms step_avg:99.16ms +step:92/1670 train_time:9119ms step_avg:99.12ms +step:93/1670 train_time:9215ms step_avg:99.09ms +step:94/1670 train_time:9311ms step_avg:99.05ms +step:95/1670 train_time:9406ms step_avg:99.01ms +step:96/1670 train_time:9502ms step_avg:98.97ms +step:97/1670 train_time:9598ms step_avg:98.95ms +step:98/1670 train_time:9695ms step_avg:98.93ms +step:99/1670 train_time:9791ms step_avg:98.90ms +step:100/1670 train_time:9887ms step_avg:98.87ms +step:101/1670 train_time:9982ms step_avg:98.83ms +step:102/1670 train_time:10078ms step_avg:98.80ms +step:103/1670 train_time:10174ms step_avg:98.77ms +step:104/1670 train_time:10269ms step_avg:98.74ms +step:105/1670 train_time:10364ms step_avg:98.71ms +step:106/1670 train_time:10460ms step_avg:98.68ms +step:107/1670 train_time:10556ms step_avg:98.66ms +step:108/1670 train_time:10652ms step_avg:98.63ms +step:109/1670 train_time:10748ms step_avg:98.61ms +step:110/1670 train_time:10843ms step_avg:98.57ms +step:111/1670 train_time:10939ms step_avg:98.55ms +step:112/1670 train_time:11035ms step_avg:98.52ms +step:113/1670 train_time:11131ms step_avg:98.50ms +step:114/1670 train_time:11226ms step_avg:98.48ms +step:115/1670 train_time:11322ms step_avg:98.45ms +step:116/1670 train_time:11418ms step_avg:98.43ms +step:117/1670 train_time:11515ms step_avg:98.41ms +step:118/1670 train_time:11610ms step_avg:98.39ms +step:119/1670 train_time:11705ms step_avg:98.37ms +step:120/1670 train_time:11801ms step_avg:98.34ms +step:121/1670 train_time:11897ms step_avg:98.32ms +step:122/1670 train_time:11993ms step_avg:98.30ms +step:123/1670 train_time:12089ms step_avg:98.28ms +step:124/1670 train_time:12184ms step_avg:98.26ms +step:125/1670 train_time:12279ms step_avg:98.23ms +step:125/1670 val_loss:4.3009 train_time:12375ms step_avg:99.00ms +step:126/1670 train_time:12397ms step_avg:98.39ms +step:127/1670 train_time:12481ms step_avg:98.28ms +step:128/1670 train_time:12586ms step_avg:98.33ms +step:129/1670 train_time:12683ms step_avg:98.32ms +step:130/1670 train_time:12778ms step_avg:98.29ms +step:131/1670 train_time:12873ms step_avg:98.27ms +step:132/1670 train_time:12967ms step_avg:98.24ms +step:133/1670 train_time:13062ms step_avg:98.21ms +step:134/1670 train_time:13156ms step_avg:98.18ms +step:135/1670 train_time:13251ms step_avg:98.15ms +step:136/1670 train_time:13345ms step_avg:98.13ms +step:137/1670 train_time:13441ms step_avg:98.11ms +step:138/1670 train_time:13540ms step_avg:98.11ms +step:139/1670 train_time:13637ms step_avg:98.11ms +step:140/1670 train_time:13733ms step_avg:98.10ms +step:141/1670 train_time:13829ms step_avg:98.08ms +step:142/1670 train_time:13923ms step_avg:98.05ms +step:143/1670 train_time:14018ms step_avg:98.03ms +step:144/1670 train_time:14113ms step_avg:98.01ms +step:145/1670 train_time:14208ms step_avg:97.99ms +step:146/1670 train_time:14302ms step_avg:97.96ms +step:147/1670 train_time:14398ms step_avg:97.94ms +step:148/1670 
train_time:14495ms step_avg:97.94ms +step:149/1670 train_time:14592ms step_avg:97.94ms +step:150/1670 train_time:14689ms step_avg:97.93ms +step:151/1670 train_time:14784ms step_avg:97.91ms +step:152/1670 train_time:14879ms step_avg:97.89ms +step:153/1670 train_time:14976ms step_avg:97.88ms +step:154/1670 train_time:15070ms step_avg:97.86ms +step:155/1670 train_time:15165ms step_avg:97.84ms +step:156/1670 train_time:15808ms step_avg:101.33ms +step:157/1670 train_time:15881ms step_avg:101.15ms +step:158/1670 train_time:15974ms step_avg:101.10ms +step:159/1670 train_time:16069ms step_avg:101.06ms +step:160/1670 train_time:16164ms step_avg:101.02ms +step:161/1670 train_time:16258ms step_avg:100.98ms +step:162/1670 train_time:16353ms step_avg:100.94ms +step:163/1670 train_time:16447ms step_avg:100.90ms +step:164/1670 train_time:16542ms step_avg:100.86ms +step:165/1670 train_time:16637ms step_avg:100.83ms +step:166/1670 train_time:16733ms step_avg:100.80ms +step:167/1670 train_time:16833ms step_avg:100.80ms +step:168/1670 train_time:16929ms step_avg:100.77ms +step:169/1670 train_time:17025ms step_avg:100.74ms +step:170/1670 train_time:17120ms step_avg:100.71ms +step:171/1670 train_time:17215ms step_avg:100.67ms +step:172/1670 train_time:17311ms step_avg:100.64ms +step:173/1670 train_time:17405ms step_avg:100.61ms +step:174/1670 train_time:17500ms step_avg:100.57ms +step:175/1670 train_time:17595ms step_avg:100.54ms +step:176/1670 train_time:17690ms step_avg:100.51ms +step:177/1670 train_time:17785ms step_avg:100.48ms +step:178/1670 train_time:17883ms step_avg:100.47ms +step:179/1670 train_time:17979ms step_avg:100.44ms +step:180/1670 train_time:18075ms step_avg:100.42ms +step:181/1670 train_time:18170ms step_avg:100.39ms +step:182/1670 train_time:18266ms step_avg:100.36ms +step:183/1670 train_time:18361ms step_avg:100.33ms +step:184/1670 train_time:18456ms step_avg:100.31ms +step:185/1670 train_time:18551ms step_avg:100.27ms +step:186/1670 train_time:18646ms step_avg:100.25ms +step:187/1670 train_time:18742ms step_avg:100.22ms +step:188/1670 train_time:18838ms step_avg:100.20ms +step:189/1670 train_time:18934ms step_avg:100.18ms +step:190/1670 train_time:19029ms step_avg:100.15ms +step:191/1670 train_time:19124ms step_avg:100.12ms +step:192/1670 train_time:19219ms step_avg:100.10ms +step:193/1670 train_time:19315ms step_avg:100.08ms +step:194/1670 train_time:19410ms step_avg:100.05ms +step:195/1670 train_time:19505ms step_avg:100.03ms +step:196/1670 train_time:19601ms step_avg:100.01ms +step:197/1670 train_time:19697ms step_avg:99.98ms +step:198/1670 train_time:19792ms step_avg:99.96ms +step:199/1670 train_time:19889ms step_avg:99.94ms +step:200/1670 train_time:19984ms step_avg:99.92ms +step:201/1670 train_time:20079ms step_avg:99.90ms +step:202/1670 train_time:20176ms step_avg:99.88ms +step:203/1670 train_time:20271ms step_avg:99.86ms +step:204/1670 train_time:20366ms step_avg:99.83ms +step:205/1670 train_time:20462ms step_avg:99.81ms +step:206/1670 train_time:20557ms step_avg:99.79ms +step:207/1670 train_time:20652ms step_avg:99.77ms +step:208/1670 train_time:20748ms step_avg:99.75ms +step:209/1670 train_time:20843ms step_avg:99.73ms +step:210/1670 train_time:20938ms step_avg:99.71ms +step:211/1670 train_time:21034ms step_avg:99.69ms +step:212/1670 train_time:21129ms step_avg:99.67ms +step:213/1670 train_time:21417ms step_avg:100.55ms +step:214/1670 train_time:21512ms step_avg:100.52ms +step:215/1670 train_time:21606ms step_avg:100.49ms +step:216/1670 train_time:21700ms step_avg:100.46ms 
+step:217/1670 train_time:21795ms step_avg:100.44ms +step:218/1670 train_time:21890ms step_avg:100.41ms +step:219/1670 train_time:21984ms step_avg:100.38ms +step:220/1670 train_time:22079ms step_avg:100.36ms +step:221/1670 train_time:22174ms step_avg:100.33ms +step:222/1670 train_time:22269ms step_avg:100.31ms +step:223/1670 train_time:22366ms step_avg:100.30ms +step:224/1670 train_time:22464ms step_avg:100.29ms +step:225/1670 train_time:22562ms step_avg:100.28ms +step:226/1670 train_time:22658ms step_avg:100.26ms +step:227/1670 train_time:22753ms step_avg:100.23ms +step:228/1670 train_time:22848ms step_avg:100.21ms +step:229/1670 train_time:22943ms step_avg:100.19ms +step:230/1670 train_time:23038ms step_avg:100.16ms +step:231/1670 train_time:23132ms step_avg:100.14ms +step:232/1670 train_time:23227ms step_avg:100.12ms +step:233/1670 train_time:23322ms step_avg:100.10ms +step:234/1670 train_time:23419ms step_avg:100.08ms +step:235/1670 train_time:23517ms step_avg:100.07ms +step:236/1670 train_time:23613ms step_avg:100.06ms +step:237/1670 train_time:23709ms step_avg:100.04ms +step:238/1670 train_time:23803ms step_avg:100.01ms +step:239/1670 train_time:23898ms step_avg:99.99ms +step:240/1670 train_time:23993ms step_avg:99.97ms +step:241/1670 train_time:24088ms step_avg:99.95ms +step:242/1670 train_time:24182ms step_avg:99.93ms +step:243/1670 train_time:24278ms step_avg:99.91ms +step:244/1670 train_time:24375ms step_avg:99.90ms +step:245/1670 train_time:24472ms step_avg:99.88ms +step:246/1670 train_time:24567ms step_avg:99.86ms +step:247/1670 train_time:24662ms step_avg:99.85ms +step:248/1670 train_time:24758ms step_avg:99.83ms +step:249/1670 train_time:24854ms step_avg:99.81ms +step:250/1670 train_time:24948ms step_avg:99.79ms +step:250/1670 val_loss:3.9641 train_time:25042ms step_avg:100.17ms +step:251/1670 train_time:25065ms step_avg:99.86ms +step:252/1670 train_time:25145ms step_avg:99.78ms +step:253/1670 train_time:25244ms step_avg:99.78ms +step:254/1670 train_time:25340ms step_avg:99.76ms +step:255/1670 train_time:25435ms step_avg:99.75ms +step:256/1670 train_time:25530ms step_avg:99.73ms +step:257/1670 train_time:25624ms step_avg:99.71ms +step:258/1670 train_time:25719ms step_avg:99.69ms +step:259/1670 train_time:25813ms step_avg:99.67ms +step:260/1670 train_time:25909ms step_avg:99.65ms +step:261/1670 train_time:26004ms step_avg:99.63ms +step:262/1670 train_time:26101ms step_avg:99.62ms +step:263/1670 train_time:26197ms step_avg:99.61ms +step:264/1670 train_time:26294ms step_avg:99.60ms +step:265/1670 train_time:26391ms step_avg:99.59ms +step:266/1670 train_time:26486ms step_avg:99.57ms +step:267/1670 train_time:26581ms step_avg:99.55ms +step:268/1670 train_time:26675ms step_avg:99.54ms +step:269/1670 train_time:26770ms step_avg:99.52ms +step:270/1670 train_time:26866ms step_avg:99.50ms +step:271/1670 train_time:26960ms step_avg:99.48ms +step:272/1670 train_time:27056ms step_avg:99.47ms +step:273/1670 train_time:27152ms step_avg:99.46ms +step:274/1670 train_time:27250ms step_avg:99.45ms +step:275/1670 train_time:27347ms step_avg:99.44ms +step:276/1670 train_time:27443ms step_avg:99.43ms +step:277/1670 train_time:27538ms step_avg:99.41ms +step:278/1670 train_time:27632ms step_avg:99.40ms +step:279/1670 train_time:27727ms step_avg:99.38ms +step:280/1670 train_time:27822ms step_avg:99.37ms +step:281/1670 train_time:27917ms step_avg:99.35ms +step:282/1670 train_time:28011ms step_avg:99.33ms +step:283/1670 train_time:28108ms step_avg:99.32ms +step:284/1670 train_time:28205ms 
step_avg:99.31ms +step:285/1670 train_time:28301ms step_avg:99.30ms +step:286/1670 train_time:28396ms step_avg:99.29ms +step:287/1670 train_time:28492ms step_avg:99.28ms +step:288/1670 train_time:28588ms step_avg:99.26ms +step:289/1670 train_time:28683ms step_avg:99.25ms +step:290/1670 train_time:28778ms step_avg:99.23ms +step:291/1670 train_time:28873ms step_avg:99.22ms +step:292/1670 train_time:28967ms step_avg:99.20ms +step:293/1670 train_time:29062ms step_avg:99.19ms +step:294/1670 train_time:29157ms step_avg:99.17ms +step:295/1670 train_time:29253ms step_avg:99.16ms +step:296/1670 train_time:29350ms step_avg:99.15ms +step:297/1670 train_time:29446ms step_avg:99.15ms +step:298/1670 train_time:29542ms step_avg:99.13ms +step:299/1670 train_time:29637ms step_avg:99.12ms +step:300/1670 train_time:29732ms step_avg:99.11ms +step:301/1670 train_time:29828ms step_avg:99.10ms +step:302/1670 train_time:29924ms step_avg:99.08ms +step:303/1670 train_time:30019ms step_avg:99.07ms +step:304/1670 train_time:30114ms step_avg:99.06ms +step:305/1670 train_time:30210ms step_avg:99.05ms +step:306/1670 train_time:30305ms step_avg:99.04ms +step:307/1670 train_time:30401ms step_avg:99.03ms +step:308/1670 train_time:30496ms step_avg:99.01ms +step:309/1670 train_time:30592ms step_avg:99.00ms +step:310/1670 train_time:30687ms step_avg:98.99ms +step:311/1670 train_time:30782ms step_avg:98.98ms +step:312/1670 train_time:30877ms step_avg:98.96ms +step:313/1670 train_time:30972ms step_avg:98.95ms +step:314/1670 train_time:31068ms step_avg:98.94ms +step:315/1670 train_time:31163ms step_avg:98.93ms +step:316/1670 train_time:31259ms step_avg:98.92ms +step:317/1670 train_time:31354ms step_avg:98.91ms +step:318/1670 train_time:31450ms step_avg:98.90ms +step:319/1670 train_time:31547ms step_avg:98.89ms +step:320/1670 train_time:31642ms step_avg:98.88ms +step:321/1670 train_time:31738ms step_avg:98.87ms +step:322/1670 train_time:31834ms step_avg:98.86ms +step:323/1670 train_time:31929ms step_avg:98.85ms +step:324/1670 train_time:32024ms step_avg:98.84ms +step:325/1670 train_time:32120ms step_avg:98.83ms +step:326/1670 train_time:32215ms step_avg:98.82ms +step:327/1670 train_time:32310ms step_avg:98.81ms +step:328/1670 train_time:32406ms step_avg:98.80ms +step:329/1670 train_time:32500ms step_avg:98.79ms +step:330/1670 train_time:32595ms step_avg:98.77ms +step:331/1670 train_time:32691ms step_avg:98.76ms +step:332/1670 train_time:32788ms step_avg:98.76ms +step:333/1670 train_time:32883ms step_avg:98.75ms +step:334/1670 train_time:32979ms step_avg:98.74ms +step:335/1670 train_time:33074ms step_avg:98.73ms +step:336/1670 train_time:33169ms step_avg:98.72ms +step:337/1670 train_time:33265ms step_avg:98.71ms +step:338/1670 train_time:33360ms step_avg:98.70ms +step:339/1670 train_time:33455ms step_avg:98.69ms +step:340/1670 train_time:33550ms step_avg:98.68ms +step:341/1670 train_time:33646ms step_avg:98.67ms +step:342/1670 train_time:33742ms step_avg:98.66ms +step:343/1670 train_time:33838ms step_avg:98.65ms +step:344/1670 train_time:33934ms step_avg:98.64ms +step:345/1670 train_time:34029ms step_avg:98.64ms +step:346/1670 train_time:34125ms step_avg:98.63ms +step:347/1670 train_time:34220ms step_avg:98.62ms +step:348/1670 train_time:34315ms step_avg:98.61ms +step:349/1670 train_time:34411ms step_avg:98.60ms +step:350/1670 train_time:34507ms step_avg:98.59ms +step:351/1670 train_time:34602ms step_avg:98.58ms +step:352/1670 train_time:34697ms step_avg:98.57ms +step:353/1670 train_time:34793ms step_avg:98.56ms +step:354/1670 
train_time:34890ms step_avg:98.56ms +step:355/1670 train_time:34986ms step_avg:98.55ms +step:356/1670 train_time:35082ms step_avg:98.54ms +step:357/1670 train_time:35177ms step_avg:98.54ms +step:358/1670 train_time:35273ms step_avg:98.53ms +step:359/1670 train_time:35368ms step_avg:98.52ms +step:360/1670 train_time:35463ms step_avg:98.51ms +step:361/1670 train_time:35558ms step_avg:98.50ms +step:362/1670 train_time:35654ms step_avg:98.49ms +step:363/1670 train_time:35749ms step_avg:98.48ms +step:364/1670 train_time:35845ms step_avg:98.48ms +step:365/1670 train_time:35941ms step_avg:98.47ms +step:366/1670 train_time:36037ms step_avg:98.46ms +step:367/1670 train_time:36133ms step_avg:98.45ms +step:368/1670 train_time:36229ms step_avg:98.45ms +step:369/1670 train_time:36325ms step_avg:98.44ms +step:370/1670 train_time:36420ms step_avg:98.43ms +step:371/1670 train_time:36516ms step_avg:98.42ms +step:372/1670 train_time:36611ms step_avg:98.42ms +step:373/1670 train_time:36707ms step_avg:98.41ms +step:374/1670 train_time:36803ms step_avg:98.40ms +step:375/1670 train_time:36898ms step_avg:98.39ms +step:375/1670 val_loss:3.8163 train_time:36993ms step_avg:98.65ms +step:376/1670 train_time:37015ms step_avg:98.44ms +step:377/1670 train_time:37090ms step_avg:98.38ms +step:378/1670 train_time:37186ms step_avg:98.38ms +step:379/1670 train_time:37288ms step_avg:98.38ms +step:380/1670 train_time:37382ms step_avg:98.37ms +step:381/1670 train_time:37477ms step_avg:98.36ms +step:382/1670 train_time:37571ms step_avg:98.35ms +step:383/1670 train_time:37666ms step_avg:98.34ms +step:384/1670 train_time:37761ms step_avg:98.33ms +step:385/1670 train_time:37854ms step_avg:98.32ms +step:386/1670 train_time:37953ms step_avg:98.32ms +step:387/1670 train_time:38051ms step_avg:98.32ms +step:388/1670 train_time:38146ms step_avg:98.31ms +step:389/1670 train_time:38241ms step_avg:98.31ms +step:390/1670 train_time:38337ms step_avg:98.30ms +step:391/1670 train_time:38432ms step_avg:98.29ms +step:392/1670 train_time:38527ms step_avg:98.28ms +step:393/1670 train_time:38622ms step_avg:98.27ms +step:394/1670 train_time:38717ms step_avg:98.27ms +step:395/1670 train_time:38812ms step_avg:98.26ms +step:396/1670 train_time:38908ms step_avg:98.25ms +step:397/1670 train_time:39004ms step_avg:98.25ms +step:398/1670 train_time:39100ms step_avg:98.24ms +step:399/1670 train_time:39196ms step_avg:98.24ms +step:400/1670 train_time:39293ms step_avg:98.23ms +step:401/1670 train_time:39388ms step_avg:98.23ms +step:402/1670 train_time:39484ms step_avg:98.22ms +step:403/1670 train_time:39578ms step_avg:98.21ms +step:404/1670 train_time:39673ms step_avg:98.20ms +step:405/1670 train_time:39768ms step_avg:98.19ms +step:406/1670 train_time:39863ms step_avg:98.18ms +step:407/1670 train_time:39958ms step_avg:98.18ms +step:408/1670 train_time:40054ms step_avg:98.17ms +step:409/1670 train_time:40150ms step_avg:98.17ms +step:410/1670 train_time:40246ms step_avg:98.16ms +step:411/1670 train_time:40341ms step_avg:98.15ms +step:412/1670 train_time:40437ms step_avg:98.15ms +step:413/1670 train_time:40533ms step_avg:98.14ms +step:414/1670 train_time:40629ms step_avg:98.14ms +step:415/1670 train_time:40724ms step_avg:98.13ms +step:416/1670 train_time:40819ms step_avg:98.12ms +step:417/1670 train_time:40915ms step_avg:98.12ms +step:418/1670 train_time:41011ms step_avg:98.11ms +step:419/1670 train_time:41107ms step_avg:98.11ms +step:420/1670 train_time:41203ms step_avg:98.10ms +step:421/1670 train_time:41298ms step_avg:98.09ms +step:422/1670 train_time:41394ms 
step_avg:98.09ms +step:423/1670 train_time:41489ms step_avg:98.08ms +step:424/1670 train_time:41584ms step_avg:98.08ms +step:425/1670 train_time:41875ms step_avg:98.53ms +step:426/1670 train_time:41994ms step_avg:98.58ms +step:427/1670 train_time:42088ms step_avg:98.57ms +step:428/1670 train_time:42182ms step_avg:98.56ms +step:429/1670 train_time:42277ms step_avg:98.55ms +step:430/1670 train_time:42372ms step_avg:98.54ms +step:431/1670 train_time:42467ms step_avg:98.53ms +step:432/1670 train_time:42561ms step_avg:98.52ms +step:433/1670 train_time:42655ms step_avg:98.51ms +step:434/1670 train_time:42750ms step_avg:98.50ms +step:435/1670 train_time:42849ms step_avg:98.50ms +step:436/1670 train_time:42948ms step_avg:98.51ms +step:437/1670 train_time:43045ms step_avg:98.50ms +step:438/1670 train_time:43140ms step_avg:98.49ms +step:439/1670 train_time:43235ms step_avg:98.49ms +step:440/1670 train_time:43330ms step_avg:98.48ms +step:441/1670 train_time:43425ms step_avg:98.47ms +step:442/1670 train_time:43519ms step_avg:98.46ms +step:443/1670 train_time:43614ms step_avg:98.45ms +step:444/1670 train_time:43709ms step_avg:98.44ms +step:445/1670 train_time:43806ms step_avg:98.44ms +step:446/1670 train_time:43901ms step_avg:98.43ms +step:447/1670 train_time:43999ms step_avg:98.43ms +step:448/1670 train_time:44095ms step_avg:98.43ms +step:449/1670 train_time:44191ms step_avg:98.42ms +step:450/1670 train_time:44287ms step_avg:98.42ms +step:451/1670 train_time:44383ms step_avg:98.41ms +step:452/1670 train_time:44478ms step_avg:98.40ms +step:453/1670 train_time:44573ms step_avg:98.39ms +step:454/1670 train_time:44668ms step_avg:98.39ms +step:455/1670 train_time:44762ms step_avg:98.38ms +step:456/1670 train_time:44858ms step_avg:98.37ms +step:457/1670 train_time:44955ms step_avg:98.37ms +step:458/1670 train_time:45052ms step_avg:98.37ms +step:459/1670 train_time:45147ms step_avg:98.36ms +step:460/1670 train_time:45242ms step_avg:98.35ms +step:461/1670 train_time:45339ms step_avg:98.35ms +step:462/1670 train_time:45434ms step_avg:98.34ms +step:463/1670 train_time:45530ms step_avg:98.34ms +step:464/1670 train_time:45625ms step_avg:98.33ms +step:465/1670 train_time:45719ms step_avg:98.32ms +step:466/1670 train_time:45815ms step_avg:98.31ms +step:467/1670 train_time:45911ms step_avg:98.31ms +step:468/1670 train_time:46007ms step_avg:98.31ms +step:469/1670 train_time:46102ms step_avg:98.30ms +step:470/1670 train_time:46198ms step_avg:98.29ms +step:471/1670 train_time:46294ms step_avg:98.29ms +step:472/1670 train_time:46389ms step_avg:98.28ms +step:473/1670 train_time:46484ms step_avg:98.28ms +step:474/1670 train_time:46579ms step_avg:98.27ms +step:475/1670 train_time:46674ms step_avg:98.26ms +step:476/1670 train_time:46769ms step_avg:98.25ms +step:477/1670 train_time:46864ms step_avg:98.25ms +step:478/1670 train_time:46961ms step_avg:98.24ms +step:479/1670 train_time:47057ms step_avg:98.24ms +step:480/1670 train_time:47153ms step_avg:98.24ms +step:481/1670 train_time:47249ms step_avg:98.23ms +step:482/1670 train_time:47345ms step_avg:98.23ms +step:483/1670 train_time:47440ms step_avg:98.22ms +step:484/1670 train_time:47536ms step_avg:98.21ms +step:485/1670 train_time:47631ms step_avg:98.21ms +step:486/1670 train_time:47727ms step_avg:98.20ms +step:487/1670 train_time:47822ms step_avg:98.20ms +step:488/1670 train_time:47918ms step_avg:98.19ms +step:489/1670 train_time:48013ms step_avg:98.19ms +step:490/1670 train_time:48109ms step_avg:98.18ms +step:491/1670 train_time:48204ms step_avg:98.18ms +step:492/1670 
train_time:48300ms step_avg:98.17ms +step:493/1670 train_time:48395ms step_avg:98.16ms +step:494/1670 train_time:48491ms step_avg:98.16ms +step:495/1670 train_time:48587ms step_avg:98.16ms +step:496/1670 train_time:48683ms step_avg:98.15ms +step:497/1670 train_time:48779ms step_avg:98.15ms +step:498/1670 train_time:48874ms step_avg:98.14ms +step:499/1670 train_time:48970ms step_avg:98.14ms +step:500/1670 train_time:49065ms step_avg:98.13ms +step:500/1670 val_loss:3.7142 train_time:49160ms step_avg:98.32ms +step:501/1670 train_time:49181ms step_avg:98.17ms +step:502/1670 train_time:49263ms step_avg:98.13ms +step:503/1670 train_time:49366ms step_avg:98.14ms +step:504/1670 train_time:49461ms step_avg:98.14ms +step:505/1670 train_time:49557ms step_avg:98.13ms +step:506/1670 train_time:49652ms step_avg:98.13ms +step:507/1670 train_time:49746ms step_avg:98.12ms +step:508/1670 train_time:49841ms step_avg:98.11ms +step:509/1670 train_time:49936ms step_avg:98.11ms +step:510/1670 train_time:50030ms step_avg:98.10ms +step:511/1670 train_time:50125ms step_avg:98.09ms +step:512/1670 train_time:50223ms step_avg:98.09ms +step:513/1670 train_time:50321ms step_avg:98.09ms +step:514/1670 train_time:50417ms step_avg:98.09ms +step:515/1670 train_time:50512ms step_avg:98.08ms +step:516/1670 train_time:50608ms step_avg:98.08ms +step:517/1670 train_time:50704ms step_avg:98.07ms +step:518/1670 train_time:50798ms step_avg:98.07ms +step:519/1670 train_time:50893ms step_avg:98.06ms +step:520/1670 train_time:50988ms step_avg:98.05ms +step:521/1670 train_time:51083ms step_avg:98.05ms +step:522/1670 train_time:51179ms step_avg:98.04ms +step:523/1670 train_time:51276ms step_avg:98.04ms +step:524/1670 train_time:51372ms step_avg:98.04ms +step:525/1670 train_time:51469ms step_avg:98.04ms +step:526/1670 train_time:51565ms step_avg:98.03ms +step:527/1670 train_time:51660ms step_avg:98.03ms +step:528/1670 train_time:51755ms step_avg:98.02ms +step:529/1670 train_time:51850ms step_avg:98.02ms +step:530/1670 train_time:51945ms step_avg:98.01ms +step:531/1670 train_time:52041ms step_avg:98.00ms +step:532/1670 train_time:52135ms step_avg:98.00ms +step:533/1670 train_time:52231ms step_avg:97.99ms +step:534/1670 train_time:52327ms step_avg:97.99ms +step:535/1670 train_time:52424ms step_avg:97.99ms +step:536/1670 train_time:52520ms step_avg:97.99ms +step:537/1670 train_time:52617ms step_avg:97.98ms +step:538/1670 train_time:52712ms step_avg:97.98ms +step:539/1670 train_time:52807ms step_avg:97.97ms +step:540/1670 train_time:52902ms step_avg:97.97ms +step:541/1670 train_time:52997ms step_avg:97.96ms +step:542/1670 train_time:53092ms step_avg:97.96ms +step:543/1670 train_time:53187ms step_avg:97.95ms +step:544/1670 train_time:53283ms step_avg:97.95ms +step:545/1670 train_time:53380ms step_avg:97.94ms +step:546/1670 train_time:53476ms step_avg:97.94ms +step:547/1670 train_time:53572ms step_avg:97.94ms +step:548/1670 train_time:53668ms step_avg:97.93ms +step:549/1670 train_time:53765ms step_avg:97.93ms +step:550/1670 train_time:53859ms step_avg:97.93ms +step:551/1670 train_time:53955ms step_avg:97.92ms +step:552/1670 train_time:54050ms step_avg:97.92ms +step:553/1670 train_time:54145ms step_avg:97.91ms +step:554/1670 train_time:54241ms step_avg:97.91ms +step:555/1670 train_time:54336ms step_avg:97.90ms +step:556/1670 train_time:54432ms step_avg:97.90ms +step:557/1670 train_time:54529ms step_avg:97.90ms +step:558/1670 train_time:54625ms step_avg:97.89ms +step:559/1670 train_time:54721ms step_avg:97.89ms +step:560/1670 train_time:54818ms 
step_avg:97.89ms +step:561/1670 train_time:54915ms step_avg:97.89ms +step:562/1670 train_time:55011ms step_avg:97.88ms +step:563/1670 train_time:55107ms step_avg:97.88ms +step:564/1670 train_time:55204ms step_avg:97.88ms +step:565/1670 train_time:55301ms step_avg:97.88ms +step:566/1670 train_time:55399ms step_avg:97.88ms +step:567/1670 train_time:55496ms step_avg:97.88ms +step:568/1670 train_time:55593ms step_avg:97.87ms +step:569/1670 train_time:55690ms step_avg:97.87ms +step:570/1670 train_time:55787ms step_avg:97.87ms +step:571/1670 train_time:55886ms step_avg:97.87ms +step:572/1670 train_time:55983ms step_avg:97.87ms +step:573/1670 train_time:56079ms step_avg:97.87ms +step:574/1670 train_time:56176ms step_avg:97.87ms +step:575/1670 train_time:56273ms step_avg:97.87ms +step:576/1670 train_time:56370ms step_avg:97.87ms +step:577/1670 train_time:56468ms step_avg:97.87ms +step:578/1670 train_time:56566ms step_avg:97.87ms +step:579/1670 train_time:56664ms step_avg:97.86ms +step:580/1670 train_time:56761ms step_avg:97.86ms +step:581/1670 train_time:56859ms step_avg:97.86ms +step:582/1670 train_time:56957ms step_avg:97.86ms +step:583/1670 train_time:57053ms step_avg:97.86ms +step:584/1670 train_time:57150ms step_avg:97.86ms +step:585/1670 train_time:57247ms step_avg:97.86ms +step:586/1670 train_time:57344ms step_avg:97.86ms +step:587/1670 train_time:57441ms step_avg:97.85ms +step:588/1670 train_time:57538ms step_avg:97.85ms +step:589/1670 train_time:57634ms step_avg:97.85ms +step:590/1670 train_time:57731ms step_avg:97.85ms +step:591/1670 train_time:57829ms step_avg:97.85ms +step:592/1670 train_time:57927ms step_avg:97.85ms +step:593/1670 train_time:58025ms step_avg:97.85ms +step:594/1670 train_time:58123ms step_avg:97.85ms +step:595/1670 train_time:58221ms step_avg:97.85ms +step:596/1670 train_time:58318ms step_avg:97.85ms +step:597/1670 train_time:58415ms step_avg:97.85ms +step:598/1670 train_time:58512ms step_avg:97.85ms +step:599/1670 train_time:58609ms step_avg:97.84ms +step:600/1670 train_time:58706ms step_avg:97.84ms +step:601/1670 train_time:58804ms step_avg:97.84ms +step:602/1670 train_time:58903ms step_avg:97.84ms +step:603/1670 train_time:58999ms step_avg:97.84ms +step:604/1670 train_time:59095ms step_avg:97.84ms +step:605/1670 train_time:59192ms step_avg:97.84ms +step:606/1670 train_time:59289ms step_avg:97.84ms +step:607/1670 train_time:59387ms step_avg:97.84ms +step:608/1670 train_time:59485ms step_avg:97.84ms +step:609/1670 train_time:59583ms step_avg:97.84ms +step:610/1670 train_time:59680ms step_avg:97.84ms +step:611/1670 train_time:59776ms step_avg:97.83ms +step:612/1670 train_time:59872ms step_avg:97.83ms +step:613/1670 train_time:59970ms step_avg:97.83ms +step:614/1670 train_time:60067ms step_avg:97.83ms +step:615/1670 train_time:60165ms step_avg:97.83ms +step:616/1670 train_time:60263ms step_avg:97.83ms +step:617/1670 train_time:60361ms step_avg:97.83ms +step:618/1670 train_time:60457ms step_avg:97.83ms +step:619/1670 train_time:60554ms step_avg:97.83ms +step:620/1670 train_time:60651ms step_avg:97.82ms +step:621/1670 train_time:60748ms step_avg:97.82ms +step:622/1670 train_time:60845ms step_avg:97.82ms +step:623/1670 train_time:60942ms step_avg:97.82ms +step:624/1670 train_time:61040ms step_avg:97.82ms +step:625/1670 train_time:61136ms step_avg:97.82ms +step:625/1670 val_loss:3.6151 train_time:61232ms step_avg:97.97ms +step:626/1670 train_time:61256ms step_avg:97.85ms +step:627/1670 train_time:61334ms step_avg:97.82ms +step:628/1670 train_time:61429ms step_avg:97.82ms 
+step:629/1670 train_time:61528ms step_avg:97.82ms +step:630/1670 train_time:61624ms step_avg:97.82ms +step:631/1670 train_time:61720ms step_avg:97.81ms +step:632/1670 train_time:61815ms step_avg:97.81ms +step:633/1670 train_time:61910ms step_avg:97.80ms +step:634/1670 train_time:62007ms step_avg:97.80ms +step:635/1670 train_time:62103ms step_avg:97.80ms +step:636/1670 train_time:62208ms step_avg:97.81ms +step:637/1670 train_time:62306ms step_avg:97.81ms +step:638/1670 train_time:62405ms step_avg:97.81ms +step:639/1670 train_time:62688ms step_avg:98.10ms +step:640/1670 train_time:62833ms step_avg:98.18ms +step:641/1670 train_time:62927ms step_avg:98.17ms +step:642/1670 train_time:63023ms step_avg:98.17ms +step:643/1670 train_time:63120ms step_avg:98.16ms +step:644/1670 train_time:63216ms step_avg:98.16ms +step:645/1670 train_time:63311ms step_avg:98.16ms +step:646/1670 train_time:63406ms step_avg:98.15ms +step:647/1670 train_time:63502ms step_avg:98.15ms +step:648/1670 train_time:63597ms step_avg:98.14ms +step:649/1670 train_time:63696ms step_avg:98.14ms +step:650/1670 train_time:63796ms step_avg:98.15ms +step:651/1670 train_time:63894ms step_avg:98.15ms +step:652/1670 train_time:63990ms step_avg:98.14ms +step:653/1670 train_time:64086ms step_avg:98.14ms +step:654/1670 train_time:64182ms step_avg:98.14ms +step:655/1670 train_time:64279ms step_avg:98.14ms +step:656/1670 train_time:64375ms step_avg:98.13ms +step:657/1670 train_time:64470ms step_avg:98.13ms +step:658/1670 train_time:64567ms step_avg:98.13ms +step:659/1670 train_time:64665ms step_avg:98.13ms +step:660/1670 train_time:64764ms step_avg:98.13ms +step:661/1670 train_time:64863ms step_avg:98.13ms +step:662/1670 train_time:64962ms step_avg:98.13ms +step:663/1670 train_time:65059ms step_avg:98.13ms +step:664/1670 train_time:65156ms step_avg:98.13ms +step:665/1670 train_time:65253ms step_avg:98.12ms +step:666/1670 train_time:65349ms step_avg:98.12ms +step:667/1670 train_time:65444ms step_avg:98.12ms +step:668/1670 train_time:65541ms step_avg:98.11ms +step:669/1670 train_time:65637ms step_avg:98.11ms +step:670/1670 train_time:65734ms step_avg:98.11ms +step:671/1670 train_time:65830ms step_avg:98.11ms +step:672/1670 train_time:65928ms step_avg:98.11ms +step:673/1670 train_time:66026ms step_avg:98.11ms +step:674/1670 train_time:66124ms step_avg:98.11ms +step:675/1670 train_time:66221ms step_avg:98.11ms +step:676/1670 train_time:66319ms step_avg:98.10ms +step:677/1670 train_time:66414ms step_avg:98.10ms +step:678/1670 train_time:66510ms step_avg:98.10ms +step:679/1670 train_time:66606ms step_avg:98.09ms +step:680/1670 train_time:66704ms step_avg:98.09ms +step:681/1670 train_time:66801ms step_avg:98.09ms +step:682/1670 train_time:66900ms step_avg:98.09ms +step:683/1670 train_time:66998ms step_avg:98.09ms +step:684/1670 train_time:67094ms step_avg:98.09ms +step:685/1670 train_time:67190ms step_avg:98.09ms +step:686/1670 train_time:67287ms step_avg:98.09ms +step:687/1670 train_time:67384ms step_avg:98.08ms +step:688/1670 train_time:67481ms step_avg:98.08ms +step:689/1670 train_time:67578ms step_avg:98.08ms +step:690/1670 train_time:67674ms step_avg:98.08ms +step:691/1670 train_time:67771ms step_avg:98.08ms +step:692/1670 train_time:67868ms step_avg:98.07ms +step:693/1670 train_time:67965ms step_avg:98.07ms +step:694/1670 train_time:68063ms step_avg:98.07ms +step:695/1670 train_time:68161ms step_avg:98.07ms +step:696/1670 train_time:68259ms step_avg:98.07ms +step:697/1670 train_time:68356ms step_avg:98.07ms +step:698/1670 train_time:68453ms 
step_avg:98.07ms +step:699/1670 train_time:68549ms step_avg:98.07ms +step:700/1670 train_time:68646ms step_avg:98.07ms +step:701/1670 train_time:68743ms step_avg:98.06ms +step:702/1670 train_time:68840ms step_avg:98.06ms +step:703/1670 train_time:68937ms step_avg:98.06ms +step:704/1670 train_time:69033ms step_avg:98.06ms +step:705/1670 train_time:69129ms step_avg:98.05ms +step:706/1670 train_time:69226ms step_avg:98.05ms +step:707/1670 train_time:69324ms step_avg:98.05ms +step:708/1670 train_time:69422ms step_avg:98.05ms +step:709/1670 train_time:69519ms step_avg:98.05ms +step:710/1670 train_time:69616ms step_avg:98.05ms +step:711/1670 train_time:69712ms step_avg:98.05ms +step:712/1670 train_time:69809ms step_avg:98.05ms +step:713/1670 train_time:69906ms step_avg:98.04ms +step:714/1670 train_time:70003ms step_avg:98.04ms +step:715/1670 train_time:70100ms step_avg:98.04ms +step:716/1670 train_time:70197ms step_avg:98.04ms +step:717/1670 train_time:70293ms step_avg:98.04ms +step:718/1670 train_time:70389ms step_avg:98.03ms +step:719/1670 train_time:70485ms step_avg:98.03ms +step:720/1670 train_time:70582ms step_avg:98.03ms +step:721/1670 train_time:70680ms step_avg:98.03ms +step:722/1670 train_time:70776ms step_avg:98.03ms +step:723/1670 train_time:70873ms step_avg:98.03ms +step:724/1670 train_time:70969ms step_avg:98.02ms +step:725/1670 train_time:71067ms step_avg:98.02ms +step:726/1670 train_time:71164ms step_avg:98.02ms +step:727/1670 train_time:71261ms step_avg:98.02ms +step:728/1670 train_time:71359ms step_avg:98.02ms +step:729/1670 train_time:71457ms step_avg:98.02ms +step:730/1670 train_time:71554ms step_avg:98.02ms +step:731/1670 train_time:71650ms step_avg:98.02ms +step:732/1670 train_time:71748ms step_avg:98.02ms +step:733/1670 train_time:71845ms step_avg:98.01ms +step:734/1670 train_time:71941ms step_avg:98.01ms +step:735/1670 train_time:72039ms step_avg:98.01ms +step:736/1670 train_time:72135ms step_avg:98.01ms +step:737/1670 train_time:72232ms step_avg:98.01ms +step:738/1670 train_time:72329ms step_avg:98.01ms +step:739/1670 train_time:72426ms step_avg:98.01ms +step:740/1670 train_time:72524ms step_avg:98.01ms +step:741/1670 train_time:72621ms step_avg:98.00ms +step:742/1670 train_time:72718ms step_avg:98.00ms +step:743/1670 train_time:72814ms step_avg:98.00ms +step:744/1670 train_time:72910ms step_avg:98.00ms +step:745/1670 train_time:73008ms step_avg:98.00ms +step:746/1670 train_time:73106ms step_avg:98.00ms +step:747/1670 train_time:73203ms step_avg:98.00ms +step:748/1670 train_time:73301ms step_avg:98.00ms +step:749/1670 train_time:73398ms step_avg:98.00ms +step:750/1670 train_time:73496ms step_avg:97.99ms +step:750/1670 val_loss:3.5640 train_time:73591ms step_avg:98.12ms +step:751/1670 train_time:73613ms step_avg:98.02ms +step:752/1670 train_time:73695ms step_avg:98.00ms +step:753/1670 train_time:73795ms step_avg:98.00ms +step:754/1670 train_time:73892ms step_avg:98.00ms +step:755/1670 train_time:73989ms step_avg:98.00ms +step:756/1670 train_time:74085ms step_avg:98.00ms +step:757/1670 train_time:74181ms step_avg:97.99ms +step:758/1670 train_time:74277ms step_avg:97.99ms +step:759/1670 train_time:74373ms step_avg:97.99ms +step:760/1670 train_time:74469ms step_avg:97.98ms +step:761/1670 train_time:74567ms step_avg:97.99ms +step:762/1670 train_time:74665ms step_avg:97.99ms +step:763/1670 train_time:74764ms step_avg:97.99ms +step:764/1670 train_time:74863ms step_avg:97.99ms +step:765/1670 train_time:74961ms step_avg:97.99ms +step:766/1670 train_time:75059ms step_avg:97.99ms 
+step:767/1670 train_time:75156ms step_avg:97.99ms +step:768/1670 train_time:75252ms step_avg:97.98ms +step:769/1670 train_time:75348ms step_avg:97.98ms +step:770/1670 train_time:75444ms step_avg:97.98ms +step:771/1670 train_time:75541ms step_avg:97.98ms +step:772/1670 train_time:75640ms step_avg:97.98ms +step:773/1670 train_time:75739ms step_avg:97.98ms +step:774/1670 train_time:75837ms step_avg:97.98ms +step:775/1670 train_time:75933ms step_avg:97.98ms +step:776/1670 train_time:76030ms step_avg:97.98ms +step:777/1670 train_time:76126ms step_avg:97.97ms +step:778/1670 train_time:76223ms step_avg:97.97ms +step:779/1670 train_time:76319ms step_avg:97.97ms +step:780/1670 train_time:76416ms step_avg:97.97ms +step:781/1670 train_time:76513ms step_avg:97.97ms +step:782/1670 train_time:76610ms step_avg:97.97ms +step:783/1670 train_time:76706ms step_avg:97.96ms +step:784/1670 train_time:76803ms step_avg:97.96ms +step:785/1670 train_time:76901ms step_avg:97.96ms +step:786/1670 train_time:76999ms step_avg:97.96ms +step:787/1670 train_time:77097ms step_avg:97.96ms +step:788/1670 train_time:77194ms step_avg:97.96ms +step:789/1670 train_time:77290ms step_avg:97.96ms +step:790/1670 train_time:77386ms step_avg:97.96ms +step:791/1670 train_time:77482ms step_avg:97.95ms +step:792/1670 train_time:77580ms step_avg:97.95ms +step:793/1670 train_time:77678ms step_avg:97.95ms +step:794/1670 train_time:77776ms step_avg:97.95ms +step:795/1670 train_time:77873ms step_avg:97.95ms +step:796/1670 train_time:77970ms step_avg:97.95ms +step:797/1670 train_time:78067ms step_avg:97.95ms +step:798/1670 train_time:78163ms step_avg:97.95ms +step:799/1670 train_time:78260ms step_avg:97.95ms +step:800/1670 train_time:78357ms step_avg:97.95ms +step:801/1670 train_time:78453ms step_avg:97.94ms +step:802/1670 train_time:78550ms step_avg:97.94ms +step:803/1670 train_time:78646ms step_avg:97.94ms +step:804/1670 train_time:78743ms step_avg:97.94ms +step:805/1670 train_time:78842ms step_avg:97.94ms +step:806/1670 train_time:78939ms step_avg:97.94ms +step:807/1670 train_time:79037ms step_avg:97.94ms +step:808/1670 train_time:79134ms step_avg:97.94ms +step:809/1670 train_time:79231ms step_avg:97.94ms +step:810/1670 train_time:79327ms step_avg:97.93ms +step:811/1670 train_time:79425ms step_avg:97.93ms +step:812/1670 train_time:79521ms step_avg:97.93ms +step:813/1670 train_time:79618ms step_avg:97.93ms +step:814/1670 train_time:79716ms step_avg:97.93ms +step:815/1670 train_time:79814ms step_avg:97.93ms +step:816/1670 train_time:79911ms step_avg:97.93ms +step:817/1670 train_time:80008ms step_avg:97.93ms +step:818/1670 train_time:80105ms step_avg:97.93ms +step:819/1670 train_time:80203ms step_avg:97.93ms +step:820/1670 train_time:80301ms step_avg:97.93ms +step:821/1670 train_time:80398ms step_avg:97.93ms +step:822/1670 train_time:80494ms step_avg:97.93ms +step:823/1670 train_time:80591ms step_avg:97.92ms +step:824/1670 train_time:80688ms step_avg:97.92ms +step:825/1670 train_time:80785ms step_avg:97.92ms +step:826/1670 train_time:80882ms step_avg:97.92ms +step:827/1670 train_time:80981ms step_avg:97.92ms +step:828/1670 train_time:81079ms step_avg:97.92ms +step:829/1670 train_time:81176ms step_avg:97.92ms +step:830/1670 train_time:81272ms step_avg:97.92ms +step:831/1670 train_time:81369ms step_avg:97.92ms +step:832/1670 train_time:81465ms step_avg:97.91ms +step:833/1670 train_time:81562ms step_avg:97.91ms +step:834/1670 train_time:81660ms step_avg:97.91ms +step:835/1670 train_time:81757ms step_avg:97.91ms +step:836/1670 train_time:81854ms 
step_avg:97.91ms +step:837/1670 train_time:81951ms step_avg:97.91ms +step:838/1670 train_time:82047ms step_avg:97.91ms +step:839/1670 train_time:82144ms step_avg:97.91ms +step:840/1670 train_time:82242ms step_avg:97.91ms +step:841/1670 train_time:82340ms step_avg:97.91ms +step:842/1670 train_time:82437ms step_avg:97.91ms +step:843/1670 train_time:82534ms step_avg:97.90ms +step:844/1670 train_time:82630ms step_avg:97.90ms +step:845/1670 train_time:82726ms step_avg:97.90ms +step:846/1670 train_time:82823ms step_avg:97.90ms +step:847/1670 train_time:82921ms step_avg:97.90ms +step:848/1670 train_time:83018ms step_avg:97.90ms +step:849/1670 train_time:83117ms step_avg:97.90ms +step:850/1670 train_time:83214ms step_avg:97.90ms +step:851/1670 train_time:83564ms step_avg:98.19ms +step:852/1670 train_time:83637ms step_avg:98.17ms +step:853/1670 train_time:83732ms step_avg:98.16ms +step:854/1670 train_time:83827ms step_avg:98.16ms +step:855/1670 train_time:83923ms step_avg:98.16ms +step:856/1670 train_time:84020ms step_avg:98.15ms +step:857/1670 train_time:84116ms step_avg:98.15ms +step:858/1670 train_time:84212ms step_avg:98.15ms +step:859/1670 train_time:84307ms step_avg:98.15ms +step:860/1670 train_time:84404ms step_avg:98.14ms +step:861/1670 train_time:84503ms step_avg:98.15ms +step:862/1670 train_time:84605ms step_avg:98.15ms +step:863/1670 train_time:84704ms step_avg:98.15ms +step:864/1670 train_time:84802ms step_avg:98.15ms +step:865/1670 train_time:84898ms step_avg:98.15ms +step:866/1670 train_time:84996ms step_avg:98.15ms +step:867/1670 train_time:85092ms step_avg:98.15ms +step:868/1670 train_time:85187ms step_avg:98.14ms +step:869/1670 train_time:85284ms step_avg:98.14ms +step:870/1670 train_time:85381ms step_avg:98.14ms +step:871/1670 train_time:85478ms step_avg:98.14ms +step:872/1670 train_time:85578ms step_avg:98.14ms +step:873/1670 train_time:85678ms step_avg:98.14ms +step:874/1670 train_time:85777ms step_avg:98.14ms +step:875/1670 train_time:85873ms step_avg:98.14ms +step:875/1670 val_loss:3.5224 train_time:85970ms step_avg:98.25ms +step:876/1670 train_time:85992ms step_avg:98.16ms +step:877/1670 train_time:86075ms step_avg:98.15ms +step:878/1670 train_time:86174ms step_avg:98.15ms +step:879/1670 train_time:86271ms step_avg:98.15ms +step:880/1670 train_time:86366ms step_avg:98.14ms +step:881/1670 train_time:86462ms step_avg:98.14ms +step:882/1670 train_time:86558ms step_avg:98.14ms +step:883/1670 train_time:86653ms step_avg:98.14ms +step:884/1670 train_time:86749ms step_avg:98.13ms +step:885/1670 train_time:86845ms step_avg:98.13ms +step:886/1670 train_time:86944ms step_avg:98.13ms +step:887/1670 train_time:87043ms step_avg:98.13ms +step:888/1670 train_time:87142ms step_avg:98.13ms +step:889/1670 train_time:87241ms step_avg:98.13ms +step:890/1670 train_time:87339ms step_avg:98.13ms +step:891/1670 train_time:87435ms step_avg:98.13ms +step:892/1670 train_time:87532ms step_avg:98.13ms +step:893/1670 train_time:87628ms step_avg:98.13ms +step:894/1670 train_time:87724ms step_avg:98.12ms +step:895/1670 train_time:87820ms step_avg:98.12ms +step:896/1670 train_time:87917ms step_avg:98.12ms +step:897/1670 train_time:88015ms step_avg:98.12ms +step:898/1670 train_time:88113ms step_avg:98.12ms +step:899/1670 train_time:88210ms step_avg:98.12ms +step:900/1670 train_time:88307ms step_avg:98.12ms +step:901/1670 train_time:88404ms step_avg:98.12ms +step:902/1670 train_time:88502ms step_avg:98.12ms +step:903/1670 train_time:88599ms step_avg:98.12ms +step:904/1670 train_time:88696ms step_avg:98.12ms 
+step:905/1670 train_time:88793ms step_avg:98.11ms +step:906/1670 train_time:88888ms step_avg:98.11ms +step:907/1670 train_time:88986ms step_avg:98.11ms +step:908/1670 train_time:89085ms step_avg:98.11ms +step:909/1670 train_time:89183ms step_avg:98.11ms +step:910/1670 train_time:89280ms step_avg:98.11ms +step:911/1670 train_time:89377ms step_avg:98.11ms +step:912/1670 train_time:89474ms step_avg:98.11ms +step:913/1670 train_time:89570ms step_avg:98.11ms +step:914/1670 train_time:89667ms step_avg:98.10ms +step:915/1670 train_time:89764ms step_avg:98.10ms +step:916/1670 train_time:89861ms step_avg:98.10ms +step:917/1670 train_time:89960ms step_avg:98.10ms +step:918/1670 train_time:90058ms step_avg:98.10ms +step:919/1670 train_time:90155ms step_avg:98.10ms +step:920/1670 train_time:90253ms step_avg:98.10ms +step:921/1670 train_time:90349ms step_avg:98.10ms +step:922/1670 train_time:90446ms step_avg:98.10ms +step:923/1670 train_time:90543ms step_avg:98.10ms +step:924/1670 train_time:90640ms step_avg:98.10ms +step:925/1670 train_time:90738ms step_avg:98.09ms +step:926/1670 train_time:90834ms step_avg:98.09ms +step:927/1670 train_time:90930ms step_avg:98.09ms +step:928/1670 train_time:91027ms step_avg:98.09ms +step:929/1670 train_time:91125ms step_avg:98.09ms +step:930/1670 train_time:91225ms step_avg:98.09ms +step:931/1670 train_time:91323ms step_avg:98.09ms +step:932/1670 train_time:91421ms step_avg:98.09ms +step:933/1670 train_time:91517ms step_avg:98.09ms +step:934/1670 train_time:91614ms step_avg:98.09ms +step:935/1670 train_time:91710ms step_avg:98.09ms +step:936/1670 train_time:91807ms step_avg:98.08ms +step:937/1670 train_time:91904ms step_avg:98.08ms +step:938/1670 train_time:92001ms step_avg:98.08ms +step:939/1670 train_time:92100ms step_avg:98.08ms +step:940/1670 train_time:92198ms step_avg:98.08ms +step:941/1670 train_time:92297ms step_avg:98.08ms +step:942/1670 train_time:92395ms step_avg:98.08ms +step:943/1670 train_time:92491ms step_avg:98.08ms +step:944/1670 train_time:92587ms step_avg:98.08ms +step:945/1670 train_time:92685ms step_avg:98.08ms +step:946/1670 train_time:92783ms step_avg:98.08ms +step:947/1670 train_time:92880ms step_avg:98.08ms +step:948/1670 train_time:92977ms step_avg:98.08ms +step:949/1670 train_time:93074ms step_avg:98.08ms +step:950/1670 train_time:93170ms step_avg:98.07ms +step:951/1670 train_time:93267ms step_avg:98.07ms +step:952/1670 train_time:93366ms step_avg:98.07ms +step:953/1670 train_time:93463ms step_avg:98.07ms +step:954/1670 train_time:93561ms step_avg:98.07ms +step:955/1670 train_time:93658ms step_avg:98.07ms +step:956/1670 train_time:93756ms step_avg:98.07ms +step:957/1670 train_time:93852ms step_avg:98.07ms +step:958/1670 train_time:93948ms step_avg:98.07ms +step:959/1670 train_time:94045ms step_avg:98.07ms +step:960/1670 train_time:94144ms step_avg:98.07ms +step:961/1670 train_time:94241ms step_avg:98.07ms +step:962/1670 train_time:94340ms step_avg:98.07ms +step:963/1670 train_time:94437ms step_avg:98.07ms +step:964/1670 train_time:94535ms step_avg:98.07ms +step:965/1670 train_time:94632ms step_avg:98.06ms +step:966/1670 train_time:94728ms step_avg:98.06ms +step:967/1670 train_time:94825ms step_avg:98.06ms +step:968/1670 train_time:94922ms step_avg:98.06ms +step:969/1670 train_time:95018ms step_avg:98.06ms +step:970/1670 train_time:95116ms step_avg:98.06ms +step:971/1670 train_time:95213ms step_avg:98.06ms +step:972/1670 train_time:95310ms step_avg:98.06ms +step:973/1670 train_time:95407ms step_avg:98.05ms +step:974/1670 train_time:95503ms 
step_avg:98.05ms +step:975/1670 train_time:95600ms step_avg:98.05ms +step:976/1670 train_time:95698ms step_avg:98.05ms +step:977/1670 train_time:95796ms step_avg:98.05ms +step:978/1670 train_time:95894ms step_avg:98.05ms +step:979/1670 train_time:95990ms step_avg:98.05ms +step:980/1670 train_time:96087ms step_avg:98.05ms +step:981/1670 train_time:96185ms step_avg:98.05ms +step:982/1670 train_time:96284ms step_avg:98.05ms +step:983/1670 train_time:96381ms step_avg:98.05ms +step:984/1670 train_time:96478ms step_avg:98.05ms +step:985/1670 train_time:96574ms step_avg:98.05ms +step:986/1670 train_time:96671ms step_avg:98.04ms +step:987/1670 train_time:96767ms step_avg:98.04ms +step:988/1670 train_time:96865ms step_avg:98.04ms +step:989/1670 train_time:96962ms step_avg:98.04ms +step:990/1670 train_time:97060ms step_avg:98.04ms +step:991/1670 train_time:97158ms step_avg:98.04ms +step:992/1670 train_time:97255ms step_avg:98.04ms +step:993/1670 train_time:97353ms step_avg:98.04ms +step:994/1670 train_time:97449ms step_avg:98.04ms +step:995/1670 train_time:97546ms step_avg:98.04ms +step:996/1670 train_time:97643ms step_avg:98.04ms +step:997/1670 train_time:97740ms step_avg:98.03ms +step:998/1670 train_time:97838ms step_avg:98.03ms +step:999/1670 train_time:97936ms step_avg:98.03ms +step:1000/1670 train_time:98034ms step_avg:98.03ms +step:1000/1670 val_loss:3.4803 train_time:98129ms step_avg:98.13ms +step:1001/1670 train_time:98151ms step_avg:98.05ms +step:1002/1670 train_time:98235ms step_avg:98.04ms +step:1003/1670 train_time:98338ms step_avg:98.04ms +step:1004/1670 train_time:98437ms step_avg:98.05ms +step:1005/1670 train_time:98534ms step_avg:98.04ms +step:1006/1670 train_time:98631ms step_avg:98.04ms +step:1007/1670 train_time:98726ms step_avg:98.04ms +step:1008/1670 train_time:98822ms step_avg:98.04ms +step:1009/1670 train_time:98918ms step_avg:98.04ms +step:1010/1670 train_time:99014ms step_avg:98.03ms +step:1011/1670 train_time:99113ms step_avg:98.03ms +step:1012/1670 train_time:99212ms step_avg:98.04ms +step:1013/1670 train_time:99312ms step_avg:98.04ms +step:1014/1670 train_time:99411ms step_avg:98.04ms +step:1015/1670 train_time:99507ms step_avg:98.04ms +step:1016/1670 train_time:99603ms step_avg:98.03ms +step:1017/1670 train_time:99700ms step_avg:98.03ms +step:1018/1670 train_time:99797ms step_avg:98.03ms +step:1019/1670 train_time:99893ms step_avg:98.03ms +step:1020/1670 train_time:99989ms step_avg:98.03ms +step:1021/1670 train_time:100085ms step_avg:98.03ms +step:1022/1670 train_time:100183ms step_avg:98.03ms +step:1023/1670 train_time:100284ms step_avg:98.03ms +step:1024/1670 train_time:100383ms step_avg:98.03ms +step:1025/1670 train_time:100480ms step_avg:98.03ms +step:1026/1670 train_time:100578ms step_avg:98.03ms +step:1027/1670 train_time:100675ms step_avg:98.03ms +step:1028/1670 train_time:100772ms step_avg:98.03ms +step:1029/1670 train_time:100867ms step_avg:98.02ms +step:1030/1670 train_time:100963ms step_avg:98.02ms +step:1031/1670 train_time:101060ms step_avg:98.02ms +step:1032/1670 train_time:101159ms step_avg:98.02ms +step:1033/1670 train_time:101258ms step_avg:98.02ms +step:1034/1670 train_time:101358ms step_avg:98.03ms +step:1035/1670 train_time:101456ms step_avg:98.03ms +step:1036/1670 train_time:101555ms step_avg:98.03ms +step:1037/1670 train_time:101652ms step_avg:98.03ms +step:1038/1670 train_time:101750ms step_avg:98.02ms +step:1039/1670 train_time:101846ms step_avg:98.02ms +step:1040/1670 train_time:101943ms step_avg:98.02ms +step:1041/1670 train_time:102040ms 
step_avg:98.02ms +step:1042/1670 train_time:102137ms step_avg:98.02ms +step:1043/1670 train_time:102234ms step_avg:98.02ms +step:1044/1670 train_time:102332ms step_avg:98.02ms +step:1045/1670 train_time:102430ms step_avg:98.02ms +step:1046/1670 train_time:102526ms step_avg:98.02ms +step:1047/1670 train_time:102623ms step_avg:98.02ms +step:1048/1670 train_time:102722ms step_avg:98.02ms +step:1049/1670 train_time:102819ms step_avg:98.02ms +step:1050/1670 train_time:102916ms step_avg:98.02ms +step:1051/1670 train_time:103013ms step_avg:98.01ms +step:1052/1670 train_time:103109ms step_avg:98.01ms +step:1053/1670 train_time:103206ms step_avg:98.01ms +step:1054/1670 train_time:103303ms step_avg:98.01ms +step:1055/1670 train_time:103401ms step_avg:98.01ms +step:1056/1670 train_time:103500ms step_avg:98.01ms +step:1057/1670 train_time:103597ms step_avg:98.01ms +step:1058/1670 train_time:103694ms step_avg:98.01ms +step:1059/1670 train_time:103791ms step_avg:98.01ms +step:1060/1670 train_time:103889ms step_avg:98.01ms +step:1061/1670 train_time:103985ms step_avg:98.01ms +step:1062/1670 train_time:104305ms step_avg:98.22ms +step:1063/1670 train_time:104444ms step_avg:98.25ms +step:1064/1670 train_time:104540ms step_avg:98.25ms +step:1065/1670 train_time:104636ms step_avg:98.25ms +step:1066/1670 train_time:104732ms step_avg:98.25ms +step:1067/1670 train_time:104827ms step_avg:98.24ms +step:1068/1670 train_time:104923ms step_avg:98.24ms +step:1069/1670 train_time:105019ms step_avg:98.24ms +step:1070/1670 train_time:105115ms step_avg:98.24ms +step:1071/1670 train_time:105211ms step_avg:98.24ms +step:1072/1670 train_time:105310ms step_avg:98.24ms +step:1073/1670 train_time:105413ms step_avg:98.24ms +step:1074/1670 train_time:105512ms step_avg:98.24ms +step:1075/1670 train_time:105610ms step_avg:98.24ms +step:1076/1670 train_time:105706ms step_avg:98.24ms +step:1077/1670 train_time:105802ms step_avg:98.24ms +step:1078/1670 train_time:105899ms step_avg:98.24ms +step:1079/1670 train_time:105996ms step_avg:98.23ms +step:1080/1670 train_time:106092ms step_avg:98.23ms +step:1081/1670 train_time:106187ms step_avg:98.23ms +step:1082/1670 train_time:106285ms step_avg:98.23ms +step:1083/1670 train_time:106384ms step_avg:98.23ms +step:1084/1670 train_time:106484ms step_avg:98.23ms +step:1085/1670 train_time:106583ms step_avg:98.23ms +step:1086/1670 train_time:106681ms step_avg:98.23ms +step:1087/1670 train_time:106778ms step_avg:98.23ms +step:1088/1670 train_time:106875ms step_avg:98.23ms +step:1089/1670 train_time:106972ms step_avg:98.23ms +step:1090/1670 train_time:107067ms step_avg:98.23ms +step:1091/1670 train_time:107163ms step_avg:98.22ms +step:1092/1670 train_time:107262ms step_avg:98.23ms +step:1093/1670 train_time:107360ms step_avg:98.23ms +step:1094/1670 train_time:107459ms step_avg:98.23ms +step:1095/1670 train_time:107558ms step_avg:98.23ms +step:1096/1670 train_time:107656ms step_avg:98.23ms +step:1097/1670 train_time:107753ms step_avg:98.22ms +step:1098/1670 train_time:107849ms step_avg:98.22ms +step:1099/1670 train_time:107945ms step_avg:98.22ms +step:1100/1670 train_time:108042ms step_avg:98.22ms +step:1101/1670 train_time:108139ms step_avg:98.22ms +step:1102/1670 train_time:108236ms step_avg:98.22ms +step:1103/1670 train_time:108333ms step_avg:98.22ms +step:1104/1670 train_time:108431ms step_avg:98.22ms +step:1105/1670 train_time:108530ms step_avg:98.22ms +step:1106/1670 train_time:108628ms step_avg:98.22ms +step:1107/1670 train_time:108725ms step_avg:98.22ms +step:1108/1670 train_time:108822ms 
step_avg:98.21ms +step:1109/1670 train_time:108920ms step_avg:98.21ms +step:1110/1670 train_time:109017ms step_avg:98.21ms +step:1111/1670 train_time:109113ms step_avg:98.21ms +step:1112/1670 train_time:109211ms step_avg:98.21ms +step:1113/1670 train_time:109308ms step_avg:98.21ms +step:1114/1670 train_time:109404ms step_avg:98.21ms +step:1115/1670 train_time:109502ms step_avg:98.21ms +step:1116/1670 train_time:109601ms step_avg:98.21ms +step:1117/1670 train_time:109699ms step_avg:98.21ms +step:1118/1670 train_time:109798ms step_avg:98.21ms +step:1119/1670 train_time:109896ms step_avg:98.21ms +step:1120/1670 train_time:109992ms step_avg:98.21ms +step:1121/1670 train_time:110090ms step_avg:98.21ms +step:1122/1670 train_time:110187ms step_avg:98.21ms +step:1123/1670 train_time:110284ms step_avg:98.21ms +step:1124/1670 train_time:110382ms step_avg:98.20ms +step:1125/1670 train_time:110481ms step_avg:98.20ms +step:1125/1670 val_loss:3.4260 train_time:110580ms step_avg:98.29ms +step:1126/1670 train_time:110603ms step_avg:98.23ms +step:1127/1670 train_time:110693ms step_avg:98.22ms +step:1128/1670 train_time:110791ms step_avg:98.22ms +step:1129/1670 train_time:110887ms step_avg:98.22ms +step:1130/1670 train_time:110984ms step_avg:98.22ms +step:1131/1670 train_time:111080ms step_avg:98.21ms +step:1132/1670 train_time:111176ms step_avg:98.21ms +step:1133/1670 train_time:111273ms step_avg:98.21ms +step:1134/1670 train_time:111369ms step_avg:98.21ms +step:1135/1670 train_time:111466ms step_avg:98.21ms +step:1136/1670 train_time:111569ms step_avg:98.21ms +step:1137/1670 train_time:111670ms step_avg:98.21ms +step:1138/1670 train_time:111769ms step_avg:98.22ms +step:1139/1670 train_time:111866ms step_avg:98.21ms +step:1140/1670 train_time:111962ms step_avg:98.21ms +step:1141/1670 train_time:112058ms step_avg:98.21ms +step:1142/1670 train_time:112155ms step_avg:98.21ms +step:1143/1670 train_time:112252ms step_avg:98.21ms +step:1144/1670 train_time:112348ms step_avg:98.21ms +step:1145/1670 train_time:112445ms step_avg:98.21ms +step:1146/1670 train_time:112544ms step_avg:98.21ms +step:1147/1670 train_time:112644ms step_avg:98.21ms +step:1148/1670 train_time:112745ms step_avg:98.21ms +step:1149/1670 train_time:112843ms step_avg:98.21ms +step:1150/1670 train_time:112940ms step_avg:98.21ms +step:1151/1670 train_time:113037ms step_avg:98.21ms +step:1152/1670 train_time:113133ms step_avg:98.21ms +step:1153/1670 train_time:113230ms step_avg:98.20ms +step:1154/1670 train_time:113326ms step_avg:98.20ms +step:1155/1670 train_time:113424ms step_avg:98.20ms +step:1156/1670 train_time:113522ms step_avg:98.20ms +step:1157/1670 train_time:113621ms step_avg:98.20ms +step:1158/1670 train_time:113721ms step_avg:98.20ms +step:1159/1670 train_time:113821ms step_avg:98.21ms +step:1160/1670 train_time:113919ms step_avg:98.21ms +step:1161/1670 train_time:114016ms step_avg:98.20ms +step:1162/1670 train_time:114112ms step_avg:98.20ms +step:1163/1670 train_time:114209ms step_avg:98.20ms +step:1164/1670 train_time:114306ms step_avg:98.20ms +step:1165/1670 train_time:114404ms step_avg:98.20ms +step:1166/1670 train_time:114501ms step_avg:98.20ms +step:1167/1670 train_time:114599ms step_avg:98.20ms +step:1168/1670 train_time:114698ms step_avg:98.20ms +step:1169/1670 train_time:114796ms step_avg:98.20ms +step:1170/1670 train_time:114894ms step_avg:98.20ms +step:1171/1670 train_time:114991ms step_avg:98.20ms +step:1172/1670 train_time:115088ms step_avg:98.20ms +step:1173/1670 train_time:115186ms step_avg:98.20ms +step:1174/1670 
train_time:115285ms step_avg:98.20ms +step:1175/1670 train_time:115384ms step_avg:98.20ms +step:1176/1670 train_time:115483ms step_avg:98.20ms +step:1177/1670 train_time:115581ms step_avg:98.20ms +step:1178/1670 train_time:115678ms step_avg:98.20ms +step:1179/1670 train_time:115778ms step_avg:98.20ms +step:1180/1670 train_time:115876ms step_avg:98.20ms +step:1181/1670 train_time:115973ms step_avg:98.20ms +step:1182/1670 train_time:116071ms step_avg:98.20ms +step:1183/1670 train_time:116168ms step_avg:98.20ms +step:1184/1670 train_time:116265ms step_avg:98.20ms +step:1185/1670 train_time:116364ms step_avg:98.20ms +step:1186/1670 train_time:116462ms step_avg:98.20ms +step:1187/1670 train_time:116560ms step_avg:98.20ms +step:1188/1670 train_time:116657ms step_avg:98.20ms +step:1189/1670 train_time:116756ms step_avg:98.20ms +step:1190/1670 train_time:116855ms step_avg:98.20ms +step:1191/1670 train_time:116950ms step_avg:98.20ms +step:1192/1670 train_time:117048ms step_avg:98.19ms +step:1193/1670 train_time:117146ms step_avg:98.19ms +step:1194/1670 train_time:117243ms step_avg:98.19ms +step:1195/1670 train_time:117340ms step_avg:98.19ms +step:1196/1670 train_time:117438ms step_avg:98.19ms +step:1197/1670 train_time:117536ms step_avg:98.19ms +step:1198/1670 train_time:117634ms step_avg:98.19ms +step:1199/1670 train_time:117731ms step_avg:98.19ms +step:1200/1670 train_time:117828ms step_avg:98.19ms +step:1201/1670 train_time:117927ms step_avg:98.19ms +step:1202/1670 train_time:118025ms step_avg:98.19ms +step:1203/1670 train_time:118124ms step_avg:98.19ms +step:1204/1670 train_time:118222ms step_avg:98.19ms +step:1205/1670 train_time:118319ms step_avg:98.19ms +step:1206/1670 train_time:118417ms step_avg:98.19ms +step:1207/1670 train_time:118514ms step_avg:98.19ms +step:1208/1670 train_time:118611ms step_avg:98.19ms +step:1209/1670 train_time:118708ms step_avg:98.19ms +step:1210/1670 train_time:118807ms step_avg:98.19ms +step:1211/1670 train_time:118905ms step_avg:98.19ms +step:1212/1670 train_time:119004ms step_avg:98.19ms +step:1213/1670 train_time:119103ms step_avg:98.19ms +step:1214/1670 train_time:119202ms step_avg:98.19ms +step:1215/1670 train_time:119299ms step_avg:98.19ms +step:1216/1670 train_time:119396ms step_avg:98.19ms +step:1217/1670 train_time:119494ms step_avg:98.19ms +step:1218/1670 train_time:119591ms step_avg:98.19ms +step:1219/1670 train_time:119689ms step_avg:98.19ms +step:1220/1670 train_time:119788ms step_avg:98.19ms +step:1221/1670 train_time:119886ms step_avg:98.19ms +step:1222/1670 train_time:119984ms step_avg:98.19ms +step:1223/1670 train_time:120081ms step_avg:98.19ms +step:1224/1670 train_time:120180ms step_avg:98.19ms +step:1225/1670 train_time:120279ms step_avg:98.19ms +step:1226/1670 train_time:120376ms step_avg:98.19ms +step:1227/1670 train_time:120473ms step_avg:98.19ms +step:1228/1670 train_time:120570ms step_avg:98.18ms +step:1229/1670 train_time:120667ms step_avg:98.18ms +step:1230/1670 train_time:120766ms step_avg:98.18ms +step:1231/1670 train_time:120864ms step_avg:98.18ms +step:1232/1670 train_time:120962ms step_avg:98.18ms +step:1233/1670 train_time:121059ms step_avg:98.18ms +step:1234/1670 train_time:121156ms step_avg:98.18ms +step:1235/1670 train_time:121254ms step_avg:98.18ms +step:1236/1670 train_time:121351ms step_avg:98.18ms +step:1237/1670 train_time:121449ms step_avg:98.18ms +step:1238/1670 train_time:121547ms step_avg:98.18ms +step:1239/1670 train_time:121645ms step_avg:98.18ms +step:1240/1670 train_time:121742ms step_avg:98.18ms +step:1241/1670 
train_time:121840ms step_avg:98.18ms +step:1242/1670 train_time:121938ms step_avg:98.18ms +step:1243/1670 train_time:122035ms step_avg:98.18ms +step:1244/1670 train_time:122132ms step_avg:98.18ms +step:1245/1670 train_time:122230ms step_avg:98.18ms +step:1246/1670 train_time:122328ms step_avg:98.18ms +step:1247/1670 train_time:122425ms step_avg:98.18ms +step:1248/1670 train_time:122523ms step_avg:98.18ms +step:1249/1670 train_time:122623ms step_avg:98.18ms +step:1250/1670 train_time:122721ms step_avg:98.18ms +step:1250/1670 val_loss:3.3835 train_time:122818ms step_avg:98.25ms +step:1251/1670 train_time:122841ms step_avg:98.19ms +step:1252/1670 train_time:122921ms step_avg:98.18ms +step:1253/1670 train_time:123021ms step_avg:98.18ms +step:1254/1670 train_time:123119ms step_avg:98.18ms +step:1255/1670 train_time:123215ms step_avg:98.18ms +step:1256/1670 train_time:123312ms step_avg:98.18ms +step:1257/1670 train_time:123409ms step_avg:98.18ms +step:1258/1670 train_time:123505ms step_avg:98.18ms +step:1259/1670 train_time:123602ms step_avg:98.17ms +step:1260/1670 train_time:123698ms step_avg:98.17ms +step:1261/1670 train_time:123796ms step_avg:98.17ms +step:1262/1670 train_time:123895ms step_avg:98.17ms +step:1263/1670 train_time:123996ms step_avg:98.18ms +step:1264/1670 train_time:124095ms step_avg:98.18ms +step:1265/1670 train_time:124192ms step_avg:98.18ms +step:1266/1670 train_time:124290ms step_avg:98.18ms +step:1267/1670 train_time:124388ms step_avg:98.18ms +step:1268/1670 train_time:124485ms step_avg:98.17ms +step:1269/1670 train_time:124582ms step_avg:98.17ms +step:1270/1670 train_time:124678ms step_avg:98.17ms +step:1271/1670 train_time:124776ms step_avg:98.17ms +step:1272/1670 train_time:124875ms step_avg:98.17ms +step:1273/1670 train_time:124974ms step_avg:98.17ms +step:1274/1670 train_time:125233ms step_avg:98.30ms +step:1275/1670 train_time:125395ms step_avg:98.35ms +step:1276/1670 train_time:125490ms step_avg:98.35ms +step:1277/1670 train_time:125587ms step_avg:98.35ms +step:1278/1670 train_time:125683ms step_avg:98.34ms +step:1279/1670 train_time:125780ms step_avg:98.34ms +step:1280/1670 train_time:125876ms step_avg:98.34ms +step:1281/1670 train_time:125973ms step_avg:98.34ms +step:1282/1670 train_time:126070ms step_avg:98.34ms +step:1283/1670 train_time:126167ms step_avg:98.34ms +step:1284/1670 train_time:126269ms step_avg:98.34ms +step:1285/1670 train_time:126372ms step_avg:98.34ms +step:1286/1670 train_time:126472ms step_avg:98.35ms +step:1287/1670 train_time:126571ms step_avg:98.35ms +step:1288/1670 train_time:126670ms step_avg:98.35ms +step:1289/1670 train_time:126767ms step_avg:98.35ms +step:1290/1670 train_time:126865ms step_avg:98.34ms +step:1291/1670 train_time:126962ms step_avg:98.34ms +step:1292/1670 train_time:127059ms step_avg:98.34ms +step:1293/1670 train_time:127155ms step_avg:98.34ms +step:1294/1670 train_time:127253ms step_avg:98.34ms +step:1295/1670 train_time:127351ms step_avg:98.34ms +step:1296/1670 train_time:127450ms step_avg:98.34ms +step:1297/1670 train_time:127549ms step_avg:98.34ms +step:1298/1670 train_time:127646ms step_avg:98.34ms +step:1299/1670 train_time:127744ms step_avg:98.34ms +step:1300/1670 train_time:127843ms step_avg:98.34ms +step:1301/1670 train_time:127942ms step_avg:98.34ms +step:1302/1670 train_time:128038ms step_avg:98.34ms +step:1303/1670 train_time:128135ms step_avg:98.34ms +step:1304/1670 train_time:128232ms step_avg:98.34ms +step:1305/1670 train_time:128330ms step_avg:98.34ms +step:1306/1670 train_time:128429ms step_avg:98.34ms 
+step:1307/1670 train_time:128527ms step_avg:98.34ms +step:1308/1670 train_time:128625ms step_avg:98.34ms +step:1309/1670 train_time:128723ms step_avg:98.34ms +step:1310/1670 train_time:128820ms step_avg:98.34ms +step:1311/1670 train_time:128917ms step_avg:98.34ms +step:1312/1670 train_time:129015ms step_avg:98.33ms +step:1313/1670 train_time:129112ms step_avg:98.33ms +step:1314/1670 train_time:129210ms step_avg:98.33ms +step:1315/1670 train_time:129308ms step_avg:98.33ms +step:1316/1670 train_time:129407ms step_avg:98.33ms +step:1317/1670 train_time:129505ms step_avg:98.33ms +step:1318/1670 train_time:129603ms step_avg:98.33ms +step:1319/1670 train_time:129700ms step_avg:98.33ms +step:1320/1670 train_time:129799ms step_avg:98.33ms +step:1321/1670 train_time:129896ms step_avg:98.33ms +step:1322/1670 train_time:129994ms step_avg:98.33ms +step:1323/1670 train_time:130091ms step_avg:98.33ms +step:1324/1670 train_time:130189ms step_avg:98.33ms +step:1325/1670 train_time:130286ms step_avg:98.33ms +step:1326/1670 train_time:130384ms step_avg:98.33ms +step:1327/1670 train_time:130482ms step_avg:98.33ms +step:1328/1670 train_time:130579ms step_avg:98.33ms +step:1329/1670 train_time:130677ms step_avg:98.33ms +step:1330/1670 train_time:130775ms step_avg:98.33ms +step:1331/1670 train_time:130874ms step_avg:98.33ms +step:1332/1670 train_time:130972ms step_avg:98.33ms +step:1333/1670 train_time:131071ms step_avg:98.33ms +step:1334/1670 train_time:131170ms step_avg:98.33ms +step:1335/1670 train_time:131267ms step_avg:98.33ms +step:1336/1670 train_time:131365ms step_avg:98.33ms +step:1337/1670 train_time:131463ms step_avg:98.33ms +step:1338/1670 train_time:131560ms step_avg:98.33ms +step:1339/1670 train_time:131658ms step_avg:98.33ms +step:1340/1670 train_time:131756ms step_avg:98.33ms +step:1341/1670 train_time:131854ms step_avg:98.32ms +step:1342/1670 train_time:131951ms step_avg:98.32ms +step:1343/1670 train_time:132050ms step_avg:98.32ms +step:1344/1670 train_time:132148ms step_avg:98.32ms +step:1345/1670 train_time:132246ms step_avg:98.32ms +step:1346/1670 train_time:132344ms step_avg:98.32ms +step:1347/1670 train_time:132442ms step_avg:98.32ms +step:1348/1670 train_time:132539ms step_avg:98.32ms +step:1349/1670 train_time:132637ms step_avg:98.32ms +step:1350/1670 train_time:132734ms step_avg:98.32ms +step:1351/1670 train_time:132833ms step_avg:98.32ms +step:1352/1670 train_time:132931ms step_avg:98.32ms +step:1353/1670 train_time:133029ms step_avg:98.32ms +step:1354/1670 train_time:133127ms step_avg:98.32ms +step:1355/1670 train_time:133225ms step_avg:98.32ms +step:1356/1670 train_time:133323ms step_avg:98.32ms +step:1357/1670 train_time:133421ms step_avg:98.32ms +step:1358/1670 train_time:133518ms step_avg:98.32ms +step:1359/1670 train_time:133616ms step_avg:98.32ms +step:1360/1670 train_time:133713ms step_avg:98.32ms +step:1361/1670 train_time:133811ms step_avg:98.32ms +step:1362/1670 train_time:133910ms step_avg:98.32ms +step:1363/1670 train_time:134008ms step_avg:98.32ms +step:1364/1670 train_time:134105ms step_avg:98.32ms +step:1365/1670 train_time:134203ms step_avg:98.32ms +step:1366/1670 train_time:134299ms step_avg:98.32ms +step:1367/1670 train_time:134397ms step_avg:98.32ms +step:1368/1670 train_time:134496ms step_avg:98.32ms +step:1369/1670 train_time:134594ms step_avg:98.32ms +step:1370/1670 train_time:134691ms step_avg:98.31ms +step:1371/1670 train_time:134790ms step_avg:98.32ms +step:1372/1670 train_time:134890ms step_avg:98.32ms +step:1373/1670 train_time:134988ms step_avg:98.32ms 
+step:1374/1670 train_time:135085ms step_avg:98.32ms +step:1375/1670 train_time:135183ms step_avg:98.32ms +step:1375/1670 val_loss:3.3460 train_time:135280ms step_avg:98.39ms +step:1376/1670 train_time:135302ms step_avg:98.33ms +step:1377/1670 train_time:135387ms step_avg:98.32ms +step:1378/1670 train_time:135485ms step_avg:98.32ms +step:1379/1670 train_time:135582ms step_avg:98.32ms +step:1380/1670 train_time:135679ms step_avg:98.32ms +step:1381/1670 train_time:135775ms step_avg:98.32ms +step:1382/1670 train_time:135872ms step_avg:98.32ms +step:1383/1670 train_time:135969ms step_avg:98.31ms +step:1384/1670 train_time:136065ms step_avg:98.31ms +step:1385/1670 train_time:136162ms step_avg:98.31ms +step:1386/1670 train_time:136263ms step_avg:98.31ms +step:1387/1670 train_time:136365ms step_avg:98.32ms +step:1388/1670 train_time:136464ms step_avg:98.32ms +step:1389/1670 train_time:136563ms step_avg:98.32ms +step:1390/1670 train_time:136661ms step_avg:98.32ms +step:1391/1670 train_time:136758ms step_avg:98.32ms +step:1392/1670 train_time:136855ms step_avg:98.32ms +step:1393/1670 train_time:136952ms step_avg:98.31ms +step:1394/1670 train_time:137049ms step_avg:98.31ms +step:1395/1670 train_time:137146ms step_avg:98.31ms +step:1396/1670 train_time:137244ms step_avg:98.31ms +step:1397/1670 train_time:137343ms step_avg:98.31ms +step:1398/1670 train_time:137443ms step_avg:98.31ms +step:1399/1670 train_time:137541ms step_avg:98.31ms +step:1400/1670 train_time:137639ms step_avg:98.31ms +step:1401/1670 train_time:137737ms step_avg:98.31ms +step:1402/1670 train_time:137834ms step_avg:98.31ms +step:1403/1670 train_time:137932ms step_avg:98.31ms +step:1404/1670 train_time:138029ms step_avg:98.31ms +step:1405/1670 train_time:138126ms step_avg:98.31ms +step:1406/1670 train_time:138224ms step_avg:98.31ms +step:1407/1670 train_time:138322ms step_avg:98.31ms +step:1408/1670 train_time:138422ms step_avg:98.31ms +step:1409/1670 train_time:138521ms step_avg:98.31ms +step:1410/1670 train_time:138620ms step_avg:98.31ms +step:1411/1670 train_time:138718ms step_avg:98.31ms +step:1412/1670 train_time:138815ms step_avg:98.31ms +step:1413/1670 train_time:138912ms step_avg:98.31ms +step:1414/1670 train_time:139009ms step_avg:98.31ms +step:1415/1670 train_time:139106ms step_avg:98.31ms +step:1416/1670 train_time:139203ms step_avg:98.31ms +step:1417/1670 train_time:139301ms step_avg:98.31ms +step:1418/1670 train_time:139400ms step_avg:98.31ms +step:1419/1670 train_time:139499ms step_avg:98.31ms +step:1420/1670 train_time:139598ms step_avg:98.31ms +step:1421/1670 train_time:139697ms step_avg:98.31ms +step:1422/1670 train_time:139794ms step_avg:98.31ms +step:1423/1670 train_time:139892ms step_avg:98.31ms +step:1424/1670 train_time:139989ms step_avg:98.31ms +step:1425/1670 train_time:140087ms step_avg:98.31ms +step:1426/1670 train_time:140185ms step_avg:98.31ms +step:1427/1670 train_time:140282ms step_avg:98.31ms +step:1428/1670 train_time:140381ms step_avg:98.31ms +step:1429/1670 train_time:140480ms step_avg:98.31ms +step:1430/1670 train_time:140578ms step_avg:98.31ms +step:1431/1670 train_time:140676ms step_avg:98.31ms +step:1432/1670 train_time:140774ms step_avg:98.31ms +step:1433/1670 train_time:140872ms step_avg:98.31ms +step:1434/1670 train_time:140969ms step_avg:98.30ms +step:1435/1670 train_time:141067ms step_avg:98.30ms +step:1436/1670 train_time:141165ms step_avg:98.30ms +step:1437/1670 train_time:141263ms step_avg:98.30ms +step:1438/1670 train_time:141360ms step_avg:98.30ms +step:1439/1670 train_time:141457ms 
step_avg:98.30ms +step:1440/1670 train_time:141555ms step_avg:98.30ms +step:1441/1670 train_time:141653ms step_avg:98.30ms +step:1442/1670 train_time:141751ms step_avg:98.30ms +step:1443/1670 train_time:141849ms step_avg:98.30ms +step:1444/1670 train_time:141946ms step_avg:98.30ms +step:1445/1670 train_time:142044ms step_avg:98.30ms +step:1446/1670 train_time:142142ms step_avg:98.30ms +step:1447/1670 train_time:142241ms step_avg:98.30ms +step:1448/1670 train_time:142338ms step_avg:98.30ms +step:1449/1670 train_time:142436ms step_avg:98.30ms +step:1450/1670 train_time:142535ms step_avg:98.30ms +step:1451/1670 train_time:142634ms step_avg:98.30ms +step:1452/1670 train_time:142731ms step_avg:98.30ms +step:1453/1670 train_time:142829ms step_avg:98.30ms +step:1454/1670 train_time:142926ms step_avg:98.30ms +step:1455/1670 train_time:143024ms step_avg:98.30ms +step:1456/1670 train_time:143122ms step_avg:98.30ms +step:1457/1670 train_time:143219ms step_avg:98.30ms +step:1458/1670 train_time:143318ms step_avg:98.30ms +step:1459/1670 train_time:143415ms step_avg:98.30ms +step:1460/1670 train_time:143514ms step_avg:98.30ms +step:1461/1670 train_time:143613ms step_avg:98.30ms +step:1462/1670 train_time:143711ms step_avg:98.30ms +step:1463/1670 train_time:143808ms step_avg:98.30ms +step:1464/1670 train_time:143906ms step_avg:98.30ms +step:1465/1670 train_time:144004ms step_avg:98.30ms +step:1466/1670 train_time:144102ms step_avg:98.30ms +step:1467/1670 train_time:144200ms step_avg:98.30ms +step:1468/1670 train_time:144299ms step_avg:98.30ms +step:1469/1670 train_time:144397ms step_avg:98.30ms +step:1470/1670 train_time:144496ms step_avg:98.30ms +step:1471/1670 train_time:144593ms step_avg:98.30ms +step:1472/1670 train_time:144691ms step_avg:98.30ms +step:1473/1670 train_time:144789ms step_avg:98.30ms +step:1474/1670 train_time:144887ms step_avg:98.29ms +step:1475/1670 train_time:144984ms step_avg:98.29ms +step:1476/1670 train_time:145081ms step_avg:98.29ms +step:1477/1670 train_time:145179ms step_avg:98.29ms +step:1478/1670 train_time:145277ms step_avg:98.29ms +step:1479/1670 train_time:145374ms step_avg:98.29ms +step:1480/1670 train_time:145472ms step_avg:98.29ms +step:1481/1670 train_time:145570ms step_avg:98.29ms +step:1482/1670 train_time:145668ms step_avg:98.29ms +step:1483/1670 train_time:145766ms step_avg:98.29ms +step:1484/1670 train_time:145863ms step_avg:98.29ms +step:1485/1670 train_time:146203ms step_avg:98.45ms +step:1486/1670 train_time:146277ms step_avg:98.44ms +step:1487/1670 train_time:146373ms step_avg:98.44ms +step:1488/1670 train_time:146470ms step_avg:98.43ms +step:1489/1670 train_time:146566ms step_avg:98.43ms +step:1490/1670 train_time:146663ms step_avg:98.43ms +step:1491/1670 train_time:146760ms step_avg:98.43ms +step:1492/1670 train_time:146857ms step_avg:98.43ms +step:1493/1670 train_time:146954ms step_avg:98.43ms +step:1494/1670 train_time:147051ms step_avg:98.43ms +step:1495/1670 train_time:147155ms step_avg:98.43ms +step:1496/1670 train_time:147259ms step_avg:98.44ms +step:1497/1670 train_time:147358ms step_avg:98.44ms +step:1498/1670 train_time:147457ms step_avg:98.44ms +step:1499/1670 train_time:147555ms step_avg:98.44ms +step:1500/1670 train_time:147653ms step_avg:98.44ms +step:1500/1670 val_loss:3.3137 train_time:147749ms step_avg:98.50ms +step:1501/1670 train_time:147772ms step_avg:98.45ms +step:1502/1670 train_time:147855ms step_avg:98.44ms +step:1503/1670 train_time:147954ms step_avg:98.44ms +step:1504/1670 train_time:148052ms step_avg:98.44ms +step:1505/1670 
train_time:148150ms step_avg:98.44ms +step:1506/1670 train_time:148247ms step_avg:98.44ms +step:1507/1670 train_time:148344ms step_avg:98.44ms +step:1508/1670 train_time:148441ms step_avg:98.44ms +step:1509/1670 train_time:148538ms step_avg:98.43ms +step:1510/1670 train_time:148635ms step_avg:98.43ms +step:1511/1670 train_time:148735ms step_avg:98.43ms +step:1512/1670 train_time:148834ms step_avg:98.44ms +step:1513/1670 train_time:148932ms step_avg:98.44ms +step:1514/1670 train_time:149031ms step_avg:98.44ms +step:1515/1670 train_time:149129ms step_avg:98.43ms +step:1516/1670 train_time:149226ms step_avg:98.43ms +step:1517/1670 train_time:149323ms step_avg:98.43ms +step:1518/1670 train_time:149420ms step_avg:98.43ms +step:1519/1670 train_time:149517ms step_avg:98.43ms +step:1520/1670 train_time:149614ms step_avg:98.43ms +step:1521/1670 train_time:149712ms step_avg:98.43ms +step:1522/1670 train_time:149811ms step_avg:98.43ms +step:1523/1670 train_time:149910ms step_avg:98.43ms +step:1524/1670 train_time:150009ms step_avg:98.43ms +step:1525/1670 train_time:150107ms step_avg:98.43ms +step:1526/1670 train_time:150205ms step_avg:98.43ms +step:1527/1670 train_time:150303ms step_avg:98.43ms +step:1528/1670 train_time:150400ms step_avg:98.43ms +step:1529/1670 train_time:150498ms step_avg:98.43ms +step:1530/1670 train_time:150595ms step_avg:98.43ms +step:1531/1670 train_time:150692ms step_avg:98.43ms +step:1532/1670 train_time:150791ms step_avg:98.43ms +step:1533/1670 train_time:150889ms step_avg:98.43ms +step:1534/1670 train_time:150988ms step_avg:98.43ms +step:1535/1670 train_time:151087ms step_avg:98.43ms +step:1536/1670 train_time:151185ms step_avg:98.43ms +step:1537/1670 train_time:151282ms step_avg:98.43ms +step:1538/1670 train_time:151380ms step_avg:98.43ms +step:1539/1670 train_time:151477ms step_avg:98.43ms +step:1540/1670 train_time:151575ms step_avg:98.43ms +step:1541/1670 train_time:151673ms step_avg:98.42ms +step:1542/1670 train_time:151770ms step_avg:98.42ms +step:1543/1670 train_time:151869ms step_avg:98.42ms +step:1544/1670 train_time:151968ms step_avg:98.42ms +step:1545/1670 train_time:152067ms step_avg:98.43ms +step:1546/1670 train_time:152166ms step_avg:98.43ms +step:1547/1670 train_time:152265ms step_avg:98.43ms +step:1548/1670 train_time:152363ms step_avg:98.43ms +step:1549/1670 train_time:152461ms step_avg:98.43ms +step:1550/1670 train_time:152559ms step_avg:98.43ms +step:1551/1670 train_time:152658ms step_avg:98.43ms +step:1552/1670 train_time:152756ms step_avg:98.43ms +step:1553/1670 train_time:152855ms step_avg:98.43ms +step:1554/1670 train_time:152953ms step_avg:98.43ms +step:1555/1670 train_time:153052ms step_avg:98.43ms +step:1556/1670 train_time:153150ms step_avg:98.43ms +step:1557/1670 train_time:153247ms step_avg:98.42ms +step:1558/1670 train_time:153345ms step_avg:98.42ms +step:1559/1670 train_time:153444ms step_avg:98.42ms +step:1560/1670 train_time:153544ms step_avg:98.43ms +step:1561/1670 train_time:153643ms step_avg:98.43ms +step:1562/1670 train_time:153742ms step_avg:98.43ms +step:1563/1670 train_time:153841ms step_avg:98.43ms +step:1564/1670 train_time:153940ms step_avg:98.43ms +step:1565/1670 train_time:154039ms step_avg:98.43ms +step:1566/1670 train_time:154138ms step_avg:98.43ms +step:1567/1670 train_time:154236ms step_avg:98.43ms +step:1568/1670 train_time:154334ms step_avg:98.43ms +step:1569/1670 train_time:154431ms step_avg:98.43ms +step:1570/1670 train_time:154528ms step_avg:98.43ms +step:1571/1670 train_time:154626ms step_avg:98.43ms +step:1572/1670 
train_time:154725ms step_avg:98.43ms +step:1573/1670 train_time:154824ms step_avg:98.43ms +step:1574/1670 train_time:154923ms step_avg:98.43ms +step:1575/1670 train_time:155022ms step_avg:98.43ms +step:1576/1670 train_time:155121ms step_avg:98.43ms +step:1577/1670 train_time:155220ms step_avg:98.43ms +step:1578/1670 train_time:155317ms step_avg:98.43ms +step:1579/1670 train_time:155415ms step_avg:98.43ms +step:1580/1670 train_time:155512ms step_avg:98.43ms +step:1581/1670 train_time:155610ms step_avg:98.43ms +step:1582/1670 train_time:155708ms step_avg:98.43ms +step:1583/1670 train_time:155807ms step_avg:98.43ms +step:1584/1670 train_time:155906ms step_avg:98.43ms +step:1585/1670 train_time:156005ms step_avg:98.43ms +step:1586/1670 train_time:156103ms step_avg:98.43ms +step:1587/1670 train_time:156202ms step_avg:98.43ms +step:1588/1670 train_time:156300ms step_avg:98.43ms +step:1589/1670 train_time:156398ms step_avg:98.43ms +step:1590/1670 train_time:156496ms step_avg:98.43ms +step:1591/1670 train_time:156594ms step_avg:98.42ms +step:1592/1670 train_time:156692ms step_avg:98.42ms +step:1593/1670 train_time:156790ms step_avg:98.42ms +step:1594/1670 train_time:156889ms step_avg:98.42ms +step:1595/1670 train_time:156987ms step_avg:98.42ms +step:1596/1670 train_time:157085ms step_avg:98.42ms +step:1597/1670 train_time:157184ms step_avg:98.42ms +step:1598/1670 train_time:157283ms step_avg:98.42ms +step:1599/1670 train_time:157381ms step_avg:98.42ms +step:1600/1670 train_time:157478ms step_avg:98.42ms +step:1601/1670 train_time:157577ms step_avg:98.42ms +step:1602/1670 train_time:157675ms step_avg:98.42ms +step:1603/1670 train_time:157772ms step_avg:98.42ms +step:1604/1670 train_time:157870ms step_avg:98.42ms +step:1605/1670 train_time:157968ms step_avg:98.42ms +step:1606/1670 train_time:158067ms step_avg:98.42ms +step:1607/1670 train_time:158165ms step_avg:98.42ms +step:1608/1670 train_time:158263ms step_avg:98.42ms +step:1609/1670 train_time:158361ms step_avg:98.42ms +step:1610/1670 train_time:158459ms step_avg:98.42ms +step:1611/1670 train_time:158558ms step_avg:98.42ms +step:1612/1670 train_time:158656ms step_avg:98.42ms +step:1613/1670 train_time:158753ms step_avg:98.42ms +step:1614/1670 train_time:158851ms step_avg:98.42ms +step:1615/1670 train_time:158948ms step_avg:98.42ms +step:1616/1670 train_time:159046ms step_avg:98.42ms +step:1617/1670 train_time:159144ms step_avg:98.42ms +step:1618/1670 train_time:159243ms step_avg:98.42ms +step:1619/1670 train_time:159342ms step_avg:98.42ms +step:1620/1670 train_time:159441ms step_avg:98.42ms +step:1621/1670 train_time:159538ms step_avg:98.42ms +step:1622/1670 train_time:159636ms step_avg:98.42ms +step:1623/1670 train_time:159734ms step_avg:98.42ms +step:1624/1670 train_time:159832ms step_avg:98.42ms +step:1625/1670 train_time:159930ms step_avg:98.42ms +step:1625/1670 val_loss:3.2871 train_time:160026ms step_avg:98.48ms +step:1626/1670 train_time:160048ms step_avg:98.43ms +step:1627/1670 train_time:160133ms step_avg:98.42ms +step:1628/1670 train_time:160233ms step_avg:98.42ms +step:1629/1670 train_time:160331ms step_avg:98.42ms +step:1630/1670 train_time:160428ms step_avg:98.42ms +step:1631/1670 train_time:160526ms step_avg:98.42ms +step:1632/1670 train_time:160623ms step_avg:98.42ms +step:1633/1670 train_time:160720ms step_avg:98.42ms +step:1634/1670 train_time:160816ms step_avg:98.42ms +step:1635/1670 train_time:160913ms step_avg:98.42ms +step:1636/1670 train_time:161013ms step_avg:98.42ms +step:1637/1670 train_time:161113ms step_avg:98.42ms 
+step:1638/1670 train_time:161214ms step_avg:98.42ms +step:1639/1670 train_time:161312ms step_avg:98.42ms +step:1640/1670 train_time:161410ms step_avg:98.42ms +step:1641/1670 train_time:161507ms step_avg:98.42ms +step:1642/1670 train_time:161605ms step_avg:98.42ms +step:1643/1670 train_time:161702ms step_avg:98.42ms +step:1644/1670 train_time:161799ms step_avg:98.42ms +step:1645/1670 train_time:161896ms step_avg:98.42ms +step:1646/1670 train_time:161995ms step_avg:98.42ms +step:1647/1670 train_time:162093ms step_avg:98.42ms +step:1648/1670 train_time:162193ms step_avg:98.42ms +step:1649/1670 train_time:162291ms step_avg:98.42ms +step:1650/1670 train_time:162390ms step_avg:98.42ms +step:1651/1670 train_time:162487ms step_avg:98.42ms +step:1652/1670 train_time:162585ms step_avg:98.42ms +step:1653/1670 train_time:162681ms step_avg:98.42ms +step:1654/1670 train_time:162779ms step_avg:98.42ms +step:1655/1670 train_time:162876ms step_avg:98.41ms +step:1656/1670 train_time:162974ms step_avg:98.41ms +step:1657/1670 train_time:163073ms step_avg:98.41ms +step:1658/1670 train_time:163171ms step_avg:98.41ms +step:1659/1670 train_time:163271ms step_avg:98.42ms +step:1660/1670 train_time:163371ms step_avg:98.42ms +step:1661/1670 train_time:163468ms step_avg:98.42ms +step:1662/1670 train_time:163566ms step_avg:98.42ms +step:1663/1670 train_time:163664ms step_avg:98.42ms +step:1664/1670 train_time:163763ms step_avg:98.42ms +step:1665/1670 train_time:163863ms step_avg:98.42ms +step:1666/1670 train_time:163962ms step_avg:98.42ms +step:1667/1670 train_time:164061ms step_avg:98.42ms +step:1668/1670 train_time:164161ms step_avg:98.42ms +step:1669/1670 train_time:164262ms step_avg:98.42ms +step:1670/1670 train_time:164361ms step_avg:98.42ms +step:1670/1670 val_loss:3.2790 train_time:164459ms step_avg:98.48ms +peak memory allocated: 34000 MiB reserved: 49496 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_d3bc9a09-09e9-450c-a8d7-f53a4f5aed01.txt b/records/090525_SkipMLPBlocks/comparison_d3bc9a09-09e9-450c-a8d7-f53a4f5aed01.txt new file mode 100644 index 000000000..09040c23f --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_d3bc9a09-09e9-450c-a8d7-f53a4f5aed01.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = 
w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", 
"c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from 
ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, 
b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
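+ # In outline: within each param group, rank r owns the parameters at indices base_i + r. + # A group of world_size gradients is averaged into the owner's buffer with an async + # reduce_scatter; the owner applies momentum, Newton-Schulz orthogonalization, and weight + # decay locally; the updated parameters are then redistributed to every rank with an async + # all_gather. Both lists are padded with dummy tensors so all ranks issue the same number + # of collectives even when len(params) is not a multiple of world_size.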
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + # report idx rather than cur, which is unbound on the first pass through this loop + raise StopIteration(f"Insufficient BOS ahead of index {idx}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each
sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1670 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if 
master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 15:40:57 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 44C P0 130W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 42C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 43C P0 130W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 71199 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 71200 C /usr/bin/python3 610MiB | +| 0 N/A N/A 71201 C /usr/bin/python3 610MiB | +| 0 N/A N/A 71202 C /usr/bin/python3 610MiB | +| 0 N/A N/A 71203 C /usr/bin/python3 610MiB | +| 0 N/A N/A 71204 C /usr/bin/python3 610MiB | +| 0 N/A N/A 71205 C /usr/bin/python3 610MiB | +| 0 N/A N/A 71206 C /usr/bin/python3 610MiB | +| 1 N/A N/A 71200 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 71201 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 71202 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 71203 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 71204 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 71205 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 71206 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:419ms step_avg:418.68ms +step:2/1670 train_time:439ms step_avg:219.57ms +step:3/1670 train_time:512ms step_avg:170.52ms +step:4/1670 train_time:606ms step_avg:151.42ms +step:5/1670 train_time:700ms step_avg:140.09ms +step:6/1670 train_time:796ms step_avg:132.63ms +step:7/1670 train_time:891ms step_avg:127.22ms +step:8/1670 
train_time:985ms step_avg:123.18ms +step:9/1670 train_time:1081ms step_avg:120.10ms +step:10/1670 train_time:1176ms step_avg:117.60ms +step:11/1670 train_time:1271ms step_avg:115.54ms +step:12/1670 train_time:1370ms step_avg:114.14ms +step:13/1670 train_time:1469ms step_avg:112.98ms +step:14/1670 train_time:1565ms step_avg:111.79ms +step:15/1670 train_time:1661ms step_avg:110.75ms +step:16/1670 train_time:1758ms step_avg:109.86ms +step:17/1670 train_time:1853ms step_avg:108.98ms +step:18/1670 train_time:1948ms step_avg:108.23ms +step:19/1670 train_time:2044ms step_avg:107.57ms +step:20/1670 train_time:2139ms step_avg:106.97ms +step:21/1670 train_time:2236ms step_avg:106.46ms +step:22/1670 train_time:2332ms step_avg:106.00ms +step:23/1670 train_time:2429ms step_avg:105.59ms +step:24/1670 train_time:2525ms step_avg:105.23ms +step:25/1670 train_time:2622ms step_avg:104.88ms +step:26/1670 train_time:2717ms step_avg:104.51ms +step:27/1670 train_time:2813ms step_avg:104.19ms +step:28/1670 train_time:2908ms step_avg:103.86ms +step:29/1670 train_time:3004ms step_avg:103.58ms +step:30/1670 train_time:3100ms step_avg:103.34ms +step:31/1670 train_time:3196ms step_avg:103.10ms +step:32/1670 train_time:3291ms step_avg:102.86ms +step:33/1670 train_time:3387ms step_avg:102.65ms +step:34/1670 train_time:3484ms step_avg:102.48ms +step:35/1670 train_time:3581ms step_avg:102.31ms +step:36/1670 train_time:3678ms step_avg:102.16ms +step:37/1670 train_time:3774ms step_avg:102.00ms +step:38/1670 train_time:3869ms step_avg:101.81ms +step:39/1670 train_time:3965ms step_avg:101.66ms +step:40/1670 train_time:4060ms step_avg:101.51ms +step:41/1670 train_time:4156ms step_avg:101.37ms +step:42/1670 train_time:4251ms step_avg:101.22ms +step:43/1670 train_time:4347ms step_avg:101.10ms +step:44/1670 train_time:4443ms step_avg:100.99ms +step:45/1670 train_time:4540ms step_avg:100.89ms +step:46/1670 train_time:4636ms step_avg:100.79ms +step:47/1670 train_time:4732ms step_avg:100.69ms +step:48/1670 train_time:4829ms step_avg:100.60ms +step:49/1670 train_time:4924ms step_avg:100.49ms +step:50/1670 train_time:5020ms step_avg:100.40ms +step:51/1670 train_time:5117ms step_avg:100.33ms +step:52/1670 train_time:5213ms step_avg:100.25ms +step:53/1670 train_time:5308ms step_avg:100.15ms +step:54/1670 train_time:5404ms step_avg:100.07ms +step:55/1670 train_time:5500ms step_avg:100.00ms +step:56/1670 train_time:5596ms step_avg:99.93ms +step:57/1670 train_time:5692ms step_avg:99.86ms +step:58/1670 train_time:5788ms step_avg:99.79ms +step:59/1670 train_time:5884ms step_avg:99.74ms +step:60/1670 train_time:5981ms step_avg:99.68ms +step:61/1670 train_time:6077ms step_avg:99.62ms +step:62/1670 train_time:6173ms step_avg:99.56ms +step:63/1670 train_time:6269ms step_avg:99.51ms +step:64/1670 train_time:6365ms step_avg:99.45ms +step:65/1670 train_time:6460ms step_avg:99.39ms +step:66/1670 train_time:6556ms step_avg:99.33ms +step:67/1670 train_time:6651ms step_avg:99.27ms +step:68/1670 train_time:6747ms step_avg:99.22ms +step:69/1670 train_time:6844ms step_avg:99.19ms +step:70/1670 train_time:6941ms step_avg:99.15ms +step:71/1670 train_time:7037ms step_avg:99.11ms +step:72/1670 train_time:7132ms step_avg:99.06ms +step:73/1670 train_time:7228ms step_avg:99.01ms +step:74/1670 train_time:7324ms step_avg:98.97ms +step:75/1670 train_time:7420ms step_avg:98.93ms +step:76/1670 train_time:7515ms step_avg:98.89ms +step:77/1670 train_time:7610ms step_avg:98.84ms +step:78/1670 train_time:7706ms step_avg:98.79ms +step:79/1670 train_time:7802ms 
step_avg:98.76ms +step:80/1670 train_time:7898ms step_avg:98.72ms +step:81/1670 train_time:7993ms step_avg:98.68ms +step:82/1670 train_time:8089ms step_avg:98.65ms +step:83/1670 train_time:8185ms step_avg:98.61ms +step:84/1670 train_time:8281ms step_avg:98.58ms +step:85/1670 train_time:8377ms step_avg:98.55ms +step:86/1670 train_time:8472ms step_avg:98.52ms +step:87/1670 train_time:8568ms step_avg:98.48ms +step:88/1670 train_time:8663ms step_avg:98.45ms +step:89/1670 train_time:8759ms step_avg:98.41ms +step:90/1670 train_time:8853ms step_avg:98.37ms +step:91/1670 train_time:8950ms step_avg:98.35ms +step:92/1670 train_time:9045ms step_avg:98.32ms +step:93/1670 train_time:9142ms step_avg:98.30ms +step:94/1670 train_time:9239ms step_avg:98.29ms +step:95/1670 train_time:9335ms step_avg:98.26ms +step:96/1670 train_time:9431ms step_avg:98.23ms +step:97/1670 train_time:9526ms step_avg:98.21ms +step:98/1670 train_time:9623ms step_avg:98.19ms +step:99/1670 train_time:9719ms step_avg:98.17ms +step:100/1670 train_time:9814ms step_avg:98.14ms +step:101/1670 train_time:9909ms step_avg:98.11ms +step:102/1670 train_time:10005ms step_avg:98.09ms +step:103/1670 train_time:10101ms step_avg:98.07ms +step:104/1670 train_time:10197ms step_avg:98.05ms +step:105/1670 train_time:10293ms step_avg:98.03ms +step:106/1670 train_time:10389ms step_avg:98.01ms +step:107/1670 train_time:10485ms step_avg:97.99ms +step:108/1670 train_time:10581ms step_avg:97.97ms +step:109/1670 train_time:10676ms step_avg:97.95ms +step:110/1670 train_time:10772ms step_avg:97.93ms +step:111/1670 train_time:10867ms step_avg:97.90ms +step:112/1670 train_time:10962ms step_avg:97.88ms +step:113/1670 train_time:11058ms step_avg:97.86ms +step:114/1670 train_time:11153ms step_avg:97.84ms +step:115/1670 train_time:11249ms step_avg:97.82ms +step:116/1670 train_time:11346ms step_avg:97.81ms +step:117/1670 train_time:11441ms step_avg:97.79ms +step:118/1670 train_time:11536ms step_avg:97.77ms +step:119/1670 train_time:11632ms step_avg:97.75ms +step:120/1670 train_time:11727ms step_avg:97.73ms +step:121/1670 train_time:11824ms step_avg:97.72ms +step:122/1670 train_time:11919ms step_avg:97.70ms +step:123/1670 train_time:12016ms step_avg:97.69ms +step:124/1670 train_time:12110ms step_avg:97.66ms +step:125/1670 train_time:12206ms step_avg:97.65ms +step:125/1670 val_loss:4.3041 train_time:12301ms step_avg:98.41ms +step:126/1670 train_time:12323ms step_avg:97.80ms +step:127/1670 train_time:12404ms step_avg:97.67ms +step:128/1670 train_time:12505ms step_avg:97.70ms +step:129/1670 train_time:12603ms step_avg:97.70ms +step:130/1670 train_time:12699ms step_avg:97.69ms +step:131/1670 train_time:12794ms step_avg:97.67ms +step:132/1670 train_time:12888ms step_avg:97.64ms +step:133/1670 train_time:12983ms step_avg:97.62ms +step:134/1670 train_time:13078ms step_avg:97.60ms +step:135/1670 train_time:13172ms step_avg:97.57ms +step:136/1670 train_time:13267ms step_avg:97.55ms +step:137/1670 train_time:13364ms step_avg:97.55ms +step:138/1670 train_time:13462ms step_avg:97.55ms +step:139/1670 train_time:13560ms step_avg:97.55ms +step:140/1670 train_time:13656ms step_avg:97.54ms +step:141/1670 train_time:13752ms step_avg:97.53ms +step:142/1670 train_time:13846ms step_avg:97.51ms +step:143/1670 train_time:13941ms step_avg:97.49ms +step:144/1670 train_time:14036ms step_avg:97.47ms +step:145/1670 train_time:14130ms step_avg:97.45ms +step:146/1670 train_time:14225ms step_avg:97.43ms +step:147/1670 train_time:14322ms step_avg:97.43ms +step:148/1670 train_time:14418ms 
step_avg:97.42ms +step:149/1670 train_time:14515ms step_avg:97.42ms +step:150/1670 train_time:14611ms step_avg:97.41ms +step:151/1670 train_time:14706ms step_avg:97.39ms +step:152/1670 train_time:14802ms step_avg:97.38ms +step:153/1670 train_time:14897ms step_avg:97.37ms +step:154/1670 train_time:14993ms step_avg:97.35ms +step:155/1670 train_time:15087ms step_avg:97.34ms +step:156/1670 train_time:15182ms step_avg:97.32ms +step:157/1670 train_time:15277ms step_avg:97.31ms +step:158/1670 train_time:15373ms step_avg:97.30ms +step:159/1670 train_time:15469ms step_avg:97.29ms +step:160/1670 train_time:15565ms step_avg:97.28ms +step:161/1670 train_time:15662ms step_avg:97.28ms +step:162/1670 train_time:15759ms step_avg:97.28ms +step:163/1670 train_time:15854ms step_avg:97.26ms +step:164/1670 train_time:15948ms step_avg:97.25ms +step:165/1670 train_time:16044ms step_avg:97.23ms +step:166/1670 train_time:16140ms step_avg:97.23ms +step:167/1670 train_time:16234ms step_avg:97.21ms +step:168/1670 train_time:16330ms step_avg:97.20ms +step:169/1670 train_time:16425ms step_avg:97.19ms +step:170/1670 train_time:16522ms step_avg:97.19ms +step:171/1670 train_time:16618ms step_avg:97.18ms +step:172/1670 train_time:16713ms step_avg:97.17ms +step:173/1670 train_time:16809ms step_avg:97.16ms +step:174/1670 train_time:16904ms step_avg:97.15ms +step:175/1670 train_time:17000ms step_avg:97.14ms +step:176/1670 train_time:17095ms step_avg:97.13ms +step:177/1670 train_time:17190ms step_avg:97.12ms +step:178/1670 train_time:17285ms step_avg:97.11ms +step:179/1670 train_time:17381ms step_avg:97.10ms +step:180/1670 train_time:17478ms step_avg:97.10ms +step:181/1670 train_time:17573ms step_avg:97.09ms +step:182/1670 train_time:17669ms step_avg:97.08ms +step:183/1670 train_time:17765ms step_avg:97.08ms +step:184/1670 train_time:17862ms step_avg:97.07ms +step:185/1670 train_time:17957ms step_avg:97.06ms +step:186/1670 train_time:18052ms step_avg:97.05ms +step:187/1670 train_time:18147ms step_avg:97.04ms +step:188/1670 train_time:18242ms step_avg:97.03ms +step:189/1670 train_time:18338ms step_avg:97.03ms +step:190/1670 train_time:18434ms step_avg:97.02ms +step:191/1670 train_time:18529ms step_avg:97.01ms +step:192/1670 train_time:18625ms step_avg:97.00ms +step:193/1670 train_time:18721ms step_avg:97.00ms +step:194/1670 train_time:18818ms step_avg:97.00ms +step:195/1670 train_time:18914ms step_avg:96.99ms +step:196/1670 train_time:19009ms step_avg:96.99ms +step:197/1670 train_time:19104ms step_avg:96.98ms +step:198/1670 train_time:19200ms step_avg:96.97ms +step:199/1670 train_time:19295ms step_avg:96.96ms +step:200/1670 train_time:19391ms step_avg:96.95ms +step:201/1670 train_time:19486ms step_avg:96.95ms +step:202/1670 train_time:19582ms step_avg:96.94ms +step:203/1670 train_time:19677ms step_avg:96.93ms +step:204/1670 train_time:19774ms step_avg:96.93ms +step:205/1670 train_time:19869ms step_avg:96.92ms +step:206/1670 train_time:19965ms step_avg:96.92ms +step:207/1670 train_time:20061ms step_avg:96.91ms +step:208/1670 train_time:20157ms step_avg:96.91ms +step:209/1670 train_time:20252ms step_avg:96.90ms +step:210/1670 train_time:20347ms step_avg:96.89ms +step:211/1670 train_time:20442ms step_avg:96.88ms +step:212/1670 train_time:20539ms step_avg:96.88ms +step:213/1670 train_time:20842ms step_avg:97.85ms +step:214/1670 train_time:20947ms step_avg:97.88ms +step:215/1670 train_time:21042ms step_avg:97.87ms +step:216/1670 train_time:21137ms step_avg:97.85ms +step:217/1670 train_time:21231ms step_avg:97.84ms +step:218/1670 
train_time:21326ms step_avg:97.82ms +step:219/1670 train_time:21420ms step_avg:97.81ms +step:220/1670 train_time:21515ms step_avg:97.80ms +step:221/1670 train_time:21610ms step_avg:97.78ms +step:222/1670 train_time:21704ms step_avg:97.77ms +step:223/1670 train_time:21802ms step_avg:97.77ms +step:224/1670 train_time:21901ms step_avg:97.77ms +step:225/1670 train_time:21999ms step_avg:97.77ms +step:226/1670 train_time:22095ms step_avg:97.77ms +step:227/1670 train_time:22190ms step_avg:97.75ms +step:228/1670 train_time:22285ms step_avg:97.74ms +step:229/1670 train_time:22380ms step_avg:97.73ms +step:230/1670 train_time:22475ms step_avg:97.72ms +step:231/1670 train_time:22569ms step_avg:97.70ms +step:232/1670 train_time:22664ms step_avg:97.69ms +step:233/1670 train_time:22760ms step_avg:97.68ms +step:234/1670 train_time:22857ms step_avg:97.68ms +step:235/1670 train_time:22954ms step_avg:97.67ms +step:236/1670 train_time:23050ms step_avg:97.67ms +step:237/1670 train_time:23145ms step_avg:97.66ms +step:238/1670 train_time:23241ms step_avg:97.65ms +step:239/1670 train_time:23337ms step_avg:97.64ms +step:240/1670 train_time:23432ms step_avg:97.63ms +step:241/1670 train_time:23526ms step_avg:97.62ms +step:242/1670 train_time:23622ms step_avg:97.61ms +step:243/1670 train_time:23717ms step_avg:97.60ms +step:244/1670 train_time:23813ms step_avg:97.59ms +step:245/1670 train_time:23909ms step_avg:97.59ms +step:246/1670 train_time:24005ms step_avg:97.58ms +step:247/1670 train_time:24101ms step_avg:97.57ms +step:248/1670 train_time:24197ms step_avg:97.57ms +step:249/1670 train_time:24293ms step_avg:97.56ms +step:250/1670 train_time:24387ms step_avg:97.55ms +step:250/1670 val_loss:3.9656 train_time:24482ms step_avg:97.93ms +step:251/1670 train_time:24503ms step_avg:97.62ms +step:252/1670 train_time:24579ms step_avg:97.54ms +step:253/1670 train_time:24680ms step_avg:97.55ms +step:254/1670 train_time:24783ms step_avg:97.57ms +step:255/1670 train_time:24881ms step_avg:97.57ms +step:256/1670 train_time:24976ms step_avg:97.56ms +step:257/1670 train_time:25071ms step_avg:97.55ms +step:258/1670 train_time:25165ms step_avg:97.54ms +step:259/1670 train_time:25260ms step_avg:97.53ms +step:260/1670 train_time:25355ms step_avg:97.52ms +step:261/1670 train_time:25452ms step_avg:97.52ms +step:262/1670 train_time:25548ms step_avg:97.51ms +step:263/1670 train_time:25643ms step_avg:97.50ms +step:264/1670 train_time:25740ms step_avg:97.50ms +step:265/1670 train_time:25838ms step_avg:97.50ms +step:266/1670 train_time:25934ms step_avg:97.49ms +step:267/1670 train_time:26029ms step_avg:97.49ms +step:268/1670 train_time:26124ms step_avg:97.48ms +step:269/1670 train_time:26219ms step_avg:97.47ms +step:270/1670 train_time:26314ms step_avg:97.46ms +step:271/1670 train_time:26410ms step_avg:97.45ms +step:272/1670 train_time:26505ms step_avg:97.45ms +step:273/1670 train_time:26601ms step_avg:97.44ms +step:274/1670 train_time:26697ms step_avg:97.43ms +step:275/1670 train_time:26793ms step_avg:97.43ms +step:276/1670 train_time:26889ms step_avg:97.43ms +step:277/1670 train_time:26985ms step_avg:97.42ms +step:278/1670 train_time:27080ms step_avg:97.41ms +step:279/1670 train_time:27175ms step_avg:97.40ms +step:280/1670 train_time:27270ms step_avg:97.39ms +step:281/1670 train_time:27365ms step_avg:97.38ms +step:282/1670 train_time:27461ms step_avg:97.38ms +step:283/1670 train_time:27557ms step_avg:97.37ms +step:284/1670 train_time:27653ms step_avg:97.37ms +step:285/1670 train_time:27749ms step_avg:97.36ms +step:286/1670 train_time:27845ms 
step_avg:97.36ms +step:287/1670 train_time:27940ms step_avg:97.35ms +step:288/1670 train_time:28036ms step_avg:97.35ms +step:289/1670 train_time:28132ms step_avg:97.34ms +step:290/1670 train_time:28228ms step_avg:97.34ms +step:291/1670 train_time:28322ms step_avg:97.33ms +step:292/1670 train_time:28418ms step_avg:97.32ms +step:293/1670 train_time:28513ms step_avg:97.32ms +step:294/1670 train_time:28609ms step_avg:97.31ms +step:295/1670 train_time:28705ms step_avg:97.31ms +step:296/1670 train_time:28800ms step_avg:97.30ms +step:297/1670 train_time:28896ms step_avg:97.29ms +step:298/1670 train_time:28992ms step_avg:97.29ms +step:299/1670 train_time:29087ms step_avg:97.28ms +step:300/1670 train_time:29182ms step_avg:97.27ms +step:301/1670 train_time:29277ms step_avg:97.27ms +step:302/1670 train_time:29373ms step_avg:97.26ms +step:303/1670 train_time:29468ms step_avg:97.26ms +step:304/1670 train_time:29563ms step_avg:97.25ms +step:305/1670 train_time:29659ms step_avg:97.24ms +step:306/1670 train_time:29755ms step_avg:97.24ms +step:307/1670 train_time:29851ms step_avg:97.23ms +step:308/1670 train_time:29946ms step_avg:97.23ms +step:309/1670 train_time:30041ms step_avg:97.22ms +step:310/1670 train_time:30137ms step_avg:97.22ms +step:311/1670 train_time:30233ms step_avg:97.21ms +step:312/1670 train_time:30329ms step_avg:97.21ms +step:313/1670 train_time:30423ms step_avg:97.20ms +step:314/1670 train_time:30519ms step_avg:97.19ms +step:315/1670 train_time:30615ms step_avg:97.19ms +step:316/1670 train_time:30711ms step_avg:97.19ms +step:317/1670 train_time:30806ms step_avg:97.18ms +step:318/1670 train_time:30901ms step_avg:97.17ms +step:319/1670 train_time:30997ms step_avg:97.17ms +step:320/1670 train_time:31093ms step_avg:97.17ms +step:321/1670 train_time:31188ms step_avg:97.16ms +step:322/1670 train_time:31284ms step_avg:97.15ms +step:323/1670 train_time:31379ms step_avg:97.15ms +step:324/1670 train_time:31475ms step_avg:97.14ms +step:325/1670 train_time:31570ms step_avg:97.14ms +step:326/1670 train_time:31665ms step_avg:97.13ms +step:327/1670 train_time:31761ms step_avg:97.13ms +step:328/1670 train_time:31857ms step_avg:97.12ms +step:329/1670 train_time:31953ms step_avg:97.12ms +step:330/1670 train_time:32048ms step_avg:97.12ms +step:331/1670 train_time:32144ms step_avg:97.11ms +step:332/1670 train_time:32240ms step_avg:97.11ms +step:333/1670 train_time:32336ms step_avg:97.11ms +step:334/1670 train_time:32432ms step_avg:97.10ms +step:335/1670 train_time:32527ms step_avg:97.09ms +step:336/1670 train_time:32622ms step_avg:97.09ms +step:337/1670 train_time:32718ms step_avg:97.09ms +step:338/1670 train_time:32813ms step_avg:97.08ms +step:339/1670 train_time:32909ms step_avg:97.08ms +step:340/1670 train_time:33004ms step_avg:97.07ms +step:341/1670 train_time:33099ms step_avg:97.07ms +step:342/1670 train_time:33195ms step_avg:97.06ms +step:343/1670 train_time:33291ms step_avg:97.06ms +step:344/1670 train_time:33388ms step_avg:97.06ms +step:345/1670 train_time:33483ms step_avg:97.05ms +step:346/1670 train_time:33579ms step_avg:97.05ms +step:347/1670 train_time:33674ms step_avg:97.04ms +step:348/1670 train_time:33770ms step_avg:97.04ms +step:349/1670 train_time:33865ms step_avg:97.03ms +step:350/1670 train_time:33960ms step_avg:97.03ms +step:351/1670 train_time:34057ms step_avg:97.03ms +step:352/1670 train_time:34153ms step_avg:97.02ms +step:353/1670 train_time:34248ms step_avg:97.02ms +step:354/1670 train_time:34343ms step_avg:97.01ms +step:355/1670 train_time:34439ms step_avg:97.01ms +step:356/1670 
train_time:34535ms step_avg:97.01ms +step:357/1670 train_time:34631ms step_avg:97.00ms +step:358/1670 train_time:34726ms step_avg:97.00ms +step:359/1670 train_time:34822ms step_avg:97.00ms +step:360/1670 train_time:34918ms step_avg:96.99ms +step:361/1670 train_time:35014ms step_avg:96.99ms +step:362/1670 train_time:35109ms step_avg:96.99ms +step:363/1670 train_time:35204ms step_avg:96.98ms +step:364/1670 train_time:35300ms step_avg:96.98ms +step:365/1670 train_time:35396ms step_avg:96.97ms +step:366/1670 train_time:35492ms step_avg:96.97ms +step:367/1670 train_time:35588ms step_avg:96.97ms +step:368/1670 train_time:35683ms step_avg:96.96ms +step:369/1670 train_time:35778ms step_avg:96.96ms +step:370/1670 train_time:35874ms step_avg:96.96ms +step:371/1670 train_time:35971ms step_avg:96.96ms +step:372/1670 train_time:36066ms step_avg:96.95ms +step:373/1670 train_time:36161ms step_avg:96.95ms +step:374/1670 train_time:36256ms step_avg:96.94ms +step:375/1670 train_time:36352ms step_avg:96.94ms +step:375/1670 val_loss:3.8117 train_time:36447ms step_avg:97.19ms +step:376/1670 train_time:36468ms step_avg:96.99ms +step:377/1670 train_time:36549ms step_avg:96.95ms +step:378/1670 train_time:36651ms step_avg:96.96ms +step:379/1670 train_time:36747ms step_avg:96.96ms +step:380/1670 train_time:36842ms step_avg:96.95ms +step:381/1670 train_time:36937ms step_avg:96.95ms +step:382/1670 train_time:37032ms step_avg:96.94ms +step:383/1670 train_time:37127ms step_avg:96.94ms +step:384/1670 train_time:37223ms step_avg:96.93ms +step:385/1670 train_time:37317ms step_avg:96.93ms +step:386/1670 train_time:37412ms step_avg:96.92ms +step:387/1670 train_time:37510ms step_avg:96.93ms +step:388/1670 train_time:37609ms step_avg:96.93ms +step:389/1670 train_time:37706ms step_avg:96.93ms +step:390/1670 train_time:37802ms step_avg:96.93ms +step:391/1670 train_time:37898ms step_avg:96.92ms +step:392/1670 train_time:37992ms step_avg:96.92ms +step:393/1670 train_time:38088ms step_avg:96.92ms +step:394/1670 train_time:38183ms step_avg:96.91ms +step:395/1670 train_time:38277ms step_avg:96.90ms +step:396/1670 train_time:38373ms step_avg:96.90ms +step:397/1670 train_time:38470ms step_avg:96.90ms +step:398/1670 train_time:38566ms step_avg:96.90ms +step:399/1670 train_time:38663ms step_avg:96.90ms +step:400/1670 train_time:38759ms step_avg:96.90ms +step:401/1670 train_time:38854ms step_avg:96.89ms +step:402/1670 train_time:38949ms step_avg:96.89ms +step:403/1670 train_time:39045ms step_avg:96.89ms +step:404/1670 train_time:39140ms step_avg:96.88ms +step:405/1670 train_time:39235ms step_avg:96.88ms +step:406/1670 train_time:39330ms step_avg:96.87ms +step:407/1670 train_time:39426ms step_avg:96.87ms +step:408/1670 train_time:39522ms step_avg:96.87ms +step:409/1670 train_time:39618ms step_avg:96.86ms +step:410/1670 train_time:39713ms step_avg:96.86ms +step:411/1670 train_time:39810ms step_avg:96.86ms +step:412/1670 train_time:39905ms step_avg:96.86ms +step:413/1670 train_time:40001ms step_avg:96.85ms +step:414/1670 train_time:40096ms step_avg:96.85ms +step:415/1670 train_time:40191ms step_avg:96.85ms +step:416/1670 train_time:40286ms step_avg:96.84ms +step:417/1670 train_time:40382ms step_avg:96.84ms +step:418/1670 train_time:40478ms step_avg:96.84ms +step:419/1670 train_time:40573ms step_avg:96.83ms +step:420/1670 train_time:40669ms step_avg:96.83ms +step:421/1670 train_time:40765ms step_avg:96.83ms +step:422/1670 train_time:40862ms step_avg:96.83ms +step:423/1670 train_time:40957ms step_avg:96.83ms +step:424/1670 train_time:41052ms 
step_avg:96.82ms +step:425/1670 train_time:41347ms step_avg:97.29ms +step:426/1670 train_time:41488ms step_avg:97.39ms +step:427/1670 train_time:41582ms step_avg:97.38ms +step:428/1670 train_time:41677ms step_avg:97.38ms +step:429/1670 train_time:41771ms step_avg:97.37ms +step:430/1670 train_time:41865ms step_avg:97.36ms +step:431/1670 train_time:41960ms step_avg:97.36ms +step:432/1670 train_time:42055ms step_avg:97.35ms +step:433/1670 train_time:42149ms step_avg:97.34ms +step:434/1670 train_time:42244ms step_avg:97.34ms +step:435/1670 train_time:42344ms step_avg:97.34ms +step:436/1670 train_time:42444ms step_avg:97.35ms +step:437/1670 train_time:42542ms step_avg:97.35ms +step:438/1670 train_time:42638ms step_avg:97.35ms +step:439/1670 train_time:42733ms step_avg:97.34ms +step:440/1670 train_time:42828ms step_avg:97.34ms +step:441/1670 train_time:42923ms step_avg:97.33ms +step:442/1670 train_time:43018ms step_avg:97.33ms +step:443/1670 train_time:43112ms step_avg:97.32ms +step:444/1670 train_time:43207ms step_avg:97.31ms +step:445/1670 train_time:43304ms step_avg:97.31ms +step:446/1670 train_time:43400ms step_avg:97.31ms +step:447/1670 train_time:43496ms step_avg:97.31ms +step:448/1670 train_time:43592ms step_avg:97.30ms +step:449/1670 train_time:43688ms step_avg:97.30ms +step:450/1670 train_time:43784ms step_avg:97.30ms +step:451/1670 train_time:43879ms step_avg:97.29ms +step:452/1670 train_time:43975ms step_avg:97.29ms +step:453/1670 train_time:44069ms step_avg:97.28ms +step:454/1670 train_time:44163ms step_avg:97.28ms +step:455/1670 train_time:44258ms step_avg:97.27ms +step:456/1670 train_time:44354ms step_avg:97.27ms +step:457/1670 train_time:44451ms step_avg:97.27ms +step:458/1670 train_time:44548ms step_avg:97.27ms +step:459/1670 train_time:44644ms step_avg:97.26ms +step:460/1670 train_time:44742ms step_avg:97.26ms +step:461/1670 train_time:44837ms step_avg:97.26ms +step:462/1670 train_time:44932ms step_avg:97.26ms +step:463/1670 train_time:45028ms step_avg:97.25ms +step:464/1670 train_time:45123ms step_avg:97.25ms +step:465/1670 train_time:45218ms step_avg:97.24ms +step:466/1670 train_time:45314ms step_avg:97.24ms +step:467/1670 train_time:45409ms step_avg:97.24ms +step:468/1670 train_time:45506ms step_avg:97.23ms +step:469/1670 train_time:45602ms step_avg:97.23ms +step:470/1670 train_time:45698ms step_avg:97.23ms +step:471/1670 train_time:45793ms step_avg:97.23ms +step:472/1670 train_time:45889ms step_avg:97.22ms +step:473/1670 train_time:45985ms step_avg:97.22ms +step:474/1670 train_time:46080ms step_avg:97.22ms +step:475/1670 train_time:46176ms step_avg:97.21ms +step:476/1670 train_time:46271ms step_avg:97.21ms +step:477/1670 train_time:46367ms step_avg:97.20ms +step:478/1670 train_time:46463ms step_avg:97.20ms +step:479/1670 train_time:46559ms step_avg:97.20ms +step:480/1670 train_time:46654ms step_avg:97.20ms +step:481/1670 train_time:46750ms step_avg:97.19ms +step:482/1670 train_time:46846ms step_avg:97.19ms +step:483/1670 train_time:46943ms step_avg:97.19ms +step:484/1670 train_time:47039ms step_avg:97.19ms +step:485/1670 train_time:47134ms step_avg:97.18ms +step:486/1670 train_time:47229ms step_avg:97.18ms +step:487/1670 train_time:47325ms step_avg:97.18ms +step:488/1670 train_time:47421ms step_avg:97.17ms +step:489/1670 train_time:47517ms step_avg:97.17ms +step:490/1670 train_time:47613ms step_avg:97.17ms +step:491/1670 train_time:47708ms step_avg:97.17ms +step:492/1670 train_time:47804ms step_avg:97.16ms +step:493/1670 train_time:47900ms step_avg:97.16ms +step:494/1670 
train_time:47995ms step_avg:97.16ms +step:495/1670 train_time:48091ms step_avg:97.15ms +step:496/1670 train_time:48187ms step_avg:97.15ms +step:497/1670 train_time:48282ms step_avg:97.15ms +step:498/1670 train_time:48378ms step_avg:97.14ms +step:499/1670 train_time:48474ms step_avg:97.14ms +step:500/1670 train_time:48569ms step_avg:97.14ms +step:500/1670 val_loss:3.7096 train_time:48665ms step_avg:97.33ms +step:501/1670 train_time:48687ms step_avg:97.18ms +step:502/1670 train_time:48767ms step_avg:97.15ms +step:503/1670 train_time:48867ms step_avg:97.15ms +step:504/1670 train_time:48963ms step_avg:97.15ms +step:505/1670 train_time:49059ms step_avg:97.15ms +step:506/1670 train_time:49154ms step_avg:97.14ms +step:507/1670 train_time:49249ms step_avg:97.14ms +step:508/1670 train_time:49344ms step_avg:97.13ms +step:509/1670 train_time:49439ms step_avg:97.13ms +step:510/1670 train_time:49534ms step_avg:97.13ms +step:511/1670 train_time:49629ms step_avg:97.12ms +step:512/1670 train_time:49725ms step_avg:97.12ms +step:513/1670 train_time:49823ms step_avg:97.12ms +step:514/1670 train_time:49920ms step_avg:97.12ms +step:515/1670 train_time:50017ms step_avg:97.12ms +step:516/1670 train_time:50113ms step_avg:97.12ms +step:517/1670 train_time:50207ms step_avg:97.11ms +step:518/1670 train_time:50303ms step_avg:97.11ms +step:519/1670 train_time:50398ms step_avg:97.11ms +step:520/1670 train_time:50494ms step_avg:97.10ms +step:521/1670 train_time:50588ms step_avg:97.10ms +step:522/1670 train_time:50684ms step_avg:97.10ms +step:523/1670 train_time:50781ms step_avg:97.10ms +step:524/1670 train_time:50878ms step_avg:97.10ms +step:525/1670 train_time:50974ms step_avg:97.09ms +step:526/1670 train_time:51070ms step_avg:97.09ms +step:527/1670 train_time:51165ms step_avg:97.09ms +step:528/1670 train_time:51261ms step_avg:97.08ms +step:529/1670 train_time:51356ms step_avg:97.08ms +step:530/1670 train_time:51452ms step_avg:97.08ms +step:531/1670 train_time:51548ms step_avg:97.08ms +step:532/1670 train_time:51643ms step_avg:97.07ms +step:533/1670 train_time:51740ms step_avg:97.07ms +step:534/1670 train_time:51837ms step_avg:97.07ms +step:535/1670 train_time:51933ms step_avg:97.07ms +step:536/1670 train_time:52029ms step_avg:97.07ms +step:537/1670 train_time:52124ms step_avg:97.06ms +step:538/1670 train_time:52219ms step_avg:97.06ms +step:539/1670 train_time:52315ms step_avg:97.06ms +step:540/1670 train_time:52410ms step_avg:97.06ms +step:541/1670 train_time:52505ms step_avg:97.05ms +step:542/1670 train_time:52601ms step_avg:97.05ms +step:543/1670 train_time:52698ms step_avg:97.05ms +step:544/1670 train_time:52794ms step_avg:97.05ms +step:545/1670 train_time:52890ms step_avg:97.05ms +step:546/1670 train_time:52985ms step_avg:97.04ms +step:547/1670 train_time:53081ms step_avg:97.04ms +step:548/1670 train_time:53178ms step_avg:97.04ms +step:549/1670 train_time:53273ms step_avg:97.04ms +step:550/1670 train_time:53369ms step_avg:97.03ms +step:551/1670 train_time:53464ms step_avg:97.03ms +step:552/1670 train_time:53560ms step_avg:97.03ms +step:553/1670 train_time:53656ms step_avg:97.03ms +step:554/1670 train_time:53752ms step_avg:97.02ms +step:555/1670 train_time:53848ms step_avg:97.02ms +step:556/1670 train_time:53944ms step_avg:97.02ms +step:557/1670 train_time:54040ms step_avg:97.02ms +step:558/1670 train_time:54135ms step_avg:97.02ms +step:559/1670 train_time:54233ms step_avg:97.02ms +step:560/1670 train_time:54329ms step_avg:97.02ms +step:561/1670 train_time:54426ms step_avg:97.02ms +step:562/1670 train_time:54522ms 
step_avg:97.02ms +step:563/1670 train_time:54621ms step_avg:97.02ms +step:564/1670 train_time:54718ms step_avg:97.02ms +step:565/1670 train_time:54817ms step_avg:97.02ms +step:566/1670 train_time:54914ms step_avg:97.02ms +step:567/1670 train_time:55011ms step_avg:97.02ms +step:568/1670 train_time:55107ms step_avg:97.02ms +step:569/1670 train_time:55205ms step_avg:97.02ms +step:570/1670 train_time:55302ms step_avg:97.02ms +step:571/1670 train_time:55399ms step_avg:97.02ms +step:572/1670 train_time:55497ms step_avg:97.02ms +step:573/1670 train_time:55593ms step_avg:97.02ms +step:574/1670 train_time:55690ms step_avg:97.02ms +step:575/1670 train_time:55786ms step_avg:97.02ms +step:576/1670 train_time:55884ms step_avg:97.02ms +step:577/1670 train_time:55982ms step_avg:97.02ms +step:578/1670 train_time:56080ms step_avg:97.02ms +step:579/1670 train_time:56178ms step_avg:97.03ms +step:580/1670 train_time:56276ms step_avg:97.03ms +step:581/1670 train_time:56373ms step_avg:97.03ms +step:582/1670 train_time:56471ms step_avg:97.03ms +step:583/1670 train_time:56567ms step_avg:97.03ms +step:584/1670 train_time:56665ms step_avg:97.03ms +step:585/1670 train_time:56762ms step_avg:97.03ms +step:586/1670 train_time:56860ms step_avg:97.03ms +step:587/1670 train_time:56958ms step_avg:97.03ms +step:588/1670 train_time:57056ms step_avg:97.03ms +step:589/1670 train_time:57153ms step_avg:97.03ms +step:590/1670 train_time:57251ms step_avg:97.04ms +step:591/1670 train_time:57347ms step_avg:97.03ms +step:592/1670 train_time:57444ms step_avg:97.03ms +step:593/1670 train_time:57541ms step_avg:97.03ms +step:594/1670 train_time:57638ms step_avg:97.03ms +step:595/1670 train_time:57735ms step_avg:97.03ms +step:596/1670 train_time:57832ms step_avg:97.03ms +step:597/1670 train_time:57928ms step_avg:97.03ms +step:598/1670 train_time:58026ms step_avg:97.03ms +step:599/1670 train_time:58124ms step_avg:97.03ms +step:600/1670 train_time:58222ms step_avg:97.04ms +step:601/1670 train_time:58319ms step_avg:97.04ms +step:602/1670 train_time:58418ms step_avg:97.04ms +step:603/1670 train_time:58516ms step_avg:97.04ms +step:604/1670 train_time:58612ms step_avg:97.04ms +step:605/1670 train_time:58709ms step_avg:97.04ms +step:606/1670 train_time:58805ms step_avg:97.04ms +step:607/1670 train_time:58902ms step_avg:97.04ms +step:608/1670 train_time:59000ms step_avg:97.04ms +step:609/1670 train_time:59098ms step_avg:97.04ms +step:610/1670 train_time:59195ms step_avg:97.04ms +step:611/1670 train_time:59292ms step_avg:97.04ms +step:612/1670 train_time:59388ms step_avg:97.04ms +step:613/1670 train_time:59485ms step_avg:97.04ms +step:614/1670 train_time:59583ms step_avg:97.04ms +step:615/1670 train_time:59682ms step_avg:97.04ms +step:616/1670 train_time:59780ms step_avg:97.05ms +step:617/1670 train_time:59879ms step_avg:97.05ms +step:618/1670 train_time:59976ms step_avg:97.05ms +step:619/1670 train_time:60073ms step_avg:97.05ms +step:620/1670 train_time:60170ms step_avg:97.05ms +step:621/1670 train_time:60267ms step_avg:97.05ms +step:622/1670 train_time:60364ms step_avg:97.05ms +step:623/1670 train_time:60461ms step_avg:97.05ms +step:624/1670 train_time:60559ms step_avg:97.05ms +step:625/1670 train_time:60656ms step_avg:97.05ms +step:625/1670 val_loss:3.6099 train_time:60752ms step_avg:97.20ms +step:626/1670 train_time:60774ms step_avg:97.08ms +step:627/1670 train_time:60860ms step_avg:97.07ms +step:628/1670 train_time:60956ms step_avg:97.06ms +step:629/1670 train_time:61052ms step_avg:97.06ms +step:630/1670 train_time:61148ms step_avg:97.06ms 
+step:631/1670 train_time:61244ms step_avg:97.06ms +step:632/1670 train_time:61340ms step_avg:97.06ms +step:633/1670 train_time:61436ms step_avg:97.06ms +step:634/1670 train_time:61532ms step_avg:97.05ms +step:635/1670 train_time:61628ms step_avg:97.05ms +step:636/1670 train_time:61727ms step_avg:97.06ms +step:637/1670 train_time:61829ms step_avg:97.06ms +step:638/1670 train_time:61929ms step_avg:97.07ms +step:639/1670 train_time:62303ms step_avg:97.50ms +step:640/1670 train_time:62392ms step_avg:97.49ms +step:641/1670 train_time:62488ms step_avg:97.48ms +step:642/1670 train_time:62584ms step_avg:97.48ms +step:643/1670 train_time:62680ms step_avg:97.48ms +step:644/1670 train_time:62776ms step_avg:97.48ms +step:645/1670 train_time:62872ms step_avg:97.48ms +step:646/1670 train_time:62968ms step_avg:97.47ms +step:647/1670 train_time:63064ms step_avg:97.47ms +step:648/1670 train_time:63161ms step_avg:97.47ms +step:649/1670 train_time:63263ms step_avg:97.48ms +step:650/1670 train_time:63361ms step_avg:97.48ms +step:651/1670 train_time:63459ms step_avg:97.48ms +step:652/1670 train_time:63556ms step_avg:97.48ms +step:653/1670 train_time:63653ms step_avg:97.48ms +step:654/1670 train_time:63750ms step_avg:97.48ms +step:655/1670 train_time:63847ms step_avg:97.48ms +step:656/1670 train_time:63944ms step_avg:97.47ms +step:657/1670 train_time:64040ms step_avg:97.47ms +step:658/1670 train_time:64137ms step_avg:97.47ms +step:659/1670 train_time:64234ms step_avg:97.47ms +step:660/1670 train_time:64333ms step_avg:97.47ms +step:661/1670 train_time:64432ms step_avg:97.48ms +step:662/1670 train_time:64529ms step_avg:97.48ms +step:663/1670 train_time:64627ms step_avg:97.48ms +step:664/1670 train_time:64725ms step_avg:97.48ms +step:665/1670 train_time:64821ms step_avg:97.47ms +step:666/1670 train_time:64916ms step_avg:97.47ms +step:667/1670 train_time:65013ms step_avg:97.47ms +step:668/1670 train_time:65110ms step_avg:97.47ms +step:669/1670 train_time:65209ms step_avg:97.47ms +step:670/1670 train_time:65306ms step_avg:97.47ms +step:671/1670 train_time:65405ms step_avg:97.47ms +step:672/1670 train_time:65503ms step_avg:97.47ms +step:673/1670 train_time:65600ms step_avg:97.47ms +step:674/1670 train_time:65697ms step_avg:97.47ms +step:675/1670 train_time:65793ms step_avg:97.47ms +step:676/1670 train_time:65890ms step_avg:97.47ms +step:677/1670 train_time:65987ms step_avg:97.47ms +step:678/1670 train_time:66086ms step_avg:97.47ms +step:679/1670 train_time:66183ms step_avg:97.47ms +step:680/1670 train_time:66280ms step_avg:97.47ms +step:681/1670 train_time:66377ms step_avg:97.47ms +step:682/1670 train_time:66475ms step_avg:97.47ms +step:683/1670 train_time:66572ms step_avg:97.47ms +step:684/1670 train_time:66670ms step_avg:97.47ms +step:685/1670 train_time:66767ms step_avg:97.47ms +step:686/1670 train_time:66864ms step_avg:97.47ms +step:687/1670 train_time:66962ms step_avg:97.47ms +step:688/1670 train_time:67060ms step_avg:97.47ms +step:689/1670 train_time:67156ms step_avg:97.47ms +step:690/1670 train_time:67253ms step_avg:97.47ms +step:691/1670 train_time:67351ms step_avg:97.47ms +step:692/1670 train_time:67449ms step_avg:97.47ms +step:693/1670 train_time:67547ms step_avg:97.47ms +step:694/1670 train_time:67644ms step_avg:97.47ms +step:695/1670 train_time:67742ms step_avg:97.47ms +step:696/1670 train_time:67838ms step_avg:97.47ms +step:697/1670 train_time:67934ms step_avg:97.47ms +step:698/1670 train_time:68031ms step_avg:97.47ms +step:699/1670 train_time:68129ms step_avg:97.47ms +step:700/1670 train_time:68227ms 
step_avg:97.47ms +step:701/1670 train_time:68325ms step_avg:97.47ms +step:702/1670 train_time:68422ms step_avg:97.47ms +step:703/1670 train_time:68519ms step_avg:97.47ms +step:704/1670 train_time:68615ms step_avg:97.46ms +step:705/1670 train_time:68713ms step_avg:97.46ms +step:706/1670 train_time:68811ms step_avg:97.47ms +step:707/1670 train_time:68909ms step_avg:97.47ms +step:708/1670 train_time:69006ms step_avg:97.47ms +step:709/1670 train_time:69103ms step_avg:97.47ms +step:710/1670 train_time:69199ms step_avg:97.46ms +step:711/1670 train_time:69296ms step_avg:97.46ms +step:712/1670 train_time:69393ms step_avg:97.46ms +step:713/1670 train_time:69490ms step_avg:97.46ms +step:714/1670 train_time:69588ms step_avg:97.46ms +step:715/1670 train_time:69686ms step_avg:97.46ms +step:716/1670 train_time:69784ms step_avg:97.46ms +step:717/1670 train_time:69881ms step_avg:97.46ms +step:718/1670 train_time:69977ms step_avg:97.46ms +step:719/1670 train_time:70073ms step_avg:97.46ms +step:720/1670 train_time:70171ms step_avg:97.46ms +step:721/1670 train_time:70268ms step_avg:97.46ms +step:722/1670 train_time:70366ms step_avg:97.46ms +step:723/1670 train_time:70464ms step_avg:97.46ms +step:724/1670 train_time:70562ms step_avg:97.46ms +step:725/1670 train_time:70659ms step_avg:97.46ms +step:726/1670 train_time:70754ms step_avg:97.46ms +step:727/1670 train_time:70852ms step_avg:97.46ms +step:728/1670 train_time:70949ms step_avg:97.46ms +step:729/1670 train_time:71048ms step_avg:97.46ms +step:730/1670 train_time:71146ms step_avg:97.46ms +step:731/1670 train_time:71243ms step_avg:97.46ms +step:732/1670 train_time:71339ms step_avg:97.46ms +step:733/1670 train_time:71436ms step_avg:97.46ms +step:734/1670 train_time:71533ms step_avg:97.46ms +step:735/1670 train_time:71630ms step_avg:97.46ms +step:736/1670 train_time:71728ms step_avg:97.46ms +step:737/1670 train_time:71826ms step_avg:97.46ms +step:738/1670 train_time:71924ms step_avg:97.46ms +step:739/1670 train_time:72022ms step_avg:97.46ms +step:740/1670 train_time:72118ms step_avg:97.46ms +step:741/1670 train_time:72215ms step_avg:97.46ms +step:742/1670 train_time:72313ms step_avg:97.46ms +step:743/1670 train_time:72410ms step_avg:97.46ms +step:744/1670 train_time:72507ms step_avg:97.46ms +step:745/1670 train_time:72605ms step_avg:97.46ms +step:746/1670 train_time:72703ms step_avg:97.46ms +step:747/1670 train_time:72801ms step_avg:97.46ms +step:748/1670 train_time:72897ms step_avg:97.46ms +step:749/1670 train_time:72993ms step_avg:97.45ms +step:750/1670 train_time:73091ms step_avg:97.45ms +step:750/1670 val_loss:3.5575 train_time:73188ms step_avg:97.58ms +step:751/1670 train_time:73211ms step_avg:97.48ms +step:752/1670 train_time:73293ms step_avg:97.46ms +step:753/1670 train_time:73392ms step_avg:97.47ms +step:754/1670 train_time:73489ms step_avg:97.47ms +step:755/1670 train_time:73586ms step_avg:97.47ms +step:756/1670 train_time:73683ms step_avg:97.46ms +step:757/1670 train_time:73779ms step_avg:97.46ms +step:758/1670 train_time:73875ms step_avg:97.46ms +step:759/1670 train_time:73971ms step_avg:97.46ms +step:760/1670 train_time:74067ms step_avg:97.46ms +step:761/1670 train_time:74165ms step_avg:97.46ms +step:762/1670 train_time:74268ms step_avg:97.46ms +step:763/1670 train_time:74367ms step_avg:97.47ms +step:764/1670 train_time:74466ms step_avg:97.47ms +step:765/1670 train_time:74563ms step_avg:97.47ms +step:766/1670 train_time:74660ms step_avg:97.47ms +step:767/1670 train_time:74756ms step_avg:97.47ms +step:768/1670 train_time:74852ms step_avg:97.46ms 
+step:769/1670 train_time:74948ms step_avg:97.46ms +step:770/1670 train_time:75046ms step_avg:97.46ms +step:771/1670 train_time:75144ms step_avg:97.46ms +step:772/1670 train_time:75243ms step_avg:97.47ms +step:773/1670 train_time:75343ms step_avg:97.47ms +step:774/1670 train_time:75441ms step_avg:97.47ms +step:775/1670 train_time:75538ms step_avg:97.47ms +step:776/1670 train_time:75635ms step_avg:97.47ms +step:777/1670 train_time:75732ms step_avg:97.47ms +step:778/1670 train_time:75829ms step_avg:97.47ms +step:779/1670 train_time:75925ms step_avg:97.47ms +step:780/1670 train_time:76022ms step_avg:97.46ms +step:781/1670 train_time:76118ms step_avg:97.46ms +step:782/1670 train_time:76215ms step_avg:97.46ms +step:783/1670 train_time:76313ms step_avg:97.46ms +step:784/1670 train_time:76410ms step_avg:97.46ms +step:785/1670 train_time:76508ms step_avg:97.46ms +step:786/1670 train_time:76606ms step_avg:97.46ms +step:787/1670 train_time:76705ms step_avg:97.47ms +step:788/1670 train_time:76804ms step_avg:97.47ms +step:789/1670 train_time:76901ms step_avg:97.47ms +step:790/1670 train_time:76998ms step_avg:97.47ms +step:791/1670 train_time:77093ms step_avg:97.46ms +step:792/1670 train_time:77190ms step_avg:97.46ms +step:793/1670 train_time:77290ms step_avg:97.47ms +step:794/1670 train_time:77388ms step_avg:97.47ms +step:795/1670 train_time:77486ms step_avg:97.47ms +step:796/1670 train_time:77584ms step_avg:97.47ms +step:797/1670 train_time:77681ms step_avg:97.47ms +step:798/1670 train_time:77779ms step_avg:97.47ms +step:799/1670 train_time:77875ms step_avg:97.47ms +step:800/1670 train_time:77971ms step_avg:97.46ms +step:801/1670 train_time:78068ms step_avg:97.46ms +step:802/1670 train_time:78165ms step_avg:97.46ms +step:803/1670 train_time:78264ms step_avg:97.46ms +step:804/1670 train_time:78362ms step_avg:97.46ms +step:805/1670 train_time:78459ms step_avg:97.46ms +step:806/1670 train_time:78555ms step_avg:97.46ms +step:807/1670 train_time:78653ms step_avg:97.46ms +step:808/1670 train_time:78751ms step_avg:97.46ms +step:809/1670 train_time:78848ms step_avg:97.46ms +step:810/1670 train_time:78945ms step_avg:97.46ms +step:811/1670 train_time:79042ms step_avg:97.46ms +step:812/1670 train_time:79140ms step_avg:97.46ms +step:813/1670 train_time:79237ms step_avg:97.46ms +step:814/1670 train_time:79334ms step_avg:97.46ms +step:815/1670 train_time:79431ms step_avg:97.46ms +step:816/1670 train_time:79529ms step_avg:97.46ms +step:817/1670 train_time:79627ms step_avg:97.46ms +step:818/1670 train_time:79725ms step_avg:97.46ms +step:819/1670 train_time:79823ms step_avg:97.46ms +step:820/1670 train_time:79920ms step_avg:97.46ms +step:821/1670 train_time:80017ms step_avg:97.46ms +step:822/1670 train_time:80113ms step_avg:97.46ms +step:823/1670 train_time:80210ms step_avg:97.46ms +step:824/1670 train_time:80308ms step_avg:97.46ms +step:825/1670 train_time:80407ms step_avg:97.46ms +step:826/1670 train_time:80505ms step_avg:97.46ms +step:827/1670 train_time:80602ms step_avg:97.46ms +step:828/1670 train_time:80698ms step_avg:97.46ms +step:829/1670 train_time:80795ms step_avg:97.46ms +step:830/1670 train_time:80891ms step_avg:97.46ms +step:831/1670 train_time:80989ms step_avg:97.46ms +step:832/1670 train_time:81086ms step_avg:97.46ms +step:833/1670 train_time:81184ms step_avg:97.46ms +step:834/1670 train_time:81281ms step_avg:97.46ms +step:835/1670 train_time:81378ms step_avg:97.46ms +step:836/1670 train_time:81475ms step_avg:97.46ms +step:837/1670 train_time:81572ms step_avg:97.46ms +step:838/1670 train_time:81670ms 
step_avg:97.46ms +step:839/1670 train_time:81767ms step_avg:97.46ms +step:840/1670 train_time:81865ms step_avg:97.46ms +step:841/1670 train_time:81963ms step_avg:97.46ms +step:842/1670 train_time:82060ms step_avg:97.46ms +step:843/1670 train_time:82157ms step_avg:97.46ms +step:844/1670 train_time:82254ms step_avg:97.46ms +step:845/1670 train_time:82351ms step_avg:97.46ms +step:846/1670 train_time:82449ms step_avg:97.46ms +step:847/1670 train_time:82546ms step_avg:97.46ms +step:848/1670 train_time:82644ms step_avg:97.46ms +step:849/1670 train_time:82742ms step_avg:97.46ms +step:850/1670 train_time:82839ms step_avg:97.46ms +step:851/1670 train_time:83087ms step_avg:97.63ms +step:852/1670 train_time:83289ms step_avg:97.76ms +step:853/1670 train_time:83384ms step_avg:97.75ms +step:854/1670 train_time:83480ms step_avg:97.75ms +step:855/1670 train_time:83576ms step_avg:97.75ms +step:856/1670 train_time:83672ms step_avg:97.75ms +step:857/1670 train_time:83768ms step_avg:97.75ms +step:858/1670 train_time:83865ms step_avg:97.74ms +step:859/1670 train_time:83961ms step_avg:97.74ms +step:860/1670 train_time:84060ms step_avg:97.74ms +step:861/1670 train_time:84162ms step_avg:97.75ms +step:862/1670 train_time:84262ms step_avg:97.75ms +step:863/1670 train_time:84360ms step_avg:97.75ms +step:864/1670 train_time:84458ms step_avg:97.75ms +step:865/1670 train_time:84554ms step_avg:97.75ms +step:866/1670 train_time:84650ms step_avg:97.75ms +step:867/1670 train_time:84746ms step_avg:97.75ms +step:868/1670 train_time:84843ms step_avg:97.75ms +step:869/1670 train_time:84939ms step_avg:97.74ms +step:870/1670 train_time:85035ms step_avg:97.74ms +step:871/1670 train_time:85132ms step_avg:97.74ms +step:872/1670 train_time:85231ms step_avg:97.74ms +step:873/1670 train_time:85330ms step_avg:97.74ms +step:874/1670 train_time:85428ms step_avg:97.74ms +step:875/1670 train_time:85527ms step_avg:97.74ms +step:875/1670 val_loss:3.5179 train_time:85623ms step_avg:97.86ms +step:876/1670 train_time:85645ms step_avg:97.77ms +step:877/1670 train_time:85729ms step_avg:97.75ms +step:878/1670 train_time:85827ms step_avg:97.75ms +step:879/1670 train_time:85926ms step_avg:97.75ms +step:880/1670 train_time:86022ms step_avg:97.75ms +step:881/1670 train_time:86118ms step_avg:97.75ms +step:882/1670 train_time:86214ms step_avg:97.75ms +step:883/1670 train_time:86310ms step_avg:97.75ms +step:884/1670 train_time:86406ms step_avg:97.74ms +step:885/1670 train_time:86504ms step_avg:97.74ms +step:886/1670 train_time:86604ms step_avg:97.75ms +step:887/1670 train_time:86704ms step_avg:97.75ms +step:888/1670 train_time:86803ms step_avg:97.75ms +step:889/1670 train_time:86901ms step_avg:97.75ms +step:890/1670 train_time:86998ms step_avg:97.75ms +step:891/1670 train_time:87095ms step_avg:97.75ms +step:892/1670 train_time:87191ms step_avg:97.75ms +step:893/1670 train_time:87288ms step_avg:97.75ms +step:894/1670 train_time:87384ms step_avg:97.74ms +step:895/1670 train_time:87481ms step_avg:97.74ms +step:896/1670 train_time:87578ms step_avg:97.74ms +step:897/1670 train_time:87676ms step_avg:97.74ms +step:898/1670 train_time:87774ms step_avg:97.74ms +step:899/1670 train_time:87871ms step_avg:97.74ms +step:900/1670 train_time:87969ms step_avg:97.74ms +step:901/1670 train_time:88066ms step_avg:97.74ms +step:902/1670 train_time:88164ms step_avg:97.74ms +step:903/1670 train_time:88261ms step_avg:97.74ms +step:904/1670 train_time:88358ms step_avg:97.74ms +step:905/1670 train_time:88455ms step_avg:97.74ms +step:906/1670 train_time:88552ms step_avg:97.74ms 
+step:907/1670 train_time:88649ms step_avg:97.74ms +step:908/1670 train_time:88747ms step_avg:97.74ms +step:909/1670 train_time:88845ms step_avg:97.74ms +step:910/1670 train_time:88944ms step_avg:97.74ms +step:911/1670 train_time:89042ms step_avg:97.74ms +step:912/1670 train_time:89139ms step_avg:97.74ms +step:913/1670 train_time:89236ms step_avg:97.74ms +step:914/1670 train_time:89332ms step_avg:97.74ms +step:915/1670 train_time:89428ms step_avg:97.74ms +step:916/1670 train_time:89525ms step_avg:97.73ms +step:917/1670 train_time:89623ms step_avg:97.73ms +step:918/1670 train_time:89721ms step_avg:97.74ms +step:919/1670 train_time:89819ms step_avg:97.74ms +step:920/1670 train_time:89918ms step_avg:97.74ms +step:921/1670 train_time:90015ms step_avg:97.74ms +step:922/1670 train_time:90111ms step_avg:97.73ms +step:923/1670 train_time:90209ms step_avg:97.73ms +step:924/1670 train_time:90306ms step_avg:97.73ms +step:925/1670 train_time:90404ms step_avg:97.73ms +step:926/1670 train_time:90500ms step_avg:97.73ms +step:927/1670 train_time:90597ms step_avg:97.73ms +step:928/1670 train_time:90695ms step_avg:97.73ms +step:929/1670 train_time:90793ms step_avg:97.73ms +step:930/1670 train_time:90891ms step_avg:97.73ms +step:931/1670 train_time:90988ms step_avg:97.73ms +step:932/1670 train_time:91085ms step_avg:97.73ms +step:933/1670 train_time:91183ms step_avg:97.73ms +step:934/1670 train_time:91280ms step_avg:97.73ms +step:935/1670 train_time:91377ms step_avg:97.73ms +step:936/1670 train_time:91474ms step_avg:97.73ms +step:937/1670 train_time:91571ms step_avg:97.73ms +step:938/1670 train_time:91667ms step_avg:97.73ms +step:939/1670 train_time:91764ms step_avg:97.73ms +step:940/1670 train_time:91862ms step_avg:97.73ms +step:941/1670 train_time:91960ms step_avg:97.73ms +step:942/1670 train_time:92057ms step_avg:97.73ms +step:943/1670 train_time:92155ms step_avg:97.73ms +step:944/1670 train_time:92251ms step_avg:97.72ms +step:945/1670 train_time:92349ms step_avg:97.72ms +step:946/1670 train_time:92446ms step_avg:97.72ms +step:947/1670 train_time:92543ms step_avg:97.72ms +step:948/1670 train_time:92642ms step_avg:97.72ms +step:949/1670 train_time:92739ms step_avg:97.72ms +step:950/1670 train_time:92836ms step_avg:97.72ms +step:951/1670 train_time:92933ms step_avg:97.72ms +step:952/1670 train_time:93030ms step_avg:97.72ms +step:953/1670 train_time:93127ms step_avg:97.72ms +step:954/1670 train_time:93225ms step_avg:97.72ms +step:955/1670 train_time:93322ms step_avg:97.72ms +step:956/1670 train_time:93419ms step_avg:97.72ms +step:957/1670 train_time:93517ms step_avg:97.72ms +step:958/1670 train_time:93614ms step_avg:97.72ms +step:959/1670 train_time:93710ms step_avg:97.72ms +step:960/1670 train_time:93807ms step_avg:97.72ms +step:961/1670 train_time:93906ms step_avg:97.72ms +step:962/1670 train_time:94004ms step_avg:97.72ms +step:963/1670 train_time:94102ms step_avg:97.72ms +step:964/1670 train_time:94201ms step_avg:97.72ms +step:965/1670 train_time:94298ms step_avg:97.72ms +step:966/1670 train_time:94395ms step_avg:97.72ms +step:967/1670 train_time:94492ms step_avg:97.72ms +step:968/1670 train_time:94588ms step_avg:97.72ms +step:969/1670 train_time:94685ms step_avg:97.71ms +step:970/1670 train_time:94783ms step_avg:97.71ms +step:971/1670 train_time:94881ms step_avg:97.71ms +step:972/1670 train_time:94979ms step_avg:97.71ms +step:973/1670 train_time:95076ms step_avg:97.71ms +step:974/1670 train_time:95173ms step_avg:97.71ms +step:975/1670 train_time:95269ms step_avg:97.71ms +step:976/1670 train_time:95366ms 
step_avg:97.71ms +step:977/1670 train_time:95465ms step_avg:97.71ms +step:978/1670 train_time:95563ms step_avg:97.71ms +step:979/1670 train_time:95660ms step_avg:97.71ms +step:980/1670 train_time:95758ms step_avg:97.71ms +step:981/1670 train_time:95855ms step_avg:97.71ms +step:982/1670 train_time:95952ms step_avg:97.71ms +step:983/1670 train_time:96049ms step_avg:97.71ms +step:984/1670 train_time:96148ms step_avg:97.71ms +step:985/1670 train_time:96245ms step_avg:97.71ms +step:986/1670 train_time:96342ms step_avg:97.71ms +step:987/1670 train_time:96440ms step_avg:97.71ms +step:988/1670 train_time:96537ms step_avg:97.71ms +step:989/1670 train_time:96634ms step_avg:97.71ms +step:990/1670 train_time:96731ms step_avg:97.71ms +step:991/1670 train_time:96827ms step_avg:97.71ms +step:992/1670 train_time:96925ms step_avg:97.71ms +step:993/1670 train_time:97022ms step_avg:97.71ms +step:994/1670 train_time:97121ms step_avg:97.71ms +step:995/1670 train_time:97219ms step_avg:97.71ms +step:996/1670 train_time:97316ms step_avg:97.71ms +step:997/1670 train_time:97413ms step_avg:97.71ms +step:998/1670 train_time:97510ms step_avg:97.71ms +step:999/1670 train_time:97607ms step_avg:97.70ms +step:1000/1670 train_time:97704ms step_avg:97.70ms +step:1000/1670 val_loss:3.4737 train_time:97801ms step_avg:97.80ms +step:1001/1670 train_time:97823ms step_avg:97.73ms +step:1002/1670 train_time:97906ms step_avg:97.71ms +step:1003/1670 train_time:98008ms step_avg:97.71ms +step:1004/1670 train_time:98104ms step_avg:97.71ms +step:1005/1670 train_time:98201ms step_avg:97.71ms +step:1006/1670 train_time:98297ms step_avg:97.71ms +step:1007/1670 train_time:98393ms step_avg:97.71ms +step:1008/1670 train_time:98491ms step_avg:97.71ms +step:1009/1670 train_time:98585ms step_avg:97.71ms +step:1010/1670 train_time:98682ms step_avg:97.70ms +step:1011/1670 train_time:98780ms step_avg:97.71ms +step:1012/1670 train_time:98879ms step_avg:97.71ms +step:1013/1670 train_time:98980ms step_avg:97.71ms +step:1014/1670 train_time:99078ms step_avg:97.71ms +step:1015/1670 train_time:99176ms step_avg:97.71ms +step:1016/1670 train_time:99272ms step_avg:97.71ms +step:1017/1670 train_time:99369ms step_avg:97.71ms +step:1018/1670 train_time:99465ms step_avg:97.71ms +step:1019/1670 train_time:99561ms step_avg:97.70ms +step:1020/1670 train_time:99658ms step_avg:97.70ms +step:1021/1670 train_time:99755ms step_avg:97.70ms +step:1022/1670 train_time:99854ms step_avg:97.70ms +step:1023/1670 train_time:99953ms step_avg:97.71ms +step:1024/1670 train_time:100051ms step_avg:97.71ms +step:1025/1670 train_time:100148ms step_avg:97.71ms +step:1026/1670 train_time:100245ms step_avg:97.70ms +step:1027/1670 train_time:100341ms step_avg:97.70ms +step:1028/1670 train_time:100438ms step_avg:97.70ms +step:1029/1670 train_time:100535ms step_avg:97.70ms +step:1030/1670 train_time:100632ms step_avg:97.70ms +step:1031/1670 train_time:100728ms step_avg:97.70ms +step:1032/1670 train_time:100825ms step_avg:97.70ms +step:1033/1670 train_time:100923ms step_avg:97.70ms +step:1034/1670 train_time:101022ms step_avg:97.70ms +step:1035/1670 train_time:101121ms step_avg:97.70ms +step:1036/1670 train_time:101218ms step_avg:97.70ms +step:1037/1670 train_time:101316ms step_avg:97.70ms +step:1038/1670 train_time:101413ms step_avg:97.70ms +step:1039/1670 train_time:101510ms step_avg:97.70ms +step:1040/1670 train_time:101606ms step_avg:97.70ms +step:1041/1670 train_time:101703ms step_avg:97.70ms +step:1042/1670 train_time:101800ms step_avg:97.70ms +step:1043/1670 train_time:101899ms 
step_avg:97.70ms +step:1044/1670 train_time:101998ms step_avg:97.70ms +step:1045/1670 train_time:102096ms step_avg:97.70ms +step:1046/1670 train_time:102193ms step_avg:97.70ms +step:1047/1670 train_time:102290ms step_avg:97.70ms +step:1048/1670 train_time:102387ms step_avg:97.70ms +step:1049/1670 train_time:102483ms step_avg:97.70ms +step:1050/1670 train_time:102580ms step_avg:97.70ms +step:1051/1670 train_time:102677ms step_avg:97.69ms +step:1052/1670 train_time:102775ms step_avg:97.69ms +step:1053/1670 train_time:102873ms step_avg:97.70ms +step:1054/1670 train_time:102971ms step_avg:97.70ms +step:1055/1670 train_time:103068ms step_avg:97.70ms +step:1056/1670 train_time:103165ms step_avg:97.69ms +step:1057/1670 train_time:103263ms step_avg:97.69ms +step:1058/1670 train_time:103361ms step_avg:97.69ms +step:1059/1670 train_time:103458ms step_avg:97.69ms +step:1060/1670 train_time:103556ms step_avg:97.69ms +step:1061/1670 train_time:103653ms step_avg:97.69ms +step:1062/1670 train_time:103921ms step_avg:97.85ms +step:1063/1670 train_time:104001ms step_avg:97.84ms +step:1064/1670 train_time:104098ms step_avg:97.84ms +step:1065/1670 train_time:104195ms step_avg:97.84ms +step:1066/1670 train_time:104290ms step_avg:97.83ms +step:1067/1670 train_time:104386ms step_avg:97.83ms +step:1068/1670 train_time:104482ms step_avg:97.83ms +step:1069/1670 train_time:104578ms step_avg:97.83ms +step:1070/1670 train_time:104674ms step_avg:97.83ms +step:1071/1670 train_time:104771ms step_avg:97.83ms +step:1072/1670 train_time:104874ms step_avg:97.83ms +step:1073/1670 train_time:104974ms step_avg:97.83ms +step:1074/1670 train_time:105072ms step_avg:97.83ms +step:1075/1670 train_time:105169ms step_avg:97.83ms +step:1076/1670 train_time:105265ms step_avg:97.83ms +step:1077/1670 train_time:105362ms step_avg:97.83ms +step:1078/1670 train_time:105457ms step_avg:97.83ms +step:1079/1670 train_time:105554ms step_avg:97.83ms +step:1080/1670 train_time:105650ms step_avg:97.82ms +step:1081/1670 train_time:105746ms step_avg:97.82ms +step:1082/1670 train_time:105845ms step_avg:97.82ms +step:1083/1670 train_time:105944ms step_avg:97.82ms +step:1084/1670 train_time:106043ms step_avg:97.83ms +step:1085/1670 train_time:106141ms step_avg:97.83ms +step:1086/1670 train_time:106239ms step_avg:97.83ms +step:1087/1670 train_time:106337ms step_avg:97.83ms +step:1088/1670 train_time:106434ms step_avg:97.83ms +step:1089/1670 train_time:106531ms step_avg:97.82ms +step:1090/1670 train_time:106627ms step_avg:97.82ms +step:1091/1670 train_time:106723ms step_avg:97.82ms +step:1092/1670 train_time:106821ms step_avg:97.82ms +step:1093/1670 train_time:106919ms step_avg:97.82ms +step:1094/1670 train_time:107019ms step_avg:97.82ms +step:1095/1670 train_time:107118ms step_avg:97.82ms +step:1096/1670 train_time:107216ms step_avg:97.82ms +step:1097/1670 train_time:107314ms step_avg:97.82ms +step:1098/1670 train_time:107410ms step_avg:97.82ms +step:1099/1670 train_time:107506ms step_avg:97.82ms +step:1100/1670 train_time:107602ms step_avg:97.82ms +step:1101/1670 train_time:107700ms step_avg:97.82ms +step:1102/1670 train_time:107797ms step_avg:97.82ms +step:1103/1670 train_time:107895ms step_avg:97.82ms +step:1104/1670 train_time:107993ms step_avg:97.82ms +step:1105/1670 train_time:108090ms step_avg:97.82ms +step:1106/1670 train_time:108187ms step_avg:97.82ms +step:1107/1670 train_time:108285ms step_avg:97.82ms +step:1108/1670 train_time:108382ms step_avg:97.82ms +step:1109/1670 train_time:108480ms step_avg:97.82ms +step:1110/1670 train_time:108578ms 
step_avg:97.82ms +step:1111/1670 train_time:108675ms step_avg:97.82ms +step:1112/1670 train_time:108772ms step_avg:97.82ms +step:1113/1670 train_time:108868ms step_avg:97.82ms +step:1114/1670 train_time:108965ms step_avg:97.81ms +step:1115/1670 train_time:109063ms step_avg:97.81ms +step:1116/1670 train_time:109163ms step_avg:97.82ms +step:1117/1670 train_time:109262ms step_avg:97.82ms +step:1118/1670 train_time:109359ms step_avg:97.82ms +step:1119/1670 train_time:109457ms step_avg:97.82ms +step:1120/1670 train_time:109555ms step_avg:97.82ms +step:1121/1670 train_time:109653ms step_avg:97.82ms +step:1122/1670 train_time:109751ms step_avg:97.82ms +step:1123/1670 train_time:109848ms step_avg:97.82ms +step:1124/1670 train_time:109945ms step_avg:97.82ms +step:1125/1670 train_time:110043ms step_avg:97.82ms +step:1125/1670 val_loss:3.4212 train_time:110141ms step_avg:97.90ms +step:1126/1670 train_time:110163ms step_avg:97.84ms +step:1127/1670 train_time:110245ms step_avg:97.82ms +step:1128/1670 train_time:110342ms step_avg:97.82ms +step:1129/1670 train_time:110440ms step_avg:97.82ms +step:1130/1670 train_time:110537ms step_avg:97.82ms +step:1131/1670 train_time:110634ms step_avg:97.82ms +step:1132/1670 train_time:110730ms step_avg:97.82ms +step:1133/1670 train_time:110827ms step_avg:97.82ms +step:1134/1670 train_time:110924ms step_avg:97.82ms +step:1135/1670 train_time:111022ms step_avg:97.82ms +step:1136/1670 train_time:111125ms step_avg:97.82ms +step:1137/1670 train_time:111225ms step_avg:97.82ms +step:1138/1670 train_time:111323ms step_avg:97.82ms +step:1139/1670 train_time:111422ms step_avg:97.82ms +step:1140/1670 train_time:111519ms step_avg:97.82ms +step:1141/1670 train_time:111617ms step_avg:97.82ms +step:1142/1670 train_time:111713ms step_avg:97.82ms +step:1143/1670 train_time:111810ms step_avg:97.82ms +step:1144/1670 train_time:111907ms step_avg:97.82ms +step:1145/1670 train_time:112004ms step_avg:97.82ms +step:1146/1670 train_time:112102ms step_avg:97.82ms +step:1147/1670 train_time:112202ms step_avg:97.82ms +step:1148/1670 train_time:112301ms step_avg:97.82ms +step:1149/1670 train_time:112401ms step_avg:97.83ms +step:1150/1670 train_time:112499ms step_avg:97.82ms +step:1151/1670 train_time:112597ms step_avg:97.82ms +step:1152/1670 train_time:112694ms step_avg:97.82ms +step:1153/1670 train_time:112792ms step_avg:97.82ms +step:1154/1670 train_time:112889ms step_avg:97.82ms +step:1155/1670 train_time:112986ms step_avg:97.82ms +step:1156/1670 train_time:113085ms step_avg:97.82ms +step:1157/1670 train_time:113183ms step_avg:97.82ms +step:1158/1670 train_time:113282ms step_avg:97.83ms +step:1159/1670 train_time:113381ms step_avg:97.83ms +step:1160/1670 train_time:113479ms step_avg:97.83ms +step:1161/1670 train_time:113577ms step_avg:97.83ms +step:1162/1670 train_time:113675ms step_avg:97.83ms +step:1163/1670 train_time:113773ms step_avg:97.83ms +step:1164/1670 train_time:113871ms step_avg:97.83ms +step:1165/1670 train_time:113968ms step_avg:97.83ms +step:1166/1670 train_time:114066ms step_avg:97.83ms +step:1167/1670 train_time:114163ms step_avg:97.83ms +step:1168/1670 train_time:114261ms step_avg:97.83ms +step:1169/1670 train_time:114360ms step_avg:97.83ms +step:1170/1670 train_time:114459ms step_avg:97.83ms +step:1171/1670 train_time:114557ms step_avg:97.83ms +step:1172/1670 train_time:114655ms step_avg:97.83ms +step:1173/1670 train_time:114753ms step_avg:97.83ms +step:1174/1670 train_time:114850ms step_avg:97.83ms +step:1175/1670 train_time:114947ms step_avg:97.83ms +step:1176/1670 
train_time:115045ms step_avg:97.83ms +step:1177/1670 train_time:115143ms step_avg:97.83ms +step:1178/1670 train_time:115242ms step_avg:97.83ms +step:1179/1670 train_time:115341ms step_avg:97.83ms +step:1180/1670 train_time:115439ms step_avg:97.83ms +step:1181/1670 train_time:115537ms step_avg:97.83ms +step:1182/1670 train_time:115635ms step_avg:97.83ms +step:1183/1670 train_time:115734ms step_avg:97.83ms +step:1184/1670 train_time:115832ms step_avg:97.83ms +step:1185/1670 train_time:115929ms step_avg:97.83ms +step:1186/1670 train_time:116027ms step_avg:97.83ms +step:1187/1670 train_time:116125ms step_avg:97.83ms +step:1188/1670 train_time:116223ms step_avg:97.83ms +step:1189/1670 train_time:116321ms step_avg:97.83ms +step:1190/1670 train_time:116419ms step_avg:97.83ms +step:1191/1670 train_time:116517ms step_avg:97.83ms +step:1192/1670 train_time:116615ms step_avg:97.83ms +step:1193/1670 train_time:116713ms step_avg:97.83ms +step:1194/1670 train_time:116811ms step_avg:97.83ms +step:1195/1670 train_time:116909ms step_avg:97.83ms +step:1196/1670 train_time:117006ms step_avg:97.83ms +step:1197/1670 train_time:117104ms step_avg:97.83ms +step:1198/1670 train_time:117202ms step_avg:97.83ms +step:1199/1670 train_time:117299ms step_avg:97.83ms +step:1200/1670 train_time:117397ms step_avg:97.83ms +step:1201/1670 train_time:117494ms step_avg:97.83ms +step:1202/1670 train_time:117593ms step_avg:97.83ms +step:1203/1670 train_time:117691ms step_avg:97.83ms +step:1204/1670 train_time:117789ms step_avg:97.83ms +step:1205/1670 train_time:117887ms step_avg:97.83ms +step:1206/1670 train_time:117984ms step_avg:97.83ms +step:1207/1670 train_time:118082ms step_avg:97.83ms +step:1208/1670 train_time:118180ms step_avg:97.83ms +step:1209/1670 train_time:118279ms step_avg:97.83ms +step:1210/1670 train_time:118377ms step_avg:97.83ms +step:1211/1670 train_time:118474ms step_avg:97.83ms +step:1212/1670 train_time:118571ms step_avg:97.83ms +step:1213/1670 train_time:118670ms step_avg:97.83ms +step:1214/1670 train_time:118767ms step_avg:97.83ms +step:1215/1670 train_time:118865ms step_avg:97.83ms +step:1216/1670 train_time:118963ms step_avg:97.83ms +step:1217/1670 train_time:119061ms step_avg:97.83ms +step:1218/1670 train_time:119160ms step_avg:97.83ms +step:1219/1670 train_time:119259ms step_avg:97.83ms +step:1220/1670 train_time:119357ms step_avg:97.83ms +step:1221/1670 train_time:119454ms step_avg:97.83ms +step:1222/1670 train_time:119551ms step_avg:97.83ms +step:1223/1670 train_time:119649ms step_avg:97.83ms +step:1224/1670 train_time:119747ms step_avg:97.83ms +step:1225/1670 train_time:119846ms step_avg:97.83ms +step:1226/1670 train_time:119944ms step_avg:97.83ms +step:1227/1670 train_time:120042ms step_avg:97.83ms +step:1228/1670 train_time:120140ms step_avg:97.83ms +step:1229/1670 train_time:120238ms step_avg:97.83ms +step:1230/1670 train_time:120337ms step_avg:97.84ms +step:1231/1670 train_time:120435ms step_avg:97.84ms +step:1232/1670 train_time:120534ms step_avg:97.84ms +step:1233/1670 train_time:120632ms step_avg:97.84ms +step:1234/1670 train_time:120730ms step_avg:97.84ms +step:1235/1670 train_time:120830ms step_avg:97.84ms +step:1236/1670 train_time:120927ms step_avg:97.84ms +step:1237/1670 train_time:121025ms step_avg:97.84ms +step:1238/1670 train_time:121122ms step_avg:97.84ms +step:1239/1670 train_time:121221ms step_avg:97.84ms +step:1240/1670 train_time:121318ms step_avg:97.84ms +step:1241/1670 train_time:121416ms step_avg:97.84ms +step:1242/1670 train_time:121514ms step_avg:97.84ms +step:1243/1670 
train_time:121612ms step_avg:97.84ms +step:1244/1670 train_time:121711ms step_avg:97.84ms +step:1245/1670 train_time:121809ms step_avg:97.84ms +step:1246/1670 train_time:121907ms step_avg:97.84ms +step:1247/1670 train_time:122004ms step_avg:97.84ms +step:1248/1670 train_time:122101ms step_avg:97.84ms +step:1249/1670 train_time:122199ms step_avg:97.84ms +step:1250/1670 train_time:122297ms step_avg:97.84ms +step:1250/1670 val_loss:3.3783 train_time:122395ms step_avg:97.92ms +step:1251/1670 train_time:122416ms step_avg:97.85ms +step:1252/1670 train_time:122502ms step_avg:97.85ms +step:1253/1670 train_time:122604ms step_avg:97.85ms +step:1254/1670 train_time:122703ms step_avg:97.85ms +step:1255/1670 train_time:122801ms step_avg:97.85ms +step:1256/1670 train_time:122898ms step_avg:97.85ms +step:1257/1670 train_time:122996ms step_avg:97.85ms +step:1258/1670 train_time:123092ms step_avg:97.85ms +step:1259/1670 train_time:123189ms step_avg:97.85ms +step:1260/1670 train_time:123285ms step_avg:97.85ms +step:1261/1670 train_time:123384ms step_avg:97.85ms +step:1262/1670 train_time:123483ms step_avg:97.85ms +step:1263/1670 train_time:123587ms step_avg:97.85ms +step:1264/1670 train_time:123686ms step_avg:97.85ms +step:1265/1670 train_time:123784ms step_avg:97.85ms +step:1266/1670 train_time:123881ms step_avg:97.85ms +step:1267/1670 train_time:123979ms step_avg:97.85ms +step:1268/1670 train_time:124077ms step_avg:97.85ms +step:1269/1670 train_time:124174ms step_avg:97.85ms +step:1270/1670 train_time:124271ms step_avg:97.85ms +step:1271/1670 train_time:124369ms step_avg:97.85ms +step:1272/1670 train_time:124468ms step_avg:97.85ms +step:1273/1670 train_time:124567ms step_avg:97.85ms +step:1274/1670 train_time:124951ms step_avg:98.08ms +step:1275/1670 train_time:125025ms step_avg:98.06ms +step:1276/1670 train_time:125122ms step_avg:98.06ms +step:1277/1670 train_time:125218ms step_avg:98.06ms +step:1278/1670 train_time:125315ms step_avg:98.06ms +step:1279/1670 train_time:125412ms step_avg:98.05ms +step:1280/1670 train_time:125509ms step_avg:98.05ms +step:1281/1670 train_time:125605ms step_avg:98.05ms +step:1282/1670 train_time:125702ms step_avg:98.05ms +step:1283/1670 train_time:125799ms step_avg:98.05ms +step:1284/1670 train_time:125900ms step_avg:98.05ms +step:1285/1670 train_time:126005ms step_avg:98.06ms +step:1286/1670 train_time:126104ms step_avg:98.06ms +step:1287/1670 train_time:126201ms step_avg:98.06ms +step:1288/1670 train_time:126300ms step_avg:98.06ms +step:1289/1670 train_time:126398ms step_avg:98.06ms +step:1290/1670 train_time:126495ms step_avg:98.06ms +step:1291/1670 train_time:126593ms step_avg:98.06ms +step:1292/1670 train_time:126690ms step_avg:98.06ms +step:1293/1670 train_time:126787ms step_avg:98.06ms +step:1294/1670 train_time:126886ms step_avg:98.06ms +step:1295/1670 train_time:126986ms step_avg:98.06ms +step:1296/1670 train_time:127086ms step_avg:98.06ms +step:1297/1670 train_time:127183ms step_avg:98.06ms +step:1298/1670 train_time:127281ms step_avg:98.06ms +step:1299/1670 train_time:127379ms step_avg:98.06ms +step:1300/1670 train_time:127477ms step_avg:98.06ms +step:1301/1670 train_time:127575ms step_avg:98.06ms +step:1302/1670 train_time:127672ms step_avg:98.06ms +step:1303/1670 train_time:127769ms step_avg:98.06ms +step:1304/1670 train_time:127867ms step_avg:98.06ms +step:1305/1670 train_time:127966ms step_avg:98.06ms +step:1306/1670 train_time:128065ms step_avg:98.06ms +step:1307/1670 train_time:128162ms step_avg:98.06ms +step:1308/1670 train_time:128261ms step_avg:98.06ms 
+step:1309/1670 train_time:128359ms step_avg:98.06ms +step:1310/1670 train_time:128458ms step_avg:98.06ms +step:1311/1670 train_time:128557ms step_avg:98.06ms +step:1312/1670 train_time:128654ms step_avg:98.06ms +step:1313/1670 train_time:128754ms step_avg:98.06ms +step:1314/1670 train_time:128854ms step_avg:98.06ms +step:1315/1670 train_time:128956ms step_avg:98.07ms +step:1316/1670 train_time:129055ms step_avg:98.07ms +step:1317/1670 train_time:129154ms step_avg:98.07ms +step:1318/1670 train_time:129253ms step_avg:98.07ms +step:1319/1670 train_time:129351ms step_avg:98.07ms +step:1320/1670 train_time:129447ms step_avg:98.07ms +step:1321/1670 train_time:129544ms step_avg:98.06ms +step:1322/1670 train_time:129641ms step_avg:98.06ms +step:1323/1670 train_time:129740ms step_avg:98.06ms +step:1324/1670 train_time:129839ms step_avg:98.07ms +step:1325/1670 train_time:129939ms step_avg:98.07ms +step:1326/1670 train_time:130039ms step_avg:98.07ms +step:1327/1670 train_time:130138ms step_avg:98.07ms +step:1328/1670 train_time:130237ms step_avg:98.07ms +step:1329/1670 train_time:130336ms step_avg:98.07ms +step:1330/1670 train_time:130435ms step_avg:98.07ms +step:1331/1670 train_time:130533ms step_avg:98.07ms +step:1332/1670 train_time:130632ms step_avg:98.07ms +step:1333/1670 train_time:130728ms step_avg:98.07ms +step:1334/1670 train_time:130825ms step_avg:98.07ms +step:1335/1670 train_time:130924ms step_avg:98.07ms +step:1336/1670 train_time:131022ms step_avg:98.07ms +step:1337/1670 train_time:131121ms step_avg:98.07ms +step:1338/1670 train_time:131220ms step_avg:98.07ms +step:1339/1670 train_time:131319ms step_avg:98.07ms +step:1340/1670 train_time:131416ms step_avg:98.07ms +step:1341/1670 train_time:131515ms step_avg:98.07ms +step:1342/1670 train_time:131612ms step_avg:98.07ms +step:1343/1670 train_time:131711ms step_avg:98.07ms +step:1344/1670 train_time:131808ms step_avg:98.07ms +step:1345/1670 train_time:131905ms step_avg:98.07ms +step:1346/1670 train_time:132003ms step_avg:98.07ms +step:1347/1670 train_time:132102ms step_avg:98.07ms +step:1348/1670 train_time:132201ms step_avg:98.07ms +step:1349/1670 train_time:132300ms step_avg:98.07ms +step:1350/1670 train_time:132399ms step_avg:98.07ms +step:1351/1670 train_time:132497ms step_avg:98.07ms +step:1352/1670 train_time:132595ms step_avg:98.07ms +step:1353/1670 train_time:132694ms step_avg:98.07ms +step:1354/1670 train_time:132793ms step_avg:98.07ms +step:1355/1670 train_time:132892ms step_avg:98.08ms +step:1356/1670 train_time:132991ms step_avg:98.08ms +step:1357/1670 train_time:133089ms step_avg:98.08ms +step:1358/1670 train_time:133187ms step_avg:98.08ms +step:1359/1670 train_time:133285ms step_avg:98.08ms +step:1360/1670 train_time:133383ms step_avg:98.08ms +step:1361/1670 train_time:133481ms step_avg:98.08ms +step:1362/1670 train_time:133579ms step_avg:98.08ms +step:1363/1670 train_time:133678ms step_avg:98.08ms +step:1364/1670 train_time:133777ms step_avg:98.08ms +step:1365/1670 train_time:133876ms step_avg:98.08ms +step:1366/1670 train_time:133974ms step_avg:98.08ms +step:1367/1670 train_time:134073ms step_avg:98.08ms +step:1368/1670 train_time:134174ms step_avg:98.08ms +step:1369/1670 train_time:134273ms step_avg:98.08ms +step:1370/1670 train_time:134371ms step_avg:98.08ms +step:1371/1670 train_time:134469ms step_avg:98.08ms +step:1372/1670 train_time:134566ms step_avg:98.08ms +step:1373/1670 train_time:134664ms step_avg:98.08ms +step:1374/1670 train_time:134762ms step_avg:98.08ms +step:1375/1670 train_time:134860ms step_avg:98.08ms 
+step:1375/1670 val_loss:3.3416 train_time:134958ms step_avg:98.15ms +step:1376/1670 train_time:134979ms step_avg:98.10ms +step:1377/1670 train_time:135063ms step_avg:98.09ms +step:1378/1670 train_time:135166ms step_avg:98.09ms +step:1379/1670 train_time:135267ms step_avg:98.09ms +step:1380/1670 train_time:135365ms step_avg:98.09ms +step:1381/1670 train_time:135462ms step_avg:98.09ms +step:1382/1670 train_time:135559ms step_avg:98.09ms +step:1383/1670 train_time:135657ms step_avg:98.09ms +step:1384/1670 train_time:135755ms step_avg:98.09ms +step:1385/1670 train_time:135851ms step_avg:98.09ms +step:1386/1670 train_time:135949ms step_avg:98.09ms +step:1387/1670 train_time:136050ms step_avg:98.09ms +step:1388/1670 train_time:136149ms step_avg:98.09ms +step:1389/1670 train_time:136248ms step_avg:98.09ms +step:1390/1670 train_time:136347ms step_avg:98.09ms +step:1391/1670 train_time:136445ms step_avg:98.09ms +step:1392/1670 train_time:136543ms step_avg:98.09ms +step:1393/1670 train_time:136641ms step_avg:98.09ms +step:1394/1670 train_time:136739ms step_avg:98.09ms +step:1395/1670 train_time:136837ms step_avg:98.09ms +step:1396/1670 train_time:136935ms step_avg:98.09ms +step:1397/1670 train_time:137034ms step_avg:98.09ms +step:1398/1670 train_time:137133ms step_avg:98.09ms +step:1399/1670 train_time:137231ms step_avg:98.09ms +step:1400/1670 train_time:137329ms step_avg:98.09ms +step:1401/1670 train_time:137426ms step_avg:98.09ms +step:1402/1670 train_time:137523ms step_avg:98.09ms +step:1403/1670 train_time:137621ms step_avg:98.09ms +step:1404/1670 train_time:137719ms step_avg:98.09ms +step:1405/1670 train_time:137817ms step_avg:98.09ms +step:1406/1670 train_time:137915ms step_avg:98.09ms +step:1407/1670 train_time:138013ms step_avg:98.09ms +step:1408/1670 train_time:138111ms step_avg:98.09ms +step:1409/1670 train_time:138209ms step_avg:98.09ms +step:1410/1670 train_time:138307ms step_avg:98.09ms +step:1411/1670 train_time:138405ms step_avg:98.09ms +step:1412/1670 train_time:138502ms step_avg:98.09ms +step:1413/1670 train_time:138600ms step_avg:98.09ms +step:1414/1670 train_time:138697ms step_avg:98.09ms +step:1415/1670 train_time:138795ms step_avg:98.09ms +step:1416/1670 train_time:138893ms step_avg:98.09ms +step:1417/1670 train_time:138991ms step_avg:98.09ms +step:1418/1670 train_time:139089ms step_avg:98.09ms +step:1419/1670 train_time:139188ms step_avg:98.09ms +step:1420/1670 train_time:139286ms step_avg:98.09ms +step:1421/1670 train_time:139383ms step_avg:98.09ms +step:1422/1670 train_time:139481ms step_avg:98.09ms +step:1423/1670 train_time:139579ms step_avg:98.09ms +step:1424/1670 train_time:139676ms step_avg:98.09ms +step:1425/1670 train_time:139774ms step_avg:98.09ms +step:1426/1670 train_time:139872ms step_avg:98.09ms +step:1427/1670 train_time:139969ms step_avg:98.09ms +step:1428/1670 train_time:140067ms step_avg:98.09ms +step:1429/1670 train_time:140167ms step_avg:98.09ms +step:1430/1670 train_time:140266ms step_avg:98.09ms +step:1431/1670 train_time:140364ms step_avg:98.09ms +step:1432/1670 train_time:140462ms step_avg:98.09ms +step:1433/1670 train_time:140561ms step_avg:98.09ms +step:1434/1670 train_time:140658ms step_avg:98.09ms +step:1435/1670 train_time:140756ms step_avg:98.09ms +step:1436/1670 train_time:140855ms step_avg:98.09ms +step:1437/1670 train_time:140954ms step_avg:98.09ms +step:1438/1670 train_time:141051ms step_avg:98.09ms +step:1439/1670 train_time:141148ms step_avg:98.09ms +step:1440/1670 train_time:141247ms step_avg:98.09ms +step:1441/1670 train_time:141345ms 
step_avg:98.09ms +step:1442/1670 train_time:141444ms step_avg:98.09ms +step:1443/1670 train_time:141542ms step_avg:98.09ms +step:1444/1670 train_time:141640ms step_avg:98.09ms +step:1445/1670 train_time:141739ms step_avg:98.09ms +step:1446/1670 train_time:141838ms step_avg:98.09ms +step:1447/1670 train_time:141937ms step_avg:98.09ms +step:1448/1670 train_time:142036ms step_avg:98.09ms +step:1449/1670 train_time:142134ms step_avg:98.09ms +step:1450/1670 train_time:142231ms step_avg:98.09ms +step:1451/1670 train_time:142328ms step_avg:98.09ms +step:1452/1670 train_time:142425ms step_avg:98.09ms +step:1453/1670 train_time:142523ms step_avg:98.09ms +step:1454/1670 train_time:142621ms step_avg:98.09ms +step:1455/1670 train_time:142718ms step_avg:98.09ms +step:1456/1670 train_time:142817ms step_avg:98.09ms +step:1457/1670 train_time:142915ms step_avg:98.09ms +step:1458/1670 train_time:143012ms step_avg:98.09ms +step:1459/1670 train_time:143110ms step_avg:98.09ms +step:1460/1670 train_time:143208ms step_avg:98.09ms +step:1461/1670 train_time:143306ms step_avg:98.09ms +step:1462/1670 train_time:143404ms step_avg:98.09ms +step:1463/1670 train_time:143502ms step_avg:98.09ms +step:1464/1670 train_time:143600ms step_avg:98.09ms +step:1465/1670 train_time:143697ms step_avg:98.09ms +step:1466/1670 train_time:143795ms step_avg:98.09ms +step:1467/1670 train_time:143893ms step_avg:98.09ms +step:1468/1670 train_time:143991ms step_avg:98.09ms +step:1469/1670 train_time:144090ms step_avg:98.09ms +step:1470/1670 train_time:144187ms step_avg:98.09ms +step:1471/1670 train_time:144285ms step_avg:98.09ms +step:1472/1670 train_time:144384ms step_avg:98.09ms +step:1473/1670 train_time:144483ms step_avg:98.09ms +step:1474/1670 train_time:144581ms step_avg:98.09ms +step:1475/1670 train_time:144679ms step_avg:98.09ms +step:1476/1670 train_time:144777ms step_avg:98.09ms +step:1477/1670 train_time:144876ms step_avg:98.09ms +step:1478/1670 train_time:144974ms step_avg:98.09ms +step:1479/1670 train_time:145072ms step_avg:98.09ms +step:1480/1670 train_time:145169ms step_avg:98.09ms +step:1481/1670 train_time:145268ms step_avg:98.09ms +step:1482/1670 train_time:145365ms step_avg:98.09ms +step:1483/1670 train_time:145464ms step_avg:98.09ms +step:1484/1670 train_time:145561ms step_avg:98.09ms +step:1485/1670 train_time:145838ms step_avg:98.21ms +step:1486/1670 train_time:146020ms step_avg:98.26ms +step:1487/1670 train_time:146116ms step_avg:98.26ms +step:1488/1670 train_time:146213ms step_avg:98.26ms +step:1489/1670 train_time:146309ms step_avg:98.26ms +step:1490/1670 train_time:146407ms step_avg:98.26ms +step:1491/1670 train_time:146504ms step_avg:98.26ms +step:1492/1670 train_time:146601ms step_avg:98.26ms +step:1493/1670 train_time:146698ms step_avg:98.26ms +step:1494/1670 train_time:146796ms step_avg:98.26ms +step:1495/1670 train_time:146894ms step_avg:98.26ms +step:1496/1670 train_time:146997ms step_avg:98.26ms +step:1497/1670 train_time:147098ms step_avg:98.26ms +step:1498/1670 train_time:147197ms step_avg:98.26ms +step:1499/1670 train_time:147294ms step_avg:98.26ms +step:1500/1670 train_time:147391ms step_avg:98.26ms +step:1500/1670 val_loss:3.3089 train_time:147487ms step_avg:98.32ms +step:1501/1670 train_time:147508ms step_avg:98.27ms +step:1502/1670 train_time:147591ms step_avg:98.26ms +step:1503/1670 train_time:147690ms step_avg:98.26ms +step:1504/1670 train_time:147787ms step_avg:98.26ms +step:1505/1670 train_time:147884ms step_avg:98.26ms +step:1506/1670 train_time:147981ms step_avg:98.26ms +step:1507/1670 
train_time:148078ms step_avg:98.26ms +step:1508/1670 train_time:148176ms step_avg:98.26ms +step:1509/1670 train_time:148273ms step_avg:98.26ms +step:1510/1670 train_time:148371ms step_avg:98.26ms +step:1511/1670 train_time:148469ms step_avg:98.26ms +step:1512/1670 train_time:148572ms step_avg:98.26ms +step:1513/1670 train_time:148671ms step_avg:98.26ms +step:1514/1670 train_time:148769ms step_avg:98.26ms +step:1515/1670 train_time:148867ms step_avg:98.26ms +step:1516/1670 train_time:148964ms step_avg:98.26ms +step:1517/1670 train_time:149061ms step_avg:98.26ms +step:1518/1670 train_time:149159ms step_avg:98.26ms +step:1519/1670 train_time:149257ms step_avg:98.26ms +step:1520/1670 train_time:149355ms step_avg:98.26ms +step:1521/1670 train_time:149455ms step_avg:98.26ms +step:1522/1670 train_time:149555ms step_avg:98.26ms +step:1523/1670 train_time:149654ms step_avg:98.26ms +step:1524/1670 train_time:149752ms step_avg:98.26ms +step:1525/1670 train_time:149849ms step_avg:98.26ms +step:1526/1670 train_time:149946ms step_avg:98.26ms +step:1527/1670 train_time:150044ms step_avg:98.26ms +step:1528/1670 train_time:150142ms step_avg:98.26ms +step:1529/1670 train_time:150239ms step_avg:98.26ms +step:1530/1670 train_time:150336ms step_avg:98.26ms +step:1531/1670 train_time:150435ms step_avg:98.26ms +step:1532/1670 train_time:150534ms step_avg:98.26ms +step:1533/1670 train_time:150633ms step_avg:98.26ms +step:1534/1670 train_time:150731ms step_avg:98.26ms +step:1535/1670 train_time:150829ms step_avg:98.26ms +step:1536/1670 train_time:150926ms step_avg:98.26ms +step:1537/1670 train_time:151024ms step_avg:98.26ms +step:1538/1670 train_time:151121ms step_avg:98.26ms +step:1539/1670 train_time:151220ms step_avg:98.26ms +step:1540/1670 train_time:151318ms step_avg:98.26ms +step:1541/1670 train_time:151416ms step_avg:98.26ms +step:1542/1670 train_time:151515ms step_avg:98.26ms +step:1543/1670 train_time:151615ms step_avg:98.26ms +step:1544/1670 train_time:151713ms step_avg:98.26ms +step:1545/1670 train_time:151811ms step_avg:98.26ms +step:1546/1670 train_time:151908ms step_avg:98.26ms +step:1547/1670 train_time:152004ms step_avg:98.26ms +step:1548/1670 train_time:152103ms step_avg:98.26ms +step:1549/1670 train_time:152202ms step_avg:98.26ms +step:1550/1670 train_time:152301ms step_avg:98.26ms +step:1551/1670 train_time:152399ms step_avg:98.26ms +step:1552/1670 train_time:152499ms step_avg:98.26ms +step:1553/1670 train_time:152599ms step_avg:98.26ms +step:1554/1670 train_time:152698ms step_avg:98.26ms +step:1555/1670 train_time:152798ms step_avg:98.26ms +step:1556/1670 train_time:152896ms step_avg:98.26ms +step:1557/1670 train_time:152994ms step_avg:98.26ms +step:1558/1670 train_time:153092ms step_avg:98.26ms +step:1559/1670 train_time:153189ms step_avg:98.26ms +step:1560/1670 train_time:153287ms step_avg:98.26ms +step:1561/1670 train_time:153384ms step_avg:98.26ms +step:1562/1670 train_time:153483ms step_avg:98.26ms +step:1563/1670 train_time:153583ms step_avg:98.26ms +step:1564/1670 train_time:153685ms step_avg:98.26ms +step:1565/1670 train_time:153784ms step_avg:98.26ms +step:1566/1670 train_time:153884ms step_avg:98.27ms +step:1567/1670 train_time:153982ms step_avg:98.27ms +step:1568/1670 train_time:154081ms step_avg:98.27ms +step:1569/1670 train_time:154180ms step_avg:98.27ms +step:1570/1670 train_time:154278ms step_avg:98.27ms +step:1571/1670 train_time:154375ms step_avg:98.27ms +step:1572/1670 train_time:154473ms step_avg:98.27ms +step:1573/1670 train_time:154570ms step_avg:98.26ms +step:1574/1670 
train_time:154668ms step_avg:98.26ms +step:1575/1670 train_time:154766ms step_avg:98.26ms +step:1576/1670 train_time:154865ms step_avg:98.26ms +step:1577/1670 train_time:154964ms step_avg:98.27ms +step:1578/1670 train_time:155065ms step_avg:98.27ms +step:1579/1670 train_time:155164ms step_avg:98.27ms +step:1580/1670 train_time:155263ms step_avg:98.27ms +step:1581/1670 train_time:155361ms step_avg:98.27ms +step:1582/1670 train_time:155460ms step_avg:98.27ms +step:1583/1670 train_time:155560ms step_avg:98.27ms +step:1584/1670 train_time:155658ms step_avg:98.27ms +step:1585/1670 train_time:155755ms step_avg:98.27ms +step:1586/1670 train_time:155853ms step_avg:98.27ms +step:1587/1670 train_time:155951ms step_avg:98.27ms +step:1588/1670 train_time:156048ms step_avg:98.27ms +step:1589/1670 train_time:156146ms step_avg:98.27ms +step:1590/1670 train_time:156245ms step_avg:98.27ms +step:1591/1670 train_time:156343ms step_avg:98.27ms +step:1592/1670 train_time:156442ms step_avg:98.27ms +step:1593/1670 train_time:156542ms step_avg:98.27ms +step:1594/1670 train_time:156643ms step_avg:98.27ms +step:1595/1670 train_time:156743ms step_avg:98.27ms +step:1596/1670 train_time:156843ms step_avg:98.27ms +step:1597/1670 train_time:156941ms step_avg:98.27ms +step:1598/1670 train_time:157040ms step_avg:98.27ms +step:1599/1670 train_time:157138ms step_avg:98.27ms +step:1600/1670 train_time:157237ms step_avg:98.27ms +step:1601/1670 train_time:157334ms step_avg:98.27ms +step:1602/1670 train_time:157431ms step_avg:98.27ms +step:1603/1670 train_time:157529ms step_avg:98.27ms +step:1604/1670 train_time:157628ms step_avg:98.27ms +step:1605/1670 train_time:157726ms step_avg:98.27ms +step:1606/1670 train_time:157825ms step_avg:98.27ms +step:1607/1670 train_time:157924ms step_avg:98.27ms +step:1608/1670 train_time:158022ms step_avg:98.27ms +step:1609/1670 train_time:158121ms step_avg:98.27ms +step:1610/1670 train_time:158220ms step_avg:98.27ms +step:1611/1670 train_time:158318ms step_avg:98.27ms +step:1612/1670 train_time:158417ms step_avg:98.27ms +step:1613/1670 train_time:158514ms step_avg:98.27ms +step:1614/1670 train_time:158612ms step_avg:98.27ms +step:1615/1670 train_time:158710ms step_avg:98.27ms +step:1616/1670 train_time:158808ms step_avg:98.27ms +step:1617/1670 train_time:158906ms step_avg:98.27ms +step:1618/1670 train_time:159005ms step_avg:98.27ms +step:1619/1670 train_time:159104ms step_avg:98.27ms +step:1620/1670 train_time:159202ms step_avg:98.27ms +step:1621/1670 train_time:159301ms step_avg:98.27ms +step:1622/1670 train_time:159401ms step_avg:98.27ms +step:1623/1670 train_time:159500ms step_avg:98.28ms +step:1624/1670 train_time:159601ms step_avg:98.28ms +step:1625/1670 train_time:159700ms step_avg:98.28ms +step:1625/1670 val_loss:3.2825 train_time:159800ms step_avg:98.34ms +step:1626/1670 train_time:159821ms step_avg:98.29ms +step:1627/1670 train_time:159906ms step_avg:98.28ms +step:1628/1670 train_time:160009ms step_avg:98.29ms +step:1629/1670 train_time:160108ms step_avg:98.29ms +step:1630/1670 train_time:160205ms step_avg:98.29ms +step:1631/1670 train_time:160302ms step_avg:98.28ms +step:1632/1670 train_time:160399ms step_avg:98.28ms +step:1633/1670 train_time:160495ms step_avg:98.28ms +step:1634/1670 train_time:160592ms step_avg:98.28ms +step:1635/1670 train_time:160690ms step_avg:98.28ms +step:1636/1670 train_time:160791ms step_avg:98.28ms +step:1637/1670 train_time:160893ms step_avg:98.29ms +step:1638/1670 train_time:160994ms step_avg:98.29ms +step:1639/1670 train_time:161093ms step_avg:98.29ms 
+step:1640/1670 train_time:161192ms step_avg:98.29ms +step:1641/1670 train_time:161290ms step_avg:98.29ms +step:1642/1670 train_time:161389ms step_avg:98.29ms +step:1643/1670 train_time:161487ms step_avg:98.29ms +step:1644/1670 train_time:161585ms step_avg:98.29ms +step:1645/1670 train_time:161682ms step_avg:98.29ms +step:1646/1670 train_time:161781ms step_avg:98.29ms +step:1647/1670 train_time:161880ms step_avg:98.29ms +step:1648/1670 train_time:161980ms step_avg:98.29ms +step:1649/1670 train_time:162081ms step_avg:98.29ms +step:1650/1670 train_time:162181ms step_avg:98.29ms +step:1651/1670 train_time:162278ms step_avg:98.29ms +step:1652/1670 train_time:162376ms step_avg:98.29ms +step:1653/1670 train_time:162474ms step_avg:98.29ms +step:1654/1670 train_time:162571ms step_avg:98.29ms +step:1655/1670 train_time:162670ms step_avg:98.29ms +step:1656/1670 train_time:162767ms step_avg:98.29ms +step:1657/1670 train_time:162866ms step_avg:98.29ms +step:1658/1670 train_time:162966ms step_avg:98.29ms +step:1659/1670 train_time:163067ms step_avg:98.29ms +step:1660/1670 train_time:163166ms step_avg:98.29ms +step:1661/1670 train_time:163264ms step_avg:98.29ms +step:1662/1670 train_time:163363ms step_avg:98.29ms +step:1663/1670 train_time:163460ms step_avg:98.29ms +step:1664/1670 train_time:163558ms step_avg:98.29ms +step:1665/1670 train_time:163656ms step_avg:98.29ms +step:1666/1670 train_time:163752ms step_avg:98.29ms +step:1667/1670 train_time:163850ms step_avg:98.29ms +step:1668/1670 train_time:163948ms step_avg:98.29ms +step:1669/1670 train_time:164047ms step_avg:98.29ms +step:1670/1670 train_time:164147ms step_avg:98.29ms +step:1670/1670 val_loss:3.2747 train_time:164244ms step_avg:98.35ms +peak memory allocated: 34000 MiB reserved: 49296 MiB diff --git a/records/090525_SkipMLPBlocks/comparison_e501e1e9-39fa-473b-bded-39427f349f37.txt b/records/090525_SkipMLPBlocks/comparison_e501e1e9-39fa-473b-bded-39427f349f37.txt new file mode 100644 index 000000000..64e30d506 --- /dev/null +++ b/records/090525_SkipMLPBlocks/comparison_e501e1e9-39fa-473b-bded-39427f349f37.txt @@ -0,0 +1,2815 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + 
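# note: x_f8 = x / x_s and w_f8 = w / w_s are the float8 operands; the
+ # per-tensor scale_a=x_s, scale_b=w_s factors below make torch._scaled_mm
+ # rescale the low-precision product, so out approximates x @ w.T in bfloat16. +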
scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, 
a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we 
use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral 
norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
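+ # Sharding: within each same-shape param group, parameter base_i + rank is
+ # owned by this rank. Grads are reduce-scattered (averaged) into the owned
+ # slots, the momentum + Newton-Schulz update runs locally, and the refreshed
+ # weights are all-gathered back to every rank, with async futures overlapping
+ # communication and compute.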
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
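# Adam moments: exp_avg is an EMA of the gradient, exp_avg_sq an EMA of its
+ # square; with bias1 = 1 - beta1**t and bias2 = 1 - beta2**t, the update below
+ # is p <- p - lr * (sqrt(bias2) / bias1) * exp_avg / (sqrt(exp_avg_sq) + eps). +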
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: 
int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of index {idx}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + +def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning of Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # the last sequence was read one token long to cover the _targets offset; trim it back
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * new_grad_accum_steps) == 0, "Num tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
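+
+# For reference, the shard layout _load_data_shard expects, as a minimal writer
+# sketch (hypothetical helper, not part of this run; assumes token ids fit in uint16):
+def _write_data_shard(file: Path, tokens: np.ndarray):
+    header = np.zeros(256, dtype=np.int32) # 256 int32 header = 1024 bytes, matching f.seek(256 * 4) above
+    header[0] = 20240520    # magic number
+    header[1] = 1           # version
+    header[2] = len(tokens) # claimed token count
+    with file.open("wb") as f:
+        f.write(header.tobytes())
+        f.write(tokens.astype(np.uint16).tobytes())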
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr(step: int):
+    x = step / args.num_iterations
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    return lr
+
+def get_ws(step: int):
+    x = step / (1 + args.num_iterations) # +1 keeps x < 1 even on the final call at step == num_iterations
+    assert 0 <= x < 1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return args.ws_schedule[ws_idx]
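+
+# For reference, a worked pass over the two schedules (values computed from the
+# constants above, rounded): get_lr stays at 1.0 through step 918 (x < 0.55), then
+# decays linearly toward 0.1 -- get_lr(1000) ~= 0.902, get_lr(1669) ~= 0.101.
+# get_ws evaluates int(3 * step / 1671), so the window grows 3 -> 7 at step 557
+# and 7 -> 11 at step 1114; with block_size=128 the long block mask spans
+# 384 -> 896 -> 1408 tokens.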
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 30
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+for step in range(warmup_steps):
+    inputs, targets, cum_seqlens = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, cum_seqlens, ws).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
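+
+# For reference, a minimal sketch of the loader's .send() protocol noted in
+# distributed_data_generator (never exercised in this run; the helper name is
+# illustrative only, and the generator must already have yielded once):
+def _example_rebatch(loader, new_num_tokens: int, new_max_seq_len: int, new_grad_accum_steps: int):
+    # .send() delivers this tuple to `new_params = yield (...)` inside the
+    # generator, which re-checks divisibility and then yields the next batch.
+    return loader.send((new_num_tokens, new_max_seq_len, new_grad_accum_steps))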
+
+########################################
+#        Training and validation       #
+########################################
+
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+training_time_ms = 0
+# start the clock
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+# begin training
+train_steps = args.num_iterations
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+    ws = get_ws(step)
+
+    # --------------- VALIDATION SECTION -----------------
+    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        # stop the clock
+        torch.cuda.synchronize()
+        training_time_ms += 1000 * (time.perf_counter() - t0)
+        model.eval()
+        assert args.val_tokens % args.val_batch_size == 0
+        val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size
+        val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False)
+        val_loss = 0
+        with torch.no_grad():
+            for _ in range(val_steps):
+                inputs, targets, cum_seqlens = next(val_loader)
+                val_loss += model(inputs, targets, cum_seqlens, ws)
+        val_loss /= val_steps
+        del val_loader
+        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
+        print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True)
+        model.train()
+        # start the clock again
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    if last_step:
+        if master_process and args.save_checkpoint:
+            log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
+            os.makedirs(f"logs/{run_id}", exist_ok=True)
+            torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt")
+        # the last step only has the validation loop, so break to avoid training
+        break
+
+    # --------------- TRAINING SECTION -----------------
+    for _ in range(grad_accum_steps):
+        inputs, targets, cum_seqlens = next(train_loader)
+        model(inputs, targets, cum_seqlens, ws).backward()
+    # set optimization hyperparameters
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * get_lr(step)
+    for group in optimizer2.param_groups:
+        frac = min(step / 300, 1) # momentum warmup for muon
+        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+    # step the optimizers
+    for opt in optimizers:
+        opt.step()
+    # null the gradients
+    model.zero_grad(set_to_none=True)
+    # logging
+    approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0)
+    print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True)
+
+print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+dist.destroy_process_group()
+
+====================================================================================================
+Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]
+Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6
+Running Triton version 3.4.0
+Fri Sep 5 15:32:36 2025
++-----------------------------------------------------------------------------------------+
+| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 |
+|-----------------------------------------+------------------------+----------------------+
+| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
+| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
+| | | MIG M.
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 129W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 44C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 123W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 118W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 44C P0 131W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 67814 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 67815 C /usr/bin/python3 610MiB | +| 0 N/A N/A 67816 C /usr/bin/python3 610MiB | +| 0 N/A N/A 67817 C /usr/bin/python3 610MiB | +| 0 N/A N/A 67818 C /usr/bin/python3 610MiB | +| 0 N/A N/A 67819 C /usr/bin/python3 610MiB | +| 0 N/A N/A 67820 C /usr/bin/python3 610MiB | +| 0 N/A N/A 67821 C /usr/bin/python3 610MiB | +| 1 N/A N/A 67815 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 67816 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 67817 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 67818 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 67819 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 67820 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 67821 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:357ms step_avg:357.26ms +step:2/1670 train_time:378ms step_avg:189.13ms +step:3/1670 train_time:451ms step_avg:150.28ms +step:4/1670 train_time:545ms step_avg:136.24ms +step:5/1670 train_time:641ms step_avg:128.13ms +step:6/1670 train_time:735ms step_avg:122.58ms +step:7/1670 train_time:830ms step_avg:118.59ms +step:8/1670 
train_time:925ms step_avg:115.65ms +step:9/1670 train_time:1021ms step_avg:113.42ms +step:10/1670 train_time:1116ms step_avg:111.61ms +step:11/1670 train_time:1213ms step_avg:110.25ms +step:12/1670 train_time:1309ms step_avg:109.12ms +step:13/1670 train_time:1407ms step_avg:108.21ms +step:14/1670 train_time:1504ms step_avg:107.40ms +step:15/1670 train_time:1601ms step_avg:106.73ms +step:16/1670 train_time:1697ms step_avg:106.07ms +step:17/1670 train_time:1793ms step_avg:105.45ms +step:18/1670 train_time:1888ms step_avg:104.87ms +step:19/1670 train_time:1983ms step_avg:104.36ms +step:20/1670 train_time:2078ms step_avg:103.91ms +step:21/1670 train_time:2175ms step_avg:103.56ms +step:22/1670 train_time:2270ms step_avg:103.19ms +step:23/1670 train_time:2367ms step_avg:102.91ms +step:24/1670 train_time:2463ms step_avg:102.64ms +step:25/1670 train_time:2560ms step_avg:102.41ms +step:26/1670 train_time:2657ms step_avg:102.18ms +step:27/1670 train_time:2754ms step_avg:101.99ms +step:28/1670 train_time:2850ms step_avg:101.79ms +step:29/1670 train_time:2946ms step_avg:101.57ms +step:30/1670 train_time:3042ms step_avg:101.39ms +step:31/1670 train_time:3137ms step_avg:101.20ms +step:32/1670 train_time:3234ms step_avg:101.06ms +step:33/1670 train_time:3330ms step_avg:100.90ms +step:34/1670 train_time:3426ms step_avg:100.76ms +step:35/1670 train_time:3522ms step_avg:100.62ms +step:36/1670 train_time:3618ms step_avg:100.51ms +step:37/1670 train_time:3716ms step_avg:100.43ms +step:38/1670 train_time:3812ms step_avg:100.33ms +step:39/1670 train_time:3908ms step_avg:100.21ms +step:40/1670 train_time:4003ms step_avg:100.08ms +step:41/1670 train_time:4099ms step_avg:99.98ms +step:42/1670 train_time:4196ms step_avg:99.90ms +step:43/1670 train_time:4292ms step_avg:99.80ms +step:44/1670 train_time:4387ms step_avg:99.70ms +step:45/1670 train_time:4484ms step_avg:99.63ms +step:46/1670 train_time:4580ms step_avg:99.56ms +step:47/1670 train_time:4676ms step_avg:99.49ms +step:48/1670 train_time:4773ms step_avg:99.43ms +step:49/1670 train_time:4869ms step_avg:99.37ms +step:50/1670 train_time:4965ms step_avg:99.30ms +step:51/1670 train_time:5061ms step_avg:99.24ms +step:52/1670 train_time:5157ms step_avg:99.17ms +step:53/1670 train_time:5253ms step_avg:99.11ms +step:54/1670 train_time:5348ms step_avg:99.04ms +step:55/1670 train_time:5444ms step_avg:98.98ms +step:56/1670 train_time:5541ms step_avg:98.94ms +step:57/1670 train_time:5638ms step_avg:98.91ms +step:58/1670 train_time:5735ms step_avg:98.88ms +step:59/1670 train_time:5832ms step_avg:98.84ms +step:60/1670 train_time:5927ms step_avg:98.79ms +step:61/1670 train_time:6022ms step_avg:98.73ms +step:62/1670 train_time:6119ms step_avg:98.70ms +step:63/1670 train_time:6216ms step_avg:98.66ms +step:64/1670 train_time:6312ms step_avg:98.62ms +step:65/1670 train_time:6407ms step_avg:98.58ms +step:66/1670 train_time:6503ms step_avg:98.53ms +step:67/1670 train_time:6600ms step_avg:98.51ms +step:68/1670 train_time:6696ms step_avg:98.48ms +step:69/1670 train_time:6793ms step_avg:98.45ms +step:70/1670 train_time:6888ms step_avg:98.41ms +step:71/1670 train_time:6984ms step_avg:98.36ms +step:72/1670 train_time:7080ms step_avg:98.33ms +step:73/1670 train_time:7176ms step_avg:98.30ms +step:74/1670 train_time:7272ms step_avg:98.27ms +step:75/1670 train_time:7368ms step_avg:98.24ms +step:76/1670 train_time:7465ms step_avg:98.22ms +step:77/1670 train_time:7561ms step_avg:98.19ms +step:78/1670 train_time:7657ms step_avg:98.17ms +step:79/1670 train_time:7753ms step_avg:98.14ms 
+step:80/1670 train_time:7850ms step_avg:98.12ms +step:81/1670 train_time:7946ms step_avg:98.10ms +step:82/1670 train_time:8041ms step_avg:98.06ms +step:83/1670 train_time:8137ms step_avg:98.04ms +step:84/1670 train_time:8233ms step_avg:98.01ms +step:85/1670 train_time:8328ms step_avg:97.98ms +step:86/1670 train_time:8424ms step_avg:97.96ms +step:87/1670 train_time:8520ms step_avg:97.93ms +step:88/1670 train_time:8616ms step_avg:97.91ms +step:89/1670 train_time:8712ms step_avg:97.89ms +step:90/1670 train_time:8808ms step_avg:97.87ms +step:91/1670 train_time:8904ms step_avg:97.85ms +step:92/1670 train_time:9001ms step_avg:97.83ms +step:93/1670 train_time:9097ms step_avg:97.82ms +step:94/1670 train_time:9194ms step_avg:97.81ms +step:95/1670 train_time:9289ms step_avg:97.78ms +step:96/1670 train_time:9385ms step_avg:97.76ms +step:97/1670 train_time:9480ms step_avg:97.74ms +step:98/1670 train_time:9577ms step_avg:97.72ms +step:99/1670 train_time:9672ms step_avg:97.70ms +step:100/1670 train_time:9768ms step_avg:97.68ms +step:101/1670 train_time:9864ms step_avg:97.66ms +step:102/1670 train_time:9960ms step_avg:97.65ms +step:103/1670 train_time:10056ms step_avg:97.63ms +step:104/1670 train_time:10152ms step_avg:97.62ms +step:105/1670 train_time:10248ms step_avg:97.60ms +step:106/1670 train_time:10344ms step_avg:97.58ms +step:107/1670 train_time:10439ms step_avg:97.56ms +step:108/1670 train_time:10535ms step_avg:97.55ms +step:109/1670 train_time:10630ms step_avg:97.53ms +step:110/1670 train_time:10726ms step_avg:97.51ms +step:111/1670 train_time:10821ms step_avg:97.49ms +step:112/1670 train_time:10918ms step_avg:97.48ms +step:113/1670 train_time:11014ms step_avg:97.47ms +step:114/1670 train_time:11110ms step_avg:97.46ms +step:115/1670 train_time:11206ms step_avg:97.44ms +step:116/1670 train_time:11302ms step_avg:97.43ms +step:117/1670 train_time:11397ms step_avg:97.41ms +step:118/1670 train_time:11493ms step_avg:97.40ms +step:119/1670 train_time:11588ms step_avg:97.38ms +step:120/1670 train_time:11684ms step_avg:97.36ms +step:121/1670 train_time:11780ms step_avg:97.35ms +step:122/1670 train_time:11875ms step_avg:97.34ms +step:123/1670 train_time:11971ms step_avg:97.33ms +step:124/1670 train_time:12067ms step_avg:97.31ms +step:125/1670 train_time:12163ms step_avg:97.30ms +step:125/1670 val_loss:4.2943 train_time:12258ms step_avg:98.07ms +step:126/1670 train_time:12281ms step_avg:97.47ms +step:127/1670 train_time:12362ms step_avg:97.34ms +step:128/1670 train_time:12464ms step_avg:97.37ms +step:129/1670 train_time:12560ms step_avg:97.36ms +step:130/1670 train_time:12655ms step_avg:97.35ms +step:131/1670 train_time:12750ms step_avg:97.33ms +step:132/1670 train_time:12845ms step_avg:97.31ms +step:133/1670 train_time:12939ms step_avg:97.29ms +step:134/1670 train_time:13033ms step_avg:97.26ms +step:135/1670 train_time:13128ms step_avg:97.24ms +step:136/1670 train_time:13223ms step_avg:97.23ms +step:137/1670 train_time:13319ms step_avg:97.22ms +step:138/1670 train_time:13417ms step_avg:97.23ms +step:139/1670 train_time:13515ms step_avg:97.23ms +step:140/1670 train_time:13611ms step_avg:97.22ms +step:141/1670 train_time:13707ms step_avg:97.21ms +step:142/1670 train_time:13802ms step_avg:97.20ms +step:143/1670 train_time:13898ms step_avg:97.19ms +step:144/1670 train_time:13993ms step_avg:97.17ms +step:145/1670 train_time:14088ms step_avg:97.16ms +step:146/1670 train_time:14183ms step_avg:97.14ms +step:147/1670 train_time:14278ms step_avg:97.13ms +step:148/1670 train_time:14374ms step_avg:97.12ms 
+step:149/1670 train_time:14471ms step_avg:97.12ms +step:150/1670 train_time:14568ms step_avg:97.12ms +step:151/1670 train_time:14664ms step_avg:97.11ms +step:152/1670 train_time:14760ms step_avg:97.11ms +step:153/1670 train_time:14855ms step_avg:97.09ms +step:154/1670 train_time:14950ms step_avg:97.08ms +step:155/1670 train_time:15045ms step_avg:97.07ms +step:156/1670 train_time:15140ms step_avg:97.05ms +step:157/1670 train_time:15235ms step_avg:97.04ms +step:158/1670 train_time:15331ms step_avg:97.03ms +step:159/1670 train_time:15427ms step_avg:97.03ms +step:160/1670 train_time:15524ms step_avg:97.03ms +step:161/1670 train_time:15621ms step_avg:97.02ms +step:162/1670 train_time:15716ms step_avg:97.01ms +step:163/1670 train_time:15812ms step_avg:97.00ms +step:164/1670 train_time:15908ms step_avg:97.00ms +step:165/1670 train_time:16004ms step_avg:96.99ms +step:166/1670 train_time:16099ms step_avg:96.98ms +step:167/1670 train_time:16194ms step_avg:96.97ms +step:168/1670 train_time:16289ms step_avg:96.96ms +step:169/1670 train_time:16385ms step_avg:96.95ms +step:170/1670 train_time:16481ms step_avg:96.94ms +step:171/1670 train_time:16577ms step_avg:96.94ms +step:172/1670 train_time:16673ms step_avg:96.93ms +step:173/1670 train_time:16768ms step_avg:96.93ms +step:174/1670 train_time:16864ms step_avg:96.92ms +step:175/1670 train_time:16958ms step_avg:96.90ms +step:176/1670 train_time:17053ms step_avg:96.89ms +step:177/1670 train_time:17149ms step_avg:96.89ms +step:178/1670 train_time:17244ms step_avg:96.88ms +step:179/1670 train_time:17340ms step_avg:96.87ms +step:180/1670 train_time:17435ms step_avg:96.86ms +step:181/1670 train_time:17531ms step_avg:96.85ms +step:182/1670 train_time:17628ms step_avg:96.86ms +step:183/1670 train_time:17724ms step_avg:96.85ms +step:184/1670 train_time:17819ms step_avg:96.84ms +step:185/1670 train_time:17914ms step_avg:96.83ms +step:186/1670 train_time:18010ms step_avg:96.83ms +step:187/1670 train_time:18107ms step_avg:96.83ms +step:188/1670 train_time:18202ms step_avg:96.82ms +step:189/1670 train_time:18297ms step_avg:96.81ms +step:190/1670 train_time:18393ms step_avg:96.80ms +step:191/1670 train_time:18487ms step_avg:96.79ms +step:192/1670 train_time:18583ms step_avg:96.79ms +step:193/1670 train_time:18680ms step_avg:96.79ms +step:194/1670 train_time:18776ms step_avg:96.78ms +step:195/1670 train_time:18871ms step_avg:96.78ms +step:196/1670 train_time:18967ms step_avg:96.77ms +step:197/1670 train_time:19063ms step_avg:96.77ms +step:198/1670 train_time:19158ms step_avg:96.76ms +step:199/1670 train_time:19253ms step_avg:96.75ms +step:200/1670 train_time:19348ms step_avg:96.74ms +step:201/1670 train_time:19445ms step_avg:96.74ms +step:202/1670 train_time:19539ms step_avg:96.73ms +step:203/1670 train_time:19635ms step_avg:96.72ms +step:204/1670 train_time:19731ms step_avg:96.72ms +step:205/1670 train_time:19828ms step_avg:96.72ms +step:206/1670 train_time:19923ms step_avg:96.71ms +step:207/1670 train_time:20018ms step_avg:96.71ms +step:208/1670 train_time:20114ms step_avg:96.70ms +step:209/1670 train_time:20210ms step_avg:96.70ms +step:210/1670 train_time:20305ms step_avg:96.69ms +step:211/1670 train_time:20400ms step_avg:96.68ms +step:212/1670 train_time:20496ms step_avg:96.68ms +step:213/1670 train_time:20752ms step_avg:97.43ms +step:214/1670 train_time:20931ms step_avg:97.81ms +step:215/1670 train_time:21025ms step_avg:97.79ms +step:216/1670 train_time:21120ms step_avg:97.78ms +step:217/1670 train_time:21214ms step_avg:97.76ms +step:218/1670 train_time:21309ms 
step_avg:97.75ms +step:219/1670 train_time:21404ms step_avg:97.73ms +step:220/1670 train_time:21498ms step_avg:97.72ms +step:221/1670 train_time:21593ms step_avg:97.70ms +step:222/1670 train_time:21687ms step_avg:97.69ms +step:223/1670 train_time:21787ms step_avg:97.70ms +step:224/1670 train_time:21887ms step_avg:97.71ms +step:225/1670 train_time:21984ms step_avg:97.71ms +step:226/1670 train_time:22080ms step_avg:97.70ms +step:227/1670 train_time:22174ms step_avg:97.68ms +step:228/1670 train_time:22270ms step_avg:97.67ms +step:229/1670 train_time:22365ms step_avg:97.66ms +step:230/1670 train_time:22460ms step_avg:97.65ms +step:231/1670 train_time:22554ms step_avg:97.64ms +step:232/1670 train_time:22649ms step_avg:97.63ms +step:233/1670 train_time:22745ms step_avg:97.62ms +step:234/1670 train_time:22842ms step_avg:97.61ms +step:235/1670 train_time:22939ms step_avg:97.61ms +step:236/1670 train_time:23035ms step_avg:97.61ms +step:237/1670 train_time:23132ms step_avg:97.60ms +step:238/1670 train_time:23227ms step_avg:97.59ms +step:239/1670 train_time:23322ms step_avg:97.58ms +step:240/1670 train_time:23417ms step_avg:97.57ms +step:241/1670 train_time:23512ms step_avg:97.56ms +step:242/1670 train_time:23606ms step_avg:97.55ms +step:243/1670 train_time:23701ms step_avg:97.53ms +step:244/1670 train_time:23797ms step_avg:97.53ms +step:245/1670 train_time:23893ms step_avg:97.52ms +step:246/1670 train_time:23989ms step_avg:97.52ms +step:247/1670 train_time:24086ms step_avg:97.51ms +step:248/1670 train_time:24181ms step_avg:97.51ms +step:249/1670 train_time:24277ms step_avg:97.50ms +step:250/1670 train_time:24372ms step_avg:97.49ms +step:250/1670 val_loss:3.9672 train_time:24467ms step_avg:97.87ms +step:251/1670 train_time:24488ms step_avg:97.56ms +step:252/1670 train_time:24572ms step_avg:97.51ms +step:253/1670 train_time:24673ms step_avg:97.52ms +step:254/1670 train_time:24769ms step_avg:97.51ms +step:255/1670 train_time:24864ms step_avg:97.50ms +step:256/1670 train_time:24959ms step_avg:97.50ms +step:257/1670 train_time:25053ms step_avg:97.48ms +step:258/1670 train_time:25148ms step_avg:97.47ms +step:259/1670 train_time:25242ms step_avg:97.46ms +step:260/1670 train_time:25337ms step_avg:97.45ms +step:261/1670 train_time:25432ms step_avg:97.44ms +step:262/1670 train_time:25528ms step_avg:97.44ms +step:263/1670 train_time:25627ms step_avg:97.44ms +step:264/1670 train_time:25725ms step_avg:97.44ms +step:265/1670 train_time:25820ms step_avg:97.44ms +step:266/1670 train_time:25916ms step_avg:97.43ms +step:267/1670 train_time:26010ms step_avg:97.42ms +step:268/1670 train_time:26106ms step_avg:97.41ms +step:269/1670 train_time:26201ms step_avg:97.40ms +step:270/1670 train_time:26295ms step_avg:97.39ms +step:271/1670 train_time:26390ms step_avg:97.38ms +step:272/1670 train_time:26486ms step_avg:97.37ms +step:273/1670 train_time:26582ms step_avg:97.37ms +step:274/1670 train_time:26679ms step_avg:97.37ms +step:275/1670 train_time:26775ms step_avg:97.36ms +step:276/1670 train_time:26871ms step_avg:97.36ms +step:277/1670 train_time:26967ms step_avg:97.35ms +step:278/1670 train_time:27062ms step_avg:97.35ms +step:279/1670 train_time:27158ms step_avg:97.34ms +step:280/1670 train_time:27252ms step_avg:97.33ms +step:281/1670 train_time:27347ms step_avg:97.32ms +step:282/1670 train_time:27443ms step_avg:97.31ms +step:283/1670 train_time:27538ms step_avg:97.31ms +step:284/1670 train_time:27635ms step_avg:97.31ms +step:285/1670 train_time:27732ms step_avg:97.30ms +step:286/1670 train_time:27827ms step_avg:97.30ms 
+step:287/1670 train_time:27924ms step_avg:97.29ms +step:288/1670 train_time:28019ms step_avg:97.29ms +step:289/1670 train_time:28114ms step_avg:97.28ms +step:290/1670 train_time:28209ms step_avg:97.27ms +step:291/1670 train_time:28305ms step_avg:97.27ms +step:292/1670 train_time:28401ms step_avg:97.26ms +step:293/1670 train_time:28497ms step_avg:97.26ms +step:294/1670 train_time:28592ms step_avg:97.25ms +step:295/1670 train_time:28687ms step_avg:97.24ms +step:296/1670 train_time:28783ms step_avg:97.24ms +step:297/1670 train_time:28879ms step_avg:97.24ms +step:298/1670 train_time:28974ms step_avg:97.23ms +step:299/1670 train_time:29070ms step_avg:97.22ms +step:300/1670 train_time:29165ms step_avg:97.22ms +step:301/1670 train_time:29260ms step_avg:97.21ms +step:302/1670 train_time:29356ms step_avg:97.20ms +step:303/1670 train_time:29451ms step_avg:97.20ms +step:304/1670 train_time:29546ms step_avg:97.19ms +step:305/1670 train_time:29643ms step_avg:97.19ms +step:306/1670 train_time:29738ms step_avg:97.18ms +step:307/1670 train_time:29835ms step_avg:97.18ms +step:308/1670 train_time:29931ms step_avg:97.18ms +step:309/1670 train_time:30027ms step_avg:97.17ms +step:310/1670 train_time:30122ms step_avg:97.17ms +step:311/1670 train_time:30217ms step_avg:97.16ms +step:312/1670 train_time:30312ms step_avg:97.15ms +step:313/1670 train_time:30407ms step_avg:97.15ms +step:314/1670 train_time:30503ms step_avg:97.14ms +step:315/1670 train_time:30599ms step_avg:97.14ms +step:316/1670 train_time:30695ms step_avg:97.14ms +step:317/1670 train_time:30790ms step_avg:97.13ms +step:318/1670 train_time:30886ms step_avg:97.13ms +step:319/1670 train_time:30983ms step_avg:97.12ms +step:320/1670 train_time:31079ms step_avg:97.12ms +step:321/1670 train_time:31174ms step_avg:97.11ms +step:322/1670 train_time:31269ms step_avg:97.11ms +step:323/1670 train_time:31364ms step_avg:97.10ms +step:324/1670 train_time:31460ms step_avg:97.10ms +step:325/1670 train_time:31556ms step_avg:97.09ms +step:326/1670 train_time:31651ms step_avg:97.09ms +step:327/1670 train_time:31746ms step_avg:97.08ms +step:328/1670 train_time:31842ms step_avg:97.08ms +step:329/1670 train_time:31938ms step_avg:97.08ms +step:330/1670 train_time:32034ms step_avg:97.07ms +step:331/1670 train_time:32129ms step_avg:97.07ms +step:332/1670 train_time:32225ms step_avg:97.06ms +step:333/1670 train_time:32321ms step_avg:97.06ms +step:334/1670 train_time:32417ms step_avg:97.06ms +step:335/1670 train_time:32513ms step_avg:97.05ms +step:336/1670 train_time:32608ms step_avg:97.05ms +step:337/1670 train_time:32704ms step_avg:97.05ms +step:338/1670 train_time:32800ms step_avg:97.04ms +step:339/1670 train_time:32896ms step_avg:97.04ms +step:340/1670 train_time:32991ms step_avg:97.03ms +step:341/1670 train_time:33087ms step_avg:97.03ms +step:342/1670 train_time:33182ms step_avg:97.02ms +step:343/1670 train_time:33278ms step_avg:97.02ms +step:344/1670 train_time:33374ms step_avg:97.02ms +step:345/1670 train_time:33470ms step_avg:97.01ms +step:346/1670 train_time:33566ms step_avg:97.01ms +step:347/1670 train_time:33661ms step_avg:97.01ms +step:348/1670 train_time:33757ms step_avg:97.00ms +step:349/1670 train_time:33853ms step_avg:97.00ms +step:350/1670 train_time:33948ms step_avg:96.99ms +step:351/1670 train_time:34044ms step_avg:96.99ms +step:352/1670 train_time:34139ms step_avg:96.99ms +step:353/1670 train_time:34236ms step_avg:96.98ms +step:354/1670 train_time:34331ms step_avg:96.98ms +step:355/1670 train_time:34427ms step_avg:96.98ms +step:356/1670 train_time:34523ms 
step_avg:96.98ms +step:357/1670 train_time:34619ms step_avg:96.97ms +step:358/1670 train_time:34715ms step_avg:96.97ms +step:359/1670 train_time:34810ms step_avg:96.96ms +step:360/1670 train_time:34905ms step_avg:96.96ms +step:361/1670 train_time:35001ms step_avg:96.96ms +step:362/1670 train_time:35097ms step_avg:96.95ms +step:363/1670 train_time:35193ms step_avg:96.95ms +step:364/1670 train_time:35288ms step_avg:96.95ms +step:365/1670 train_time:35384ms step_avg:96.94ms +step:366/1670 train_time:35481ms step_avg:96.94ms +step:367/1670 train_time:35577ms step_avg:96.94ms +step:368/1670 train_time:35672ms step_avg:96.93ms +step:369/1670 train_time:35767ms step_avg:96.93ms +step:370/1670 train_time:35862ms step_avg:96.92ms +step:371/1670 train_time:35958ms step_avg:96.92ms +step:372/1670 train_time:36053ms step_avg:96.92ms +step:373/1670 train_time:36149ms step_avg:96.91ms +step:374/1670 train_time:36244ms step_avg:96.91ms +step:375/1670 train_time:36340ms step_avg:96.91ms +step:375/1670 val_loss:3.8109 train_time:36436ms step_avg:97.16ms +step:376/1670 train_time:36459ms step_avg:96.96ms +step:377/1670 train_time:36544ms step_avg:96.93ms +step:378/1670 train_time:36643ms step_avg:96.94ms +step:379/1670 train_time:36739ms step_avg:96.94ms +step:380/1670 train_time:36833ms step_avg:96.93ms +step:381/1670 train_time:36928ms step_avg:96.93ms +step:382/1670 train_time:37024ms step_avg:96.92ms +step:383/1670 train_time:37119ms step_avg:96.92ms +step:384/1670 train_time:37213ms step_avg:96.91ms +step:385/1670 train_time:37308ms step_avg:96.90ms +step:386/1670 train_time:37404ms step_avg:96.90ms +step:387/1670 train_time:37501ms step_avg:96.90ms +step:388/1670 train_time:37599ms step_avg:96.90ms +step:389/1670 train_time:37695ms step_avg:96.90ms +step:390/1670 train_time:37791ms step_avg:96.90ms +step:391/1670 train_time:37886ms step_avg:96.90ms +step:392/1670 train_time:37981ms step_avg:96.89ms +step:393/1670 train_time:38077ms step_avg:96.89ms +step:394/1670 train_time:38172ms step_avg:96.88ms +step:395/1670 train_time:38267ms step_avg:96.88ms +step:396/1670 train_time:38362ms step_avg:96.87ms +step:397/1670 train_time:38459ms step_avg:96.87ms +step:398/1670 train_time:38555ms step_avg:96.87ms +step:399/1670 train_time:38651ms step_avg:96.87ms +step:400/1670 train_time:38747ms step_avg:96.87ms +step:401/1670 train_time:38843ms step_avg:96.87ms +step:402/1670 train_time:38939ms step_avg:96.86ms +step:403/1670 train_time:39033ms step_avg:96.86ms +step:404/1670 train_time:39129ms step_avg:96.85ms +step:405/1670 train_time:39225ms step_avg:96.85ms +step:406/1670 train_time:39319ms step_avg:96.85ms +step:407/1670 train_time:39415ms step_avg:96.84ms +step:408/1670 train_time:39511ms step_avg:96.84ms +step:409/1670 train_time:39608ms step_avg:96.84ms +step:410/1670 train_time:39704ms step_avg:96.84ms +step:411/1670 train_time:39801ms step_avg:96.84ms +step:412/1670 train_time:39896ms step_avg:96.84ms +step:413/1670 train_time:39992ms step_avg:96.83ms +step:414/1670 train_time:40087ms step_avg:96.83ms +step:415/1670 train_time:40182ms step_avg:96.83ms +step:416/1670 train_time:40277ms step_avg:96.82ms +step:417/1670 train_time:40372ms step_avg:96.82ms +step:418/1670 train_time:40469ms step_avg:96.81ms +step:419/1670 train_time:40564ms step_avg:96.81ms +step:420/1670 train_time:40661ms step_avg:96.81ms +step:421/1670 train_time:40757ms step_avg:96.81ms +step:422/1670 train_time:40853ms step_avg:96.81ms +step:423/1670 train_time:40949ms step_avg:96.81ms +step:424/1670 train_time:41045ms step_avg:96.80ms 
+step:425/1670 train_time:41302ms step_avg:97.18ms +step:426/1670 train_time:41508ms step_avg:97.44ms +step:427/1670 train_time:41601ms step_avg:97.43ms +step:428/1670 train_time:41696ms step_avg:97.42ms +step:429/1670 train_time:41790ms step_avg:97.41ms +step:430/1670 train_time:41886ms step_avg:97.41ms +step:431/1670 train_time:41981ms step_avg:97.40ms +step:432/1670 train_time:42075ms step_avg:97.40ms +step:433/1670 train_time:42170ms step_avg:97.39ms +step:434/1670 train_time:42265ms step_avg:97.39ms +step:435/1670 train_time:42365ms step_avg:97.39ms +step:436/1670 train_time:42463ms step_avg:97.39ms +step:437/1670 train_time:42562ms step_avg:97.40ms +step:438/1670 train_time:42657ms step_avg:97.39ms +step:439/1670 train_time:42753ms step_avg:97.39ms +step:440/1670 train_time:42849ms step_avg:97.38ms +step:441/1670 train_time:42944ms step_avg:97.38ms +step:442/1670 train_time:43039ms step_avg:97.37ms +step:443/1670 train_time:43134ms step_avg:97.37ms +step:444/1670 train_time:43229ms step_avg:97.36ms +step:445/1670 train_time:43325ms step_avg:97.36ms +step:446/1670 train_time:43421ms step_avg:97.36ms +step:447/1670 train_time:43518ms step_avg:97.36ms +step:448/1670 train_time:43614ms step_avg:97.35ms +step:449/1670 train_time:43710ms step_avg:97.35ms +step:450/1670 train_time:43805ms step_avg:97.35ms +step:451/1670 train_time:43901ms step_avg:97.34ms +step:452/1670 train_time:43995ms step_avg:97.34ms +step:453/1670 train_time:44091ms step_avg:97.33ms +step:454/1670 train_time:44187ms step_avg:97.33ms +step:455/1670 train_time:44282ms step_avg:97.32ms +step:456/1670 train_time:44378ms step_avg:97.32ms +step:457/1670 train_time:44474ms step_avg:97.32ms +step:458/1670 train_time:44571ms step_avg:97.32ms +step:459/1670 train_time:44667ms step_avg:97.31ms +step:460/1670 train_time:44763ms step_avg:97.31ms +step:461/1670 train_time:44859ms step_avg:97.31ms +step:462/1670 train_time:44955ms step_avg:97.30ms +step:463/1670 train_time:45050ms step_avg:97.30ms +step:464/1670 train_time:45146ms step_avg:97.30ms +step:465/1670 train_time:45241ms step_avg:97.29ms +step:466/1670 train_time:45336ms step_avg:97.29ms +step:467/1670 train_time:45432ms step_avg:97.29ms +step:468/1670 train_time:45529ms step_avg:97.28ms +step:469/1670 train_time:45625ms step_avg:97.28ms +step:470/1670 train_time:45722ms step_avg:97.28ms +step:471/1670 train_time:45817ms step_avg:97.28ms +step:472/1670 train_time:45913ms step_avg:97.27ms +step:473/1670 train_time:46009ms step_avg:97.27ms +step:474/1670 train_time:46105ms step_avg:97.27ms +step:475/1670 train_time:46200ms step_avg:97.26ms +step:476/1670 train_time:46296ms step_avg:97.26ms +step:477/1670 train_time:46391ms step_avg:97.26ms +step:478/1670 train_time:46487ms step_avg:97.25ms +step:479/1670 train_time:46584ms step_avg:97.25ms +step:480/1670 train_time:46681ms step_avg:97.25ms +step:481/1670 train_time:46776ms step_avg:97.25ms +step:482/1670 train_time:46872ms step_avg:97.24ms +step:483/1670 train_time:46968ms step_avg:97.24ms +step:484/1670 train_time:47063ms step_avg:97.24ms +step:485/1670 train_time:47159ms step_avg:97.23ms +step:486/1670 train_time:47254ms step_avg:97.23ms +step:487/1670 train_time:47350ms step_avg:97.23ms +step:488/1670 train_time:47446ms step_avg:97.23ms +step:489/1670 train_time:47542ms step_avg:97.22ms +step:490/1670 train_time:47638ms step_avg:97.22ms +step:491/1670 train_time:47733ms step_avg:97.22ms +step:492/1670 train_time:47829ms step_avg:97.21ms +step:493/1670 train_time:47925ms step_avg:97.21ms +step:494/1670 train_time:48021ms 
step_avg:97.21ms +step:495/1670 train_time:48117ms step_avg:97.21ms +step:496/1670 train_time:48212ms step_avg:97.20ms +step:497/1670 train_time:48309ms step_avg:97.20ms +step:498/1670 train_time:48405ms step_avg:97.20ms +step:499/1670 train_time:48501ms step_avg:97.20ms +step:500/1670 train_time:48597ms step_avg:97.19ms +step:500/1670 val_loss:3.7118 train_time:48692ms step_avg:97.38ms +step:501/1670 train_time:48715ms step_avg:97.24ms +step:502/1670 train_time:48798ms step_avg:97.21ms +step:503/1670 train_time:48897ms step_avg:97.21ms +step:504/1670 train_time:48994ms step_avg:97.21ms +step:505/1670 train_time:49090ms step_avg:97.21ms +step:506/1670 train_time:49185ms step_avg:97.20ms +step:507/1670 train_time:49280ms step_avg:97.20ms +step:508/1670 train_time:49375ms step_avg:97.19ms +step:509/1670 train_time:49470ms step_avg:97.19ms +step:510/1670 train_time:49565ms step_avg:97.19ms +step:511/1670 train_time:49661ms step_avg:97.18ms +step:512/1670 train_time:49759ms step_avg:97.18ms +step:513/1670 train_time:49857ms step_avg:97.19ms +step:514/1670 train_time:49954ms step_avg:97.19ms +step:515/1670 train_time:50050ms step_avg:97.18ms +step:516/1670 train_time:50145ms step_avg:97.18ms +step:517/1670 train_time:50241ms step_avg:97.18ms +step:518/1670 train_time:50336ms step_avg:97.17ms +step:519/1670 train_time:50431ms step_avg:97.17ms +step:520/1670 train_time:50527ms step_avg:97.17ms +step:521/1670 train_time:50621ms step_avg:97.16ms +step:522/1670 train_time:50717ms step_avg:97.16ms +step:523/1670 train_time:50814ms step_avg:97.16ms +step:524/1670 train_time:50910ms step_avg:97.16ms +step:525/1670 train_time:51006ms step_avg:97.15ms +step:526/1670 train_time:51102ms step_avg:97.15ms +step:527/1670 train_time:51198ms step_avg:97.15ms +step:528/1670 train_time:51293ms step_avg:97.15ms +step:529/1670 train_time:51388ms step_avg:97.14ms +step:530/1670 train_time:51483ms step_avg:97.14ms +step:531/1670 train_time:51579ms step_avg:97.13ms +step:532/1670 train_time:51674ms step_avg:97.13ms +step:533/1670 train_time:51772ms step_avg:97.13ms +step:534/1670 train_time:51868ms step_avg:97.13ms +step:535/1670 train_time:51964ms step_avg:97.13ms +step:536/1670 train_time:52061ms step_avg:97.13ms +step:537/1670 train_time:52157ms step_avg:97.13ms +step:538/1670 train_time:52252ms step_avg:97.12ms +step:539/1670 train_time:52348ms step_avg:97.12ms +step:540/1670 train_time:52444ms step_avg:97.12ms +step:541/1670 train_time:52539ms step_avg:97.11ms +step:542/1670 train_time:52634ms step_avg:97.11ms +step:543/1670 train_time:52730ms step_avg:97.11ms +step:544/1670 train_time:52825ms step_avg:97.11ms +step:545/1670 train_time:52922ms step_avg:97.10ms +step:546/1670 train_time:53019ms step_avg:97.10ms +step:547/1670 train_time:53115ms step_avg:97.10ms +step:548/1670 train_time:53211ms step_avg:97.10ms +step:549/1670 train_time:53306ms step_avg:97.10ms +step:550/1670 train_time:53402ms step_avg:97.09ms +step:551/1670 train_time:53497ms step_avg:97.09ms +step:552/1670 train_time:53592ms step_avg:97.09ms +step:553/1670 train_time:53687ms step_avg:97.08ms +step:554/1670 train_time:53783ms step_avg:97.08ms +step:555/1670 train_time:53879ms step_avg:97.08ms +step:556/1670 train_time:53976ms step_avg:97.08ms +step:557/1670 train_time:54072ms step_avg:97.08ms +step:558/1670 train_time:54168ms step_avg:97.08ms +step:559/1670 train_time:54265ms step_avg:97.07ms +step:560/1670 train_time:54363ms step_avg:97.08ms +step:561/1670 train_time:54460ms step_avg:97.08ms +step:562/1670 train_time:54557ms step_avg:97.08ms 
+step:563/1670 train_time:54655ms step_avg:97.08ms +step:564/1670 train_time:54752ms step_avg:97.08ms +step:565/1670 train_time:54849ms step_avg:97.08ms +step:566/1670 train_time:54947ms step_avg:97.08ms +step:567/1670 train_time:55044ms step_avg:97.08ms +step:568/1670 train_time:55142ms step_avg:97.08ms +step:569/1670 train_time:55240ms step_avg:97.08ms +step:570/1670 train_time:55338ms step_avg:97.08ms +step:571/1670 train_time:55434ms step_avg:97.08ms +step:572/1670 train_time:55531ms step_avg:97.08ms +step:573/1670 train_time:55628ms step_avg:97.08ms +step:574/1670 train_time:55724ms step_avg:97.08ms +step:575/1670 train_time:55821ms step_avg:97.08ms +step:576/1670 train_time:55919ms step_avg:97.08ms +step:577/1670 train_time:56016ms step_avg:97.08ms +step:578/1670 train_time:56114ms step_avg:97.08ms +step:579/1670 train_time:56212ms step_avg:97.08ms +step:580/1670 train_time:56309ms step_avg:97.09ms +step:581/1670 train_time:56406ms step_avg:97.08ms +step:582/1670 train_time:56504ms step_avg:97.09ms +step:583/1670 train_time:56601ms step_avg:97.09ms +step:584/1670 train_time:56699ms step_avg:97.09ms +step:585/1670 train_time:56796ms step_avg:97.09ms +step:586/1670 train_time:56893ms step_avg:97.09ms +step:587/1670 train_time:56990ms step_avg:97.09ms +step:588/1670 train_time:57088ms step_avg:97.09ms +step:589/1670 train_time:57185ms step_avg:97.09ms +step:590/1670 train_time:57283ms step_avg:97.09ms +step:591/1670 train_time:57381ms step_avg:97.09ms +step:592/1670 train_time:57478ms step_avg:97.09ms +step:593/1670 train_time:57577ms step_avg:97.09ms +step:594/1670 train_time:57673ms step_avg:97.09ms +step:595/1670 train_time:57770ms step_avg:97.09ms +step:596/1670 train_time:57867ms step_avg:97.09ms +step:597/1670 train_time:57964ms step_avg:97.09ms +step:598/1670 train_time:58062ms step_avg:97.09ms +step:599/1670 train_time:58161ms step_avg:97.10ms +step:600/1670 train_time:58259ms step_avg:97.10ms +step:601/1670 train_time:58357ms step_avg:97.10ms +step:602/1670 train_time:58455ms step_avg:97.10ms +step:603/1670 train_time:58553ms step_avg:97.10ms +step:604/1670 train_time:58649ms step_avg:97.10ms +step:605/1670 train_time:58745ms step_avg:97.10ms +step:606/1670 train_time:58842ms step_avg:97.10ms +step:607/1670 train_time:58940ms step_avg:97.10ms +step:608/1670 train_time:59038ms step_avg:97.10ms +step:609/1670 train_time:59135ms step_avg:97.10ms +step:610/1670 train_time:59232ms step_avg:97.10ms +step:611/1670 train_time:59329ms step_avg:97.10ms +step:612/1670 train_time:59428ms step_avg:97.10ms +step:613/1670 train_time:59525ms step_avg:97.10ms +step:614/1670 train_time:59622ms step_avg:97.10ms +step:615/1670 train_time:59719ms step_avg:97.10ms +step:616/1670 train_time:59816ms step_avg:97.10ms +step:617/1670 train_time:59914ms step_avg:97.11ms +step:618/1670 train_time:60012ms step_avg:97.11ms +step:619/1670 train_time:60109ms step_avg:97.11ms +step:620/1670 train_time:60206ms step_avg:97.11ms +step:621/1670 train_time:60304ms step_avg:97.11ms +step:622/1670 train_time:60401ms step_avg:97.11ms +step:623/1670 train_time:60499ms step_avg:97.11ms +step:624/1670 train_time:60597ms step_avg:97.11ms +step:625/1670 train_time:60694ms step_avg:97.11ms +step:625/1670 val_loss:3.6134 train_time:60791ms step_avg:97.27ms +step:626/1670 train_time:60813ms step_avg:97.14ms +step:627/1670 train_time:60900ms step_avg:97.13ms +step:628/1670 train_time:60996ms step_avg:97.13ms +step:629/1670 train_time:61092ms step_avg:97.13ms +step:630/1670 train_time:61188ms step_avg:97.12ms +step:631/1670 
train_time:61283ms step_avg:97.12ms +step:632/1670 train_time:61380ms step_avg:97.12ms +step:633/1670 train_time:61475ms step_avg:97.12ms +step:634/1670 train_time:61571ms step_avg:97.12ms +step:635/1670 train_time:61667ms step_avg:97.11ms +step:636/1670 train_time:61766ms step_avg:97.12ms +step:637/1670 train_time:61866ms step_avg:97.12ms +step:638/1670 train_time:61964ms step_avg:97.12ms +step:639/1670 train_time:62342ms step_avg:97.56ms +step:640/1670 train_time:62429ms step_avg:97.54ms +step:641/1670 train_time:62523ms step_avg:97.54ms +step:642/1670 train_time:62620ms step_avg:97.54ms +step:643/1670 train_time:62716ms step_avg:97.54ms +step:644/1670 train_time:62812ms step_avg:97.53ms +step:645/1670 train_time:62908ms step_avg:97.53ms +step:646/1670 train_time:63005ms step_avg:97.53ms +step:647/1670 train_time:63101ms step_avg:97.53ms +step:648/1670 train_time:63197ms step_avg:97.53ms +step:649/1670 train_time:63297ms step_avg:97.53ms +step:650/1670 train_time:63399ms step_avg:97.54ms +step:651/1670 train_time:63498ms step_avg:97.54ms +step:652/1670 train_time:63597ms step_avg:97.54ms +step:653/1670 train_time:63692ms step_avg:97.54ms +step:654/1670 train_time:63788ms step_avg:97.54ms +step:655/1670 train_time:63884ms step_avg:97.53ms +step:656/1670 train_time:63980ms step_avg:97.53ms +step:657/1670 train_time:64077ms step_avg:97.53ms +step:658/1670 train_time:64173ms step_avg:97.53ms +step:659/1670 train_time:64271ms step_avg:97.53ms +step:660/1670 train_time:64370ms step_avg:97.53ms +step:661/1670 train_time:64469ms step_avg:97.53ms +step:662/1670 train_time:64567ms step_avg:97.53ms +step:663/1670 train_time:64665ms step_avg:97.53ms +step:664/1670 train_time:64763ms step_avg:97.54ms +step:665/1670 train_time:64859ms step_avg:97.53ms +step:666/1670 train_time:64955ms step_avg:97.53ms +step:667/1670 train_time:65052ms step_avg:97.53ms +step:668/1670 train_time:65148ms step_avg:97.53ms +step:669/1670 train_time:65245ms step_avg:97.53ms +step:670/1670 train_time:65344ms step_avg:97.53ms +step:671/1670 train_time:65443ms step_avg:97.53ms +step:672/1670 train_time:65541ms step_avg:97.53ms +step:673/1670 train_time:65640ms step_avg:97.53ms +step:674/1670 train_time:65738ms step_avg:97.53ms +step:675/1670 train_time:65834ms step_avg:97.53ms +step:676/1670 train_time:65931ms step_avg:97.53ms +step:677/1670 train_time:66027ms step_avg:97.53ms +step:678/1670 train_time:66124ms step_avg:97.53ms +step:679/1670 train_time:66221ms step_avg:97.53ms +step:680/1670 train_time:66319ms step_avg:97.53ms +step:681/1670 train_time:66418ms step_avg:97.53ms +step:682/1670 train_time:66515ms step_avg:97.53ms +step:683/1670 train_time:66612ms step_avg:97.53ms +step:684/1670 train_time:66710ms step_avg:97.53ms +step:685/1670 train_time:66807ms step_avg:97.53ms +step:686/1670 train_time:66903ms step_avg:97.53ms +step:687/1670 train_time:67000ms step_avg:97.53ms +step:688/1670 train_time:67097ms step_avg:97.52ms +step:689/1670 train_time:67194ms step_avg:97.52ms +step:690/1670 train_time:67291ms step_avg:97.52ms +step:691/1670 train_time:67388ms step_avg:97.52ms +step:692/1670 train_time:67486ms step_avg:97.52ms +step:693/1670 train_time:67584ms step_avg:97.52ms +step:694/1670 train_time:67682ms step_avg:97.52ms +step:695/1670 train_time:67780ms step_avg:97.53ms +step:696/1670 train_time:67878ms step_avg:97.53ms +step:697/1670 train_time:67975ms step_avg:97.53ms +step:698/1670 train_time:68072ms step_avg:97.52ms +step:699/1670 train_time:68169ms step_avg:97.52ms +step:700/1670 train_time:68266ms step_avg:97.52ms 
+step:701/1670 train_time:68363ms step_avg:97.52ms +step:702/1670 train_time:68462ms step_avg:97.52ms +step:703/1670 train_time:68560ms step_avg:97.52ms +step:704/1670 train_time:68658ms step_avg:97.53ms +step:705/1670 train_time:68756ms step_avg:97.53ms +step:706/1670 train_time:68854ms step_avg:97.53ms +step:707/1670 train_time:68950ms step_avg:97.52ms +step:708/1670 train_time:69047ms step_avg:97.52ms +step:709/1670 train_time:69143ms step_avg:97.52ms +step:710/1670 train_time:69240ms step_avg:97.52ms +step:711/1670 train_time:69340ms step_avg:97.52ms +step:712/1670 train_time:69438ms step_avg:97.52ms +step:713/1670 train_time:69535ms step_avg:97.52ms +step:714/1670 train_time:69632ms step_avg:97.52ms +step:715/1670 train_time:69729ms step_avg:97.52ms +step:716/1670 train_time:69827ms step_avg:97.52ms +step:717/1670 train_time:69924ms step_avg:97.52ms +step:718/1670 train_time:70021ms step_avg:97.52ms +step:719/1670 train_time:70119ms step_avg:97.52ms +step:720/1670 train_time:70215ms step_avg:97.52ms +step:721/1670 train_time:70313ms step_avg:97.52ms +step:722/1670 train_time:70411ms step_avg:97.52ms +step:723/1670 train_time:70508ms step_avg:97.52ms +step:724/1670 train_time:70605ms step_avg:97.52ms +step:725/1670 train_time:70702ms step_avg:97.52ms +step:726/1670 train_time:70800ms step_avg:97.52ms +step:727/1670 train_time:70898ms step_avg:97.52ms +step:728/1670 train_time:70995ms step_avg:97.52ms +step:729/1670 train_time:71094ms step_avg:97.52ms +step:730/1670 train_time:71191ms step_avg:97.52ms +step:731/1670 train_time:71288ms step_avg:97.52ms +step:732/1670 train_time:71385ms step_avg:97.52ms +step:733/1670 train_time:71482ms step_avg:97.52ms +step:734/1670 train_time:71580ms step_avg:97.52ms +step:735/1670 train_time:71677ms step_avg:97.52ms +step:736/1670 train_time:71774ms step_avg:97.52ms +step:737/1670 train_time:71871ms step_avg:97.52ms +step:738/1670 train_time:71968ms step_avg:97.52ms +step:739/1670 train_time:72065ms step_avg:97.52ms +step:740/1670 train_time:72163ms step_avg:97.52ms +step:741/1670 train_time:72261ms step_avg:97.52ms +step:742/1670 train_time:72358ms step_avg:97.52ms +step:743/1670 train_time:72456ms step_avg:97.52ms +step:744/1670 train_time:72553ms step_avg:97.52ms +step:745/1670 train_time:72650ms step_avg:97.52ms +step:746/1670 train_time:72748ms step_avg:97.52ms +step:747/1670 train_time:72845ms step_avg:97.52ms +step:748/1670 train_time:72943ms step_avg:97.52ms +step:749/1670 train_time:73040ms step_avg:97.52ms +step:750/1670 train_time:73138ms step_avg:97.52ms +step:750/1670 val_loss:3.5618 train_time:73234ms step_avg:97.65ms +step:751/1670 train_time:73257ms step_avg:97.55ms +step:752/1670 train_time:73340ms step_avg:97.53ms +step:753/1670 train_time:73440ms step_avg:97.53ms +step:754/1670 train_time:73537ms step_avg:97.53ms +step:755/1670 train_time:73634ms step_avg:97.53ms +step:756/1670 train_time:73731ms step_avg:97.53ms +step:757/1670 train_time:73827ms step_avg:97.53ms +step:758/1670 train_time:73924ms step_avg:97.53ms +step:759/1670 train_time:74021ms step_avg:97.52ms +step:760/1670 train_time:74116ms step_avg:97.52ms +step:761/1670 train_time:74216ms step_avg:97.52ms +step:762/1670 train_time:74317ms step_avg:97.53ms +step:763/1670 train_time:74416ms step_avg:97.53ms +step:764/1670 train_time:74514ms step_avg:97.53ms +step:765/1670 train_time:74611ms step_avg:97.53ms +step:766/1670 train_time:74708ms step_avg:97.53ms +step:767/1670 train_time:74804ms step_avg:97.53ms +step:768/1670 train_time:74900ms step_avg:97.53ms +step:769/1670 
train_time:74996ms step_avg:97.52ms +step:770/1670 train_time:75093ms step_avg:97.52ms +step:771/1670 train_time:75191ms step_avg:97.52ms +step:772/1670 train_time:75290ms step_avg:97.53ms +step:773/1670 train_time:75390ms step_avg:97.53ms +step:774/1670 train_time:75487ms step_avg:97.53ms +step:775/1670 train_time:75586ms step_avg:97.53ms +step:776/1670 train_time:75683ms step_avg:97.53ms +step:777/1670 train_time:75780ms step_avg:97.53ms +step:778/1670 train_time:75877ms step_avg:97.53ms +step:779/1670 train_time:75973ms step_avg:97.53ms +step:780/1670 train_time:76071ms step_avg:97.53ms +step:781/1670 train_time:76169ms step_avg:97.53ms +step:782/1670 train_time:76267ms step_avg:97.53ms +step:783/1670 train_time:76365ms step_avg:97.53ms +step:784/1670 train_time:76463ms step_avg:97.53ms +step:785/1670 train_time:76560ms step_avg:97.53ms +step:786/1670 train_time:76658ms step_avg:97.53ms +step:787/1670 train_time:76755ms step_avg:97.53ms +step:788/1670 train_time:76852ms step_avg:97.53ms +step:789/1670 train_time:76949ms step_avg:97.53ms +step:790/1670 train_time:77044ms step_avg:97.52ms +step:791/1670 train_time:77142ms step_avg:97.52ms +step:792/1670 train_time:77241ms step_avg:97.53ms +step:793/1670 train_time:77338ms step_avg:97.53ms +step:794/1670 train_time:77436ms step_avg:97.53ms +step:795/1670 train_time:77534ms step_avg:97.53ms +step:796/1670 train_time:77633ms step_avg:97.53ms +step:797/1670 train_time:77731ms step_avg:97.53ms +step:798/1670 train_time:77827ms step_avg:97.53ms +step:799/1670 train_time:77924ms step_avg:97.53ms +step:800/1670 train_time:78020ms step_avg:97.53ms +step:801/1670 train_time:78117ms step_avg:97.52ms +step:802/1670 train_time:78215ms step_avg:97.52ms +step:803/1670 train_time:78312ms step_avg:97.52ms +step:804/1670 train_time:78411ms step_avg:97.53ms +step:805/1670 train_time:78510ms step_avg:97.53ms +step:806/1670 train_time:78607ms step_avg:97.53ms +step:807/1670 train_time:78705ms step_avg:97.53ms +step:808/1670 train_time:78803ms step_avg:97.53ms +step:809/1670 train_time:78899ms step_avg:97.53ms +step:810/1670 train_time:78996ms step_avg:97.53ms +step:811/1670 train_time:79093ms step_avg:97.53ms +step:812/1670 train_time:79190ms step_avg:97.53ms +step:813/1670 train_time:79288ms step_avg:97.52ms +step:814/1670 train_time:79385ms step_avg:97.52ms +step:815/1670 train_time:79483ms step_avg:97.53ms +step:816/1670 train_time:79581ms step_avg:97.53ms +step:817/1670 train_time:79678ms step_avg:97.53ms +step:818/1670 train_time:79777ms step_avg:97.53ms +step:819/1670 train_time:79875ms step_avg:97.53ms +step:820/1670 train_time:79972ms step_avg:97.53ms +step:821/1670 train_time:80069ms step_avg:97.53ms +step:822/1670 train_time:80167ms step_avg:97.53ms +step:823/1670 train_time:80263ms step_avg:97.53ms +step:824/1670 train_time:80360ms step_avg:97.52ms +step:825/1670 train_time:80458ms step_avg:97.52ms +step:826/1670 train_time:80556ms step_avg:97.53ms +step:827/1670 train_time:80653ms step_avg:97.53ms +step:828/1670 train_time:80752ms step_avg:97.53ms +step:829/1670 train_time:80849ms step_avg:97.53ms +step:830/1670 train_time:80947ms step_avg:97.53ms +step:831/1670 train_time:81044ms step_avg:97.53ms +step:832/1670 train_time:81141ms step_avg:97.52ms +step:833/1670 train_time:81237ms step_avg:97.52ms +step:834/1670 train_time:81335ms step_avg:97.52ms +step:835/1670 train_time:81432ms step_avg:97.52ms +step:836/1670 train_time:81531ms step_avg:97.53ms +step:837/1670 train_time:81628ms step_avg:97.52ms +step:838/1670 train_time:81726ms step_avg:97.52ms 
+step:839/1670 train_time:81823ms step_avg:97.52ms +step:840/1670 train_time:81920ms step_avg:97.52ms +step:841/1670 train_time:82018ms step_avg:97.52ms +step:842/1670 train_time:82115ms step_avg:97.52ms +step:843/1670 train_time:82212ms step_avg:97.52ms +step:844/1670 train_time:82310ms step_avg:97.52ms +step:845/1670 train_time:82407ms step_avg:97.52ms +step:846/1670 train_time:82505ms step_avg:97.52ms +step:847/1670 train_time:82602ms step_avg:97.52ms +step:848/1670 train_time:82699ms step_avg:97.52ms +step:849/1670 train_time:82796ms step_avg:97.52ms +step:850/1670 train_time:82894ms step_avg:97.52ms +step:851/1670 train_time:83168ms step_avg:97.73ms +step:852/1670 train_time:83242ms step_avg:97.70ms +step:853/1670 train_time:83337ms step_avg:97.70ms +step:854/1670 train_time:83434ms step_avg:97.70ms +step:855/1670 train_time:83530ms step_avg:97.70ms +step:856/1670 train_time:83627ms step_avg:97.69ms +step:857/1670 train_time:83723ms step_avg:97.69ms +step:858/1670 train_time:83820ms step_avg:97.69ms +step:859/1670 train_time:83916ms step_avg:97.69ms +step:860/1670 train_time:84013ms step_avg:97.69ms +step:861/1670 train_time:84114ms step_avg:97.69ms +step:862/1670 train_time:84217ms step_avg:97.70ms +step:863/1670 train_time:84316ms step_avg:97.70ms +step:864/1670 train_time:84413ms step_avg:97.70ms +step:865/1670 train_time:84510ms step_avg:97.70ms +step:866/1670 train_time:84606ms step_avg:97.70ms +step:867/1670 train_time:84703ms step_avg:97.70ms +step:868/1670 train_time:84799ms step_avg:97.70ms +step:869/1670 train_time:84895ms step_avg:97.69ms +step:870/1670 train_time:84992ms step_avg:97.69ms +step:871/1670 train_time:85091ms step_avg:97.69ms +step:872/1670 train_time:85193ms step_avg:97.70ms +step:873/1670 train_time:85292ms step_avg:97.70ms +step:874/1670 train_time:85391ms step_avg:97.70ms +step:875/1670 train_time:85489ms step_avg:97.70ms +step:875/1670 val_loss:3.5195 train_time:85584ms step_avg:97.81ms +step:876/1670 train_time:85607ms step_avg:97.72ms +step:877/1670 train_time:85689ms step_avg:97.71ms +step:878/1670 train_time:85788ms step_avg:97.71ms +step:879/1670 train_time:85886ms step_avg:97.71ms +step:880/1670 train_time:85983ms step_avg:97.71ms +step:881/1670 train_time:86080ms step_avg:97.71ms +step:882/1670 train_time:86176ms step_avg:97.70ms +step:883/1670 train_time:86271ms step_avg:97.70ms +step:884/1670 train_time:86368ms step_avg:97.70ms +step:885/1670 train_time:86464ms step_avg:97.70ms +step:886/1670 train_time:86564ms step_avg:97.70ms +step:887/1670 train_time:86665ms step_avg:97.71ms +step:888/1670 train_time:86764ms step_avg:97.71ms +step:889/1670 train_time:86862ms step_avg:97.71ms +step:890/1670 train_time:86960ms step_avg:97.71ms +step:891/1670 train_time:87057ms step_avg:97.71ms +step:892/1670 train_time:87153ms step_avg:97.70ms +step:893/1670 train_time:87249ms step_avg:97.70ms +step:894/1670 train_time:87346ms step_avg:97.70ms +step:895/1670 train_time:87442ms step_avg:97.70ms +step:896/1670 train_time:87540ms step_avg:97.70ms +step:897/1670 train_time:87638ms step_avg:97.70ms +step:898/1670 train_time:87736ms step_avg:97.70ms +step:899/1670 train_time:87834ms step_avg:97.70ms +step:900/1670 train_time:87931ms step_avg:97.70ms +step:901/1670 train_time:88028ms step_avg:97.70ms +step:902/1670 train_time:88126ms step_avg:97.70ms +step:903/1670 train_time:88223ms step_avg:97.70ms +step:904/1670 train_time:88319ms step_avg:97.70ms +step:905/1670 train_time:88416ms step_avg:97.70ms +step:906/1670 train_time:88513ms step_avg:97.70ms +step:907/1670 
train_time:88611ms step_avg:97.70ms +step:908/1670 train_time:88709ms step_avg:97.70ms +step:909/1670 train_time:88808ms step_avg:97.70ms +step:910/1670 train_time:88908ms step_avg:97.70ms +step:911/1670 train_time:89006ms step_avg:97.70ms +step:912/1670 train_time:89105ms step_avg:97.70ms +step:913/1670 train_time:89202ms step_avg:97.70ms +step:914/1670 train_time:89298ms step_avg:97.70ms +step:915/1670 train_time:89395ms step_avg:97.70ms +step:916/1670 train_time:89491ms step_avg:97.70ms +step:917/1670 train_time:89589ms step_avg:97.70ms +step:918/1670 train_time:89687ms step_avg:97.70ms +step:919/1670 train_time:89785ms step_avg:97.70ms +step:920/1670 train_time:89883ms step_avg:97.70ms +step:921/1670 train_time:89980ms step_avg:97.70ms +step:922/1670 train_time:90077ms step_avg:97.70ms +step:923/1670 train_time:90175ms step_avg:97.70ms +step:924/1670 train_time:90272ms step_avg:97.70ms +step:925/1670 train_time:90369ms step_avg:97.70ms +step:926/1670 train_time:90466ms step_avg:97.70ms +step:927/1670 train_time:90564ms step_avg:97.70ms +step:928/1670 train_time:90662ms step_avg:97.70ms +step:929/1670 train_time:90760ms step_avg:97.70ms +step:930/1670 train_time:90858ms step_avg:97.70ms +step:931/1670 train_time:90955ms step_avg:97.70ms +step:932/1670 train_time:91053ms step_avg:97.70ms +step:933/1670 train_time:91150ms step_avg:97.70ms +step:934/1670 train_time:91247ms step_avg:97.70ms +step:935/1670 train_time:91345ms step_avg:97.70ms +step:936/1670 train_time:91442ms step_avg:97.69ms +step:937/1670 train_time:91539ms step_avg:97.69ms +step:938/1670 train_time:91636ms step_avg:97.69ms +step:939/1670 train_time:91733ms step_avg:97.69ms +step:940/1670 train_time:91831ms step_avg:97.69ms +step:941/1670 train_time:91928ms step_avg:97.69ms +step:942/1670 train_time:92027ms step_avg:97.69ms +step:943/1670 train_time:92124ms step_avg:97.69ms +step:944/1670 train_time:92221ms step_avg:97.69ms +step:945/1670 train_time:92319ms step_avg:97.69ms +step:946/1670 train_time:92416ms step_avg:97.69ms +step:947/1670 train_time:92514ms step_avg:97.69ms +step:948/1670 train_time:92610ms step_avg:97.69ms +step:949/1670 train_time:92708ms step_avg:97.69ms +step:950/1670 train_time:92805ms step_avg:97.69ms +step:951/1670 train_time:92903ms step_avg:97.69ms +step:952/1670 train_time:93000ms step_avg:97.69ms +step:953/1670 train_time:93098ms step_avg:97.69ms +step:954/1670 train_time:93195ms step_avg:97.69ms +step:955/1670 train_time:93292ms step_avg:97.69ms +step:956/1670 train_time:93390ms step_avg:97.69ms +step:957/1670 train_time:93488ms step_avg:97.69ms +step:958/1670 train_time:93586ms step_avg:97.69ms +step:959/1670 train_time:93683ms step_avg:97.69ms +step:960/1670 train_time:93781ms step_avg:97.69ms +step:961/1670 train_time:93878ms step_avg:97.69ms +step:962/1670 train_time:93975ms step_avg:97.69ms +step:963/1670 train_time:94072ms step_avg:97.69ms +step:964/1670 train_time:94169ms step_avg:97.69ms +step:965/1670 train_time:94267ms step_avg:97.69ms +step:966/1670 train_time:94366ms step_avg:97.69ms +step:967/1670 train_time:94463ms step_avg:97.69ms +step:968/1670 train_time:94560ms step_avg:97.69ms +step:969/1670 train_time:94657ms step_avg:97.69ms +step:970/1670 train_time:94754ms step_avg:97.68ms +step:971/1670 train_time:94851ms step_avg:97.68ms +step:972/1670 train_time:94949ms step_avg:97.68ms +step:973/1670 train_time:95048ms step_avg:97.69ms +step:974/1670 train_time:95145ms step_avg:97.69ms +step:975/1670 train_time:95243ms step_avg:97.68ms +step:976/1670 train_time:95340ms step_avg:97.68ms 
+step:977/1670 train_time:95437ms step_avg:97.68ms +step:978/1670 train_time:95534ms step_avg:97.68ms +step:979/1670 train_time:95632ms step_avg:97.68ms +step:980/1670 train_time:95729ms step_avg:97.68ms +step:981/1670 train_time:95827ms step_avg:97.68ms +step:982/1670 train_time:95924ms step_avg:97.68ms +step:983/1670 train_time:96022ms step_avg:97.68ms +step:984/1670 train_time:96119ms step_avg:97.68ms +step:985/1670 train_time:96215ms step_avg:97.68ms +step:986/1670 train_time:96312ms step_avg:97.68ms +step:987/1670 train_time:96410ms step_avg:97.68ms +step:988/1670 train_time:96507ms step_avg:97.68ms +step:989/1670 train_time:96606ms step_avg:97.68ms +step:990/1670 train_time:96704ms step_avg:97.68ms +step:991/1670 train_time:96802ms step_avg:97.68ms +step:992/1670 train_time:96899ms step_avg:97.68ms +step:993/1670 train_time:96997ms step_avg:97.68ms +step:994/1670 train_time:97093ms step_avg:97.68ms +step:995/1670 train_time:97190ms step_avg:97.68ms +step:996/1670 train_time:97287ms step_avg:97.68ms +step:997/1670 train_time:97385ms step_avg:97.68ms +step:998/1670 train_time:97483ms step_avg:97.68ms +step:999/1670 train_time:97580ms step_avg:97.68ms +step:1000/1670 train_time:97676ms step_avg:97.68ms +step:1000/1670 val_loss:3.4765 train_time:97773ms step_avg:97.77ms +step:1001/1670 train_time:97795ms step_avg:97.70ms +step:1002/1670 train_time:97877ms step_avg:97.68ms +step:1003/1670 train_time:97977ms step_avg:97.68ms +step:1004/1670 train_time:98075ms step_avg:97.68ms +step:1005/1670 train_time:98172ms step_avg:97.68ms +step:1006/1670 train_time:98268ms step_avg:97.68ms +step:1007/1670 train_time:98364ms step_avg:97.68ms +step:1008/1670 train_time:98460ms step_avg:97.68ms +step:1009/1670 train_time:98557ms step_avg:97.68ms +step:1010/1670 train_time:98654ms step_avg:97.68ms +step:1011/1670 train_time:98752ms step_avg:97.68ms +step:1012/1670 train_time:98851ms step_avg:97.68ms +step:1013/1670 train_time:98950ms step_avg:97.68ms +step:1014/1670 train_time:99048ms step_avg:97.68ms +step:1015/1670 train_time:99145ms step_avg:97.68ms +step:1016/1670 train_time:99242ms step_avg:97.68ms +step:1017/1670 train_time:99340ms step_avg:97.68ms +step:1018/1670 train_time:99437ms step_avg:97.68ms +step:1019/1670 train_time:99533ms step_avg:97.68ms +step:1020/1670 train_time:99629ms step_avg:97.68ms +step:1021/1670 train_time:99726ms step_avg:97.68ms +step:1022/1670 train_time:99824ms step_avg:97.68ms +step:1023/1670 train_time:99924ms step_avg:97.68ms +step:1024/1670 train_time:100024ms step_avg:97.68ms +step:1025/1670 train_time:100121ms step_avg:97.68ms +step:1026/1670 train_time:100220ms step_avg:97.68ms +step:1027/1670 train_time:100317ms step_avg:97.68ms +step:1028/1670 train_time:100414ms step_avg:97.68ms +step:1029/1670 train_time:100511ms step_avg:97.68ms +step:1030/1670 train_time:100607ms step_avg:97.68ms +step:1031/1670 train_time:100704ms step_avg:97.68ms +step:1032/1670 train_time:100802ms step_avg:97.68ms +step:1033/1670 train_time:100900ms step_avg:97.68ms +step:1034/1670 train_time:100999ms step_avg:97.68ms +step:1035/1670 train_time:101096ms step_avg:97.68ms +step:1036/1670 train_time:101194ms step_avg:97.68ms +step:1037/1670 train_time:101291ms step_avg:97.68ms +step:1038/1670 train_time:101388ms step_avg:97.68ms +step:1039/1670 train_time:101484ms step_avg:97.67ms +step:1040/1670 train_time:101581ms step_avg:97.67ms +step:1041/1670 train_time:101678ms step_avg:97.67ms +step:1042/1670 train_time:101778ms step_avg:97.68ms +step:1043/1670 train_time:101877ms step_avg:97.68ms 
+step:1044/1670 train_time:101976ms step_avg:97.68ms +step:1045/1670 train_time:102073ms step_avg:97.68ms +step:1046/1670 train_time:102171ms step_avg:97.68ms +step:1047/1670 train_time:102267ms step_avg:97.68ms +step:1048/1670 train_time:102364ms step_avg:97.68ms +step:1049/1670 train_time:102462ms step_avg:97.68ms +step:1050/1670 train_time:102559ms step_avg:97.68ms +step:1051/1670 train_time:102657ms step_avg:97.68ms +step:1052/1670 train_time:102755ms step_avg:97.68ms +step:1053/1670 train_time:102852ms step_avg:97.68ms +step:1054/1670 train_time:102949ms step_avg:97.68ms +step:1055/1670 train_time:103047ms step_avg:97.68ms +step:1056/1670 train_time:103145ms step_avg:97.68ms +step:1057/1670 train_time:103243ms step_avg:97.68ms +step:1058/1670 train_time:103340ms step_avg:97.67ms +step:1059/1670 train_time:103438ms step_avg:97.68ms +step:1060/1670 train_time:103535ms step_avg:97.67ms +step:1061/1670 train_time:103632ms step_avg:97.67ms +step:1062/1670 train_time:103903ms step_avg:97.84ms +step:1063/1670 train_time:104062ms step_avg:97.89ms +step:1064/1670 train_time:104157ms step_avg:97.89ms +step:1065/1670 train_time:104253ms step_avg:97.89ms +step:1066/1670 train_time:104349ms step_avg:97.89ms +step:1067/1670 train_time:104444ms step_avg:97.89ms +step:1068/1670 train_time:104540ms step_avg:97.88ms +step:1069/1670 train_time:104637ms step_avg:97.88ms +step:1070/1670 train_time:104733ms step_avg:97.88ms +step:1071/1670 train_time:104828ms step_avg:97.88ms +step:1072/1670 train_time:104930ms step_avg:97.88ms +step:1073/1670 train_time:105033ms step_avg:97.89ms +step:1074/1670 train_time:105131ms step_avg:97.89ms +step:1075/1670 train_time:105228ms step_avg:97.89ms +step:1076/1670 train_time:105324ms step_avg:97.88ms +step:1077/1670 train_time:105421ms step_avg:97.88ms +step:1078/1670 train_time:105518ms step_avg:97.88ms +step:1079/1670 train_time:105615ms step_avg:97.88ms +step:1080/1670 train_time:105711ms step_avg:97.88ms +step:1081/1670 train_time:105807ms step_avg:97.88ms +step:1082/1670 train_time:105905ms step_avg:97.88ms +step:1083/1670 train_time:106005ms step_avg:97.88ms +step:1084/1670 train_time:106104ms step_avg:97.88ms +step:1085/1670 train_time:106203ms step_avg:97.88ms +step:1086/1670 train_time:106300ms step_avg:97.88ms +step:1087/1670 train_time:106397ms step_avg:97.88ms +step:1088/1670 train_time:106493ms step_avg:97.88ms +step:1089/1670 train_time:106589ms step_avg:97.88ms +step:1090/1670 train_time:106686ms step_avg:97.88ms +step:1091/1670 train_time:106783ms step_avg:97.88ms +step:1092/1670 train_time:106882ms step_avg:97.88ms +step:1093/1670 train_time:106981ms step_avg:97.88ms +step:1094/1670 train_time:107080ms step_avg:97.88ms +step:1095/1670 train_time:107179ms step_avg:97.88ms +step:1096/1670 train_time:107276ms step_avg:97.88ms +step:1097/1670 train_time:107373ms step_avg:97.88ms +step:1098/1670 train_time:107470ms step_avg:97.88ms +step:1099/1670 train_time:107566ms step_avg:97.88ms +step:1100/1670 train_time:107663ms step_avg:97.88ms +step:1101/1670 train_time:107761ms step_avg:97.88ms +step:1102/1670 train_time:107859ms step_avg:97.88ms +step:1103/1670 train_time:107957ms step_avg:97.88ms +step:1104/1670 train_time:108054ms step_avg:97.88ms +step:1105/1670 train_time:108153ms step_avg:97.88ms +step:1106/1670 train_time:108251ms step_avg:97.88ms +step:1107/1670 train_time:108348ms step_avg:97.88ms +step:1108/1670 train_time:108445ms step_avg:97.87ms +step:1109/1670 train_time:108542ms step_avg:97.87ms +step:1110/1670 train_time:108640ms step_avg:97.87ms 
+step:1111/1670 train_time:108737ms step_avg:97.87ms +step:1112/1670 train_time:108834ms step_avg:97.87ms +step:1113/1670 train_time:108930ms step_avg:97.87ms +step:1114/1670 train_time:109028ms step_avg:97.87ms +step:1115/1670 train_time:109126ms step_avg:97.87ms +step:1116/1670 train_time:109224ms step_avg:97.87ms +step:1117/1670 train_time:109325ms step_avg:97.87ms +step:1118/1670 train_time:109422ms step_avg:97.87ms +step:1119/1670 train_time:109520ms step_avg:97.87ms +step:1120/1670 train_time:109619ms step_avg:97.87ms +step:1121/1670 train_time:109717ms step_avg:97.87ms +step:1122/1670 train_time:109815ms step_avg:97.87ms +step:1123/1670 train_time:109913ms step_avg:97.87ms +step:1124/1670 train_time:110011ms step_avg:97.87ms +step:1125/1670 train_time:110108ms step_avg:97.87ms +step:1125/1670 val_loss:3.4250 train_time:110206ms step_avg:97.96ms +step:1126/1670 train_time:110228ms step_avg:97.89ms +step:1127/1670 train_time:110312ms step_avg:97.88ms +step:1128/1670 train_time:110409ms step_avg:97.88ms +step:1129/1670 train_time:110506ms step_avg:97.88ms +step:1130/1670 train_time:110602ms step_avg:97.88ms +step:1131/1670 train_time:110698ms step_avg:97.88ms +step:1132/1670 train_time:110795ms step_avg:97.88ms +step:1133/1670 train_time:110892ms step_avg:97.88ms +step:1134/1670 train_time:110990ms step_avg:97.87ms +step:1135/1670 train_time:111087ms step_avg:97.87ms +step:1136/1670 train_time:111192ms step_avg:97.88ms +step:1137/1670 train_time:111294ms step_avg:97.88ms +step:1138/1670 train_time:111393ms step_avg:97.88ms +step:1139/1670 train_time:111491ms step_avg:97.88ms +step:1140/1670 train_time:111588ms step_avg:97.88ms +step:1141/1670 train_time:111686ms step_avg:97.88ms +step:1142/1670 train_time:111784ms step_avg:97.88ms +step:1143/1670 train_time:111880ms step_avg:97.88ms +step:1144/1670 train_time:111977ms step_avg:97.88ms +step:1145/1670 train_time:112075ms step_avg:97.88ms +step:1146/1670 train_time:112174ms step_avg:97.88ms +step:1147/1670 train_time:112274ms step_avg:97.89ms +step:1148/1670 train_time:112375ms step_avg:97.89ms +step:1149/1670 train_time:112474ms step_avg:97.89ms +step:1150/1670 train_time:112573ms step_avg:97.89ms +step:1151/1670 train_time:112671ms step_avg:97.89ms +step:1152/1670 train_time:112769ms step_avg:97.89ms +step:1153/1670 train_time:112866ms step_avg:97.89ms +step:1154/1670 train_time:112964ms step_avg:97.89ms +step:1155/1670 train_time:113061ms step_avg:97.89ms +step:1156/1670 train_time:113158ms step_avg:97.89ms +step:1157/1670 train_time:113255ms step_avg:97.89ms +step:1158/1670 train_time:113354ms step_avg:97.89ms +step:1159/1670 train_time:113453ms step_avg:97.89ms +step:1160/1670 train_time:113552ms step_avg:97.89ms +step:1161/1670 train_time:113651ms step_avg:97.89ms +step:1162/1670 train_time:113750ms step_avg:97.89ms +step:1163/1670 train_time:113847ms step_avg:97.89ms +step:1164/1670 train_time:113945ms step_avg:97.89ms +step:1165/1670 train_time:114042ms step_avg:97.89ms +step:1166/1670 train_time:114140ms step_avg:97.89ms +step:1167/1670 train_time:114237ms step_avg:97.89ms +step:1168/1670 train_time:114335ms step_avg:97.89ms +step:1169/1670 train_time:114434ms step_avg:97.89ms +step:1170/1670 train_time:114533ms step_avg:97.89ms +step:1171/1670 train_time:114632ms step_avg:97.89ms +step:1172/1670 train_time:114732ms step_avg:97.89ms +step:1173/1670 train_time:114832ms step_avg:97.90ms +step:1174/1670 train_time:114932ms step_avg:97.90ms +step:1175/1670 train_time:115033ms step_avg:97.90ms +step:1176/1670 train_time:115133ms 
step_avg:97.90ms +step:1177/1670 train_time:115233ms step_avg:97.90ms +step:1178/1670 train_time:115331ms step_avg:97.90ms +step:1179/1670 train_time:115429ms step_avg:97.90ms +step:1180/1670 train_time:115527ms step_avg:97.90ms +step:1181/1670 train_time:115624ms step_avg:97.90ms +step:1182/1670 train_time:115721ms step_avg:97.90ms +step:1183/1670 train_time:115819ms step_avg:97.90ms +step:1184/1670 train_time:115916ms step_avg:97.90ms +step:1185/1670 train_time:116015ms step_avg:97.90ms +step:1186/1670 train_time:116113ms step_avg:97.90ms +step:1187/1670 train_time:116211ms step_avg:97.90ms +step:1188/1670 train_time:116310ms step_avg:97.90ms +step:1189/1670 train_time:116408ms step_avg:97.90ms +step:1190/1670 train_time:116505ms step_avg:97.90ms +step:1191/1670 train_time:116603ms step_avg:97.90ms +step:1192/1670 train_time:116700ms step_avg:97.90ms +step:1193/1670 train_time:116798ms step_avg:97.90ms +step:1194/1670 train_time:116896ms step_avg:97.90ms +step:1195/1670 train_time:116994ms step_avg:97.90ms +step:1196/1670 train_time:117094ms step_avg:97.90ms +step:1197/1670 train_time:117191ms step_avg:97.90ms +step:1198/1670 train_time:117289ms step_avg:97.90ms +step:1199/1670 train_time:117387ms step_avg:97.90ms +step:1200/1670 train_time:117485ms step_avg:97.90ms +step:1201/1670 train_time:117583ms step_avg:97.90ms +step:1202/1670 train_time:117681ms step_avg:97.90ms +step:1203/1670 train_time:117779ms step_avg:97.90ms +step:1204/1670 train_time:117876ms step_avg:97.90ms +step:1205/1670 train_time:117975ms step_avg:97.90ms +step:1206/1670 train_time:118073ms step_avg:97.90ms +step:1207/1670 train_time:118171ms step_avg:97.90ms +step:1208/1670 train_time:118270ms step_avg:97.91ms +step:1209/1670 train_time:118368ms step_avg:97.91ms +step:1210/1670 train_time:118466ms step_avg:97.91ms +step:1211/1670 train_time:118563ms step_avg:97.91ms +step:1212/1670 train_time:118661ms step_avg:97.91ms +step:1213/1670 train_time:118759ms step_avg:97.91ms +step:1214/1670 train_time:118856ms step_avg:97.90ms +step:1215/1670 train_time:118953ms step_avg:97.90ms +step:1216/1670 train_time:119051ms step_avg:97.90ms +step:1217/1670 train_time:119149ms step_avg:97.90ms +step:1218/1670 train_time:119248ms step_avg:97.90ms +step:1219/1670 train_time:119346ms step_avg:97.90ms +step:1220/1670 train_time:119443ms step_avg:97.90ms +step:1221/1670 train_time:119541ms step_avg:97.90ms +step:1222/1670 train_time:119639ms step_avg:97.90ms +step:1223/1670 train_time:119736ms step_avg:97.90ms +step:1224/1670 train_time:119835ms step_avg:97.90ms +step:1225/1670 train_time:119934ms step_avg:97.91ms +step:1226/1670 train_time:120031ms step_avg:97.90ms +step:1227/1670 train_time:120129ms step_avg:97.90ms +step:1228/1670 train_time:120227ms step_avg:97.90ms +step:1229/1670 train_time:120325ms step_avg:97.90ms +step:1230/1670 train_time:120423ms step_avg:97.90ms +step:1231/1670 train_time:120521ms step_avg:97.91ms +step:1232/1670 train_time:120619ms step_avg:97.90ms +step:1233/1670 train_time:120717ms step_avg:97.91ms +step:1234/1670 train_time:120815ms step_avg:97.91ms +step:1235/1670 train_time:120913ms step_avg:97.91ms +step:1236/1670 train_time:121011ms step_avg:97.91ms +step:1237/1670 train_time:121108ms step_avg:97.90ms +step:1238/1670 train_time:121206ms step_avg:97.90ms +step:1239/1670 train_time:121303ms step_avg:97.90ms +step:1240/1670 train_time:121402ms step_avg:97.90ms +step:1241/1670 train_time:121499ms step_avg:97.90ms +step:1242/1670 train_time:121597ms step_avg:97.90ms +step:1243/1670 train_time:121695ms 
step_avg:97.90ms +step:1244/1670 train_time:121793ms step_avg:97.90ms +step:1245/1670 train_time:121891ms step_avg:97.90ms +step:1246/1670 train_time:121989ms step_avg:97.90ms +step:1247/1670 train_time:122086ms step_avg:97.90ms +step:1248/1670 train_time:122185ms step_avg:97.90ms +step:1249/1670 train_time:122282ms step_avg:97.90ms +step:1250/1670 train_time:122380ms step_avg:97.90ms +step:1250/1670 val_loss:3.3821 train_time:122477ms step_avg:97.98ms +step:1251/1670 train_time:122499ms step_avg:97.92ms +step:1252/1670 train_time:122586ms step_avg:97.91ms +step:1253/1670 train_time:122689ms step_avg:97.92ms +step:1254/1670 train_time:122786ms step_avg:97.92ms +step:1255/1670 train_time:122883ms step_avg:97.92ms +step:1256/1670 train_time:122980ms step_avg:97.91ms +step:1257/1670 train_time:123077ms step_avg:97.91ms +step:1258/1670 train_time:123175ms step_avg:97.91ms +step:1259/1670 train_time:123272ms step_avg:97.91ms +step:1260/1670 train_time:123369ms step_avg:97.91ms +step:1261/1670 train_time:123469ms step_avg:97.91ms +step:1262/1670 train_time:123570ms step_avg:97.92ms +step:1263/1670 train_time:123669ms step_avg:97.92ms +step:1264/1670 train_time:123767ms step_avg:97.92ms +step:1265/1670 train_time:123865ms step_avg:97.92ms +step:1266/1670 train_time:123961ms step_avg:97.92ms +step:1267/1670 train_time:124059ms step_avg:97.92ms +step:1268/1670 train_time:124157ms step_avg:97.92ms +step:1269/1670 train_time:124254ms step_avg:97.91ms +step:1270/1670 train_time:124352ms step_avg:97.91ms +step:1271/1670 train_time:124451ms step_avg:97.92ms +step:1272/1670 train_time:124551ms step_avg:97.92ms +step:1273/1670 train_time:124651ms step_avg:97.92ms +step:1274/1670 train_time:124995ms step_avg:98.11ms +step:1275/1670 train_time:125113ms step_avg:98.13ms +step:1276/1670 train_time:125208ms step_avg:98.13ms +step:1277/1670 train_time:125305ms step_avg:98.12ms +step:1278/1670 train_time:125402ms step_avg:98.12ms +step:1279/1670 train_time:125499ms step_avg:98.12ms +step:1280/1670 train_time:125596ms step_avg:98.12ms +step:1281/1670 train_time:125692ms step_avg:98.12ms +step:1282/1670 train_time:125790ms step_avg:98.12ms +step:1283/1670 train_time:125887ms step_avg:98.12ms +step:1284/1670 train_time:125990ms step_avg:98.12ms +step:1285/1670 train_time:126090ms step_avg:98.12ms +step:1286/1670 train_time:126191ms step_avg:98.13ms +step:1287/1670 train_time:126291ms step_avg:98.13ms +step:1288/1670 train_time:126389ms step_avg:98.13ms +step:1289/1670 train_time:126486ms step_avg:98.13ms +step:1290/1670 train_time:126584ms step_avg:98.13ms +step:1291/1670 train_time:126681ms step_avg:98.13ms +step:1292/1670 train_time:126779ms step_avg:98.13ms +step:1293/1670 train_time:126876ms step_avg:98.12ms +step:1294/1670 train_time:126974ms step_avg:98.13ms +step:1295/1670 train_time:127075ms step_avg:98.13ms +step:1296/1670 train_time:127176ms step_avg:98.13ms +step:1297/1670 train_time:127276ms step_avg:98.13ms +step:1298/1670 train_time:127376ms step_avg:98.13ms +step:1299/1670 train_time:127476ms step_avg:98.13ms +step:1300/1670 train_time:127575ms step_avg:98.13ms +step:1301/1670 train_time:127675ms step_avg:98.14ms +step:1302/1670 train_time:127773ms step_avg:98.14ms +step:1303/1670 train_time:127870ms step_avg:98.13ms +step:1304/1670 train_time:127967ms step_avg:98.13ms +step:1305/1670 train_time:128065ms step_avg:98.13ms +step:1306/1670 train_time:128163ms step_avg:98.13ms +step:1307/1670 train_time:128260ms step_avg:98.13ms +step:1308/1670 train_time:128359ms step_avg:98.13ms +step:1309/1670 
train_time:128458ms step_avg:98.13ms +step:1310/1670 train_time:128557ms step_avg:98.14ms +step:1311/1670 train_time:128656ms step_avg:98.14ms +step:1312/1670 train_time:128756ms step_avg:98.14ms +step:1313/1670 train_time:128854ms step_avg:98.14ms +step:1314/1670 train_time:128952ms step_avg:98.14ms +step:1315/1670 train_time:129050ms step_avg:98.14ms +step:1316/1670 train_time:129150ms step_avg:98.14ms +step:1317/1670 train_time:129249ms step_avg:98.14ms +step:1318/1670 train_time:129347ms step_avg:98.14ms +step:1319/1670 train_time:129445ms step_avg:98.14ms +step:1320/1670 train_time:129543ms step_avg:98.14ms +step:1321/1670 train_time:129642ms step_avg:98.14ms +step:1322/1670 train_time:129741ms step_avg:98.14ms +step:1323/1670 train_time:129840ms step_avg:98.14ms +step:1324/1670 train_time:129938ms step_avg:98.14ms +step:1325/1670 train_time:130037ms step_avg:98.14ms +step:1326/1670 train_time:130136ms step_avg:98.14ms +step:1327/1670 train_time:130235ms step_avg:98.14ms +step:1328/1670 train_time:130334ms step_avg:98.14ms +step:1329/1670 train_time:130434ms step_avg:98.14ms +step:1330/1670 train_time:130534ms step_avg:98.15ms +step:1331/1670 train_time:130632ms step_avg:98.15ms +step:1332/1670 train_time:130731ms step_avg:98.15ms +step:1333/1670 train_time:130830ms step_avg:98.15ms +step:1334/1670 train_time:130929ms step_avg:98.15ms +step:1335/1670 train_time:131026ms step_avg:98.15ms +step:1336/1670 train_time:131124ms step_avg:98.15ms +step:1337/1670 train_time:131222ms step_avg:98.15ms +step:1338/1670 train_time:131320ms step_avg:98.15ms +step:1339/1670 train_time:131419ms step_avg:98.15ms +step:1340/1670 train_time:131518ms step_avg:98.15ms +step:1341/1670 train_time:131617ms step_avg:98.15ms +step:1342/1670 train_time:131716ms step_avg:98.15ms +step:1343/1670 train_time:131815ms step_avg:98.15ms +step:1344/1670 train_time:131915ms step_avg:98.15ms +step:1345/1670 train_time:132012ms step_avg:98.15ms +step:1346/1670 train_time:132112ms step_avg:98.15ms +step:1347/1670 train_time:132210ms step_avg:98.15ms +step:1348/1670 train_time:132307ms step_avg:98.15ms +step:1349/1670 train_time:132405ms step_avg:98.15ms +step:1350/1670 train_time:132503ms step_avg:98.15ms +step:1351/1670 train_time:132602ms step_avg:98.15ms +step:1352/1670 train_time:132700ms step_avg:98.15ms +step:1353/1670 train_time:132799ms step_avg:98.15ms +step:1354/1670 train_time:132900ms step_avg:98.15ms +step:1355/1670 train_time:132999ms step_avg:98.15ms +step:1356/1670 train_time:133098ms step_avg:98.15ms +step:1357/1670 train_time:133196ms step_avg:98.15ms +step:1358/1670 train_time:133294ms step_avg:98.15ms +step:1359/1670 train_time:133392ms step_avg:98.15ms +step:1360/1670 train_time:133490ms step_avg:98.15ms +step:1361/1670 train_time:133588ms step_avg:98.15ms +step:1362/1670 train_time:133686ms step_avg:98.15ms +step:1363/1670 train_time:133784ms step_avg:98.15ms +step:1364/1670 train_time:133882ms step_avg:98.15ms +step:1365/1670 train_time:133981ms step_avg:98.15ms +step:1366/1670 train_time:134079ms step_avg:98.15ms +step:1367/1670 train_time:134177ms step_avg:98.15ms +step:1368/1670 train_time:134276ms step_avg:98.15ms +step:1369/1670 train_time:134376ms step_avg:98.16ms +step:1370/1670 train_time:134476ms step_avg:98.16ms +step:1371/1670 train_time:134574ms step_avg:98.16ms +step:1372/1670 train_time:134672ms step_avg:98.16ms +step:1373/1670 train_time:134770ms step_avg:98.16ms +step:1374/1670 train_time:134869ms step_avg:98.16ms +step:1375/1670 train_time:134966ms step_avg:98.16ms +step:1375/1670 
val_loss:3.3440 train_time:135063ms step_avg:98.23ms +step:1376/1670 train_time:135085ms step_avg:98.17ms +step:1377/1670 train_time:135170ms step_avg:98.16ms +step:1378/1670 train_time:135271ms step_avg:98.16ms +step:1379/1670 train_time:135371ms step_avg:98.17ms +step:1380/1670 train_time:135468ms step_avg:98.17ms +step:1381/1670 train_time:135565ms step_avg:98.16ms +step:1382/1670 train_time:135662ms step_avg:98.16ms +step:1383/1670 train_time:135760ms step_avg:98.16ms +step:1384/1670 train_time:135858ms step_avg:98.16ms +step:1385/1670 train_time:135955ms step_avg:98.16ms +step:1386/1670 train_time:136054ms step_avg:98.16ms +step:1387/1670 train_time:136155ms step_avg:98.17ms +step:1388/1670 train_time:136254ms step_avg:98.17ms +step:1389/1670 train_time:136353ms step_avg:98.17ms +step:1390/1670 train_time:136450ms step_avg:98.17ms +step:1391/1670 train_time:136548ms step_avg:98.16ms +step:1392/1670 train_time:136645ms step_avg:98.16ms +step:1393/1670 train_time:136743ms step_avg:98.16ms +step:1394/1670 train_time:136841ms step_avg:98.16ms +step:1395/1670 train_time:136939ms step_avg:98.16ms +step:1396/1670 train_time:137039ms step_avg:98.17ms +step:1397/1670 train_time:137139ms step_avg:98.17ms +step:1398/1670 train_time:137239ms step_avg:98.17ms +step:1399/1670 train_time:137339ms step_avg:98.17ms +step:1400/1670 train_time:137436ms step_avg:98.17ms +step:1401/1670 train_time:137535ms step_avg:98.17ms +step:1402/1670 train_time:137633ms step_avg:98.17ms +step:1403/1670 train_time:137731ms step_avg:98.17ms +step:1404/1670 train_time:137829ms step_avg:98.17ms +step:1405/1670 train_time:137928ms step_avg:98.17ms +step:1406/1670 train_time:138028ms step_avg:98.17ms +step:1407/1670 train_time:138127ms step_avg:98.17ms +step:1408/1670 train_time:138226ms step_avg:98.17ms +step:1409/1670 train_time:138325ms step_avg:98.17ms +step:1410/1670 train_time:138426ms step_avg:98.17ms +step:1411/1670 train_time:138526ms step_avg:98.18ms +step:1412/1670 train_time:138625ms step_avg:98.18ms +step:1413/1670 train_time:138724ms step_avg:98.18ms +step:1414/1670 train_time:138823ms step_avg:98.18ms +step:1415/1670 train_time:138920ms step_avg:98.18ms +step:1416/1670 train_time:139018ms step_avg:98.18ms +step:1417/1670 train_time:139116ms step_avg:98.18ms +step:1418/1670 train_time:139214ms step_avg:98.18ms +step:1419/1670 train_time:139313ms step_avg:98.18ms +step:1420/1670 train_time:139410ms step_avg:98.18ms +step:1421/1670 train_time:139509ms step_avg:98.18ms +step:1422/1670 train_time:139608ms step_avg:98.18ms +step:1423/1670 train_time:139707ms step_avg:98.18ms +step:1424/1670 train_time:139804ms step_avg:98.18ms +step:1425/1670 train_time:139903ms step_avg:98.18ms +step:1426/1670 train_time:140001ms step_avg:98.18ms +step:1427/1670 train_time:140099ms step_avg:98.18ms +step:1428/1670 train_time:140197ms step_avg:98.18ms +step:1429/1670 train_time:140294ms step_avg:98.18ms +step:1430/1670 train_time:140393ms step_avg:98.18ms +step:1431/1670 train_time:140491ms step_avg:98.18ms +step:1432/1670 train_time:140590ms step_avg:98.18ms +step:1433/1670 train_time:140688ms step_avg:98.18ms +step:1434/1670 train_time:140787ms step_avg:98.18ms +step:1435/1670 train_time:140885ms step_avg:98.18ms +step:1436/1670 train_time:140983ms step_avg:98.18ms +step:1437/1670 train_time:141082ms step_avg:98.18ms +step:1438/1670 train_time:141180ms step_avg:98.18ms +step:1439/1670 train_time:141278ms step_avg:98.18ms +step:1440/1670 train_time:141378ms step_avg:98.18ms +step:1441/1670 train_time:141476ms step_avg:98.18ms 
+step:1442/1670 train_time:141574ms step_avg:98.18ms +step:1443/1670 train_time:141671ms step_avg:98.18ms +step:1444/1670 train_time:141769ms step_avg:98.18ms +step:1445/1670 train_time:141867ms step_avg:98.18ms +step:1446/1670 train_time:141967ms step_avg:98.18ms +step:1447/1670 train_time:142066ms step_avg:98.18ms +step:1448/1670 train_time:142166ms step_avg:98.18ms +step:1449/1670 train_time:142266ms step_avg:98.18ms +step:1450/1670 train_time:142367ms step_avg:98.18ms +step:1451/1670 train_time:142466ms step_avg:98.18ms +step:1452/1670 train_time:142564ms step_avg:98.18ms +step:1453/1670 train_time:142664ms step_avg:98.19ms +step:1454/1670 train_time:142762ms step_avg:98.19ms +step:1455/1670 train_time:142861ms step_avg:98.19ms +step:1456/1670 train_time:142958ms step_avg:98.19ms +step:1457/1670 train_time:143055ms step_avg:98.18ms +step:1458/1670 train_time:143154ms step_avg:98.19ms +step:1459/1670 train_time:143253ms step_avg:98.19ms +step:1460/1670 train_time:143351ms step_avg:98.19ms +step:1461/1670 train_time:143451ms step_avg:98.19ms +step:1462/1670 train_time:143549ms step_avg:98.19ms +step:1463/1670 train_time:143648ms step_avg:98.19ms +step:1464/1670 train_time:143746ms step_avg:98.19ms +step:1465/1670 train_time:143845ms step_avg:98.19ms +step:1466/1670 train_time:143943ms step_avg:98.19ms +step:1467/1670 train_time:144040ms step_avg:98.19ms +step:1468/1670 train_time:144138ms step_avg:98.19ms +step:1469/1670 train_time:144237ms step_avg:98.19ms +step:1470/1670 train_time:144335ms step_avg:98.19ms +step:1471/1670 train_time:144433ms step_avg:98.19ms +step:1472/1670 train_time:144531ms step_avg:98.19ms +step:1473/1670 train_time:144629ms step_avg:98.19ms +step:1474/1670 train_time:144728ms step_avg:98.19ms +step:1475/1670 train_time:144825ms step_avg:98.19ms +step:1476/1670 train_time:144924ms step_avg:98.19ms +step:1477/1670 train_time:145021ms step_avg:98.19ms +step:1478/1670 train_time:145120ms step_avg:98.19ms +step:1479/1670 train_time:145218ms step_avg:98.19ms +step:1480/1670 train_time:145317ms step_avg:98.19ms +step:1481/1670 train_time:145415ms step_avg:98.19ms +step:1482/1670 train_time:145513ms step_avg:98.19ms +step:1483/1670 train_time:145611ms step_avg:98.19ms +step:1484/1670 train_time:145710ms step_avg:98.19ms +step:1485/1670 train_time:145969ms step_avg:98.30ms +step:1486/1670 train_time:146064ms step_avg:98.29ms +step:1487/1670 train_time:146160ms step_avg:98.29ms +step:1488/1670 train_time:146256ms step_avg:98.29ms +step:1489/1670 train_time:146353ms step_avg:98.29ms +step:1490/1670 train_time:146450ms step_avg:98.29ms +step:1491/1670 train_time:146548ms step_avg:98.29ms +step:1492/1670 train_time:146646ms step_avg:98.29ms +step:1493/1670 train_time:146742ms step_avg:98.29ms +step:1494/1670 train_time:146841ms step_avg:98.29ms +step:1495/1670 train_time:146943ms step_avg:98.29ms +step:1496/1670 train_time:147045ms step_avg:98.29ms +step:1497/1670 train_time:147144ms step_avg:98.29ms +step:1498/1670 train_time:147244ms step_avg:98.29ms +step:1499/1670 train_time:147342ms step_avg:98.29ms +step:1500/1670 train_time:147440ms step_avg:98.29ms +step:1500/1670 val_loss:3.3119 train_time:147536ms step_avg:98.36ms +step:1501/1670 train_time:147558ms step_avg:98.31ms +step:1502/1670 train_time:147641ms step_avg:98.30ms +step:1503/1670 train_time:147742ms step_avg:98.30ms +step:1504/1670 train_time:147839ms step_avg:98.30ms +step:1505/1670 train_time:147937ms step_avg:98.30ms +step:1506/1670 train_time:148034ms step_avg:98.30ms +step:1507/1670 train_time:148131ms 
step_avg:98.30ms +step:1508/1670 train_time:148228ms step_avg:98.29ms +step:1509/1670 train_time:148325ms step_avg:98.29ms +step:1510/1670 train_time:148423ms step_avg:98.29ms +step:1511/1670 train_time:148522ms step_avg:98.29ms +step:1512/1670 train_time:148622ms step_avg:98.30ms +step:1513/1670 train_time:148720ms step_avg:98.29ms +step:1514/1670 train_time:148819ms step_avg:98.29ms +step:1515/1670 train_time:148917ms step_avg:98.29ms +step:1516/1670 train_time:149014ms step_avg:98.29ms +step:1517/1670 train_time:149111ms step_avg:98.29ms +step:1518/1670 train_time:149208ms step_avg:98.29ms +step:1519/1670 train_time:149305ms step_avg:98.29ms +step:1520/1670 train_time:149404ms step_avg:98.29ms +step:1521/1670 train_time:149502ms step_avg:98.29ms +step:1522/1670 train_time:149602ms step_avg:98.29ms +step:1523/1670 train_time:149700ms step_avg:98.29ms +step:1524/1670 train_time:149799ms step_avg:98.29ms +step:1525/1670 train_time:149897ms step_avg:98.29ms +step:1526/1670 train_time:149995ms step_avg:98.29ms +step:1527/1670 train_time:150092ms step_avg:98.29ms +step:1528/1670 train_time:150190ms step_avg:98.29ms +step:1529/1670 train_time:150287ms step_avg:98.29ms +step:1530/1670 train_time:150385ms step_avg:98.29ms +step:1531/1670 train_time:150483ms step_avg:98.29ms +step:1532/1670 train_time:150581ms step_avg:98.29ms +step:1533/1670 train_time:150679ms step_avg:98.29ms +step:1534/1670 train_time:150777ms step_avg:98.29ms +step:1535/1670 train_time:150876ms step_avg:98.29ms +step:1536/1670 train_time:150974ms step_avg:98.29ms +step:1537/1670 train_time:151071ms step_avg:98.29ms +step:1538/1670 train_time:151169ms step_avg:98.29ms +step:1539/1670 train_time:151267ms step_avg:98.29ms +step:1540/1670 train_time:151365ms step_avg:98.29ms +step:1541/1670 train_time:151463ms step_avg:98.29ms +step:1542/1670 train_time:151561ms step_avg:98.29ms +step:1543/1670 train_time:151659ms step_avg:98.29ms +step:1544/1670 train_time:151757ms step_avg:98.29ms +step:1545/1670 train_time:151856ms step_avg:98.29ms +step:1546/1670 train_time:151953ms step_avg:98.29ms +step:1547/1670 train_time:152051ms step_avg:98.29ms +step:1548/1670 train_time:152149ms step_avg:98.29ms +step:1549/1670 train_time:152247ms step_avg:98.29ms +step:1550/1670 train_time:152345ms step_avg:98.29ms +step:1551/1670 train_time:152444ms step_avg:98.29ms +step:1552/1670 train_time:152543ms step_avg:98.29ms +step:1553/1670 train_time:152643ms step_avg:98.29ms +step:1554/1670 train_time:152742ms step_avg:98.29ms +step:1555/1670 train_time:152841ms step_avg:98.29ms +step:1556/1670 train_time:152938ms step_avg:98.29ms +step:1557/1670 train_time:153036ms step_avg:98.29ms +step:1558/1670 train_time:153134ms step_avg:98.29ms +step:1559/1670 train_time:153233ms step_avg:98.29ms +step:1560/1670 train_time:153331ms step_avg:98.29ms +step:1561/1670 train_time:153431ms step_avg:98.29ms +step:1562/1670 train_time:153531ms step_avg:98.29ms +step:1563/1670 train_time:153631ms step_avg:98.29ms +step:1564/1670 train_time:153731ms step_avg:98.29ms +step:1565/1670 train_time:153832ms step_avg:98.29ms +step:1566/1670 train_time:153932ms step_avg:98.30ms +step:1567/1670 train_time:154031ms step_avg:98.30ms +step:1568/1670 train_time:154130ms step_avg:98.30ms +step:1569/1670 train_time:154228ms step_avg:98.30ms +step:1570/1670 train_time:154326ms step_avg:98.30ms +step:1571/1670 train_time:154424ms step_avg:98.30ms +step:1572/1670 train_time:154522ms step_avg:98.30ms +step:1573/1670 train_time:154620ms step_avg:98.30ms +step:1574/1670 train_time:154719ms 
step_avg:98.30ms +step:1575/1670 train_time:154817ms step_avg:98.30ms +step:1576/1670 train_time:154915ms step_avg:98.30ms +step:1577/1670 train_time:155013ms step_avg:98.30ms +step:1578/1670 train_time:155112ms step_avg:98.30ms +step:1579/1670 train_time:155210ms step_avg:98.30ms +step:1580/1670 train_time:155308ms step_avg:98.30ms +step:1581/1670 train_time:155406ms step_avg:98.30ms +step:1582/1670 train_time:155504ms step_avg:98.30ms +step:1583/1670 train_time:155603ms step_avg:98.30ms +step:1584/1670 train_time:155701ms step_avg:98.30ms +step:1585/1670 train_time:155799ms step_avg:98.30ms +step:1586/1670 train_time:155897ms step_avg:98.30ms +step:1587/1670 train_time:155996ms step_avg:98.30ms +step:1588/1670 train_time:156093ms step_avg:98.30ms +step:1589/1670 train_time:156191ms step_avg:98.30ms +step:1590/1670 train_time:156290ms step_avg:98.30ms +step:1591/1670 train_time:156389ms step_avg:98.30ms +step:1592/1670 train_time:156489ms step_avg:98.30ms +step:1593/1670 train_time:156587ms step_avg:98.30ms +step:1594/1670 train_time:156688ms step_avg:98.30ms +step:1595/1670 train_time:156789ms step_avg:98.30ms +step:1596/1670 train_time:156890ms step_avg:98.30ms +step:1597/1670 train_time:156989ms step_avg:98.30ms +step:1598/1670 train_time:157088ms step_avg:98.30ms +step:1599/1670 train_time:157185ms step_avg:98.30ms +step:1600/1670 train_time:157282ms step_avg:98.30ms +step:1601/1670 train_time:157380ms step_avg:98.30ms +step:1602/1670 train_time:157477ms step_avg:98.30ms +step:1603/1670 train_time:157574ms step_avg:98.30ms +step:1604/1670 train_time:157674ms step_avg:98.30ms +step:1605/1670 train_time:157774ms step_avg:98.30ms +step:1606/1670 train_time:157874ms step_avg:98.30ms +step:1607/1670 train_time:157972ms step_avg:98.30ms +step:1608/1670 train_time:158071ms step_avg:98.30ms +step:1609/1670 train_time:158170ms step_avg:98.30ms +step:1610/1670 train_time:158268ms step_avg:98.30ms +step:1611/1670 train_time:158367ms step_avg:98.30ms +step:1612/1670 train_time:158464ms step_avg:98.30ms +step:1613/1670 train_time:158563ms step_avg:98.30ms +step:1614/1670 train_time:158662ms step_avg:98.30ms +step:1615/1670 train_time:158761ms step_avg:98.30ms +step:1616/1670 train_time:158859ms step_avg:98.30ms +step:1617/1670 train_time:158957ms step_avg:98.30ms +step:1618/1670 train_time:159056ms step_avg:98.30ms +step:1619/1670 train_time:159153ms step_avg:98.30ms +step:1620/1670 train_time:159252ms step_avg:98.30ms +step:1621/1670 train_time:159351ms step_avg:98.30ms +step:1622/1670 train_time:159449ms step_avg:98.30ms +step:1623/1670 train_time:159549ms step_avg:98.30ms +step:1624/1670 train_time:159648ms step_avg:98.31ms +step:1625/1670 train_time:159748ms step_avg:98.31ms +step:1625/1670 val_loss:3.2852 train_time:159848ms step_avg:98.37ms +step:1626/1670 train_time:159872ms step_avg:98.32ms +step:1627/1670 train_time:159956ms step_avg:98.31ms +step:1628/1670 train_time:160056ms step_avg:98.31ms +step:1629/1670 train_time:160154ms step_avg:98.31ms +step:1630/1670 train_time:160252ms step_avg:98.31ms +step:1631/1670 train_time:160349ms step_avg:98.31ms +step:1632/1670 train_time:160446ms step_avg:98.31ms +step:1633/1670 train_time:160542ms step_avg:98.31ms +step:1634/1670 train_time:160640ms step_avg:98.31ms +step:1635/1670 train_time:160738ms step_avg:98.31ms +step:1636/1670 train_time:160840ms step_avg:98.31ms +step:1637/1670 train_time:160942ms step_avg:98.31ms +step:1638/1670 train_time:161042ms step_avg:98.32ms +step:1639/1670 train_time:161142ms step_avg:98.32ms +step:1640/1670 
train_time:161240ms step_avg:98.32ms +step:1641/1670 train_time:161340ms step_avg:98.32ms +step:1642/1670 train_time:161438ms step_avg:98.32ms +step:1643/1670 train_time:161536ms step_avg:98.32ms +step:1644/1670 train_time:161634ms step_avg:98.32ms +step:1645/1670 train_time:161732ms step_avg:98.32ms +step:1646/1670 train_time:161830ms step_avg:98.32ms +step:1647/1670 train_time:161931ms step_avg:98.32ms +step:1648/1670 train_time:162031ms step_avg:98.32ms +step:1649/1670 train_time:162132ms step_avg:98.32ms +step:1650/1670 train_time:162232ms step_avg:98.32ms +step:1651/1670 train_time:162330ms step_avg:98.32ms +step:1652/1670 train_time:162428ms step_avg:98.32ms +step:1653/1670 train_time:162526ms step_avg:98.32ms +step:1654/1670 train_time:162624ms step_avg:98.32ms +step:1655/1670 train_time:162723ms step_avg:98.32ms +step:1656/1670 train_time:162821ms step_avg:98.32ms +step:1657/1670 train_time:162922ms step_avg:98.32ms +step:1658/1670 train_time:163021ms step_avg:98.32ms +step:1659/1670 train_time:163121ms step_avg:98.33ms +step:1660/1670 train_time:163221ms step_avg:98.33ms +step:1661/1670 train_time:163320ms step_avg:98.33ms +step:1662/1670 train_time:163418ms step_avg:98.33ms +step:1663/1670 train_time:163516ms step_avg:98.33ms +step:1664/1670 train_time:163616ms step_avg:98.33ms +step:1665/1670 train_time:163714ms step_avg:98.33ms +step:1666/1670 train_time:163814ms step_avg:98.33ms +step:1667/1670 train_time:163912ms step_avg:98.33ms +step:1668/1670 train_time:164012ms step_avg:98.33ms +step:1669/1670 train_time:164110ms step_avg:98.33ms +step:1670/1670 train_time:164209ms step_avg:98.33ms +step:1670/1670 val_loss:3.2772 train_time:164306ms step_avg:98.39ms +peak memory allocated: 34001 MiB reserved: 49136 MiB diff --git a/records/090525_SkipMLPBlocks/f01447c9-da70-405a-8ed0-858caadd1194.txt b/records/090525_SkipMLPBlocks/f01447c9-da70-405a-8ed0-858caadd1194.txt new file mode 100644 index 000000000..1d6a73fd4 --- /dev/null +++ b/records/090525_SkipMLPBlocks/f01447c9-da70-405a-8ed0-858caadd1194.txt @@ -0,0 +1,2853 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + 
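+ # Note: scale_a / scale_b are per-tensor dequantization factors; torch._scaled_mm
+ # folds scale_a * scale_b back into the fp8 product, undoing the .div() quantization
+ # above so the bf16 output comes back at the original scale.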
scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, 
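+ # (a_stride_b, a_stride_r, a_stride_c) are the batch/row/column strides of A,
+ # and the c_* strides below describe the output C the same way; passing a batch
+ # stride of 0 lets the same kernel serve both 2D and batched 3D inputs.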
c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = 
tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
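+ # The step is sharded round-robin across ranks: gradients are reduce-scattered
+ # (averaged) so each rank owns the gradient of params[base_i + rank], that rank
+ # alone runs the momentum + Newton-Schulz update for its shard, and the refreshed
+ # parameters are all-gathered back asynchronously so communication overlaps with
+ # the next shard's compute. Illustrative usage (2D matrix params only, per the
+ # warning above): opt = Muon(hidden_matrix_params, lr=0.02, momentum=0.95)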
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
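# note: both moment buffers below are allocated at p_slice size, not p, so
+ # Adam state is itself sharded across ranks (ZeRO-style) and its memory
+ # cost per rank drops by a factor of ~world_size
+ 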
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + SKIPPED_MLP_BLOCKS = [0, 12] # skip MLP blocks for first and last layers by @EmelyanenkoK + self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, + seqlens: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return 
next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+ ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+ assert len(ve) == len(self.blocks)
+
+ long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+ bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+ assert len(bm_sizes) == len(self.blocks)
+
+ x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+ # U-net design by @brendanh0gan
+ skip_connections = []
+ skip_weights = self.scalars[:(len(self.blocks) // 2)]
+ lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+ sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+ n = len(self.blocks) // 2
+
+ for i in range(len(self.blocks)):
+ if i >= n:
+ x = x + skip_weights[i - n] * skip_connections.pop()
+ x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i])
+ if i < n:
+ skip_connections.append(x)
+
+ x = norm(x)
+ logits = self.lm_head(x).float()
+ # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+ logits = 30 * torch.sigmoid(logits / 7.5)
+ loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+ return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+ header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+ assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+ assert header[1] == 1, "unsupported version"
+ num_tokens = int(header[2]) # number of tokens (claimed)
+ with file.open("rb", buffering=0) as f:
+ tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+ f.seek(256 * 4)
+ nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+ assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+ return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+ # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+ def __init__(self, tokens: Tensor, world_size: int = 1):
+ # Precompute BOS positions once per shard
+ self.size = tokens.numel()
+ self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+ self.i = 0
+ self.world_size = world_size
+
+ def next_batch(self, num_tokens_local: int, max_seq_len: int):
+ n = len(self.bos_idx)
+ starts = [[] for _ in range(self.world_size)]
+ ends = [[] for _ in range(self.world_size)]
+
+ idx = self.i
+ for r in range(self.world_size):
+ cur_len = 0
+ while cur_len <= num_tokens_local:
+ if idx >= n:
+ # cur may be unbound on the first pass through this loop, so report idx instead
+ raise StopIteration(f"Insufficient BOS tokens after index {idx}; hit tail of shard.")
+ cur = self.bos_idx[idx]
+ starts[r].append(cur)
+ end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+ cur + max_seq_len,
+ cur + num_tokens_local - cur_len + 1)
+ ends[r].append(end)
+ cur_len += end - cur
+ idx += 1
+
+ assert cur_len == num_tokens_local + 1
+ self.i = idx
+
+ return starts, ends
+
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
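+ # A worked example of next_batch (hypothetical numbers, illustration only):
+ # with num_tokens_local=8, a large max_seq_len, and BOS positions [0, 5] in a
+ # 100-token shard, next_batch returns starts=[0, 5], ends=[5, 9]: the two
+ # slices concatenate to 9 tokens, giving 8 inputs (buf[:-1]) and 8 shifted
+ # targets (buf[1:]); the cumulative lengths become the cu_seqlens passed to
+ # flash_attn_varlen_func, so attention never crosses document boundaries.
+ # align_to_bos: each 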
sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons
+ train_batch_size: int = 2048 * 24 * 8
+ train_max_seq_len: int = 128 * 16
+ val_batch_size: int = 4 * 64 * 1024 * 8
+ # optimization
+ num_iterations: int = 1705 # number of iterations to run
+ cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+ # evaluation and logging
+ run_id: str = str(uuid.uuid4())
+ val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+ save_checkpoint: bool = False
+ # attention masking
+ block_size: int = 128
+ ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+ run_id = args.run_id
+ os.makedirs("logs", exist_ok=True)
+ logfile = f"logs/{run_id}.txt"
+ print(logfile)
+def print0(s, console=False):
+ if master_process:
+ with open(logfile, "a") as f:
+ if console:
+ print(s)
+ print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+ import subprocess # avoid top level import
+ return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+ vocab_size=50257,
+ num_layers=12,
+ num_heads=6,
+ model_dim=768,
+ max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+ if isinstance(m, nn.Embedding):
+ m.bfloat16()
+for param in model.parameters():
+ dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
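+# The two-optimizer split restates Muon's usage note above (a summary of the
+# code below, not new behavior): DistAdam takes the embeddings, lm_head and
+# {0,1}-D scalars, while Muon takes the >=2-D hidden matrices, whose updates
+# are orthogonalized:
+#   DistAdam <- scalar_params + head_params + embed_params
+#   Muon     <- hidden_matrix_params
+# small adam epsilon by @YouJiacheng. 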
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + ws = get_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if 
master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Sep 5 16:18:22 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.144.03 Driver Version: 550.144.03 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:8D:00.0 Off | 0 | +| N/A 45C P0 129W / 700W | 5826MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:91:00.0 Off | 0 | +| N/A 35C P0 119W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:95:00.0 Off | 0 | +| N/A 45C P0 127W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:99:00.0 Off | 0 | +| N/A 34C P0 121W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:AB:00.0 Off | 0 | +| N/A 43C P0 125W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:AF:00.0 Off | 0 | +| N/A 35C P0 117W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:B3:00.0 Off | 0 | +| N/A 44C P0 131W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:B7:00.0 Off | 0 | +| N/A 34C P0 124W / 700W | 1516MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 85777 C /usr/bin/python3 1506MiB | +| 0 N/A N/A 85778 C /usr/bin/python3 610MiB | +| 0 N/A N/A 85779 C /usr/bin/python3 610MiB | +| 0 N/A N/A 85780 C /usr/bin/python3 610MiB | +| 0 N/A N/A 85781 C /usr/bin/python3 610MiB | +| 0 N/A N/A 85782 C /usr/bin/python3 610MiB | +| 0 N/A N/A 85783 C /usr/bin/python3 610MiB | +| 0 N/A N/A 85784 C /usr/bin/python3 610MiB | +| 1 N/A N/A 85778 C /usr/bin/python3 1506MiB | +| 2 N/A N/A 85779 C /usr/bin/python3 1506MiB | +| 3 N/A N/A 85780 C /usr/bin/python3 1506MiB | +| 4 N/A N/A 85781 C /usr/bin/python3 1506MiB | +| 5 N/A N/A 85782 C /usr/bin/python3 1506MiB | +| 6 N/A N/A 85783 C /usr/bin/python3 1506MiB | +| 7 N/A N/A 85784 C /usr/bin/python3 1506MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1705 val_loss:10.8258 train_time:0ms step_avg:0.04ms +step:1/1705 train_time:431ms step_avg:431.48ms +step:2/1705 train_time:450ms step_avg:225.20ms +step:3/1705 train_time:520ms step_avg:173.49ms +step:4/1705 train_time:612ms step_avg:152.98ms +step:5/1705 train_time:704ms step_avg:140.78ms +step:6/1705 train_time:796ms step_avg:132.63ms +step:7/1705 train_time:888ms step_avg:126.83ms +step:8/1705 
train_time:980ms step_avg:122.52ms +step:9/1705 train_time:1073ms step_avg:119.21ms +step:10/1705 train_time:1165ms step_avg:116.51ms +step:11/1705 train_time:1258ms step_avg:114.32ms +step:12/1705 train_time:1351ms step_avg:112.62ms +step:13/1705 train_time:1446ms step_avg:111.26ms +step:14/1705 train_time:1540ms step_avg:110.03ms +step:15/1705 train_time:1634ms step_avg:108.94ms +step:16/1705 train_time:1727ms step_avg:107.91ms +step:17/1705 train_time:1820ms step_avg:107.06ms +step:18/1705 train_time:1913ms step_avg:106.27ms +step:19/1705 train_time:2006ms step_avg:105.57ms +step:20/1705 train_time:2099ms step_avg:104.95ms +step:21/1705 train_time:2192ms step_avg:104.36ms +step:22/1705 train_time:2284ms step_avg:103.84ms +step:23/1705 train_time:2379ms step_avg:103.44ms +step:24/1705 train_time:2475ms step_avg:103.14ms +step:25/1705 train_time:2567ms step_avg:102.68ms +step:26/1705 train_time:2661ms step_avg:102.34ms +step:27/1705 train_time:2754ms step_avg:102.01ms +step:28/1705 train_time:2847ms step_avg:101.67ms +step:29/1705 train_time:2941ms step_avg:101.40ms +step:30/1705 train_time:3034ms step_avg:101.13ms +step:31/1705 train_time:3126ms step_avg:100.84ms +step:32/1705 train_time:3220ms step_avg:100.63ms +step:33/1705 train_time:3313ms step_avg:100.38ms +step:34/1705 train_time:3406ms step_avg:100.17ms +step:35/1705 train_time:3500ms step_avg:100.00ms +step:36/1705 train_time:3594ms step_avg:99.84ms +step:37/1705 train_time:3687ms step_avg:99.65ms +step:38/1705 train_time:3780ms step_avg:99.48ms +step:39/1705 train_time:3873ms step_avg:99.30ms +step:40/1705 train_time:3966ms step_avg:99.14ms +step:41/1705 train_time:4059ms step_avg:99.00ms +step:42/1705 train_time:4151ms step_avg:98.83ms +step:43/1705 train_time:4244ms step_avg:98.69ms +step:44/1705 train_time:4338ms step_avg:98.59ms +step:45/1705 train_time:4431ms step_avg:98.46ms +step:46/1705 train_time:4525ms step_avg:98.37ms +step:47/1705 train_time:4618ms step_avg:98.26ms +step:48/1705 train_time:4712ms step_avg:98.16ms +step:49/1705 train_time:4805ms step_avg:98.05ms +step:50/1705 train_time:4899ms step_avg:97.97ms +step:51/1705 train_time:4991ms step_avg:97.87ms +step:52/1705 train_time:5084ms step_avg:97.77ms +step:53/1705 train_time:5177ms step_avg:97.68ms +step:54/1705 train_time:5270ms step_avg:97.58ms +step:55/1705 train_time:5363ms step_avg:97.51ms +step:56/1705 train_time:5457ms step_avg:97.45ms +step:57/1705 train_time:5549ms step_avg:97.36ms +step:58/1705 train_time:5643ms step_avg:97.29ms +step:59/1705 train_time:5737ms step_avg:97.24ms +step:60/1705 train_time:5831ms step_avg:97.18ms +step:61/1705 train_time:5923ms step_avg:97.11ms +step:62/1705 train_time:6016ms step_avg:97.04ms +step:63/1705 train_time:6109ms step_avg:96.97ms +step:64/1705 train_time:6202ms step_avg:96.91ms +step:65/1705 train_time:6295ms step_avg:96.85ms +step:66/1705 train_time:6388ms step_avg:96.78ms +step:67/1705 train_time:6481ms step_avg:96.74ms +step:68/1705 train_time:6575ms step_avg:96.69ms +step:69/1705 train_time:6668ms step_avg:96.63ms +step:70/1705 train_time:6761ms step_avg:96.59ms +step:71/1705 train_time:6855ms step_avg:96.55ms +step:72/1705 train_time:6947ms step_avg:96.49ms +step:73/1705 train_time:7041ms step_avg:96.46ms +step:74/1705 train_time:7135ms step_avg:96.42ms +step:75/1705 train_time:7227ms step_avg:96.36ms +step:76/1705 train_time:7320ms step_avg:96.31ms +step:77/1705 train_time:7413ms step_avg:96.27ms +step:78/1705 train_time:7505ms step_avg:96.22ms +step:79/1705 train_time:7599ms step_avg:96.18ms +step:80/1705 
train_time:7692ms step_avg:96.14ms +step:81/1705 train_time:7785ms step_avg:96.11ms +step:82/1705 train_time:7878ms step_avg:96.08ms +step:83/1705 train_time:7972ms step_avg:96.04ms +step:84/1705 train_time:8065ms step_avg:96.02ms +step:85/1705 train_time:8159ms step_avg:95.99ms +step:86/1705 train_time:8253ms step_avg:95.96ms +step:87/1705 train_time:8345ms step_avg:95.92ms +step:88/1705 train_time:8440ms step_avg:95.91ms +step:89/1705 train_time:8532ms step_avg:95.87ms +step:90/1705 train_time:8625ms step_avg:95.84ms +step:91/1705 train_time:8719ms step_avg:95.81ms +step:92/1705 train_time:8813ms step_avg:95.79ms +step:93/1705 train_time:8906ms step_avg:95.76ms +step:94/1705 train_time:8999ms step_avg:95.74ms +step:95/1705 train_time:9092ms step_avg:95.70ms +step:96/1705 train_time:9184ms step_avg:95.67ms +step:97/1705 train_time:9278ms step_avg:95.65ms +step:98/1705 train_time:9371ms step_avg:95.62ms +step:99/1705 train_time:9465ms step_avg:95.60ms +step:100/1705 train_time:9558ms step_avg:95.58ms +step:101/1705 train_time:9650ms step_avg:95.55ms +step:102/1705 train_time:9744ms step_avg:95.53ms +step:103/1705 train_time:9836ms step_avg:95.50ms +step:104/1705 train_time:9930ms step_avg:95.48ms +step:105/1705 train_time:10024ms step_avg:95.46ms +step:106/1705 train_time:10117ms step_avg:95.44ms +step:107/1705 train_time:10209ms step_avg:95.41ms +step:108/1705 train_time:10302ms step_avg:95.39ms +step:109/1705 train_time:10395ms step_avg:95.37ms +step:110/1705 train_time:10488ms step_avg:95.34ms +step:111/1705 train_time:10581ms step_avg:95.32ms +step:112/1705 train_time:10674ms step_avg:95.30ms +step:113/1705 train_time:10766ms step_avg:95.28ms +step:114/1705 train_time:10860ms step_avg:95.27ms +step:115/1705 train_time:10953ms step_avg:95.25ms +step:116/1705 train_time:11046ms step_avg:95.23ms +step:117/1705 train_time:11139ms step_avg:95.21ms +step:118/1705 train_time:11233ms step_avg:95.19ms +step:119/1705 train_time:11326ms step_avg:95.17ms +step:120/1705 train_time:11419ms step_avg:95.16ms +step:121/1705 train_time:11512ms step_avg:95.14ms +step:122/1705 train_time:11604ms step_avg:95.12ms +step:123/1705 train_time:11697ms step_avg:95.10ms +step:124/1705 train_time:11790ms step_avg:95.08ms +step:125/1705 train_time:11884ms step_avg:95.07ms +step:125/1705 val_loss:4.3069 train_time:11977ms step_avg:95.82ms +step:126/1705 train_time:11999ms step_avg:95.23ms +step:127/1705 train_time:12076ms step_avg:95.09ms +step:128/1705 train_time:12179ms step_avg:95.15ms +step:129/1705 train_time:12276ms step_avg:95.16ms +step:130/1705 train_time:12369ms step_avg:95.15ms +step:131/1705 train_time:12461ms step_avg:95.12ms +step:132/1705 train_time:12553ms step_avg:95.10ms +step:133/1705 train_time:12645ms step_avg:95.08ms +step:134/1705 train_time:12737ms step_avg:95.05ms +step:135/1705 train_time:12829ms step_avg:95.03ms +step:136/1705 train_time:12921ms step_avg:95.01ms +step:137/1705 train_time:13014ms step_avg:94.99ms +step:138/1705 train_time:13110ms step_avg:95.00ms +step:139/1705 train_time:13203ms step_avg:94.99ms +step:140/1705 train_time:13297ms step_avg:94.98ms +step:141/1705 train_time:13391ms step_avg:94.97ms +step:142/1705 train_time:13484ms step_avg:94.96ms +step:143/1705 train_time:13577ms step_avg:94.94ms +step:144/1705 train_time:13669ms step_avg:94.92ms +step:145/1705 train_time:13761ms step_avg:94.90ms +step:146/1705 train_time:13853ms step_avg:94.88ms +step:147/1705 train_time:13945ms step_avg:94.87ms +step:148/1705 train_time:14038ms step_avg:94.85ms +step:149/1705 
train_time:14132ms step_avg:94.84ms +step:150/1705 train_time:14226ms step_avg:94.84ms +step:151/1705 train_time:14320ms step_avg:94.83ms +step:152/1705 train_time:14413ms step_avg:94.82ms +step:153/1705 train_time:14506ms step_avg:94.81ms +step:154/1705 train_time:14598ms step_avg:94.79ms +step:155/1705 train_time:14691ms step_avg:94.78ms +step:156/1705 train_time:14784ms step_avg:94.77ms +step:157/1705 train_time:14877ms step_avg:94.76ms +step:158/1705 train_time:14971ms step_avg:94.75ms +step:159/1705 train_time:15063ms step_avg:94.74ms +step:160/1705 train_time:15157ms step_avg:94.73ms +step:161/1705 train_time:15250ms step_avg:94.72ms +step:162/1705 train_time:15343ms step_avg:94.71ms +step:163/1705 train_time:15437ms step_avg:94.70ms +step:164/1705 train_time:15530ms step_avg:94.70ms +step:165/1705 train_time:15622ms step_avg:94.68ms +step:166/1705 train_time:15715ms step_avg:94.67ms +step:167/1705 train_time:15808ms step_avg:94.66ms +step:168/1705 train_time:15900ms step_avg:94.64ms +step:169/1705 train_time:15994ms step_avg:94.64ms +step:170/1705 train_time:16087ms step_avg:94.63ms +step:171/1705 train_time:16180ms step_avg:94.62ms +step:172/1705 train_time:16274ms step_avg:94.62ms +step:173/1705 train_time:16368ms step_avg:94.61ms +step:174/1705 train_time:16460ms step_avg:94.60ms +step:175/1705 train_time:16553ms step_avg:94.59ms +step:176/1705 train_time:16646ms step_avg:94.58ms +step:177/1705 train_time:16739ms step_avg:94.57ms +step:178/1705 train_time:16832ms step_avg:94.56ms +step:179/1705 train_time:16925ms step_avg:94.55ms +step:180/1705 train_time:17017ms step_avg:94.54ms +step:181/1705 train_time:17111ms step_avg:94.53ms +step:182/1705 train_time:17202ms step_avg:94.52ms +step:183/1705 train_time:17296ms step_avg:94.51ms +step:184/1705 train_time:17390ms step_avg:94.51ms +step:185/1705 train_time:17483ms step_avg:94.50ms +step:186/1705 train_time:17576ms step_avg:94.49ms +step:187/1705 train_time:17669ms step_avg:94.49ms +step:188/1705 train_time:17762ms step_avg:94.48ms +step:189/1705 train_time:17854ms step_avg:94.47ms +step:190/1705 train_time:17947ms step_avg:94.46ms +step:191/1705 train_time:18040ms step_avg:94.45ms +step:192/1705 train_time:18133ms step_avg:94.44ms +step:193/1705 train_time:18227ms step_avg:94.44ms +step:194/1705 train_time:18318ms step_avg:94.42ms +step:195/1705 train_time:18412ms step_avg:94.42ms +step:196/1705 train_time:18505ms step_avg:94.41ms +step:197/1705 train_time:18598ms step_avg:94.41ms +step:198/1705 train_time:18691ms step_avg:94.40ms +step:199/1705 train_time:18785ms step_avg:94.40ms +step:200/1705 train_time:18878ms step_avg:94.39ms +step:201/1705 train_time:18970ms step_avg:94.38ms +step:202/1705 train_time:19062ms step_avg:94.37ms +step:203/1705 train_time:19156ms step_avg:94.36ms +step:204/1705 train_time:19249ms step_avg:94.36ms +step:205/1705 train_time:19342ms step_avg:94.35ms +step:206/1705 train_time:19434ms step_avg:94.34ms +step:207/1705 train_time:19527ms step_avg:94.33ms +step:208/1705 train_time:19620ms step_avg:94.33ms +step:209/1705 train_time:19714ms step_avg:94.33ms +step:210/1705 train_time:19807ms step_avg:94.32ms +step:211/1705 train_time:19900ms step_avg:94.31ms +step:212/1705 train_time:19992ms step_avg:94.30ms +step:213/1705 train_time:20316ms step_avg:95.38ms +step:214/1705 train_time:20421ms step_avg:95.43ms +step:215/1705 train_time:20513ms step_avg:95.41ms +step:216/1705 train_time:20605ms step_avg:95.39ms +step:217/1705 train_time:20697ms step_avg:95.38ms +step:218/1705 train_time:20789ms step_avg:95.36ms 
+step:219/1705 train_time:20881ms step_avg:95.35ms +step:220/1705 train_time:20974ms step_avg:95.33ms +step:221/1705 train_time:21065ms step_avg:95.32ms +step:222/1705 train_time:21157ms step_avg:95.30ms +step:223/1705 train_time:21252ms step_avg:95.30ms +step:224/1705 train_time:21348ms step_avg:95.30ms +step:225/1705 train_time:21443ms step_avg:95.30ms +step:226/1705 train_time:21535ms step_avg:95.29ms +step:227/1705 train_time:21629ms step_avg:95.28ms +step:228/1705 train_time:21721ms step_avg:95.27ms +step:229/1705 train_time:21813ms step_avg:95.25ms +step:230/1705 train_time:21906ms step_avg:95.24ms +step:231/1705 train_time:21997ms step_avg:95.23ms +step:232/1705 train_time:22089ms step_avg:95.21ms +step:233/1705 train_time:22182ms step_avg:95.20ms +step:234/1705 train_time:22277ms step_avg:95.20ms +step:235/1705 train_time:22372ms step_avg:95.20ms +step:236/1705 train_time:22466ms step_avg:95.19ms +step:237/1705 train_time:22558ms step_avg:95.18ms +step:238/1705 train_time:22651ms step_avg:95.17ms +step:239/1705 train_time:22744ms step_avg:95.16ms +step:240/1705 train_time:22837ms step_avg:95.15ms +step:241/1705 train_time:22929ms step_avg:95.14ms +step:242/1705 train_time:23022ms step_avg:95.13ms +step:243/1705 train_time:23115ms step_avg:95.12ms +step:244/1705 train_time:23208ms step_avg:95.11ms +step:245/1705 train_time:23300ms step_avg:95.10ms +step:246/1705 train_time:23396ms step_avg:95.10ms +step:247/1705 train_time:23490ms step_avg:95.10ms +step:248/1705 train_time:23583ms step_avg:95.09ms +step:249/1705 train_time:23675ms step_avg:95.08ms +step:250/1705 train_time:23768ms step_avg:95.07ms +step:250/1705 val_loss:3.9798 train_time:23860ms step_avg:95.44ms +step:251/1705 train_time:23882ms step_avg:95.15ms +step:252/1705 train_time:23957ms step_avg:95.07ms +step:253/1705 train_time:24055ms step_avg:95.08ms +step:254/1705 train_time:24149ms step_avg:95.07ms +step:255/1705 train_time:24241ms step_avg:95.06ms +step:256/1705 train_time:24334ms step_avg:95.05ms +step:257/1705 train_time:24426ms step_avg:95.04ms +step:258/1705 train_time:24518ms step_avg:95.03ms +step:259/1705 train_time:24610ms step_avg:95.02ms +step:260/1705 train_time:24702ms step_avg:95.01ms +step:261/1705 train_time:24794ms step_avg:95.00ms +step:262/1705 train_time:24888ms step_avg:94.99ms +step:263/1705 train_time:24982ms step_avg:94.99ms +step:264/1705 train_time:25077ms step_avg:94.99ms +step:265/1705 train_time:25171ms step_avg:94.98ms +step:266/1705 train_time:25264ms step_avg:94.98ms +step:267/1705 train_time:25356ms step_avg:94.97ms +step:268/1705 train_time:25449ms step_avg:94.96ms +step:269/1705 train_time:25541ms step_avg:94.95ms +step:270/1705 train_time:25633ms step_avg:94.94ms +step:271/1705 train_time:25725ms step_avg:94.93ms +step:272/1705 train_time:25818ms step_avg:94.92ms +step:273/1705 train_time:25912ms step_avg:94.92ms +step:274/1705 train_time:26006ms step_avg:94.91ms +step:275/1705 train_time:26100ms step_avg:94.91ms +step:276/1705 train_time:26193ms step_avg:94.90ms +step:277/1705 train_time:26286ms step_avg:94.89ms +step:278/1705 train_time:26378ms step_avg:94.88ms +step:279/1705 train_time:26471ms step_avg:94.88ms +step:280/1705 train_time:26563ms step_avg:94.87ms +step:281/1705 train_time:26656ms step_avg:94.86ms +step:282/1705 train_time:26749ms step_avg:94.85ms +step:283/1705 train_time:26841ms step_avg:94.85ms +step:284/1705 train_time:26934ms step_avg:94.84ms +step:285/1705 train_time:27028ms step_avg:94.84ms +step:286/1705 train_time:27122ms step_avg:94.83ms +step:287/1705 
train_time:27216ms step_avg:94.83ms +step:288/1705 train_time:27309ms step_avg:94.82ms +step:289/1705 train_time:27401ms step_avg:94.81ms +step:290/1705 train_time:27494ms step_avg:94.81ms +step:291/1705 train_time:27586ms step_avg:94.80ms +step:292/1705 train_time:27679ms step_avg:94.79ms +step:293/1705 train_time:27772ms step_avg:94.79ms +step:294/1705 train_time:27865ms step_avg:94.78ms +step:295/1705 train_time:27958ms step_avg:94.77ms +step:296/1705 train_time:28051ms step_avg:94.77ms +step:297/1705 train_time:28145ms step_avg:94.77ms +step:298/1705 train_time:28239ms step_avg:94.76ms +step:299/1705 train_time:28332ms step_avg:94.76ms +step:300/1705 train_time:28425ms step_avg:94.75ms +step:301/1705 train_time:28518ms step_avg:94.74ms +step:302/1705 train_time:28610ms step_avg:94.73ms +step:303/1705 train_time:28702ms step_avg:94.73ms +step:304/1705 train_time:28795ms step_avg:94.72ms +step:305/1705 train_time:28888ms step_avg:94.71ms +step:306/1705 train_time:28980ms step_avg:94.71ms +step:307/1705 train_time:29074ms step_avg:94.70ms +step:308/1705 train_time:29169ms step_avg:94.70ms +step:309/1705 train_time:29262ms step_avg:94.70ms +step:310/1705 train_time:29355ms step_avg:94.69ms +step:311/1705 train_time:29449ms step_avg:94.69ms +step:312/1705 train_time:29543ms step_avg:94.69ms +step:313/1705 train_time:29635ms step_avg:94.68ms +step:314/1705 train_time:29728ms step_avg:94.68ms +step:315/1705 train_time:29821ms step_avg:94.67ms +step:316/1705 train_time:29913ms step_avg:94.66ms +step:317/1705 train_time:30006ms step_avg:94.66ms +step:318/1705 train_time:30099ms step_avg:94.65ms +step:319/1705 train_time:30192ms step_avg:94.65ms +step:320/1705 train_time:30285ms step_avg:94.64ms +step:321/1705 train_time:30378ms step_avg:94.64ms +step:322/1705 train_time:30471ms step_avg:94.63ms +step:323/1705 train_time:30564ms step_avg:94.63ms +step:324/1705 train_time:30656ms step_avg:94.62ms +step:325/1705 train_time:30749ms step_avg:94.61ms +step:326/1705 train_time:30843ms step_avg:94.61ms +step:327/1705 train_time:30935ms step_avg:94.60ms +step:328/1705 train_time:31029ms step_avg:94.60ms +step:329/1705 train_time:31120ms step_avg:94.59ms +step:330/1705 train_time:31213ms step_avg:94.58ms +step:331/1705 train_time:31307ms step_avg:94.58ms +step:332/1705 train_time:31399ms step_avg:94.57ms +step:333/1705 train_time:31492ms step_avg:94.57ms +step:334/1705 train_time:31585ms step_avg:94.57ms +step:335/1705 train_time:31677ms step_avg:94.56ms +step:336/1705 train_time:31770ms step_avg:94.55ms +step:337/1705 train_time:31863ms step_avg:94.55ms +step:338/1705 train_time:31956ms step_avg:94.54ms +step:339/1705 train_time:32050ms step_avg:94.54ms +step:340/1705 train_time:32143ms step_avg:94.54ms +step:341/1705 train_time:32236ms step_avg:94.53ms +step:342/1705 train_time:32329ms step_avg:94.53ms +step:343/1705 train_time:32423ms step_avg:94.53ms +step:344/1705 train_time:32515ms step_avg:94.52ms +step:345/1705 train_time:32609ms step_avg:94.52ms +step:346/1705 train_time:32702ms step_avg:94.51ms +step:347/1705 train_time:32794ms step_avg:94.51ms +step:348/1705 train_time:32887ms step_avg:94.50ms +step:349/1705 train_time:32980ms step_avg:94.50ms +step:350/1705 train_time:33073ms step_avg:94.50ms +step:351/1705 train_time:33167ms step_avg:94.49ms +step:352/1705 train_time:33259ms step_avg:94.48ms +step:353/1705 train_time:33352ms step_avg:94.48ms +step:354/1705 train_time:33445ms step_avg:94.48ms +step:355/1705 train_time:33538ms step_avg:94.47ms +step:356/1705 train_time:33630ms step_avg:94.47ms 
+step:357/1705 train_time:33723ms step_avg:94.46ms +step:358/1705 train_time:33816ms step_avg:94.46ms +step:359/1705 train_time:33909ms step_avg:94.45ms +step:360/1705 train_time:34001ms step_avg:94.45ms +step:361/1705 train_time:34094ms step_avg:94.44ms +step:362/1705 train_time:34187ms step_avg:94.44ms +step:363/1705 train_time:34280ms step_avg:94.43ms +step:364/1705 train_time:34373ms step_avg:94.43ms +step:365/1705 train_time:34466ms step_avg:94.43ms +step:366/1705 train_time:34558ms step_avg:94.42ms +step:367/1705 train_time:34652ms step_avg:94.42ms +step:368/1705 train_time:34745ms step_avg:94.42ms +step:369/1705 train_time:34837ms step_avg:94.41ms +step:370/1705 train_time:34930ms step_avg:94.41ms +step:371/1705 train_time:35023ms step_avg:94.40ms +step:372/1705 train_time:35116ms step_avg:94.40ms +step:373/1705 train_time:35209ms step_avg:94.39ms +step:374/1705 train_time:35302ms step_avg:94.39ms +step:375/1705 train_time:35395ms step_avg:94.39ms +step:375/1705 val_loss:3.8258 train_time:35489ms step_avg:94.64ms +step:376/1705 train_time:35510ms step_avg:94.44ms +step:377/1705 train_time:35587ms step_avg:94.39ms +step:378/1705 train_time:35685ms step_avg:94.41ms +step:379/1705 train_time:35779ms step_avg:94.40ms +step:380/1705 train_time:35871ms step_avg:94.40ms +step:381/1705 train_time:35963ms step_avg:94.39ms +step:382/1705 train_time:36055ms step_avg:94.39ms +step:383/1705 train_time:36147ms step_avg:94.38ms +step:384/1705 train_time:36239ms step_avg:94.37ms +step:385/1705 train_time:36331ms step_avg:94.37ms +step:386/1705 train_time:36424ms step_avg:94.36ms +step:387/1705 train_time:36519ms step_avg:94.36ms +step:388/1705 train_time:36614ms step_avg:94.37ms +step:389/1705 train_time:36709ms step_avg:94.37ms +step:390/1705 train_time:36802ms step_avg:94.36ms +step:391/1705 train_time:36895ms step_avg:94.36ms +step:392/1705 train_time:36988ms step_avg:94.36ms +step:393/1705 train_time:37081ms step_avg:94.35ms +step:394/1705 train_time:37173ms step_avg:94.35ms +step:395/1705 train_time:37265ms step_avg:94.34ms +step:396/1705 train_time:37358ms step_avg:94.34ms +step:397/1705 train_time:37450ms step_avg:94.33ms +step:398/1705 train_time:37544ms step_avg:94.33ms +step:399/1705 train_time:37639ms step_avg:94.33ms +step:400/1705 train_time:37732ms step_avg:94.33ms +step:401/1705 train_time:37825ms step_avg:94.33ms +step:402/1705 train_time:37919ms step_avg:94.33ms +step:403/1705 train_time:38011ms step_avg:94.32ms +step:404/1705 train_time:38104ms step_avg:94.32ms +step:405/1705 train_time:38196ms step_avg:94.31ms +step:406/1705 train_time:38290ms step_avg:94.31ms +step:407/1705 train_time:38383ms step_avg:94.31ms +step:408/1705 train_time:38476ms step_avg:94.30ms +step:409/1705 train_time:38569ms step_avg:94.30ms +step:410/1705 train_time:38662ms step_avg:94.30ms +step:411/1705 train_time:38755ms step_avg:94.29ms +step:412/1705 train_time:38848ms step_avg:94.29ms +step:413/1705 train_time:38941ms step_avg:94.29ms +step:414/1705 train_time:39034ms step_avg:94.29ms +step:415/1705 train_time:39126ms step_avg:94.28ms +step:416/1705 train_time:39219ms step_avg:94.28ms +step:417/1705 train_time:39312ms step_avg:94.27ms +step:418/1705 train_time:39405ms step_avg:94.27ms +step:419/1705 train_time:39499ms step_avg:94.27ms +step:420/1705 train_time:39593ms step_avg:94.27ms +step:421/1705 train_time:39686ms step_avg:94.27ms +step:422/1705 train_time:39780ms step_avg:94.27ms +step:423/1705 train_time:39874ms step_avg:94.26ms +step:424/1705 train_time:39967ms step_avg:94.26ms +step:425/1705 
train_time:40241ms step_avg:94.68ms +step:426/1705 train_time:40348ms step_avg:94.71ms +step:427/1705 train_time:40439ms step_avg:94.70ms +step:428/1705 train_time:40530ms step_avg:94.70ms +step:429/1705 train_time:40622ms step_avg:94.69ms +step:430/1705 train_time:40715ms step_avg:94.69ms +step:431/1705 train_time:40807ms step_avg:94.68ms +step:432/1705 train_time:40899ms step_avg:94.67ms +step:433/1705 train_time:40991ms step_avg:94.67ms +step:434/1705 train_time:41083ms step_avg:94.66ms +step:435/1705 train_time:41177ms step_avg:94.66ms +step:436/1705 train_time:41272ms step_avg:94.66ms +step:437/1705 train_time:41367ms step_avg:94.66ms +step:438/1705 train_time:41461ms step_avg:94.66ms +step:439/1705 train_time:41554ms step_avg:94.66ms +step:440/1705 train_time:41646ms step_avg:94.65ms +step:441/1705 train_time:41738ms step_avg:94.64ms +step:442/1705 train_time:41830ms step_avg:94.64ms +step:443/1705 train_time:41922ms step_avg:94.63ms +step:444/1705 train_time:42015ms step_avg:94.63ms +step:445/1705 train_time:42107ms step_avg:94.62ms +step:446/1705 train_time:42201ms step_avg:94.62ms +step:447/1705 train_time:42296ms step_avg:94.62ms +step:448/1705 train_time:42389ms step_avg:94.62ms +step:449/1705 train_time:42484ms step_avg:94.62ms +step:450/1705 train_time:42577ms step_avg:94.62ms +step:451/1705 train_time:42669ms step_avg:94.61ms +step:452/1705 train_time:42762ms step_avg:94.61ms +step:453/1705 train_time:42854ms step_avg:94.60ms +step:454/1705 train_time:42946ms step_avg:94.59ms +step:455/1705 train_time:43038ms step_avg:94.59ms +step:456/1705 train_time:43130ms step_avg:94.58ms +step:457/1705 train_time:43224ms step_avg:94.58ms +step:458/1705 train_time:43319ms step_avg:94.58ms +step:459/1705 train_time:43412ms step_avg:94.58ms +step:460/1705 train_time:43505ms step_avg:94.58ms +step:461/1705 train_time:43599ms step_avg:94.58ms +step:462/1705 train_time:43692ms step_avg:94.57ms +step:463/1705 train_time:43785ms step_avg:94.57ms +step:464/1705 train_time:43877ms step_avg:94.56ms +step:465/1705 train_time:43969ms step_avg:94.56ms +step:466/1705 train_time:44061ms step_avg:94.55ms +step:467/1705 train_time:44155ms step_avg:94.55ms +step:468/1705 train_time:44248ms step_avg:94.55ms +step:469/1705 train_time:44341ms step_avg:94.54ms +step:470/1705 train_time:44435ms step_avg:94.54ms +step:471/1705 train_time:44528ms step_avg:94.54ms +step:472/1705 train_time:44621ms step_avg:94.54ms +step:473/1705 train_time:44714ms step_avg:94.53ms +step:474/1705 train_time:44806ms step_avg:94.53ms +step:475/1705 train_time:44900ms step_avg:94.53ms +step:476/1705 train_time:44993ms step_avg:94.52ms +step:477/1705 train_time:45085ms step_avg:94.52ms +step:478/1705 train_time:45179ms step_avg:94.52ms +step:479/1705 train_time:45272ms step_avg:94.51ms +step:480/1705 train_time:45365ms step_avg:94.51ms +step:481/1705 train_time:45459ms step_avg:94.51ms +step:482/1705 train_time:45552ms step_avg:94.51ms +step:483/1705 train_time:45644ms step_avg:94.50ms +step:484/1705 train_time:45737ms step_avg:94.50ms +step:485/1705 train_time:45831ms step_avg:94.50ms +step:486/1705 train_time:45923ms step_avg:94.49ms +step:487/1705 train_time:46017ms step_avg:94.49ms +step:488/1705 train_time:46109ms step_avg:94.49ms +step:489/1705 train_time:46202ms step_avg:94.48ms +step:490/1705 train_time:46296ms step_avg:94.48ms +step:491/1705 train_time:46389ms step_avg:94.48ms +step:492/1705 train_time:46483ms step_avg:94.48ms +step:493/1705 train_time:46576ms step_avg:94.47ms +step:494/1705 train_time:46669ms step_avg:94.47ms 
+step:495/1705 train_time:46762ms step_avg:94.47ms +step:496/1705 train_time:46855ms step_avg:94.47ms +step:497/1705 train_time:46948ms step_avg:94.46ms +step:498/1705 train_time:47040ms step_avg:94.46ms +step:499/1705 train_time:47133ms step_avg:94.46ms +step:500/1705 train_time:47227ms step_avg:94.45ms +step:500/1705 val_loss:3.7225 train_time:47320ms step_avg:94.64ms +step:501/1705 train_time:47342ms step_avg:94.49ms +step:502/1705 train_time:47419ms step_avg:94.46ms +step:503/1705 train_time:47516ms step_avg:94.47ms +step:504/1705 train_time:47610ms step_avg:94.46ms +step:505/1705 train_time:47702ms step_avg:94.46ms +step:506/1705 train_time:47795ms step_avg:94.46ms +step:507/1705 train_time:47887ms step_avg:94.45ms +step:508/1705 train_time:47979ms step_avg:94.45ms +step:509/1705 train_time:48071ms step_avg:94.44ms +step:510/1705 train_time:48163ms step_avg:94.44ms +step:511/1705 train_time:48255ms step_avg:94.43ms +step:512/1705 train_time:48351ms step_avg:94.44ms +step:513/1705 train_time:48447ms step_avg:94.44ms +step:514/1705 train_time:48541ms step_avg:94.44ms +step:515/1705 train_time:48635ms step_avg:94.44ms +step:516/1705 train_time:48727ms step_avg:94.43ms +step:517/1705 train_time:48820ms step_avg:94.43ms +step:518/1705 train_time:48912ms step_avg:94.42ms +step:519/1705 train_time:49005ms step_avg:94.42ms +step:520/1705 train_time:49097ms step_avg:94.42ms +step:521/1705 train_time:49189ms step_avg:94.41ms +step:522/1705 train_time:49282ms step_avg:94.41ms +step:523/1705 train_time:49377ms step_avg:94.41ms +step:524/1705 train_time:49471ms step_avg:94.41ms +step:525/1705 train_time:49565ms step_avg:94.41ms +step:526/1705 train_time:49658ms step_avg:94.41ms +step:527/1705 train_time:49751ms step_avg:94.40ms +step:528/1705 train_time:49844ms step_avg:94.40ms +step:529/1705 train_time:49936ms step_avg:94.40ms +step:530/1705 train_time:50029ms step_avg:94.39ms +step:531/1705 train_time:50121ms step_avg:94.39ms +step:532/1705 train_time:50214ms step_avg:94.39ms +step:533/1705 train_time:50306ms step_avg:94.38ms +step:534/1705 train_time:50400ms step_avg:94.38ms +step:535/1705 train_time:50493ms step_avg:94.38ms +step:536/1705 train_time:50587ms step_avg:94.38ms +step:537/1705 train_time:50680ms step_avg:94.38ms +step:538/1705 train_time:50774ms step_avg:94.37ms +step:539/1705 train_time:50867ms step_avg:94.37ms +step:540/1705 train_time:50959ms step_avg:94.37ms +step:541/1705 train_time:51052ms step_avg:94.37ms +step:542/1705 train_time:51145ms step_avg:94.36ms +step:543/1705 train_time:51237ms step_avg:94.36ms +step:544/1705 train_time:51330ms step_avg:94.36ms +step:545/1705 train_time:51423ms step_avg:94.35ms +step:546/1705 train_time:51516ms step_avg:94.35ms +step:547/1705 train_time:51609ms step_avg:94.35ms +step:548/1705 train_time:51702ms step_avg:94.35ms +step:549/1705 train_time:51795ms step_avg:94.34ms +step:550/1705 train_time:51888ms step_avg:94.34ms +step:551/1705 train_time:51980ms step_avg:94.34ms +step:552/1705 train_time:52073ms step_avg:94.34ms +step:553/1705 train_time:52167ms step_avg:94.33ms +step:554/1705 train_time:52259ms step_avg:94.33ms +step:555/1705 train_time:52353ms step_avg:94.33ms +step:556/1705 train_time:52446ms step_avg:94.33ms +step:557/1705 train_time:52539ms step_avg:94.32ms +step:558/1705 train_time:52632ms step_avg:94.32ms +step:559/1705 train_time:52725ms step_avg:94.32ms +step:560/1705 train_time:52817ms step_avg:94.32ms +step:561/1705 train_time:52911ms step_avg:94.32ms +step:562/1705 train_time:53004ms step_avg:94.31ms +step:563/1705 
train_time:53096ms step_avg:94.31ms +step:564/1705 train_time:53189ms step_avg:94.31ms +step:565/1705 train_time:53282ms step_avg:94.30ms +step:566/1705 train_time:53374ms step_avg:94.30ms +step:567/1705 train_time:53468ms step_avg:94.30ms +step:568/1705 train_time:53561ms step_avg:94.30ms +step:569/1705 train_time:53654ms step_avg:94.30ms +step:570/1705 train_time:53748ms step_avg:94.29ms +step:571/1705 train_time:53842ms step_avg:94.29ms +step:572/1705 train_time:53937ms step_avg:94.29ms +step:573/1705 train_time:54031ms step_avg:94.29ms +step:574/1705 train_time:54126ms step_avg:94.30ms +step:575/1705 train_time:54220ms step_avg:94.30ms +step:576/1705 train_time:54314ms step_avg:94.30ms +step:577/1705 train_time:54409ms step_avg:94.30ms +step:578/1705 train_time:54504ms step_avg:94.30ms +step:579/1705 train_time:54597ms step_avg:94.30ms +step:580/1705 train_time:54692ms step_avg:94.30ms +step:581/1705 train_time:54788ms step_avg:94.30ms +step:582/1705 train_time:54882ms step_avg:94.30ms +step:583/1705 train_time:54977ms step_avg:94.30ms +step:584/1705 train_time:55072ms step_avg:94.30ms +step:585/1705 train_time:55166ms step_avg:94.30ms +step:586/1705 train_time:55259ms step_avg:94.30ms +step:587/1705 train_time:55353ms step_avg:94.30ms +step:588/1705 train_time:55448ms step_avg:94.30ms +step:589/1705 train_time:55542ms step_avg:94.30ms +step:590/1705 train_time:55636ms step_avg:94.30ms +step:591/1705 train_time:55732ms step_avg:94.30ms +step:592/1705 train_time:55827ms step_avg:94.30ms +step:593/1705 train_time:55920ms step_avg:94.30ms +step:594/1705 train_time:56015ms step_avg:94.30ms +step:595/1705 train_time:56111ms step_avg:94.30ms +step:596/1705 train_time:56205ms step_avg:94.30ms +step:597/1705 train_time:56299ms step_avg:94.30ms +step:598/1705 train_time:56394ms step_avg:94.30ms +step:599/1705 train_time:56488ms step_avg:94.30ms +step:600/1705 train_time:56582ms step_avg:94.30ms +step:601/1705 train_time:56676ms step_avg:94.30ms +step:602/1705 train_time:56772ms step_avg:94.31ms +step:603/1705 train_time:56866ms step_avg:94.31ms +step:604/1705 train_time:56960ms step_avg:94.30ms +step:605/1705 train_time:57054ms step_avg:94.30ms +step:606/1705 train_time:57150ms step_avg:94.31ms +step:607/1705 train_time:57245ms step_avg:94.31ms +step:608/1705 train_time:57339ms step_avg:94.31ms +step:609/1705 train_time:57434ms step_avg:94.31ms +step:610/1705 train_time:57528ms step_avg:94.31ms +step:611/1705 train_time:57622ms step_avg:94.31ms +step:612/1705 train_time:57716ms step_avg:94.31ms +step:613/1705 train_time:57811ms step_avg:94.31ms +step:614/1705 train_time:57906ms step_avg:94.31ms +step:615/1705 train_time:57999ms step_avg:94.31ms +step:616/1705 train_time:58093ms step_avg:94.31ms +step:617/1705 train_time:58187ms step_avg:94.31ms +step:618/1705 train_time:58281ms step_avg:94.31ms +step:619/1705 train_time:58375ms step_avg:94.31ms +step:620/1705 train_time:58470ms step_avg:94.31ms +step:621/1705 train_time:58564ms step_avg:94.31ms +step:622/1705 train_time:58659ms step_avg:94.31ms +step:623/1705 train_time:58753ms step_avg:94.31ms +step:624/1705 train_time:58848ms step_avg:94.31ms +step:625/1705 train_time:58942ms step_avg:94.31ms +step:625/1705 val_loss:3.6215 train_time:59037ms step_avg:94.46ms +step:626/1705 train_time:59060ms step_avg:94.35ms +step:627/1705 train_time:59132ms step_avg:94.31ms +step:628/1705 train_time:59227ms step_avg:94.31ms +step:629/1705 train_time:59331ms step_avg:94.33ms +step:630/1705 train_time:59427ms step_avg:94.33ms +step:631/1705 train_time:59521ms 
step_avg:94.33ms +step:632/1705 train_time:59614ms step_avg:94.33ms +step:633/1705 train_time:59708ms step_avg:94.32ms +step:634/1705 train_time:59802ms step_avg:94.32ms +step:635/1705 train_time:59895ms step_avg:94.32ms +step:636/1705 train_time:59990ms step_avg:94.32ms +step:637/1705 train_time:60085ms step_avg:94.33ms +step:638/1705 train_time:60180ms step_avg:94.33ms +step:639/1705 train_time:60537ms step_avg:94.74ms +step:640/1705 train_time:60635ms step_avg:94.74ms +step:641/1705 train_time:60728ms step_avg:94.74ms +step:642/1705 train_time:60821ms step_avg:94.74ms +step:643/1705 train_time:60915ms step_avg:94.74ms +step:644/1705 train_time:61008ms step_avg:94.73ms +step:645/1705 train_time:61102ms step_avg:94.73ms +step:646/1705 train_time:61194ms step_avg:94.73ms +step:647/1705 train_time:61287ms step_avg:94.73ms +step:648/1705 train_time:61381ms step_avg:94.72ms +step:649/1705 train_time:61478ms step_avg:94.73ms +step:650/1705 train_time:61575ms step_avg:94.73ms +step:651/1705 train_time:61669ms step_avg:94.73ms +step:652/1705 train_time:61764ms step_avg:94.73ms +step:653/1705 train_time:61858ms step_avg:94.73ms +step:654/1705 train_time:61952ms step_avg:94.73ms +step:655/1705 train_time:62046ms step_avg:94.73ms +step:656/1705 train_time:62140ms step_avg:94.73ms +step:657/1705 train_time:62232ms step_avg:94.72ms +step:658/1705 train_time:62326ms step_avg:94.72ms +step:659/1705 train_time:62422ms step_avg:94.72ms +step:660/1705 train_time:62518ms step_avg:94.72ms +step:661/1705 train_time:62614ms step_avg:94.73ms +step:662/1705 train_time:62708ms step_avg:94.73ms +step:663/1705 train_time:62803ms step_avg:94.73ms +step:664/1705 train_time:62898ms step_avg:94.73ms +step:665/1705 train_time:62992ms step_avg:94.72ms +step:666/1705 train_time:63085ms step_avg:94.72ms +step:667/1705 train_time:63180ms step_avg:94.72ms +step:668/1705 train_time:63274ms step_avg:94.72ms +step:669/1705 train_time:63367ms step_avg:94.72ms +step:670/1705 train_time:63462ms step_avg:94.72ms +step:671/1705 train_time:63557ms step_avg:94.72ms +step:672/1705 train_time:63652ms step_avg:94.72ms +step:673/1705 train_time:63747ms step_avg:94.72ms +step:674/1705 train_time:63841ms step_avg:94.72ms +step:675/1705 train_time:63937ms step_avg:94.72ms +step:676/1705 train_time:64031ms step_avg:94.72ms +step:677/1705 train_time:64124ms step_avg:94.72ms +step:678/1705 train_time:64219ms step_avg:94.72ms +step:679/1705 train_time:64311ms step_avg:94.71ms +step:680/1705 train_time:64406ms step_avg:94.71ms +step:681/1705 train_time:64501ms step_avg:94.71ms +step:682/1705 train_time:64595ms step_avg:94.71ms +step:683/1705 train_time:64690ms step_avg:94.71ms +step:684/1705 train_time:64784ms step_avg:94.71ms +step:685/1705 train_time:64880ms step_avg:94.71ms +step:686/1705 train_time:64975ms step_avg:94.72ms +step:687/1705 train_time:65068ms step_avg:94.71ms +step:688/1705 train_time:65162ms step_avg:94.71ms +step:689/1705 train_time:65256ms step_avg:94.71ms +step:690/1705 train_time:65350ms step_avg:94.71ms +step:691/1705 train_time:65444ms step_avg:94.71ms +step:692/1705 train_time:65539ms step_avg:94.71ms +step:693/1705 train_time:65634ms step_avg:94.71ms +step:694/1705 train_time:65728ms step_avg:94.71ms +step:695/1705 train_time:65825ms step_avg:94.71ms +step:696/1705 train_time:65919ms step_avg:94.71ms +step:697/1705 train_time:66015ms step_avg:94.71ms +step:698/1705 train_time:66108ms step_avg:94.71ms +step:699/1705 train_time:66202ms step_avg:94.71ms +step:700/1705 train_time:66296ms step_avg:94.71ms +step:701/1705 
train_time:66390ms step_avg:94.71ms +step:702/1705 train_time:66485ms step_avg:94.71ms +step:703/1705 train_time:66579ms step_avg:94.71ms +step:704/1705 train_time:66674ms step_avg:94.71ms +step:705/1705 train_time:66768ms step_avg:94.71ms +step:706/1705 train_time:66863ms step_avg:94.71ms +step:707/1705 train_time:66958ms step_avg:94.71ms +step:708/1705 train_time:67052ms step_avg:94.71ms +step:709/1705 train_time:67146ms step_avg:94.71ms +step:710/1705 train_time:67242ms step_avg:94.71ms +step:711/1705 train_time:67337ms step_avg:94.71ms +step:712/1705 train_time:67431ms step_avg:94.71ms +step:713/1705 train_time:67525ms step_avg:94.71ms +step:714/1705 train_time:67620ms step_avg:94.71ms +step:715/1705 train_time:67715ms step_avg:94.71ms +step:716/1705 train_time:67808ms step_avg:94.70ms +step:717/1705 train_time:67904ms step_avg:94.71ms +step:718/1705 train_time:67999ms step_avg:94.71ms +step:719/1705 train_time:68093ms step_avg:94.71ms +step:720/1705 train_time:68187ms step_avg:94.70ms +step:721/1705 train_time:68281ms step_avg:94.70ms +step:722/1705 train_time:68376ms step_avg:94.70ms +step:723/1705 train_time:68470ms step_avg:94.70ms +step:724/1705 train_time:68564ms step_avg:94.70ms +step:725/1705 train_time:68659ms step_avg:94.70ms +step:726/1705 train_time:68754ms step_avg:94.70ms +step:727/1705 train_time:68849ms step_avg:94.70ms +step:728/1705 train_time:68943ms step_avg:94.70ms +step:729/1705 train_time:69038ms step_avg:94.70ms +step:730/1705 train_time:69132ms step_avg:94.70ms +step:731/1705 train_time:69226ms step_avg:94.70ms +step:732/1705 train_time:69320ms step_avg:94.70ms +step:733/1705 train_time:69416ms step_avg:94.70ms +step:734/1705 train_time:69509ms step_avg:94.70ms +step:735/1705 train_time:69604ms step_avg:94.70ms +step:736/1705 train_time:69699ms step_avg:94.70ms +step:737/1705 train_time:69794ms step_avg:94.70ms +step:738/1705 train_time:69888ms step_avg:94.70ms +step:739/1705 train_time:69982ms step_avg:94.70ms +step:740/1705 train_time:70077ms step_avg:94.70ms +step:741/1705 train_time:70171ms step_avg:94.70ms +step:742/1705 train_time:70265ms step_avg:94.70ms +step:743/1705 train_time:70360ms step_avg:94.70ms +step:744/1705 train_time:70455ms step_avg:94.70ms +step:745/1705 train_time:70549ms step_avg:94.70ms +step:746/1705 train_time:70644ms step_avg:94.70ms +step:747/1705 train_time:70739ms step_avg:94.70ms +step:748/1705 train_time:70834ms step_avg:94.70ms +step:749/1705 train_time:70928ms step_avg:94.70ms +step:750/1705 train_time:71023ms step_avg:94.70ms +step:750/1705 val_loss:3.5671 train_time:71119ms step_avg:94.82ms +step:751/1705 train_time:71139ms step_avg:94.73ms +step:752/1705 train_time:71217ms step_avg:94.70ms +step:753/1705 train_time:71315ms step_avg:94.71ms +step:754/1705 train_time:71412ms step_avg:94.71ms +step:755/1705 train_time:71506ms step_avg:94.71ms +step:756/1705 train_time:71599ms step_avg:94.71ms +step:757/1705 train_time:71692ms step_avg:94.71ms +step:758/1705 train_time:71786ms step_avg:94.70ms +step:759/1705 train_time:71879ms step_avg:94.70ms +step:760/1705 train_time:71973ms step_avg:94.70ms +step:761/1705 train_time:72067ms step_avg:94.70ms +step:762/1705 train_time:72162ms step_avg:94.70ms +step:763/1705 train_time:72259ms step_avg:94.70ms +step:764/1705 train_time:72355ms step_avg:94.71ms +step:765/1705 train_time:72450ms step_avg:94.71ms +step:766/1705 train_time:72544ms step_avg:94.71ms +step:767/1705 train_time:72638ms step_avg:94.70ms +step:768/1705 train_time:72732ms step_avg:94.70ms +step:769/1705 train_time:72826ms 
step_avg:94.70ms +step:770/1705 train_time:72919ms step_avg:94.70ms +step:771/1705 train_time:73012ms step_avg:94.70ms +step:772/1705 train_time:73107ms step_avg:94.70ms +step:773/1705 train_time:73202ms step_avg:94.70ms +step:774/1705 train_time:73299ms step_avg:94.70ms +step:775/1705 train_time:73394ms step_avg:94.70ms +step:776/1705 train_time:73489ms step_avg:94.70ms +step:777/1705 train_time:73584ms step_avg:94.70ms +step:778/1705 train_time:73677ms step_avg:94.70ms +step:779/1705 train_time:73771ms step_avg:94.70ms +step:780/1705 train_time:73865ms step_avg:94.70ms +step:781/1705 train_time:73960ms step_avg:94.70ms +step:782/1705 train_time:74053ms step_avg:94.70ms +step:783/1705 train_time:74149ms step_avg:94.70ms +step:784/1705 train_time:74246ms step_avg:94.70ms +step:785/1705 train_time:74342ms step_avg:94.70ms +step:786/1705 train_time:74437ms step_avg:94.70ms +step:787/1705 train_time:74532ms step_avg:94.70ms +step:788/1705 train_time:74626ms step_avg:94.70ms +step:789/1705 train_time:74721ms step_avg:94.70ms +step:790/1705 train_time:74815ms step_avg:94.70ms +step:791/1705 train_time:74909ms step_avg:94.70ms +step:792/1705 train_time:75004ms step_avg:94.70ms +step:793/1705 train_time:75097ms step_avg:94.70ms +step:794/1705 train_time:75191ms step_avg:94.70ms +step:795/1705 train_time:75287ms step_avg:94.70ms +step:796/1705 train_time:75382ms step_avg:94.70ms +step:797/1705 train_time:75477ms step_avg:94.70ms +step:798/1705 train_time:75571ms step_avg:94.70ms +step:799/1705 train_time:75665ms step_avg:94.70ms +step:800/1705 train_time:75761ms step_avg:94.70ms +step:801/1705 train_time:75855ms step_avg:94.70ms +step:802/1705 train_time:75950ms step_avg:94.70ms +step:803/1705 train_time:76045ms step_avg:94.70ms +step:804/1705 train_time:76139ms step_avg:94.70ms +step:805/1705 train_time:76234ms step_avg:94.70ms +step:806/1705 train_time:76328ms step_avg:94.70ms +step:807/1705 train_time:76425ms step_avg:94.70ms +step:808/1705 train_time:76519ms step_avg:94.70ms +step:809/1705 train_time:76613ms step_avg:94.70ms +step:810/1705 train_time:76708ms step_avg:94.70ms +step:811/1705 train_time:76802ms step_avg:94.70ms +step:812/1705 train_time:76896ms step_avg:94.70ms +step:813/1705 train_time:76991ms step_avg:94.70ms +step:814/1705 train_time:77086ms step_avg:94.70ms +step:815/1705 train_time:77181ms step_avg:94.70ms +step:816/1705 train_time:77275ms step_avg:94.70ms +step:817/1705 train_time:77369ms step_avg:94.70ms +step:818/1705 train_time:77464ms step_avg:94.70ms +step:819/1705 train_time:77559ms step_avg:94.70ms +step:820/1705 train_time:77653ms step_avg:94.70ms +step:821/1705 train_time:77747ms step_avg:94.70ms +step:822/1705 train_time:77842ms step_avg:94.70ms +step:823/1705 train_time:77936ms step_avg:94.70ms +step:824/1705 train_time:78030ms step_avg:94.70ms +step:825/1705 train_time:78125ms step_avg:94.70ms +step:826/1705 train_time:78220ms step_avg:94.70ms +step:827/1705 train_time:78314ms step_avg:94.70ms +step:828/1705 train_time:78408ms step_avg:94.70ms +step:829/1705 train_time:78503ms step_avg:94.70ms +step:830/1705 train_time:78598ms step_avg:94.70ms +step:831/1705 train_time:78693ms step_avg:94.70ms +step:832/1705 train_time:78788ms step_avg:94.70ms +step:833/1705 train_time:78883ms step_avg:94.70ms +step:834/1705 train_time:78978ms step_avg:94.70ms +step:835/1705 train_time:79071ms step_avg:94.70ms +step:836/1705 train_time:79168ms step_avg:94.70ms +step:837/1705 train_time:79262ms step_avg:94.70ms +step:838/1705 train_time:79356ms step_avg:94.70ms +step:839/1705 
train_time:79450ms step_avg:94.70ms +step:840/1705 train_time:79547ms step_avg:94.70ms +step:841/1705 train_time:79641ms step_avg:94.70ms +step:842/1705 train_time:79735ms step_avg:94.70ms +step:843/1705 train_time:79829ms step_avg:94.70ms +step:844/1705 train_time:79925ms step_avg:94.70ms +step:845/1705 train_time:80020ms step_avg:94.70ms +step:846/1705 train_time:80113ms step_avg:94.70ms +step:847/1705 train_time:80207ms step_avg:94.70ms +step:848/1705 train_time:80302ms step_avg:94.70ms +step:849/1705 train_time:80396ms step_avg:94.69ms +step:850/1705 train_time:80490ms step_avg:94.69ms +step:851/1705 train_time:80770ms step_avg:94.91ms +step:852/1705 train_time:80872ms step_avg:94.92ms +step:853/1705 train_time:80966ms step_avg:94.92ms +step:854/1705 train_time:81059ms step_avg:94.92ms +step:855/1705 train_time:81152ms step_avg:94.91ms +step:856/1705 train_time:81246ms step_avg:94.91ms +step:857/1705 train_time:81339ms step_avg:94.91ms +step:858/1705 train_time:81433ms step_avg:94.91ms +step:859/1705 train_time:81526ms step_avg:94.91ms +step:860/1705 train_time:81620ms step_avg:94.91ms +step:861/1705 train_time:81716ms step_avg:94.91ms +step:862/1705 train_time:81814ms step_avg:94.91ms +step:863/1705 train_time:81910ms step_avg:94.91ms +step:864/1705 train_time:82005ms step_avg:94.91ms +step:865/1705 train_time:82099ms step_avg:94.91ms +step:866/1705 train_time:82193ms step_avg:94.91ms +step:867/1705 train_time:82286ms step_avg:94.91ms +step:868/1705 train_time:82380ms step_avg:94.91ms +step:869/1705 train_time:82473ms step_avg:94.91ms +step:870/1705 train_time:82567ms step_avg:94.90ms +step:871/1705 train_time:82661ms step_avg:94.90ms +step:872/1705 train_time:82756ms step_avg:94.90ms +step:873/1705 train_time:82851ms step_avg:94.90ms +step:874/1705 train_time:82947ms step_avg:94.90ms +step:875/1705 train_time:83043ms step_avg:94.91ms +step:875/1705 val_loss:3.5249 train_time:83139ms step_avg:95.02ms +step:876/1705 train_time:83160ms step_avg:94.93ms +step:877/1705 train_time:83238ms step_avg:94.91ms +step:878/1705 train_time:83337ms step_avg:94.92ms +step:879/1705 train_time:83432ms step_avg:94.92ms +step:880/1705 train_time:83528ms step_avg:94.92ms +step:881/1705 train_time:83621ms step_avg:94.92ms +step:882/1705 train_time:83714ms step_avg:94.91ms +step:883/1705 train_time:83808ms step_avg:94.91ms +step:884/1705 train_time:83901ms step_avg:94.91ms +step:885/1705 train_time:83995ms step_avg:94.91ms +step:886/1705 train_time:84090ms step_avg:94.91ms +step:887/1705 train_time:84187ms step_avg:94.91ms +step:888/1705 train_time:84284ms step_avg:94.91ms +step:889/1705 train_time:84380ms step_avg:94.92ms +step:890/1705 train_time:84474ms step_avg:94.91ms +step:891/1705 train_time:84568ms step_avg:94.91ms +step:892/1705 train_time:84662ms step_avg:94.91ms +step:893/1705 train_time:84756ms step_avg:94.91ms +step:894/1705 train_time:84849ms step_avg:94.91ms +step:895/1705 train_time:84943ms step_avg:94.91ms +step:896/1705 train_time:85037ms step_avg:94.91ms +step:897/1705 train_time:85131ms step_avg:94.91ms +step:898/1705 train_time:85227ms step_avg:94.91ms +step:899/1705 train_time:85323ms step_avg:94.91ms +step:900/1705 train_time:85417ms step_avg:94.91ms +step:901/1705 train_time:85512ms step_avg:94.91ms +step:902/1705 train_time:85607ms step_avg:94.91ms +step:903/1705 train_time:85701ms step_avg:94.91ms +step:904/1705 train_time:85795ms step_avg:94.91ms +step:905/1705 train_time:85889ms step_avg:94.91ms +step:906/1705 train_time:85984ms step_avg:94.91ms +step:907/1705 train_time:86077ms 
step_avg:94.90ms +step:908/1705 train_time:86172ms step_avg:94.90ms +step:909/1705 train_time:86267ms step_avg:94.90ms +step:910/1705 train_time:86363ms step_avg:94.90ms +step:911/1705 train_time:86457ms step_avg:94.90ms +step:912/1705 train_time:86552ms step_avg:94.90ms +step:913/1705 train_time:86647ms step_avg:94.90ms +step:914/1705 train_time:86742ms step_avg:94.90ms +step:915/1705 train_time:86836ms step_avg:94.90ms +step:916/1705 train_time:86930ms step_avg:94.90ms +step:917/1705 train_time:87024ms step_avg:94.90ms +step:918/1705 train_time:87118ms step_avg:94.90ms +step:919/1705 train_time:87212ms step_avg:94.90ms +step:920/1705 train_time:87308ms step_avg:94.90ms +step:921/1705 train_time:87405ms step_avg:94.90ms +step:922/1705 train_time:87499ms step_avg:94.90ms +step:923/1705 train_time:87594ms step_avg:94.90ms +step:924/1705 train_time:87689ms step_avg:94.90ms +step:925/1705 train_time:87783ms step_avg:94.90ms +step:926/1705 train_time:87876ms step_avg:94.90ms +step:927/1705 train_time:87970ms step_avg:94.90ms +step:928/1705 train_time:88064ms step_avg:94.90ms +step:929/1705 train_time:88158ms step_avg:94.90ms +step:930/1705 train_time:88253ms step_avg:94.90ms +step:931/1705 train_time:88348ms step_avg:94.90ms +step:932/1705 train_time:88443ms step_avg:94.90ms +step:933/1705 train_time:88538ms step_avg:94.90ms +step:934/1705 train_time:88633ms step_avg:94.90ms +step:935/1705 train_time:88728ms step_avg:94.90ms +step:936/1705 train_time:88823ms step_avg:94.90ms +step:937/1705 train_time:88916ms step_avg:94.89ms +step:938/1705 train_time:89010ms step_avg:94.89ms +step:939/1705 train_time:89105ms step_avg:94.89ms +step:940/1705 train_time:89200ms step_avg:94.89ms +step:941/1705 train_time:89294ms step_avg:94.89ms +step:942/1705 train_time:89388ms step_avg:94.89ms +step:943/1705 train_time:89483ms step_avg:94.89ms +step:944/1705 train_time:89578ms step_avg:94.89ms +step:945/1705 train_time:89672ms step_avg:94.89ms +step:946/1705 train_time:89768ms step_avg:94.89ms +step:947/1705 train_time:89862ms step_avg:94.89ms +step:948/1705 train_time:89956ms step_avg:94.89ms +step:949/1705 train_time:90050ms step_avg:94.89ms +step:950/1705 train_time:90145ms step_avg:94.89ms +step:951/1705 train_time:90239ms step_avg:94.89ms +step:952/1705 train_time:90334ms step_avg:94.89ms +step:953/1705 train_time:90429ms step_avg:94.89ms +step:954/1705 train_time:90524ms step_avg:94.89ms +step:955/1705 train_time:90618ms step_avg:94.89ms +step:956/1705 train_time:90712ms step_avg:94.89ms +step:957/1705 train_time:90808ms step_avg:94.89ms +step:958/1705 train_time:90903ms step_avg:94.89ms +step:959/1705 train_time:90997ms step_avg:94.89ms +step:960/1705 train_time:91091ms step_avg:94.89ms +step:961/1705 train_time:91186ms step_avg:94.89ms +step:962/1705 train_time:91281ms step_avg:94.89ms +step:963/1705 train_time:91375ms step_avg:94.89ms +step:964/1705 train_time:91469ms step_avg:94.89ms +step:965/1705 train_time:91564ms step_avg:94.88ms +step:966/1705 train_time:91658ms step_avg:94.88ms +step:967/1705 train_time:91753ms step_avg:94.88ms +step:968/1705 train_time:91848ms step_avg:94.88ms +step:969/1705 train_time:91943ms step_avg:94.88ms +step:970/1705 train_time:92036ms step_avg:94.88ms +step:971/1705 train_time:92131ms step_avg:94.88ms +step:972/1705 train_time:92225ms step_avg:94.88ms +step:973/1705 train_time:92320ms step_avg:94.88ms +step:974/1705 train_time:92414ms step_avg:94.88ms +step:975/1705 train_time:92509ms step_avg:94.88ms +step:976/1705 train_time:92603ms step_avg:94.88ms +step:977/1705 
train_time:92697ms step_avg:94.88ms +step:978/1705 train_time:92791ms step_avg:94.88ms +step:979/1705 train_time:92886ms step_avg:94.88ms +step:980/1705 train_time:92981ms step_avg:94.88ms +step:981/1705 train_time:93075ms step_avg:94.88ms +step:982/1705 train_time:93170ms step_avg:94.88ms +step:983/1705 train_time:93265ms step_avg:94.88ms +step:984/1705 train_time:93358ms step_avg:94.88ms +step:985/1705 train_time:93453ms step_avg:94.88ms +step:986/1705 train_time:93548ms step_avg:94.88ms +step:987/1705 train_time:93643ms step_avg:94.88ms +step:988/1705 train_time:93738ms step_avg:94.88ms +step:989/1705 train_time:93833ms step_avg:94.88ms +step:990/1705 train_time:93928ms step_avg:94.88ms +step:991/1705 train_time:94023ms step_avg:94.88ms +step:992/1705 train_time:94117ms step_avg:94.88ms +step:993/1705 train_time:94212ms step_avg:94.88ms +step:994/1705 train_time:94306ms step_avg:94.88ms +step:995/1705 train_time:94400ms step_avg:94.87ms +step:996/1705 train_time:94494ms step_avg:94.87ms +step:997/1705 train_time:94590ms step_avg:94.87ms +step:998/1705 train_time:94685ms step_avg:94.87ms +step:999/1705 train_time:94779ms step_avg:94.87ms +step:1000/1705 train_time:94873ms step_avg:94.87ms +step:1000/1705 val_loss:3.4856 train_time:94969ms step_avg:94.97ms +step:1001/1705 train_time:94990ms step_avg:94.90ms +step:1002/1705 train_time:95068ms step_avg:94.88ms +step:1003/1705 train_time:95169ms step_avg:94.88ms +step:1004/1705 train_time:95263ms step_avg:94.88ms +step:1005/1705 train_time:95357ms step_avg:94.88ms +step:1006/1705 train_time:95450ms step_avg:94.88ms +step:1007/1705 train_time:95544ms step_avg:94.88ms +step:1008/1705 train_time:95636ms step_avg:94.88ms +step:1009/1705 train_time:95730ms step_avg:94.88ms +step:1010/1705 train_time:95824ms step_avg:94.88ms +step:1011/1705 train_time:95919ms step_avg:94.87ms +step:1012/1705 train_time:96015ms step_avg:94.88ms +step:1013/1705 train_time:96112ms step_avg:94.88ms +step:1014/1705 train_time:96208ms step_avg:94.88ms +step:1015/1705 train_time:96304ms step_avg:94.88ms +step:1016/1705 train_time:96397ms step_avg:94.88ms +step:1017/1705 train_time:96492ms step_avg:94.88ms +step:1018/1705 train_time:96585ms step_avg:94.88ms +step:1019/1705 train_time:96679ms step_avg:94.88ms +step:1020/1705 train_time:96772ms step_avg:94.87ms +step:1021/1705 train_time:96866ms step_avg:94.87ms +step:1022/1705 train_time:96962ms step_avg:94.87ms +step:1023/1705 train_time:97056ms step_avg:94.87ms +step:1024/1705 train_time:97153ms step_avg:94.88ms +step:1025/1705 train_time:97249ms step_avg:94.88ms +step:1026/1705 train_time:97345ms step_avg:94.88ms +step:1027/1705 train_time:97439ms step_avg:94.88ms +step:1028/1705 train_time:97533ms step_avg:94.88ms +step:1029/1705 train_time:97627ms step_avg:94.88ms +step:1030/1705 train_time:97721ms step_avg:94.87ms +step:1031/1705 train_time:97815ms step_avg:94.87ms +step:1032/1705 train_time:97909ms step_avg:94.87ms +step:1033/1705 train_time:98004ms step_avg:94.87ms +step:1034/1705 train_time:98100ms step_avg:94.87ms +step:1035/1705 train_time:98195ms step_avg:94.87ms +step:1036/1705 train_time:98290ms step_avg:94.87ms +step:1037/1705 train_time:98385ms step_avg:94.87ms +step:1038/1705 train_time:98479ms step_avg:94.87ms +step:1039/1705 train_time:98573ms step_avg:94.87ms +step:1040/1705 train_time:98668ms step_avg:94.87ms +step:1041/1705 train_time:98762ms step_avg:94.87ms +step:1042/1705 train_time:98856ms step_avg:94.87ms +step:1043/1705 train_time:98950ms step_avg:94.87ms +step:1044/1705 train_time:99045ms 
step_avg:94.87ms +step:1045/1705 train_time:99141ms step_avg:94.87ms +step:1046/1705 train_time:99236ms step_avg:94.87ms +step:1047/1705 train_time:99331ms step_avg:94.87ms +step:1048/1705 train_time:99426ms step_avg:94.87ms +step:1049/1705 train_time:99521ms step_avg:94.87ms +step:1050/1705 train_time:99615ms step_avg:94.87ms +step:1051/1705 train_time:99710ms step_avg:94.87ms +step:1052/1705 train_time:99805ms step_avg:94.87ms +step:1053/1705 train_time:99899ms step_avg:94.87ms +step:1054/1705 train_time:99993ms step_avg:94.87ms +step:1055/1705 train_time:100089ms step_avg:94.87ms +step:1056/1705 train_time:100184ms step_avg:94.87ms +step:1057/1705 train_time:100278ms step_avg:94.87ms +step:1058/1705 train_time:100372ms step_avg:94.87ms +step:1059/1705 train_time:100467ms step_avg:94.87ms +step:1060/1705 train_time:100562ms step_avg:94.87ms +step:1061/1705 train_time:100655ms step_avg:94.87ms +step:1062/1705 train_time:100895ms step_avg:95.00ms +step:1063/1705 train_time:101100ms step_avg:95.11ms +step:1064/1705 train_time:101192ms step_avg:95.11ms +step:1065/1705 train_time:101286ms step_avg:95.10ms +step:1066/1705 train_time:101379ms step_avg:95.10ms +step:1067/1705 train_time:101472ms step_avg:95.10ms +step:1068/1705 train_time:101565ms step_avg:95.10ms +step:1069/1705 train_time:101659ms step_avg:95.10ms +step:1070/1705 train_time:101752ms step_avg:95.10ms +step:1071/1705 train_time:101846ms step_avg:95.09ms +step:1072/1705 train_time:101949ms step_avg:95.10ms +step:1073/1705 train_time:102047ms step_avg:95.10ms +step:1074/1705 train_time:102145ms step_avg:95.11ms +step:1075/1705 train_time:102240ms step_avg:95.11ms +step:1076/1705 train_time:102334ms step_avg:95.11ms +step:1077/1705 train_time:102428ms step_avg:95.10ms +step:1078/1705 train_time:102521ms step_avg:95.10ms +step:1079/1705 train_time:102615ms step_avg:95.10ms +step:1080/1705 train_time:102708ms step_avg:95.10ms +step:1081/1705 train_time:102802ms step_avg:95.10ms +step:1082/1705 train_time:102896ms step_avg:95.10ms +step:1083/1705 train_time:102993ms step_avg:95.10ms +step:1084/1705 train_time:103090ms step_avg:95.10ms +step:1085/1705 train_time:103187ms step_avg:95.10ms +step:1086/1705 train_time:103281ms step_avg:95.10ms +step:1087/1705 train_time:103375ms step_avg:95.10ms +step:1088/1705 train_time:103469ms step_avg:95.10ms +step:1089/1705 train_time:103563ms step_avg:95.10ms +step:1090/1705 train_time:103656ms step_avg:95.10ms +step:1091/1705 train_time:103750ms step_avg:95.10ms +step:1092/1705 train_time:103845ms step_avg:95.10ms +step:1093/1705 train_time:103940ms step_avg:95.10ms +step:1094/1705 train_time:104035ms step_avg:95.10ms +step:1095/1705 train_time:104131ms step_avg:95.10ms +step:1096/1705 train_time:104227ms step_avg:95.10ms +step:1097/1705 train_time:104322ms step_avg:95.10ms +step:1098/1705 train_time:104416ms step_avg:95.10ms +step:1099/1705 train_time:104510ms step_avg:95.10ms +step:1100/1705 train_time:104604ms step_avg:95.09ms +step:1101/1705 train_time:104698ms step_avg:95.09ms +step:1102/1705 train_time:104792ms step_avg:95.09ms +step:1103/1705 train_time:104888ms step_avg:95.09ms +step:1104/1705 train_time:104982ms step_avg:95.09ms +step:1105/1705 train_time:105076ms step_avg:95.09ms +step:1106/1705 train_time:105172ms step_avg:95.09ms +step:1107/1705 train_time:105268ms step_avg:95.09ms +step:1108/1705 train_time:105363ms step_avg:95.09ms +step:1109/1705 train_time:105456ms step_avg:95.09ms +step:1110/1705 train_time:105551ms step_avg:95.09ms +step:1111/1705 train_time:105645ms 
step_avg:95.09ms +step:1112/1705 train_time:105739ms step_avg:95.09ms +step:1113/1705 train_time:105834ms step_avg:95.09ms +step:1114/1705 train_time:105930ms step_avg:95.09ms +step:1115/1705 train_time:106025ms step_avg:95.09ms +step:1116/1705 train_time:106119ms step_avg:95.09ms +step:1117/1705 train_time:106214ms step_avg:95.09ms +step:1118/1705 train_time:106309ms step_avg:95.09ms +step:1119/1705 train_time:106404ms step_avg:95.09ms +step:1120/1705 train_time:106498ms step_avg:95.09ms +step:1121/1705 train_time:106592ms step_avg:95.09ms +step:1122/1705 train_time:106686ms step_avg:95.09ms +step:1123/1705 train_time:106781ms step_avg:95.09ms +step:1124/1705 train_time:106875ms step_avg:95.08ms +step:1125/1705 train_time:106970ms step_avg:95.08ms +step:1125/1705 val_loss:3.4374 train_time:107065ms step_avg:95.17ms +step:1126/1705 train_time:107086ms step_avg:95.10ms +step:1127/1705 train_time:107165ms step_avg:95.09ms +step:1128/1705 train_time:107264ms step_avg:95.09ms +step:1129/1705 train_time:107359ms step_avg:95.09ms +step:1130/1705 train_time:107453ms step_avg:95.09ms +step:1131/1705 train_time:107547ms step_avg:95.09ms +step:1132/1705 train_time:107641ms step_avg:95.09ms +step:1133/1705 train_time:107734ms step_avg:95.09ms +step:1134/1705 train_time:107827ms step_avg:95.09ms +step:1135/1705 train_time:107920ms step_avg:95.08ms +step:1136/1705 train_time:108015ms step_avg:95.08ms +step:1137/1705 train_time:108112ms step_avg:95.09ms +step:1138/1705 train_time:108210ms step_avg:95.09ms +step:1139/1705 train_time:108306ms step_avg:95.09ms +step:1140/1705 train_time:108402ms step_avg:95.09ms +step:1141/1705 train_time:108496ms step_avg:95.09ms +step:1142/1705 train_time:108591ms step_avg:95.09ms +step:1143/1705 train_time:108686ms step_avg:95.09ms +step:1144/1705 train_time:108780ms step_avg:95.09ms +step:1145/1705 train_time:108875ms step_avg:95.09ms +step:1146/1705 train_time:108970ms step_avg:95.09ms +step:1147/1705 train_time:109065ms step_avg:95.09ms +step:1148/1705 train_time:109161ms step_avg:95.09ms +step:1149/1705 train_time:109257ms step_avg:95.09ms +step:1150/1705 train_time:109353ms step_avg:95.09ms +step:1151/1705 train_time:109449ms step_avg:95.09ms +step:1152/1705 train_time:109545ms step_avg:95.09ms +step:1153/1705 train_time:109639ms step_avg:95.09ms +step:1154/1705 train_time:109733ms step_avg:95.09ms +step:1155/1705 train_time:109828ms step_avg:95.09ms +step:1156/1705 train_time:109923ms step_avg:95.09ms +step:1157/1705 train_time:110017ms step_avg:95.09ms +step:1158/1705 train_time:110114ms step_avg:95.09ms +step:1159/1705 train_time:110210ms step_avg:95.09ms +step:1160/1705 train_time:110308ms step_avg:95.09ms +step:1161/1705 train_time:110404ms step_avg:95.09ms +step:1162/1705 train_time:110498ms step_avg:95.09ms +step:1163/1705 train_time:110594ms step_avg:95.09ms +step:1164/1705 train_time:110689ms step_avg:95.09ms +step:1165/1705 train_time:110784ms step_avg:95.09ms +step:1166/1705 train_time:110879ms step_avg:95.09ms +step:1167/1705 train_time:110974ms step_avg:95.09ms +step:1168/1705 train_time:111070ms step_avg:95.09ms +step:1169/1705 train_time:111165ms step_avg:95.09ms +step:1170/1705 train_time:111260ms step_avg:95.09ms +step:1171/1705 train_time:111356ms step_avg:95.09ms +step:1172/1705 train_time:111452ms step_avg:95.10ms +step:1173/1705 train_time:111548ms step_avg:95.10ms +step:1174/1705 train_time:111643ms step_avg:95.10ms +step:1175/1705 train_time:111737ms step_avg:95.10ms +step:1176/1705 train_time:111832ms step_avg:95.10ms +step:1177/1705 
train_time:111927ms step_avg:95.10ms +step:1178/1705 train_time:112022ms step_avg:95.10ms +step:1179/1705 train_time:112117ms step_avg:95.09ms +step:1180/1705 train_time:112213ms step_avg:95.10ms +step:1181/1705 train_time:112310ms step_avg:95.10ms +step:1182/1705 train_time:112406ms step_avg:95.10ms +step:1183/1705 train_time:112501ms step_avg:95.10ms +step:1184/1705 train_time:112596ms step_avg:95.10ms +step:1185/1705 train_time:112691ms step_avg:95.10ms +step:1186/1705 train_time:112786ms step_avg:95.10ms +step:1187/1705 train_time:112881ms step_avg:95.10ms +step:1188/1705 train_time:112976ms step_avg:95.10ms +step:1189/1705 train_time:113072ms step_avg:95.10ms +step:1190/1705 train_time:113168ms step_avg:95.10ms +step:1191/1705 train_time:113265ms step_avg:95.10ms +step:1192/1705 train_time:113362ms step_avg:95.10ms +step:1193/1705 train_time:113458ms step_avg:95.10ms +step:1194/1705 train_time:113553ms step_avg:95.10ms +step:1195/1705 train_time:113649ms step_avg:95.10ms +step:1196/1705 train_time:113743ms step_avg:95.10ms +step:1197/1705 train_time:113838ms step_avg:95.10ms +step:1198/1705 train_time:113933ms step_avg:95.10ms +step:1199/1705 train_time:114028ms step_avg:95.10ms +step:1200/1705 train_time:114123ms step_avg:95.10ms +step:1201/1705 train_time:114218ms step_avg:95.10ms +step:1202/1705 train_time:114315ms step_avg:95.10ms +step:1203/1705 train_time:114410ms step_avg:95.10ms +step:1204/1705 train_time:114506ms step_avg:95.10ms +step:1205/1705 train_time:114601ms step_avg:95.10ms +step:1206/1705 train_time:114696ms step_avg:95.10ms +step:1207/1705 train_time:114791ms step_avg:95.10ms +step:1208/1705 train_time:114887ms step_avg:95.11ms +step:1209/1705 train_time:114982ms step_avg:95.11ms +step:1210/1705 train_time:115076ms step_avg:95.10ms +step:1211/1705 train_time:115171ms step_avg:95.10ms +step:1212/1705 train_time:115268ms step_avg:95.11ms +step:1213/1705 train_time:115364ms step_avg:95.11ms +step:1214/1705 train_time:115459ms step_avg:95.11ms +step:1215/1705 train_time:115554ms step_avg:95.11ms +step:1216/1705 train_time:115650ms step_avg:95.11ms +step:1217/1705 train_time:115745ms step_avg:95.11ms +step:1218/1705 train_time:115840ms step_avg:95.11ms +step:1219/1705 train_time:115935ms step_avg:95.11ms +step:1220/1705 train_time:116031ms step_avg:95.11ms +step:1221/1705 train_time:116125ms step_avg:95.11ms +step:1222/1705 train_time:116221ms step_avg:95.11ms +step:1223/1705 train_time:116315ms step_avg:95.11ms +step:1224/1705 train_time:116412ms step_avg:95.11ms +step:1225/1705 train_time:116510ms step_avg:95.11ms +step:1226/1705 train_time:116605ms step_avg:95.11ms +step:1227/1705 train_time:116701ms step_avg:95.11ms +step:1228/1705 train_time:116796ms step_avg:95.11ms +step:1229/1705 train_time:116891ms step_avg:95.11ms +step:1230/1705 train_time:116986ms step_avg:95.11ms +step:1231/1705 train_time:117081ms step_avg:95.11ms +step:1232/1705 train_time:117176ms step_avg:95.11ms +step:1233/1705 train_time:117272ms step_avg:95.11ms +step:1234/1705 train_time:117367ms step_avg:95.11ms +step:1235/1705 train_time:117462ms step_avg:95.11ms +step:1236/1705 train_time:117558ms step_avg:95.11ms +step:1237/1705 train_time:117654ms step_avg:95.11ms +step:1238/1705 train_time:117751ms step_avg:95.11ms +step:1239/1705 train_time:117848ms step_avg:95.12ms +step:1240/1705 train_time:117943ms step_avg:95.12ms +step:1241/1705 train_time:118039ms step_avg:95.12ms +step:1242/1705 train_time:118133ms step_avg:95.12ms +step:1243/1705 train_time:118228ms step_avg:95.11ms +step:1244/1705 
train_time:118323ms step_avg:95.11ms +step:1245/1705 train_time:118418ms step_avg:95.11ms +step:1246/1705 train_time:118513ms step_avg:95.11ms +step:1247/1705 train_time:118609ms step_avg:95.12ms +step:1248/1705 train_time:118705ms step_avg:95.12ms +step:1249/1705 train_time:118799ms step_avg:95.12ms +step:1250/1705 train_time:118894ms step_avg:95.11ms +step:1250/1705 val_loss:3.3898 train_time:118991ms step_avg:95.19ms +step:1251/1705 train_time:119012ms step_avg:95.13ms +step:1252/1705 train_time:119098ms step_avg:95.13ms +step:1253/1705 train_time:119194ms step_avg:95.13ms +step:1254/1705 train_time:119288ms step_avg:95.13ms +step:1255/1705 train_time:119382ms step_avg:95.13ms +step:1256/1705 train_time:119477ms step_avg:95.12ms +step:1257/1705 train_time:119571ms step_avg:95.12ms +step:1258/1705 train_time:119665ms step_avg:95.12ms +step:1259/1705 train_time:119759ms step_avg:95.12ms +step:1260/1705 train_time:119853ms step_avg:95.12ms +step:1261/1705 train_time:119951ms step_avg:95.12ms +step:1262/1705 train_time:120051ms step_avg:95.13ms +step:1263/1705 train_time:120149ms step_avg:95.13ms +step:1264/1705 train_time:120244ms step_avg:95.13ms +step:1265/1705 train_time:120339ms step_avg:95.13ms +step:1266/1705 train_time:120433ms step_avg:95.13ms +step:1267/1705 train_time:120528ms step_avg:95.13ms +step:1268/1705 train_time:120622ms step_avg:95.13ms +step:1269/1705 train_time:120716ms step_avg:95.13ms +step:1270/1705 train_time:120810ms step_avg:95.13ms +step:1271/1705 train_time:120906ms step_avg:95.13ms +step:1272/1705 train_time:121003ms step_avg:95.13ms +step:1273/1705 train_time:121100ms step_avg:95.13ms +step:1274/1705 train_time:121476ms step_avg:95.35ms +step:1275/1705 train_time:121552ms step_avg:95.33ms +step:1276/1705 train_time:121647ms step_avg:95.33ms +step:1277/1705 train_time:121742ms step_avg:95.33ms +step:1278/1705 train_time:121836ms step_avg:95.33ms +step:1279/1705 train_time:121930ms step_avg:95.33ms +step:1280/1705 train_time:122024ms step_avg:95.33ms +step:1281/1705 train_time:122118ms step_avg:95.33ms +step:1282/1705 train_time:122212ms step_avg:95.33ms +step:1283/1705 train_time:122306ms step_avg:95.33ms +step:1284/1705 train_time:122408ms step_avg:95.33ms +step:1285/1705 train_time:122507ms step_avg:95.34ms +step:1286/1705 train_time:122603ms step_avg:95.34ms +step:1287/1705 train_time:122699ms step_avg:95.34ms +step:1288/1705 train_time:122794ms step_avg:95.34ms +step:1289/1705 train_time:122889ms step_avg:95.34ms +step:1290/1705 train_time:122983ms step_avg:95.34ms +step:1291/1705 train_time:123077ms step_avg:95.33ms +step:1292/1705 train_time:123171ms step_avg:95.33ms +step:1293/1705 train_time:123266ms step_avg:95.33ms +step:1294/1705 train_time:123364ms step_avg:95.34ms +step:1295/1705 train_time:123462ms step_avg:95.34ms +step:1296/1705 train_time:123559ms step_avg:95.34ms +step:1297/1705 train_time:123654ms step_avg:95.34ms +step:1298/1705 train_time:123749ms step_avg:95.34ms +step:1299/1705 train_time:123845ms step_avg:95.34ms +step:1300/1705 train_time:123940ms step_avg:95.34ms +step:1301/1705 train_time:124035ms step_avg:95.34ms +step:1302/1705 train_time:124130ms step_avg:95.34ms +step:1303/1705 train_time:124224ms step_avg:95.34ms +step:1304/1705 train_time:124320ms step_avg:95.34ms +step:1305/1705 train_time:124417ms step_avg:95.34ms +step:1306/1705 train_time:124512ms step_avg:95.34ms +step:1307/1705 train_time:124607ms step_avg:95.34ms +step:1308/1705 train_time:124703ms step_avg:95.34ms +step:1309/1705 train_time:124799ms step_avg:95.34ms 
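Most steps in this run land in a roughly 93-97 ms band, but the cumulative train_time column shows occasional one-step jumps (about 360 ms going into step 639, about 280 ms going into step 851). A throwaway sketch to surface such hiccups, under the same log_text assumption as in the sketch above:

import re

times = [int(t) for t in re.findall(r"train_time:(\d+)ms", log_text)]
deltas = [b - a for a, b in zip(times, times[1:])]
# positions are indices into the match list (which includes eval lines), not step numbers
spikes = [(i, d) for i, d in enumerate(deltas) if d > 150]
print(spikes)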
+step:1310/1705 train_time:124894ms step_avg:95.34ms +step:1311/1705 train_time:124988ms step_avg:95.34ms +step:1312/1705 train_time:125083ms step_avg:95.34ms +step:1313/1705 train_time:125179ms step_avg:95.34ms +step:1314/1705 train_time:125274ms step_avg:95.34ms +step:1315/1705 train_time:125370ms step_avg:95.34ms +step:1316/1705 train_time:125466ms step_avg:95.34ms +step:1317/1705 train_time:125562ms step_avg:95.34ms +step:1318/1705 train_time:125658ms step_avg:95.34ms +step:1319/1705 train_time:125755ms step_avg:95.34ms +step:1320/1705 train_time:125849ms step_avg:95.34ms +step:1321/1705 train_time:125944ms step_avg:95.34ms +step:1322/1705 train_time:126039ms step_avg:95.34ms +step:1323/1705 train_time:126134ms step_avg:95.34ms +step:1324/1705 train_time:126229ms step_avg:95.34ms +step:1325/1705 train_time:126324ms step_avg:95.34ms +step:1326/1705 train_time:126420ms step_avg:95.34ms +step:1327/1705 train_time:126516ms step_avg:95.34ms +step:1328/1705 train_time:126610ms step_avg:95.34ms +step:1329/1705 train_time:126706ms step_avg:95.34ms +step:1330/1705 train_time:126801ms step_avg:95.34ms +step:1331/1705 train_time:126896ms step_avg:95.34ms +step:1332/1705 train_time:126991ms step_avg:95.34ms +step:1333/1705 train_time:127087ms step_avg:95.34ms +step:1334/1705 train_time:127182ms step_avg:95.34ms +step:1335/1705 train_time:127277ms step_avg:95.34ms +step:1336/1705 train_time:127372ms step_avg:95.34ms +step:1337/1705 train_time:127468ms step_avg:95.34ms +step:1338/1705 train_time:127565ms step_avg:95.34ms +step:1339/1705 train_time:127661ms step_avg:95.34ms +step:1340/1705 train_time:127756ms step_avg:95.34ms +step:1341/1705 train_time:127851ms step_avg:95.34ms +step:1342/1705 train_time:127946ms step_avg:95.34ms +step:1343/1705 train_time:128041ms step_avg:95.34ms +step:1344/1705 train_time:128136ms step_avg:95.34ms +step:1345/1705 train_time:128230ms step_avg:95.34ms +step:1346/1705 train_time:128326ms step_avg:95.34ms +step:1347/1705 train_time:128422ms step_avg:95.34ms +step:1348/1705 train_time:128519ms step_avg:95.34ms +step:1349/1705 train_time:128613ms step_avg:95.34ms +step:1350/1705 train_time:128708ms step_avg:95.34ms +step:1351/1705 train_time:128804ms step_avg:95.34ms +step:1352/1705 train_time:128899ms step_avg:95.34ms +step:1353/1705 train_time:128994ms step_avg:95.34ms +step:1354/1705 train_time:129089ms step_avg:95.34ms +step:1355/1705 train_time:129184ms step_avg:95.34ms +step:1356/1705 train_time:129279ms step_avg:95.34ms +step:1357/1705 train_time:129374ms step_avg:95.34ms +step:1358/1705 train_time:129470ms step_avg:95.34ms +step:1359/1705 train_time:129567ms step_avg:95.34ms +step:1360/1705 train_time:129662ms step_avg:95.34ms +step:1361/1705 train_time:129758ms step_avg:95.34ms +step:1362/1705 train_time:129852ms step_avg:95.34ms +step:1363/1705 train_time:129949ms step_avg:95.34ms +step:1364/1705 train_time:130044ms step_avg:95.34ms +step:1365/1705 train_time:130139ms step_avg:95.34ms +step:1366/1705 train_time:130233ms step_avg:95.34ms +step:1367/1705 train_time:130329ms step_avg:95.34ms +step:1368/1705 train_time:130425ms step_avg:95.34ms +step:1369/1705 train_time:130520ms step_avg:95.34ms +step:1370/1705 train_time:130616ms step_avg:95.34ms +step:1371/1705 train_time:130712ms step_avg:95.34ms +step:1372/1705 train_time:130807ms step_avg:95.34ms +step:1373/1705 train_time:130903ms step_avg:95.34ms +step:1374/1705 train_time:130998ms step_avg:95.34ms +step:1375/1705 train_time:131093ms step_avg:95.34ms +step:1375/1705 val_loss:3.3522 train_time:131189ms 
step_avg:95.41ms +step:1376/1705 train_time:131210ms step_avg:95.36ms +step:1377/1705 train_time:131288ms step_avg:95.34ms +step:1378/1705 train_time:131389ms step_avg:95.35ms +step:1379/1705 train_time:131483ms step_avg:95.35ms +step:1380/1705 train_time:131578ms step_avg:95.35ms +step:1381/1705 train_time:131672ms step_avg:95.35ms +step:1382/1705 train_time:131767ms step_avg:95.34ms +step:1383/1705 train_time:131861ms step_avg:95.34ms +step:1384/1705 train_time:131955ms step_avg:95.34ms +step:1385/1705 train_time:132049ms step_avg:95.34ms +step:1386/1705 train_time:132146ms step_avg:95.34ms +step:1387/1705 train_time:132242ms step_avg:95.34ms +step:1388/1705 train_time:132338ms step_avg:95.34ms +step:1389/1705 train_time:132434ms step_avg:95.35ms +step:1390/1705 train_time:132530ms step_avg:95.35ms +step:1391/1705 train_time:132625ms step_avg:95.35ms +step:1392/1705 train_time:132720ms step_avg:95.34ms +step:1393/1705 train_time:132815ms step_avg:95.34ms +step:1394/1705 train_time:132909ms step_avg:95.34ms +step:1395/1705 train_time:133004ms step_avg:95.34ms +step:1396/1705 train_time:133098ms step_avg:95.34ms +step:1397/1705 train_time:133194ms step_avg:95.34ms +step:1398/1705 train_time:133290ms step_avg:95.34ms +step:1399/1705 train_time:133386ms step_avg:95.34ms +step:1400/1705 train_time:133481ms step_avg:95.34ms +step:1401/1705 train_time:133576ms step_avg:95.34ms +step:1402/1705 train_time:133672ms step_avg:95.34ms +step:1403/1705 train_time:133768ms step_avg:95.34ms +step:1404/1705 train_time:133863ms step_avg:95.34ms +step:1405/1705 train_time:133957ms step_avg:95.34ms +step:1406/1705 train_time:134053ms step_avg:95.34ms +step:1407/1705 train_time:134149ms step_avg:95.34ms +step:1408/1705 train_time:134246ms step_avg:95.34ms +step:1409/1705 train_time:134341ms step_avg:95.35ms +step:1410/1705 train_time:134436ms step_avg:95.34ms +step:1411/1705 train_time:134532ms step_avg:95.34ms +step:1412/1705 train_time:134627ms step_avg:95.35ms +step:1413/1705 train_time:134723ms step_avg:95.35ms +step:1414/1705 train_time:134818ms step_avg:95.35ms +step:1415/1705 train_time:134913ms step_avg:95.34ms +step:1416/1705 train_time:135007ms step_avg:95.34ms +step:1417/1705 train_time:135102ms step_avg:95.34ms +step:1418/1705 train_time:135197ms step_avg:95.34ms +step:1419/1705 train_time:135293ms step_avg:95.34ms +step:1420/1705 train_time:135390ms step_avg:95.35ms +step:1421/1705 train_time:135485ms step_avg:95.35ms +step:1422/1705 train_time:135581ms step_avg:95.35ms +step:1423/1705 train_time:135677ms step_avg:95.35ms +step:1424/1705 train_time:135772ms step_avg:95.35ms +step:1425/1705 train_time:135868ms step_avg:95.35ms +step:1426/1705 train_time:135963ms step_avg:95.35ms +step:1427/1705 train_time:136058ms step_avg:95.35ms +step:1428/1705 train_time:136154ms step_avg:95.35ms +step:1429/1705 train_time:136249ms step_avg:95.35ms +step:1430/1705 train_time:136346ms step_avg:95.35ms +step:1431/1705 train_time:136441ms step_avg:95.35ms +step:1432/1705 train_time:136536ms step_avg:95.35ms +step:1433/1705 train_time:136632ms step_avg:95.35ms +step:1434/1705 train_time:136728ms step_avg:95.35ms +step:1435/1705 train_time:136823ms step_avg:95.35ms +step:1436/1705 train_time:136918ms step_avg:95.35ms +step:1437/1705 train_time:137013ms step_avg:95.35ms +step:1438/1705 train_time:137108ms step_avg:95.35ms +step:1439/1705 train_time:137204ms step_avg:95.35ms +step:1440/1705 train_time:137299ms step_avg:95.35ms +step:1441/1705 train_time:137396ms step_avg:95.35ms +step:1442/1705 train_time:137491ms 
step_avg:95.35ms +step:1443/1705 train_time:137587ms step_avg:95.35ms +step:1444/1705 train_time:137681ms step_avg:95.35ms +step:1445/1705 train_time:137776ms step_avg:95.35ms +step:1446/1705 train_time:137872ms step_avg:95.35ms +step:1447/1705 train_time:137968ms step_avg:95.35ms +step:1448/1705 train_time:138064ms step_avg:95.35ms +step:1449/1705 train_time:138158ms step_avg:95.35ms +step:1450/1705 train_time:138255ms step_avg:95.35ms +step:1451/1705 train_time:138351ms step_avg:95.35ms +step:1452/1705 train_time:138447ms step_avg:95.35ms +step:1453/1705 train_time:138543ms step_avg:95.35ms +step:1454/1705 train_time:138638ms step_avg:95.35ms +step:1455/1705 train_time:138732ms step_avg:95.35ms +step:1456/1705 train_time:138829ms step_avg:95.35ms +step:1457/1705 train_time:138924ms step_avg:95.35ms +step:1458/1705 train_time:139020ms step_avg:95.35ms +step:1459/1705 train_time:139115ms step_avg:95.35ms +step:1460/1705 train_time:139211ms step_avg:95.35ms +step:1461/1705 train_time:139306ms step_avg:95.35ms +step:1462/1705 train_time:139401ms step_avg:95.35ms +step:1463/1705 train_time:139496ms step_avg:95.35ms +step:1464/1705 train_time:139592ms step_avg:95.35ms +step:1465/1705 train_time:139687ms step_avg:95.35ms +step:1466/1705 train_time:139782ms step_avg:95.35ms +step:1467/1705 train_time:139877ms step_avg:95.35ms +step:1468/1705 train_time:139973ms step_avg:95.35ms +step:1469/1705 train_time:140068ms step_avg:95.35ms +step:1470/1705 train_time:140164ms step_avg:95.35ms +step:1471/1705 train_time:140259ms step_avg:95.35ms +step:1472/1705 train_time:140355ms step_avg:95.35ms +step:1473/1705 train_time:140451ms step_avg:95.35ms +step:1474/1705 train_time:140547ms step_avg:95.35ms +step:1475/1705 train_time:140642ms step_avg:95.35ms +step:1476/1705 train_time:140736ms step_avg:95.35ms +step:1477/1705 train_time:140833ms step_avg:95.35ms +step:1478/1705 train_time:140928ms step_avg:95.35ms +step:1479/1705 train_time:141024ms step_avg:95.35ms +step:1480/1705 train_time:141120ms step_avg:95.35ms +step:1481/1705 train_time:141215ms step_avg:95.35ms +step:1482/1705 train_time:141311ms step_avg:95.35ms +step:1483/1705 train_time:141408ms step_avg:95.35ms +step:1484/1705 train_time:141503ms step_avg:95.35ms +step:1485/1705 train_time:141768ms step_avg:95.47ms +step:1486/1705 train_time:141957ms step_avg:95.53ms +step:1487/1705 train_time:142051ms step_avg:95.53ms +step:1488/1705 train_time:142145ms step_avg:95.53ms +step:1489/1705 train_time:142239ms step_avg:95.53ms +step:1490/1705 train_time:142334ms step_avg:95.53ms +step:1491/1705 train_time:142428ms step_avg:95.53ms +step:1492/1705 train_time:142523ms step_avg:95.52ms +step:1493/1705 train_time:142617ms step_avg:95.52ms +step:1494/1705 train_time:142711ms step_avg:95.52ms +step:1495/1705 train_time:142811ms step_avg:95.53ms +step:1496/1705 train_time:142911ms step_avg:95.53ms +step:1497/1705 train_time:143008ms step_avg:95.53ms +step:1498/1705 train_time:143103ms step_avg:95.53ms +step:1499/1705 train_time:143197ms step_avg:95.53ms +step:1500/1705 train_time:143293ms step_avg:95.53ms +step:1500/1705 val_loss:3.3199 train_time:143387ms step_avg:95.59ms +step:1501/1705 train_time:143409ms step_avg:95.54ms +step:1502/1705 train_time:143489ms step_avg:95.53ms +step:1503/1705 train_time:143588ms step_avg:95.53ms +step:1504/1705 train_time:143683ms step_avg:95.53ms +step:1505/1705 train_time:143778ms step_avg:95.53ms +step:1506/1705 train_time:143871ms step_avg:95.53ms +step:1507/1705 train_time:143965ms step_avg:95.53ms +step:1508/1705 
train_time:144061ms step_avg:95.53ms +step:1509/1705 train_time:144154ms step_avg:95.53ms +step:1510/1705 train_time:144249ms step_avg:95.53ms +step:1511/1705 train_time:144345ms step_avg:95.53ms +step:1512/1705 train_time:144443ms step_avg:95.53ms +step:1513/1705 train_time:144539ms step_avg:95.53ms +step:1514/1705 train_time:144636ms step_avg:95.53ms +step:1515/1705 train_time:144731ms step_avg:95.53ms +step:1516/1705 train_time:144825ms step_avg:95.53ms +step:1517/1705 train_time:144920ms step_avg:95.53ms +step:1518/1705 train_time:145014ms step_avg:95.53ms +step:1519/1705 train_time:145108ms step_avg:95.53ms +step:1520/1705 train_time:145204ms step_avg:95.53ms +step:1521/1705 train_time:145300ms step_avg:95.53ms +step:1522/1705 train_time:145397ms step_avg:95.53ms +step:1523/1705 train_time:145494ms step_avg:95.53ms +step:1524/1705 train_time:145589ms step_avg:95.53ms +step:1525/1705 train_time:145687ms step_avg:95.53ms +step:1526/1705 train_time:145783ms step_avg:95.53ms +step:1527/1705 train_time:145877ms step_avg:95.53ms +step:1528/1705 train_time:145972ms step_avg:95.53ms +step:1529/1705 train_time:146066ms step_avg:95.53ms +step:1530/1705 train_time:146160ms step_avg:95.53ms +step:1531/1705 train_time:146255ms step_avg:95.53ms +step:1532/1705 train_time:146351ms step_avg:95.53ms +step:1533/1705 train_time:146449ms step_avg:95.53ms +step:1534/1705 train_time:146545ms step_avg:95.53ms +step:1535/1705 train_time:146641ms step_avg:95.53ms +step:1536/1705 train_time:146737ms step_avg:95.53ms +step:1537/1705 train_time:146831ms step_avg:95.53ms +step:1538/1705 train_time:146927ms step_avg:95.53ms +step:1539/1705 train_time:147022ms step_avg:95.53ms +step:1540/1705 train_time:147116ms step_avg:95.53ms +step:1541/1705 train_time:147210ms step_avg:95.53ms +step:1542/1705 train_time:147306ms step_avg:95.53ms +step:1543/1705 train_time:147403ms step_avg:95.53ms +step:1544/1705 train_time:147498ms step_avg:95.53ms +step:1545/1705 train_time:147593ms step_avg:95.53ms +step:1546/1705 train_time:147688ms step_avg:95.53ms +step:1547/1705 train_time:147785ms step_avg:95.53ms +step:1548/1705 train_time:147882ms step_avg:95.53ms +step:1549/1705 train_time:147979ms step_avg:95.53ms +step:1550/1705 train_time:148074ms step_avg:95.53ms +step:1551/1705 train_time:148168ms step_avg:95.53ms +step:1552/1705 train_time:148264ms step_avg:95.53ms +step:1553/1705 train_time:148360ms step_avg:95.53ms +step:1554/1705 train_time:148455ms step_avg:95.53ms +step:1555/1705 train_time:148550ms step_avg:95.53ms +step:1556/1705 train_time:148647ms step_avg:95.53ms +step:1557/1705 train_time:148742ms step_avg:95.53ms +step:1558/1705 train_time:148837ms step_avg:95.53ms +step:1559/1705 train_time:148932ms step_avg:95.53ms +step:1560/1705 train_time:149028ms step_avg:95.53ms +step:1561/1705 train_time:149124ms step_avg:95.53ms +step:1562/1705 train_time:149219ms step_avg:95.53ms +step:1563/1705 train_time:149313ms step_avg:95.53ms +step:1564/1705 train_time:149408ms step_avg:95.53ms +step:1565/1705 train_time:149505ms step_avg:95.53ms +step:1566/1705 train_time:149601ms step_avg:95.53ms +step:1567/1705 train_time:149697ms step_avg:95.53ms +step:1568/1705 train_time:149790ms step_avg:95.53ms +step:1569/1705 train_time:149887ms step_avg:95.53ms +step:1570/1705 train_time:149984ms step_avg:95.53ms +step:1571/1705 train_time:150079ms step_avg:95.53ms +step:1572/1705 train_time:150175ms step_avg:95.53ms +step:1573/1705 train_time:150270ms step_avg:95.53ms +step:1574/1705 train_time:150366ms step_avg:95.53ms +step:1575/1705 
train_time:150462ms step_avg:95.53ms +step:1576/1705 train_time:150557ms step_avg:95.53ms +step:1577/1705 train_time:150652ms step_avg:95.53ms +step:1578/1705 train_time:150747ms step_avg:95.53ms +step:1579/1705 train_time:150842ms step_avg:95.53ms +step:1580/1705 train_time:150938ms step_avg:95.53ms +step:1581/1705 train_time:151035ms step_avg:95.53ms +step:1582/1705 train_time:151128ms step_avg:95.53ms +step:1583/1705 train_time:151224ms step_avg:95.53ms +step:1584/1705 train_time:151320ms step_avg:95.53ms +step:1585/1705 train_time:151415ms step_avg:95.53ms +step:1586/1705 train_time:151510ms step_avg:95.53ms +step:1587/1705 train_time:151606ms step_avg:95.53ms +step:1588/1705 train_time:151702ms step_avg:95.53ms +step:1589/1705 train_time:151798ms step_avg:95.53ms +step:1590/1705 train_time:151892ms step_avg:95.53ms +step:1591/1705 train_time:151988ms step_avg:95.53ms +step:1592/1705 train_time:152084ms step_avg:95.53ms +step:1593/1705 train_time:152179ms step_avg:95.53ms +step:1594/1705 train_time:152274ms step_avg:95.53ms +step:1595/1705 train_time:152369ms step_avg:95.53ms +step:1596/1705 train_time:152464ms step_avg:95.53ms +step:1597/1705 train_time:152560ms step_avg:95.53ms +step:1598/1705 train_time:152655ms step_avg:95.53ms +step:1599/1705 train_time:152750ms step_avg:95.53ms +step:1600/1705 train_time:152845ms step_avg:95.53ms +step:1601/1705 train_time:152941ms step_avg:95.53ms +step:1602/1705 train_time:153037ms step_avg:95.53ms +step:1603/1705 train_time:153131ms step_avg:95.53ms +step:1604/1705 train_time:153227ms step_avg:95.53ms +step:1605/1705 train_time:153323ms step_avg:95.53ms +step:1606/1705 train_time:153418ms step_avg:95.53ms +step:1607/1705 train_time:153513ms step_avg:95.53ms +step:1608/1705 train_time:153608ms step_avg:95.53ms +step:1609/1705 train_time:153704ms step_avg:95.53ms +step:1610/1705 train_time:153800ms step_avg:95.53ms +step:1611/1705 train_time:153895ms step_avg:95.53ms +step:1612/1705 train_time:153990ms step_avg:95.53ms +step:1613/1705 train_time:154086ms step_avg:95.53ms +step:1614/1705 train_time:154183ms step_avg:95.53ms +step:1615/1705 train_time:154278ms step_avg:95.53ms +step:1616/1705 train_time:154373ms step_avg:95.53ms +step:1617/1705 train_time:154467ms step_avg:95.53ms +step:1618/1705 train_time:154564ms step_avg:95.53ms +step:1619/1705 train_time:154660ms step_avg:95.53ms +step:1620/1705 train_time:154756ms step_avg:95.53ms +step:1621/1705 train_time:154851ms step_avg:95.53ms +step:1622/1705 train_time:154946ms step_avg:95.53ms +step:1623/1705 train_time:155041ms step_avg:95.53ms +step:1624/1705 train_time:155137ms step_avg:95.53ms +step:1625/1705 train_time:155232ms step_avg:95.53ms +step:1625/1705 val_loss:3.2922 train_time:155329ms step_avg:95.59ms +step:1626/1705 train_time:155351ms step_avg:95.54ms +step:1627/1705 train_time:155430ms step_avg:95.53ms +step:1628/1705 train_time:155530ms step_avg:95.53ms +step:1629/1705 train_time:155625ms step_avg:95.53ms +step:1630/1705 train_time:155720ms step_avg:95.53ms +step:1631/1705 train_time:155814ms step_avg:95.53ms +step:1632/1705 train_time:155909ms step_avg:95.53ms +step:1633/1705 train_time:156002ms step_avg:95.53ms +step:1634/1705 train_time:156097ms step_avg:95.53ms +step:1635/1705 train_time:156191ms step_avg:95.53ms +step:1636/1705 train_time:156287ms step_avg:95.53ms +step:1637/1705 train_time:156383ms step_avg:95.53ms +step:1638/1705 train_time:156483ms step_avg:95.53ms +step:1639/1705 train_time:156580ms step_avg:95.53ms +step:1640/1705 train_time:156675ms step_avg:95.53ms 
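The val_loss checkpoints logged every 125 steps flatten out steadily over this run. A small sketch, with the (step, val_loss) pairs transcribed from the eval lines so far, printing the improvement per eval interval:

vals = [(500, 3.7225), (625, 3.6215), (750, 3.5671), (875, 3.5249), (1000, 3.4856),
        (1125, 3.4374), (1250, 3.3898), (1375, 3.3522), (1500, 3.3199), (1625, 3.2922)]
for (s0, l0), (s1, l1) in zip(vals, vals[1:]):
    print(f"steps {s0}->{s1}: val_loss improved by {l0 - l1:.4f}")
# e.g. 0.1010 over steps 500->625, shrinking to 0.0277 over steps 1500->1625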
+step:1641/1705 train_time:156770ms step_avg:95.53ms +step:1642/1705 train_time:156865ms step_avg:95.53ms +step:1643/1705 train_time:156958ms step_avg:95.53ms +step:1644/1705 train_time:157054ms step_avg:95.53ms +step:1645/1705 train_time:157148ms step_avg:95.53ms +step:1646/1705 train_time:157243ms step_avg:95.53ms +step:1647/1705 train_time:157339ms step_avg:95.53ms +step:1648/1705 train_time:157437ms step_avg:95.53ms +step:1649/1705 train_time:157535ms step_avg:95.53ms +step:1650/1705 train_time:157632ms step_avg:95.53ms +step:1651/1705 train_time:157728ms step_avg:95.54ms +step:1652/1705 train_time:157823ms step_avg:95.53ms +step:1653/1705 train_time:157918ms step_avg:95.53ms +step:1654/1705 train_time:158012ms step_avg:95.53ms +step:1655/1705 train_time:158107ms step_avg:95.53ms +step:1656/1705 train_time:158201ms step_avg:95.53ms +step:1657/1705 train_time:158296ms step_avg:95.53ms +step:1658/1705 train_time:158392ms step_avg:95.53ms +step:1659/1705 train_time:158488ms step_avg:95.53ms +step:1660/1705 train_time:158585ms step_avg:95.53ms +step:1661/1705 train_time:158680ms step_avg:95.53ms +step:1662/1705 train_time:158776ms step_avg:95.53ms +step:1663/1705 train_time:158872ms step_avg:95.53ms +step:1664/1705 train_time:158967ms step_avg:95.53ms +step:1665/1705 train_time:159061ms step_avg:95.53ms +step:1666/1705 train_time:159156ms step_avg:95.53ms +step:1667/1705 train_time:159251ms step_avg:95.53ms +step:1668/1705 train_time:159345ms step_avg:95.53ms +step:1669/1705 train_time:159441ms step_avg:95.53ms +step:1670/1705 train_time:159537ms step_avg:95.53ms +step:1671/1705 train_time:159634ms step_avg:95.53ms +step:1672/1705 train_time:159730ms step_avg:95.53ms +step:1673/1705 train_time:159826ms step_avg:95.53ms +step:1674/1705 train_time:159920ms step_avg:95.53ms +step:1675/1705 train_time:160015ms step_avg:95.53ms +step:1676/1705 train_time:160110ms step_avg:95.53ms +step:1677/1705 train_time:160205ms step_avg:95.53ms +step:1678/1705 train_time:160300ms step_avg:95.53ms +step:1679/1705 train_time:160395ms step_avg:95.53ms +step:1680/1705 train_time:160492ms step_avg:95.53ms +step:1681/1705 train_time:160589ms step_avg:95.53ms +step:1682/1705 train_time:160685ms step_avg:95.53ms +step:1683/1705 train_time:160779ms step_avg:95.53ms +step:1684/1705 train_time:160874ms step_avg:95.53ms +step:1685/1705 train_time:160970ms step_avg:95.53ms +step:1686/1705 train_time:161065ms step_avg:95.53ms +step:1687/1705 train_time:161160ms step_avg:95.53ms +step:1688/1705 train_time:161255ms step_avg:95.53ms +step:1689/1705 train_time:161350ms step_avg:95.53ms +step:1690/1705 train_time:161445ms step_avg:95.53ms +step:1691/1705 train_time:161541ms step_avg:95.53ms +step:1692/1705 train_time:161637ms step_avg:95.53ms +step:1693/1705 train_time:161733ms step_avg:95.53ms +step:1694/1705 train_time:161830ms step_avg:95.53ms +step:1695/1705 train_time:161924ms step_avg:95.53ms +step:1696/1705 train_time:162020ms step_avg:95.53ms +step:1697/1705 train_time:162115ms step_avg:95.53ms +step:1698/1705 train_time:162370ms step_avg:95.62ms +step:1699/1705 train_time:162565ms step_avg:95.68ms +step:1700/1705 train_time:162658ms step_avg:95.68ms +step:1701/1705 train_time:162753ms step_avg:95.68ms +step:1702/1705 train_time:162847ms step_avg:95.68ms +step:1703/1705 train_time:162941ms step_avg:95.68ms +step:1704/1705 train_time:163035ms step_avg:95.68ms +step:1705/1705 train_time:163129ms step_avg:95.68ms +step:1705/1705 val_loss:3.2779 train_time:163224ms step_avg:95.73ms +peak memory allocated: 33750 MiB 
reserved: 49456 MiB diff --git a/records/091025_Yarn/0ecdb695-510b-4c3b-b030-09861a162ce8.txt b/records/091025_Yarn/0ecdb695-510b-4c3b-b030-09861a162ce8.txt new file mode 100644 index 000000000..cbf11baca --- /dev/null +++ b/records/091025_Yarn/0ecdb695-510b-4c3b-b030-09861a162ce8.txt @@ -0,0 +1,2863 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math + +from dataclasses import dataclass +from functools import lru_cache +from itertools import accumulate +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, 
w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * 
c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += 
a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
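+
+    A minimal usage sketch (hypothetical training loop, for illustration only; it assumes
+    torch.distributed is already initialized, since step() calls reduce_scatter/all_gather,
+    and mirrors how this script constructs the optimizer later on):
+
+        hidden_weights = [p for p in model.blocks.parameters() if p.ndim >= 2]
+        opt = Muon(hidden_weights, lr=0.05, momentum=0.95, weight_decay=0.0)
+        loss = model(inputs, targets)
+        loss.backward()
+        opt.step()
+        model.zero_grad(set_to_none=True)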
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor): + assert cos.size(0) >= x_BTHD.size(-3) + cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +@dataclass +class AttnArgs: + ve: torch.Tensor + sa_lambdas: torch.Tensor + seqlens: torch.Tensor + bm_size: int + rotary_cos: torch.Tensor + rotary_sin: torch.Tensor + attn_scale: float + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # 
https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate = CastedLinear(12, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, attn_args: AttnArgs): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + # unpack attention args + rotary_cos, rotary_sin = attn_args.rotary_cos, attn_args.rotary_sin + ve, sa_lambdas = attn_args.ve, attn_args.sa_lambdas + seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin) + if ve is not None: + v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @ KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = sa_lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None + # skip MLP blocks for first MLP layer by @EmelyanenkoK + self.mlp = MLP(dim) if layer_idx != 0 else None + + def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args: AttnArgs): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), attn_args) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# 
----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + self.max_seq_len = max_seq_len + self.setup_yarn(head_dim) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
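+        # lr_mul / wd_mul are plain attributes attached to the tensors; the optimizers read
+        # them back via getattr(p, "lr_mul", 1.0) / getattr(p, "wd_mul", 1.0) to scale the
+        # effective learning rate and weight decay per parameter without extra param groups.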
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def setup_yarn(self, head_dim: int): + # store single copy of rotary tensors + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32) + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)]) + t = torch.arange(self.max_seq_len, dtype=torch.float32) + theta = torch.outer(t, angular_freq) + self.rotary_cos = nn.Buffer(theta.cos(), persistent=False) + self.rotary_sin = nn.Buffer(theta.sin(), persistent=False) + self.angular_freq = angular_freq + + # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd + windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate])) + scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])] + # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor)) + self.attn_scales = dict(zip(windows, attn_scales)) + + def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32): + rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi) + scaling_factor = old_window / new_window + interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1) + self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor) + t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device) + theta = torch.outer(t, self.angular_freq) + self.rotary_cos.copy_(theta.cos()) + self.rotary_sin.copy_(theta.sin()) + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            attn_args = AttnArgs(
+                ve=ve[i],
+                sa_lambdas=sa_lambdas[i],
+                seqlens=seqlens,
+                bm_size=bm_sizes[i],
+                rotary_cos=self.rotary_cos,
+                rotary_sin=self.rotary_sin,
+                attn_scale=self.attn_scales[ws]
+            )
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, x0, lambdas[i], attn_args)
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead (scan index {idx} of {n}); hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+        assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
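+# Toy illustration of BOSFinder (an editorial sketch with made-up token values; not part of the run):
+#
+#     tokens = torch.tensor([50256, 5, 6, 50256, 7, 50256, 8, 9, 10], dtype=torch.uint16)
+#     finder = BOSFinder(tokens, world_size=1)
+#     starts, ends = finder.next_batch(num_tokens_local=4, max_seq_len=8)
+#     # starts == [[0, 3]], ends == [[3, 5]]: two documents covering 5 tokens in total,
+#     # i.e. num_tokens_local + 1, so inputs = buf[:-1] and targets = buf[1:] line up
+#     # with a one-token shift.
+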
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.5 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = f"yarn/{uuid.uuid4()}"
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+    ws_validate: int = 13 # increase final validation ws @classiclarryd
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    head_dim=128,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + if step == args.num_iterations: + return args.ws_validate + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +ws = get_ws(0) +for step in range(train_steps + 1): + last_step = (step == train_steps) + new_ws = get_ws(step) + if new_ws != ws: + model.apply_yarn(ws, new_ws) + ws=new_ws + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 
1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Thu Sep 11 05:40:13 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | Off | +| N/A 31C P0 115W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | Off | +| N/A 32C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | Off | +| N/A 33C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | Off | +| N/A 30C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | Off | +| N/A 31C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | Off | +| N/A 33C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | Off | +| N/A 32C P0 117W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | Off | +| N/A 32C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 97165 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 97166 C /usr/bin/python3 614MiB | +| 0 N/A N/A 97167 C /usr/bin/python3 614MiB | +| 0 N/A N/A 97168 C /usr/bin/python3 614MiB | +| 0 N/A N/A 97169 C /usr/bin/python3 614MiB | +| 0 N/A N/A 97170 C /usr/bin/python3 614MiB | +| 0 N/A N/A 97171 C /usr/bin/python3 614MiB | +| 0 N/A N/A 97172 C /usr/bin/python3 614MiB | +| 1 N/A N/A 97166 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 97167 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 97168 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 97169 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 97170 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 97171 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 97172 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1670 train_time:482ms step_avg:481.84ms +step:2/1670 train_time:506ms step_avg:252.88ms +step:3/1670 train_time:574ms step_avg:191.30ms +step:4/1670 train_time:664ms step_avg:166.06ms +step:5/1670 train_time:756ms step_avg:151.16ms +step:6/1670 train_time:848ms step_avg:141.26ms +step:7/1670 train_time:940ms step_avg:134.29ms 
+step:8/1670 train_time:1032ms step_avg:128.96ms +step:9/1670 train_time:1123ms step_avg:124.79ms +step:10/1670 train_time:1215ms step_avg:121.54ms +step:11/1670 train_time:1306ms step_avg:118.73ms +step:12/1670 train_time:1398ms step_avg:116.54ms +step:13/1670 train_time:1498ms step_avg:115.20ms +step:14/1670 train_time:1593ms step_avg:113.80ms +step:15/1670 train_time:1687ms step_avg:112.44ms +step:16/1670 train_time:1778ms step_avg:111.12ms +step:17/1670 train_time:1870ms step_avg:110.00ms +step:18/1670 train_time:1963ms step_avg:109.06ms +step:19/1670 train_time:2055ms step_avg:108.18ms +step:20/1670 train_time:2147ms step_avg:107.37ms +step:21/1670 train_time:2239ms step_avg:106.64ms +step:22/1670 train_time:2331ms step_avg:105.98ms +step:23/1670 train_time:2424ms step_avg:105.39ms +step:24/1670 train_time:2517ms step_avg:104.87ms +step:25/1670 train_time:2609ms step_avg:104.38ms +step:26/1670 train_time:2702ms step_avg:103.91ms +step:27/1670 train_time:2795ms step_avg:103.51ms +step:28/1670 train_time:2887ms step_avg:103.12ms +step:29/1670 train_time:2980ms step_avg:102.76ms +step:30/1670 train_time:3072ms step_avg:102.42ms +step:31/1670 train_time:3165ms step_avg:102.09ms +step:32/1670 train_time:3257ms step_avg:101.77ms +step:33/1670 train_time:3350ms step_avg:101.51ms +step:34/1670 train_time:3442ms step_avg:101.25ms +step:35/1670 train_time:3535ms step_avg:100.99ms +step:36/1670 train_time:3627ms step_avg:100.76ms +step:37/1670 train_time:3720ms step_avg:100.53ms +step:38/1670 train_time:3813ms step_avg:100.35ms +step:39/1670 train_time:3905ms step_avg:100.13ms +step:40/1670 train_time:3997ms step_avg:99.92ms +step:41/1670 train_time:4090ms step_avg:99.75ms +step:42/1670 train_time:4182ms step_avg:99.58ms +step:43/1670 train_time:4274ms step_avg:99.41ms +step:44/1670 train_time:4367ms step_avg:99.26ms +step:45/1670 train_time:4460ms step_avg:99.12ms +step:46/1670 train_time:4553ms step_avg:98.97ms +step:47/1670 train_time:4646ms step_avg:98.84ms +step:48/1670 train_time:4738ms step_avg:98.71ms +step:49/1670 train_time:4831ms step_avg:98.59ms +step:50/1670 train_time:4924ms step_avg:98.47ms +step:51/1670 train_time:5016ms step_avg:98.35ms +step:52/1670 train_time:5109ms step_avg:98.25ms +step:53/1670 train_time:5201ms step_avg:98.14ms +step:54/1670 train_time:5295ms step_avg:98.05ms +step:55/1670 train_time:5388ms step_avg:97.96ms +step:56/1670 train_time:5480ms step_avg:97.86ms +step:57/1670 train_time:5572ms step_avg:97.76ms +step:58/1670 train_time:5665ms step_avg:97.67ms +step:59/1670 train_time:5757ms step_avg:97.58ms +step:60/1670 train_time:5850ms step_avg:97.50ms +step:61/1670 train_time:5942ms step_avg:97.41ms +step:62/1670 train_time:6034ms step_avg:97.33ms +step:63/1670 train_time:6127ms step_avg:97.26ms +step:64/1670 train_time:6220ms step_avg:97.19ms +step:65/1670 train_time:6312ms step_avg:97.11ms +step:66/1670 train_time:6405ms step_avg:97.04ms +step:67/1670 train_time:6497ms step_avg:96.98ms +step:68/1670 train_time:6590ms step_avg:96.91ms +step:69/1670 train_time:6682ms step_avg:96.84ms +step:70/1670 train_time:6774ms step_avg:96.78ms +step:71/1670 train_time:6868ms step_avg:96.73ms +step:72/1670 train_time:6960ms step_avg:96.67ms +step:73/1670 train_time:7053ms step_avg:96.62ms +step:74/1670 train_time:7145ms step_avg:96.55ms +step:75/1670 train_time:7237ms step_avg:96.49ms +step:76/1670 train_time:7329ms step_avg:96.43ms +step:77/1670 train_time:7421ms step_avg:96.38ms +step:78/1670 train_time:7514ms step_avg:96.33ms +step:79/1670 train_time:7606ms 
step_avg:96.28ms
+step:80/1670 train_time:7698ms step_avg:96.22ms
+step:81/1670 train_time:7791ms step_avg:96.18ms
+step:82/1670 train_time:7883ms step_avg:96.13ms
+step:83/1670 train_time:7975ms step_avg:96.08ms
+step:84/1670 train_time:8068ms step_avg:96.05ms
+step:85/1670 train_time:8160ms step_avg:96.00ms
+step:86/1670 train_time:8254ms step_avg:95.97ms
+step:87/1670 train_time:8346ms step_avg:95.93ms
+step:88/1670 train_time:8438ms step_avg:95.88ms
+step:89/1670 train_time:8530ms step_avg:95.84ms
+step:90/1670 train_time:8622ms step_avg:95.80ms
+step:91/1670 train_time:8714ms step_avg:95.76ms
+step:92/1670 train_time:8807ms step_avg:95.73ms
+step:93/1670 train_time:8899ms step_avg:95.68ms
+step:94/1670 train_time:8991ms step_avg:95.65ms
+step:95/1670 train_time:9084ms step_avg:95.62ms
+step:96/1670 train_time:9176ms step_avg:95.58ms
+step:97/1670 train_time:9268ms step_avg:95.55ms
+step:98/1670 train_time:9360ms step_avg:95.51ms
+step:99/1670 train_time:9453ms step_avg:95.48ms
+step:100/1670 train_time:9545ms step_avg:95.45ms
+step:101/1670 train_time:9637ms step_avg:95.41ms
+step:102/1670 train_time:9730ms step_avg:95.39ms
+step:103/1670 train_time:9822ms step_avg:95.36ms
+step:104/1670 train_time:9914ms step_avg:95.33ms
+step:105/1670 train_time:10006ms step_avg:95.30ms
+step:106/1670 train_time:10099ms step_avg:95.27ms
+step:107/1670 train_time:10192ms step_avg:95.25ms
+step:108/1670 train_time:10284ms step_avg:95.23ms
+step:109/1670 train_time:10377ms step_avg:95.20ms
+step:110/1670 train_time:10469ms step_avg:95.17ms
+step:111/1670 train_time:10561ms step_avg:95.15ms
+step:112/1670 train_time:10654ms step_avg:95.12ms
+step:113/1670 train_time:10747ms step_avg:95.11ms
+step:114/1670 train_time:10838ms step_avg:95.07ms
+step:115/1670 train_time:10932ms step_avg:95.06ms
+step:116/1670 train_time:11025ms step_avg:95.04ms
+step:117/1670 train_time:11116ms step_avg:95.01ms
+step:118/1670 train_time:11209ms step_avg:94.99ms
+step:119/1670 train_time:11301ms step_avg:94.96ms
+step:120/1670 train_time:11394ms step_avg:94.95ms
+step:121/1670 train_time:11487ms step_avg:94.93ms
+step:122/1670 train_time:11578ms step_avg:94.90ms
+step:123/1670 train_time:11671ms step_avg:94.89ms
+step:124/1670 train_time:11763ms step_avg:94.86ms
+step:125/1670 train_time:11855ms step_avg:94.84ms
+step:125/1670 val_loss:4.2857 train_time:11946ms step_avg:95.56ms
+step:126/1670 train_time:11971ms step_avg:95.01ms
+step:127/1670 train_time:12046ms step_avg:94.85ms
+step:128/1670 train_time:12147ms step_avg:94.90ms
+step:129/1670 train_time:12242ms step_avg:94.90ms
+step:130/1670 train_time:12334ms step_avg:94.88ms
+step:131/1670 train_time:12427ms step_avg:94.86ms
+step:132/1670 train_time:12518ms step_avg:94.84ms
+step:133/1670 train_time:12610ms step_avg:94.81ms
+step:134/1670 train_time:12702ms step_avg:94.79ms
+step:135/1670 train_time:12793ms step_avg:94.77ms
+step:136/1670 train_time:12885ms step_avg:94.74ms
+step:137/1670 train_time:12977ms step_avg:94.72ms
+step:138/1670 train_time:13071ms step_avg:94.72ms
+step:139/1670 train_time:13165ms step_avg:94.71ms
+step:140/1670 train_time:13259ms step_avg:94.71ms
+step:141/1670 train_time:13351ms step_avg:94.69ms
+step:142/1670 train_time:13443ms step_avg:94.67ms
+step:143/1670 train_time:13535ms step_avg:94.65ms
+step:144/1670 train_time:13628ms step_avg:94.64ms
+step:145/1670 train_time:13719ms step_avg:94.61ms
+step:146/1670 train_time:13810ms step_avg:94.59ms
+step:147/1670 train_time:13902ms step_avg:94.57ms
+step:148/1670 train_time:13994ms step_avg:94.55ms
+step:149/1670 train_time:14087ms step_avg:94.54ms
+step:150/1670 train_time:14180ms step_avg:94.53ms
+step:151/1670 train_time:14272ms step_avg:94.52ms
+step:152/1670 train_time:14365ms step_avg:94.51ms
+step:153/1670 train_time:14457ms step_avg:94.49ms
+step:154/1670 train_time:14549ms step_avg:94.47ms
+step:155/1670 train_time:14641ms step_avg:94.46ms
+step:156/1670 train_time:14732ms step_avg:94.44ms
+step:157/1670 train_time:14824ms step_avg:94.42ms
+step:158/1670 train_time:14916ms step_avg:94.40ms
+step:159/1670 train_time:15008ms step_avg:94.39ms
+step:160/1670 train_time:15101ms step_avg:94.38ms
+step:161/1670 train_time:15193ms step_avg:94.36ms
+step:162/1670 train_time:15286ms step_avg:94.36ms
+step:163/1670 train_time:15378ms step_avg:94.34ms
+step:164/1670 train_time:15470ms step_avg:94.33ms
+step:165/1670 train_time:15563ms step_avg:94.32ms
+step:166/1670 train_time:15655ms step_avg:94.30ms
+step:167/1670 train_time:15747ms step_avg:94.29ms
+step:168/1670 train_time:15839ms step_avg:94.28ms
+step:169/1670 train_time:15932ms step_avg:94.27ms
+step:170/1670 train_time:16025ms step_avg:94.26ms
+step:171/1670 train_time:16117ms step_avg:94.25ms
+step:172/1670 train_time:16209ms step_avg:94.24ms
+step:173/1670 train_time:16304ms step_avg:94.24ms
+step:174/1670 train_time:16396ms step_avg:94.23ms
+step:175/1670 train_time:16488ms step_avg:94.22ms
+step:176/1670 train_time:16580ms step_avg:94.21ms
+step:177/1670 train_time:16672ms step_avg:94.19ms
+step:178/1670 train_time:16764ms step_avg:94.18ms
+step:179/1670 train_time:16857ms step_avg:94.17ms
+step:180/1670 train_time:16949ms step_avg:94.16ms
+step:181/1670 train_time:17042ms step_avg:94.15ms
+step:182/1670 train_time:17134ms step_avg:94.14ms
+step:183/1670 train_time:17227ms step_avg:94.14ms
+step:184/1670 train_time:17319ms step_avg:94.13ms
+step:185/1670 train_time:17411ms step_avg:94.11ms
+step:186/1670 train_time:17504ms step_avg:94.11ms
+step:187/1670 train_time:17596ms step_avg:94.10ms
+step:188/1670 train_time:17688ms step_avg:94.09ms
+step:189/1670 train_time:17780ms step_avg:94.07ms
+step:190/1670 train_time:17872ms step_avg:94.06ms
+step:191/1670 train_time:17964ms step_avg:94.05ms
+step:192/1670 train_time:18056ms step_avg:94.04ms
+step:193/1670 train_time:18149ms step_avg:94.04ms
+step:194/1670 train_time:18242ms step_avg:94.03ms
+step:195/1670 train_time:18334ms step_avg:94.02ms
+step:196/1670 train_time:18427ms step_avg:94.01ms
+step:197/1670 train_time:18520ms step_avg:94.01ms
+step:198/1670 train_time:18611ms step_avg:94.00ms
+step:199/1670 train_time:18703ms step_avg:93.99ms
+step:200/1670 train_time:18796ms step_avg:93.98ms
+step:201/1670 train_time:18888ms step_avg:93.97ms
+step:202/1670 train_time:18981ms step_avg:93.96ms
+step:203/1670 train_time:19073ms step_avg:93.96ms
+step:204/1670 train_time:19166ms step_avg:93.95ms
+step:205/1670 train_time:19258ms step_avg:93.94ms
+step:206/1670 train_time:19350ms step_avg:93.93ms
+step:207/1670 train_time:19443ms step_avg:93.93ms
+step:208/1670 train_time:19535ms step_avg:93.92ms
+step:209/1670 train_time:19628ms step_avg:93.92ms
+step:210/1670 train_time:19720ms step_avg:93.91ms
+step:211/1670 train_time:19813ms step_avg:93.90ms
+step:212/1670 train_time:19906ms step_avg:93.89ms
+step:213/1670 train_time:20226ms step_avg:94.96ms
+step:214/1670 train_time:20368ms step_avg:95.18ms
+step:215/1670 train_time:20459ms step_avg:95.16ms
+step:216/1670 train_time:20551ms step_avg:95.14ms
+step:217/1670 train_time:20642ms step_avg:95.12ms
+step:218/1670 train_time:20733ms step_avg:95.11ms
+step:219/1670 train_time:20825ms step_avg:95.09ms
+step:220/1670 train_time:20917ms step_avg:95.08ms
+step:221/1670 train_time:21009ms step_avg:95.06ms
+step:222/1670 train_time:21100ms step_avg:95.04ms
+step:223/1670 train_time:21192ms step_avg:95.03ms
+step:224/1670 train_time:21287ms step_avg:95.03ms
+step:225/1670 train_time:21382ms step_avg:95.03ms
+step:226/1670 train_time:21475ms step_avg:95.02ms
+step:227/1670 train_time:21567ms step_avg:95.01ms
+step:228/1670 train_time:21658ms step_avg:94.99ms
+step:229/1670 train_time:21750ms step_avg:94.98ms
+step:230/1670 train_time:21841ms step_avg:94.96ms
+step:231/1670 train_time:21933ms step_avg:94.95ms
+step:232/1670 train_time:22025ms step_avg:94.93ms
+step:233/1670 train_time:22116ms step_avg:94.92ms
+step:234/1670 train_time:22209ms step_avg:94.91ms
+step:235/1670 train_time:22303ms step_avg:94.90ms
+step:236/1670 train_time:22395ms step_avg:94.90ms
+step:237/1670 train_time:22488ms step_avg:94.89ms
+step:238/1670 train_time:22580ms step_avg:94.88ms
+step:239/1670 train_time:22672ms step_avg:94.86ms
+step:240/1670 train_time:22765ms step_avg:94.85ms
+step:241/1670 train_time:22857ms step_avg:94.84ms
+step:242/1670 train_time:22949ms step_avg:94.83ms
+step:243/1670 train_time:23041ms step_avg:94.82ms
+step:244/1670 train_time:23133ms step_avg:94.81ms
+step:245/1670 train_time:23225ms step_avg:94.80ms
+step:246/1670 train_time:23317ms step_avg:94.78ms
+step:247/1670 train_time:23409ms step_avg:94.77ms
+step:248/1670 train_time:23502ms step_avg:94.77ms
+step:249/1670 train_time:23594ms step_avg:94.76ms
+step:250/1670 train_time:23687ms step_avg:94.75ms
+step:250/1670 val_loss:3.9662 train_time:23777ms step_avg:95.11ms
+step:251/1670 train_time:23802ms step_avg:94.83ms
+step:252/1670 train_time:23877ms step_avg:94.75ms
+step:253/1670 train_time:23975ms step_avg:94.76ms
+step:254/1670 train_time:24070ms step_avg:94.76ms
+step:255/1670 train_time:24162ms step_avg:94.75ms
+step:256/1670 train_time:24254ms step_avg:94.74ms
+step:257/1670 train_time:24346ms step_avg:94.73ms
+step:258/1670 train_time:24437ms step_avg:94.72ms
+step:259/1670 train_time:24528ms step_avg:94.70ms
+step:260/1670 train_time:24620ms step_avg:94.69ms
+step:261/1670 train_time:24711ms step_avg:94.68ms
+step:262/1670 train_time:24803ms step_avg:94.67ms
+step:263/1670 train_time:24897ms step_avg:94.66ms
+step:264/1670 train_time:24991ms step_avg:94.66ms
+step:265/1670 train_time:25084ms step_avg:94.66ms
+step:266/1670 train_time:25178ms step_avg:94.65ms
+step:267/1670 train_time:25270ms step_avg:94.64ms
+step:268/1670 train_time:25362ms step_avg:94.63ms
+step:269/1670 train_time:25454ms step_avg:94.62ms
+step:270/1670 train_time:25546ms step_avg:94.61ms
+step:271/1670 train_time:25637ms step_avg:94.60ms
+step:272/1670 train_time:25728ms step_avg:94.59ms
+step:273/1670 train_time:25820ms step_avg:94.58ms
+step:274/1670 train_time:25913ms step_avg:94.57ms
+step:275/1670 train_time:26005ms step_avg:94.56ms
+step:276/1670 train_time:26098ms step_avg:94.56ms
+step:277/1670 train_time:26191ms step_avg:94.55ms
+step:278/1670 train_time:26284ms step_avg:94.55ms
+step:279/1670 train_time:26376ms step_avg:94.54ms
+step:280/1670 train_time:26468ms step_avg:94.53ms
+step:281/1670 train_time:26560ms step_avg:94.52ms
+step:282/1670 train_time:26652ms step_avg:94.51ms
+step:283/1670 train_time:26744ms step_avg:94.50ms
+step:284/1670 train_time:26836ms step_avg:94.49ms
+step:285/1670 train_time:26927ms step_avg:94.48ms
+step:286/1670 train_time:27020ms step_avg:94.47ms
+step:287/1670 train_time:27112ms step_avg:94.47ms
+step:288/1670 train_time:27204ms step_avg:94.46ms
+step:289/1670 train_time:27296ms step_avg:94.45ms
+step:290/1670 train_time:27388ms step_avg:94.44ms
+step:291/1670 train_time:27481ms step_avg:94.43ms
+step:292/1670 train_time:27573ms step_avg:94.43ms
+step:293/1670 train_time:27664ms step_avg:94.42ms
+step:294/1670 train_time:27757ms step_avg:94.41ms
+step:295/1670 train_time:27849ms step_avg:94.40ms
+step:296/1670 train_time:27942ms step_avg:94.40ms
+step:297/1670 train_time:28034ms step_avg:94.39ms
+step:298/1670 train_time:28126ms step_avg:94.38ms
+step:299/1670 train_time:28219ms step_avg:94.38ms
+step:300/1670 train_time:28311ms step_avg:94.37ms
+step:301/1670 train_time:28403ms step_avg:94.36ms
+step:302/1670 train_time:28496ms step_avg:94.36ms
+step:303/1670 train_time:28589ms step_avg:94.35ms
+step:304/1670 train_time:28681ms step_avg:94.34ms
+step:305/1670 train_time:28773ms step_avg:94.34ms
+step:306/1670 train_time:28865ms step_avg:94.33ms
+step:307/1670 train_time:28957ms step_avg:94.32ms
+step:308/1670 train_time:29050ms step_avg:94.32ms
+step:309/1670 train_time:29142ms step_avg:94.31ms
+step:310/1670 train_time:29235ms step_avg:94.31ms
+step:311/1670 train_time:29327ms step_avg:94.30ms
+step:312/1670 train_time:29420ms step_avg:94.29ms
+step:313/1670 train_time:29511ms step_avg:94.29ms
+step:314/1670 train_time:29604ms step_avg:94.28ms
+step:315/1670 train_time:29696ms step_avg:94.27ms
+step:316/1670 train_time:29788ms step_avg:94.27ms
+step:317/1670 train_time:29881ms step_avg:94.26ms
+step:318/1670 train_time:29973ms step_avg:94.26ms
+step:319/1670 train_time:30065ms step_avg:94.25ms
+step:320/1670 train_time:30158ms step_avg:94.25ms
+step:321/1670 train_time:30251ms step_avg:94.24ms
+step:322/1670 train_time:30343ms step_avg:94.23ms
+step:323/1670 train_time:30435ms step_avg:94.23ms
+step:324/1670 train_time:30528ms step_avg:94.22ms
+step:325/1670 train_time:30620ms step_avg:94.22ms
+step:326/1670 train_time:30712ms step_avg:94.21ms
+step:327/1670 train_time:30804ms step_avg:94.20ms
+step:328/1670 train_time:30896ms step_avg:94.20ms
+step:329/1670 train_time:30988ms step_avg:94.19ms
+step:330/1670 train_time:31081ms step_avg:94.18ms
+step:331/1670 train_time:31173ms step_avg:94.18ms
+step:332/1670 train_time:31265ms step_avg:94.17ms
+step:333/1670 train_time:31358ms step_avg:94.17ms
+step:334/1670 train_time:31450ms step_avg:94.16ms
+step:335/1670 train_time:31542ms step_avg:94.16ms
+step:336/1670 train_time:31634ms step_avg:94.15ms
+step:337/1670 train_time:31726ms step_avg:94.14ms
+step:338/1670 train_time:31818ms step_avg:94.14ms
+step:339/1670 train_time:31910ms step_avg:94.13ms
+step:340/1670 train_time:32002ms step_avg:94.12ms
+step:341/1670 train_time:32095ms step_avg:94.12ms
+step:342/1670 train_time:32187ms step_avg:94.11ms
+step:343/1670 train_time:32280ms step_avg:94.11ms
+step:344/1670 train_time:32372ms step_avg:94.11ms
+step:345/1670 train_time:32465ms step_avg:94.10ms
+step:346/1670 train_time:32556ms step_avg:94.09ms
+step:347/1670 train_time:32648ms step_avg:94.09ms
+step:348/1670 train_time:32741ms step_avg:94.08ms
+step:349/1670 train_time:32833ms step_avg:94.08ms
+step:350/1670 train_time:32925ms step_avg:94.07ms
+step:351/1670 train_time:33017ms step_avg:94.07ms
+step:352/1670 train_time:33109ms step_avg:94.06ms
+step:353/1670 train_time:33202ms step_avg:94.06ms
+step:354/1670 train_time:33294ms step_avg:94.05ms
+step:355/1670 train_time:33386ms step_avg:94.04ms
+step:356/1670 train_time:33479ms step_avg:94.04ms
+step:357/1670 train_time:33571ms step_avg:94.04ms
+step:358/1670 train_time:33663ms step_avg:94.03ms
+step:359/1670 train_time:33756ms step_avg:94.03ms
+step:360/1670 train_time:33848ms step_avg:94.02ms
+step:361/1670 train_time:33941ms step_avg:94.02ms
+step:362/1670 train_time:34033ms step_avg:94.01ms
+step:363/1670 train_time:34125ms step_avg:94.01ms
+step:364/1670 train_time:34218ms step_avg:94.00ms
+step:365/1670 train_time:34310ms step_avg:94.00ms
+step:366/1670 train_time:34402ms step_avg:94.00ms
+step:367/1670 train_time:34495ms step_avg:93.99ms
+step:368/1670 train_time:34586ms step_avg:93.98ms
+step:369/1670 train_time:34679ms step_avg:93.98ms
+step:370/1670 train_time:34771ms step_avg:93.98ms
+step:371/1670 train_time:34863ms step_avg:93.97ms
+step:372/1670 train_time:34956ms step_avg:93.97ms
+step:373/1670 train_time:35048ms step_avg:93.96ms
+step:374/1670 train_time:35140ms step_avg:93.96ms
+step:375/1670 train_time:35232ms step_avg:93.95ms
+step:375/1670 val_loss:3.8152 train_time:35322ms step_avg:94.19ms
+step:376/1670 train_time:35348ms step_avg:94.01ms
+step:377/1670 train_time:35422ms step_avg:93.96ms
+step:378/1670 train_time:35521ms step_avg:93.97ms
+step:379/1670 train_time:35614ms step_avg:93.97ms
+step:380/1670 train_time:35707ms step_avg:93.97ms
+step:381/1670 train_time:35798ms step_avg:93.96ms
+step:382/1670 train_time:35890ms step_avg:93.95ms
+step:383/1670 train_time:35980ms step_avg:93.94ms
+step:384/1670 train_time:36072ms step_avg:93.94ms
+step:385/1670 train_time:36164ms step_avg:93.93ms
+step:386/1670 train_time:36255ms step_avg:93.93ms
+step:387/1670 train_time:36347ms step_avg:93.92ms
+step:388/1670 train_time:36441ms step_avg:93.92ms
+step:389/1670 train_time:36536ms step_avg:93.92ms
+step:390/1670 train_time:36629ms step_avg:93.92ms
+step:391/1670 train_time:36721ms step_avg:93.92ms
+step:392/1670 train_time:36813ms step_avg:93.91ms
+step:393/1670 train_time:36905ms step_avg:93.91ms
+step:394/1670 train_time:36997ms step_avg:93.90ms
+step:395/1670 train_time:37089ms step_avg:93.90ms
+step:396/1670 train_time:37181ms step_avg:93.89ms
+step:397/1670 train_time:37273ms step_avg:93.89ms
+step:398/1670 train_time:37366ms step_avg:93.88ms
+step:399/1670 train_time:37459ms step_avg:93.88ms
+step:400/1670 train_time:37552ms step_avg:93.88ms
+step:401/1670 train_time:37645ms step_avg:93.88ms
+step:402/1670 train_time:37736ms step_avg:93.87ms
+step:403/1670 train_time:37829ms step_avg:93.87ms
+step:404/1670 train_time:37921ms step_avg:93.86ms
+step:405/1670 train_time:38013ms step_avg:93.86ms
+step:406/1670 train_time:38106ms step_avg:93.86ms
+step:407/1670 train_time:38197ms step_avg:93.85ms
+step:408/1670 train_time:38290ms step_avg:93.85ms
+step:409/1670 train_time:38382ms step_avg:93.84ms
+step:410/1670 train_time:38474ms step_avg:93.84ms
+step:411/1670 train_time:38567ms step_avg:93.84ms
+step:412/1670 train_time:38659ms step_avg:93.83ms
+step:413/1670 train_time:38752ms step_avg:93.83ms
+step:414/1670 train_time:38844ms step_avg:93.83ms
+step:415/1670 train_time:38936ms step_avg:93.82ms
+step:416/1670 train_time:39029ms step_avg:93.82ms
+step:417/1670 train_time:39121ms step_avg:93.81ms
+step:418/1670 train_time:39213ms step_avg:93.81ms
+step:419/1670 train_time:39306ms step_avg:93.81ms
+step:420/1670 train_time:39398ms step_avg:93.80ms
+step:421/1670 train_time:39490ms step_avg:93.80ms
+step:422/1670 train_time:39584ms step_avg:93.80ms
+step:423/1670 train_time:39676ms step_avg:93.80ms
+step:424/1670 train_time:39769ms step_avg:93.79ms
+step:425/1670 train_time:40096ms step_avg:94.34ms
+step:426/1670 train_time:40293ms step_avg:94.58ms
+step:427/1670 train_time:40384ms step_avg:94.57ms
+step:428/1670 train_time:40475ms step_avg:94.57ms
+step:429/1670 train_time:40566ms step_avg:94.56ms
+step:430/1670 train_time:40658ms step_avg:94.55ms
+step:431/1670 train_time:40749ms step_avg:94.54ms
+step:432/1670 train_time:40840ms step_avg:94.54ms
+step:433/1670 train_time:40931ms step_avg:94.53ms
+step:434/1670 train_time:41023ms step_avg:94.52ms
+step:435/1670 train_time:41115ms step_avg:94.52ms
+step:436/1670 train_time:41210ms step_avg:94.52ms
+step:437/1670 train_time:41306ms step_avg:94.52ms
+step:438/1670 train_time:41399ms step_avg:94.52ms
+step:439/1670 train_time:41492ms step_avg:94.51ms
+step:440/1670 train_time:41584ms step_avg:94.51ms
+step:441/1670 train_time:41676ms step_avg:94.50ms
+step:442/1670 train_time:41768ms step_avg:94.50ms
+step:443/1670 train_time:41860ms step_avg:94.49ms
+step:444/1670 train_time:41951ms step_avg:94.48ms
+step:445/1670 train_time:42043ms step_avg:94.48ms
+step:446/1670 train_time:42135ms step_avg:94.47ms
+step:447/1670 train_time:42228ms step_avg:94.47ms
+step:448/1670 train_time:42321ms step_avg:94.47ms
+step:449/1670 train_time:42414ms step_avg:94.46ms
+step:450/1670 train_time:42508ms step_avg:94.46ms
+step:451/1670 train_time:42600ms step_avg:94.46ms
+step:452/1670 train_time:42692ms step_avg:94.45ms
+step:453/1670 train_time:42784ms step_avg:94.45ms
+step:454/1670 train_time:42876ms step_avg:94.44ms
+step:455/1670 train_time:42968ms step_avg:94.44ms
+step:456/1670 train_time:43060ms step_avg:94.43ms
+step:457/1670 train_time:43153ms step_avg:94.43ms
+step:458/1670 train_time:43245ms step_avg:94.42ms
+step:459/1670 train_time:43339ms step_avg:94.42ms
+step:460/1670 train_time:43433ms step_avg:94.42ms
+step:461/1670 train_time:43526ms step_avg:94.42ms
+step:462/1670 train_time:43619ms step_avg:94.41ms
+step:463/1670 train_time:43711ms step_avg:94.41ms
+step:464/1670 train_time:43803ms step_avg:94.40ms
+step:465/1670 train_time:43895ms step_avg:94.40ms
+step:466/1670 train_time:43988ms step_avg:94.39ms
+step:467/1670 train_time:44080ms step_avg:94.39ms
+step:468/1670 train_time:44172ms step_avg:94.39ms
+step:469/1670 train_time:44266ms step_avg:94.38ms
+step:470/1670 train_time:44359ms step_avg:94.38ms
+step:471/1670 train_time:44453ms step_avg:94.38ms
+step:472/1670 train_time:44546ms step_avg:94.38ms
+step:473/1670 train_time:44638ms step_avg:94.37ms
+step:474/1670 train_time:44730ms step_avg:94.37ms
+step:475/1670 train_time:44824ms step_avg:94.37ms
+step:476/1670 train_time:44915ms step_avg:94.36ms
+step:477/1670 train_time:45008ms step_avg:94.36ms
+step:478/1670 train_time:45100ms step_avg:94.35ms
+step:479/1670 train_time:45192ms step_avg:94.35ms
+step:480/1670 train_time:45284ms step_avg:94.34ms
+step:481/1670 train_time:45377ms step_avg:94.34ms
+step:482/1670 train_time:45470ms step_avg:94.34ms
+step:483/1670 train_time:45562ms step_avg:94.33ms
+step:484/1670 train_time:45654ms step_avg:94.33ms
+step:485/1670 train_time:45746ms step_avg:94.32ms
+step:486/1670 train_time:45839ms step_avg:94.32ms
+step:487/1670 train_time:45931ms step_avg:94.31ms
+step:488/1670 train_time:46024ms step_avg:94.31ms
+step:489/1670 train_time:46115ms step_avg:94.30ms
+step:490/1670 train_time:46208ms step_avg:94.30ms
+step:491/1670 train_time:46300ms step_avg:94.30ms
+step:492/1670 train_time:46393ms step_avg:94.29ms
+step:493/1670 train_time:46485ms step_avg:94.29ms
+step:494/1670 train_time:46577ms step_avg:94.29ms
+step:495/1670 train_time:46670ms step_avg:94.28ms
+step:496/1670 train_time:46762ms step_avg:94.28ms
+step:497/1670 train_time:46854ms step_avg:94.27ms
+step:498/1670 train_time:46946ms step_avg:94.27ms
+step:499/1670 train_time:47039ms step_avg:94.27ms
+step:500/1670 train_time:47131ms step_avg:94.26ms
+step:500/1670 val_loss:3.7135 train_time:47222ms step_avg:94.44ms
+step:501/1670 train_time:47247ms step_avg:94.31ms
+step:502/1670 train_time:47321ms step_avg:94.26ms
+step:503/1670 train_time:47421ms step_avg:94.28ms
+step:504/1670 train_time:47517ms step_avg:94.28ms
+step:505/1670 train_time:47608ms step_avg:94.27ms
+step:506/1670 train_time:47699ms step_avg:94.27ms
+step:507/1670 train_time:47791ms step_avg:94.26ms
+step:508/1670 train_time:47882ms step_avg:94.26ms
+step:509/1670 train_time:47974ms step_avg:94.25ms
+step:510/1670 train_time:48065ms step_avg:94.25ms
+step:511/1670 train_time:48156ms step_avg:94.24ms
+step:512/1670 train_time:48248ms step_avg:94.23ms
+step:513/1670 train_time:48343ms step_avg:94.24ms
+step:514/1670 train_time:48438ms step_avg:94.24ms
+step:515/1670 train_time:48532ms step_avg:94.24ms
+step:516/1670 train_time:48625ms step_avg:94.23ms
+step:517/1670 train_time:48717ms step_avg:94.23ms
+step:518/1670 train_time:48809ms step_avg:94.23ms
+step:519/1670 train_time:48900ms step_avg:94.22ms
+step:520/1670 train_time:48991ms step_avg:94.21ms
+step:521/1670 train_time:49084ms step_avg:94.21ms
+step:522/1670 train_time:49176ms step_avg:94.21ms
+step:523/1670 train_time:49268ms step_avg:94.20ms
+step:524/1670 train_time:49361ms step_avg:94.20ms
+step:525/1670 train_time:49456ms step_avg:94.20ms
+step:526/1670 train_time:49549ms step_avg:94.20ms
+step:527/1670 train_time:49642ms step_avg:94.20ms
+step:528/1670 train_time:49734ms step_avg:94.19ms
+step:529/1670 train_time:49826ms step_avg:94.19ms
+step:530/1670 train_time:49918ms step_avg:94.19ms
+step:531/1670 train_time:50010ms step_avg:94.18ms
+step:532/1670 train_time:50102ms step_avg:94.18ms
+step:533/1670 train_time:50193ms step_avg:94.17ms
+step:534/1670 train_time:50286ms step_avg:94.17ms
+step:535/1670 train_time:50379ms step_avg:94.17ms
+step:536/1670 train_time:50473ms step_avg:94.17ms
+step:537/1670 train_time:50565ms step_avg:94.16ms
+step:538/1670 train_time:50659ms step_avg:94.16ms
+step:539/1670 train_time:50752ms step_avg:94.16ms
+step:540/1670 train_time:50844ms step_avg:94.16ms
+step:541/1670 train_time:50937ms step_avg:94.15ms
+step:542/1670 train_time:51028ms step_avg:94.15ms
+step:543/1670 train_time:51120ms step_avg:94.14ms
+step:544/1670 train_time:51212ms step_avg:94.14ms
+step:545/1670 train_time:51304ms step_avg:94.14ms
+step:546/1670 train_time:51397ms step_avg:94.13ms
+step:547/1670 train_time:51489ms step_avg:94.13ms
+step:548/1670 train_time:51582ms step_avg:94.13ms
+step:549/1670 train_time:51674ms step_avg:94.12ms
+step:550/1670 train_time:51766ms step_avg:94.12ms
+step:551/1670 train_time:51859ms step_avg:94.12ms
+step:552/1670 train_time:51952ms step_avg:94.12ms
+step:553/1670 train_time:52044ms step_avg:94.11ms
+step:554/1670 train_time:52136ms step_avg:94.11ms
+step:555/1670 train_time:52228ms step_avg:94.10ms
+step:556/1670 train_time:52320ms step_avg:94.10ms
+step:557/1670 train_time:52412ms step_avg:94.10ms
+step:558/1670 train_time:52615ms step_avg:94.29ms
+step:559/1670 train_time:52683ms step_avg:94.25ms
+step:560/1670 train_time:52775ms step_avg:94.24ms
+step:561/1670 train_time:52868ms step_avg:94.24ms
+step:562/1670 train_time:52961ms step_avg:94.24ms
+step:563/1670 train_time:53054ms step_avg:94.23ms
+step:564/1670 train_time:53146ms step_avg:94.23ms
+step:565/1670 train_time:53239ms step_avg:94.23ms
+step:566/1670 train_time:53332ms step_avg:94.23ms
+step:567/1670 train_time:53424ms step_avg:94.22ms
+step:568/1670 train_time:53520ms step_avg:94.23ms
+step:569/1670 train_time:53619ms step_avg:94.23ms
+step:570/1670 train_time:53713ms step_avg:94.23ms
+step:571/1670 train_time:53806ms step_avg:94.23ms
+step:572/1670 train_time:53899ms step_avg:94.23ms
+step:573/1670 train_time:53993ms step_avg:94.23ms
+step:574/1670 train_time:54085ms step_avg:94.23ms
+step:575/1670 train_time:54178ms step_avg:94.22ms
+step:576/1670 train_time:54270ms step_avg:94.22ms
+step:577/1670 train_time:54363ms step_avg:94.22ms
+step:578/1670 train_time:54457ms step_avg:94.22ms
+step:579/1670 train_time:54553ms step_avg:94.22ms
+step:580/1670 train_time:54647ms step_avg:94.22ms
+step:581/1670 train_time:54742ms step_avg:94.22ms
+step:582/1670 train_time:54836ms step_avg:94.22ms
+step:583/1670 train_time:54929ms step_avg:94.22ms
+step:584/1670 train_time:55023ms step_avg:94.22ms
+step:585/1670 train_time:55116ms step_avg:94.22ms
+step:586/1670 train_time:55209ms step_avg:94.21ms
+step:587/1670 train_time:55302ms step_avg:94.21ms
+step:588/1670 train_time:55395ms step_avg:94.21ms
+step:589/1670 train_time:55490ms step_avg:94.21ms
+step:590/1670 train_time:55585ms step_avg:94.21ms
+step:591/1670 train_time:55679ms step_avg:94.21ms
+step:592/1670 train_time:55773ms step_avg:94.21ms
+step:593/1670 train_time:55866ms step_avg:94.21ms
+step:594/1670 train_time:55961ms step_avg:94.21ms
+step:595/1670 train_time:56055ms step_avg:94.21ms
+step:596/1670 train_time:56148ms step_avg:94.21ms
+step:597/1670 train_time:56242ms step_avg:94.21ms
+step:598/1670 train_time:56334ms step_avg:94.20ms
+step:599/1670 train_time:56427ms step_avg:94.20ms
+step:600/1670 train_time:56521ms step_avg:94.20ms
+step:601/1670 train_time:56615ms step_avg:94.20ms
+step:602/1670 train_time:56709ms step_avg:94.20ms
+step:603/1670 train_time:56803ms step_avg:94.20ms
+step:604/1670 train_time:56897ms step_avg:94.20ms
+step:605/1670 train_time:56991ms step_avg:94.20ms
+step:606/1670 train_time:57085ms step_avg:94.20ms
+step:607/1670 train_time:57178ms step_avg:94.20ms
+step:608/1670 train_time:57271ms step_avg:94.20ms
+step:609/1670 train_time:57365ms step_avg:94.19ms
+step:610/1670 train_time:57459ms step_avg:94.19ms
+step:611/1670 train_time:57553ms step_avg:94.20ms
+step:612/1670 train_time:57647ms step_avg:94.19ms
+step:613/1670 train_time:57741ms step_avg:94.19ms
+step:614/1670 train_time:57834ms step_avg:94.19ms
+step:615/1670 train_time:57928ms step_avg:94.19ms
+step:616/1670 train_time:58022ms step_avg:94.19ms
+step:617/1670 train_time:58117ms step_avg:94.19ms
+step:618/1670 train_time:58210ms step_avg:94.19ms
+step:619/1670 train_time:58303ms step_avg:94.19ms
+step:620/1670 train_time:58397ms step_avg:94.19ms
+step:621/1670 train_time:58490ms step_avg:94.19ms
+step:622/1670 train_time:58585ms step_avg:94.19ms
+step:623/1670 train_time:58679ms step_avg:94.19ms
+step:624/1670 train_time:58772ms step_avg:94.19ms
+step:625/1670 train_time:58866ms step_avg:94.19ms
+step:625/1670 val_loss:3.6129 train_time:58958ms step_avg:94.33ms
+step:626/1670 train_time:58984ms step_avg:94.22ms
+step:627/1670 train_time:59066ms step_avg:94.20ms
+step:628/1670 train_time:59167ms step_avg:94.21ms
+step:629/1670 train_time:59261ms step_avg:94.21ms
+step:630/1670 train_time:59353ms step_avg:94.21ms
+step:631/1670 train_time:59446ms step_avg:94.21ms
+step:632/1670 train_time:59539ms step_avg:94.21ms
+step:633/1670 train_time:59631ms step_avg:94.20ms
+step:634/1670 train_time:59724ms step_avg:94.20ms
+step:635/1670 train_time:59816ms step_avg:94.20ms
+step:636/1670 train_time:59909ms step_avg:94.20ms
+step:637/1670 train_time:60004ms step_avg:94.20ms
+step:638/1670 train_time:60100ms step_avg:94.20ms
+step:639/1670 train_time:60550ms step_avg:94.76ms
+step:640/1670 train_time:60621ms step_avg:94.72ms
+step:641/1670 train_time:60714ms step_avg:94.72ms
+step:642/1670 train_time:60806ms step_avg:94.71ms
+step:643/1670 train_time:60898ms step_avg:94.71ms
+step:644/1670 train_time:60991ms step_avg:94.71ms
+step:645/1670 train_time:61083ms step_avg:94.70ms
+step:646/1670 train_time:61176ms step_avg:94.70ms
+step:647/1670 train_time:61269ms step_avg:94.70ms
+step:648/1670 train_time:61361ms step_avg:94.69ms
+step:649/1670 train_time:61457ms step_avg:94.70ms
+step:650/1670 train_time:61555ms step_avg:94.70ms
+step:651/1670 train_time:61650ms step_avg:94.70ms
+step:652/1670 train_time:61745ms step_avg:94.70ms
+step:653/1670 train_time:61838ms step_avg:94.70ms
+step:654/1670 train_time:61931ms step_avg:94.70ms
+step:655/1670 train_time:62024ms step_avg:94.69ms
+step:656/1670 train_time:62116ms step_avg:94.69ms
+step:657/1670 train_time:62209ms step_avg:94.69ms
+step:658/1670 train_time:62302ms step_avg:94.68ms
+step:659/1670 train_time:62396ms step_avg:94.68ms
+step:660/1670 train_time:62490ms step_avg:94.68ms
+step:661/1670 train_time:62585ms step_avg:94.68ms
+step:662/1670 train_time:62680ms step_avg:94.68ms
+step:663/1670 train_time:62774ms step_avg:94.68ms
+step:664/1670 train_time:62868ms step_avg:94.68ms
+step:665/1670 train_time:62961ms step_avg:94.68ms
+step:666/1670 train_time:63054ms step_avg:94.68ms
+step:667/1670 train_time:63148ms step_avg:94.67ms
+step:668/1670 train_time:63241ms step_avg:94.67ms
+step:669/1670 train_time:63334ms step_avg:94.67ms
+step:670/1670 train_time:63427ms step_avg:94.67ms
+step:671/1670 train_time:63520ms step_avg:94.67ms
+step:672/1670 train_time:63615ms step_avg:94.67ms
+step:673/1670 train_time:63710ms step_avg:94.67ms
+step:674/1670 train_time:63804ms step_avg:94.66ms
+step:675/1670 train_time:63897ms step_avg:94.66ms
+step:676/1670 train_time:63990ms step_avg:94.66ms
+step:677/1670 train_time:64084ms step_avg:94.66ms
+step:678/1670 train_time:64177ms step_avg:94.66ms
+step:679/1670 train_time:64270ms step_avg:94.65ms
+step:680/1670 train_time:64363ms step_avg:94.65ms
+step:681/1670 train_time:64457ms step_avg:94.65ms
+step:682/1670 train_time:64552ms step_avg:94.65ms
+step:683/1670 train_time:64646ms step_avg:94.65ms
+step:684/1670 train_time:64740ms step_avg:94.65ms
+step:685/1670 train_time:64833ms step_avg:94.65ms
+step:686/1670 train_time:64927ms step_avg:94.65ms
+step:687/1670 train_time:65020ms step_avg:94.64ms
+step:688/1670 train_time:65114ms step_avg:94.64ms
+step:689/1670 train_time:65207ms step_avg:94.64ms
+step:690/1670 train_time:65301ms step_avg:94.64ms
+step:691/1670 train_time:65394ms step_avg:94.64ms
+step:692/1670 train_time:65488ms step_avg:94.64ms
+step:693/1670 train_time:65582ms step_avg:94.63ms
+step:694/1670 train_time:65675ms step_avg:94.63ms
+step:695/1670 train_time:65769ms step_avg:94.63ms
+step:696/1670 train_time:65863ms step_avg:94.63ms
+step:697/1670 train_time:65957ms step_avg:94.63ms
+step:698/1670 train_time:66051ms step_avg:94.63ms
+step:699/1670 train_time:66144ms step_avg:94.63ms
+step:700/1670 train_time:66237ms step_avg:94.62ms
+step:701/1670 train_time:66330ms step_avg:94.62ms
+step:702/1670 train_time:66424ms step_avg:94.62ms
+step:703/1670 train_time:66518ms step_avg:94.62ms
+step:704/1670 train_time:66613ms step_avg:94.62ms
+step:705/1670 train_time:66707ms step_avg:94.62ms
+step:706/1670 train_time:66799ms step_avg:94.62ms
+step:707/1670 train_time:66893ms step_avg:94.62ms
+step:708/1670 train_time:66987ms step_avg:94.61ms
+step:709/1670 train_time:67080ms step_avg:94.61ms
+step:710/1670 train_time:67174ms step_avg:94.61ms
+step:711/1670 train_time:67267ms step_avg:94.61ms
+step:712/1670 train_time:67360ms step_avg:94.61ms
+step:713/1670 train_time:67453ms step_avg:94.61ms
+step:714/1670 train_time:67547ms step_avg:94.60ms
+step:715/1670 train_time:67641ms step_avg:94.60ms
+step:716/1670 train_time:67734ms step_avg:94.60ms
+step:717/1670 train_time:67829ms step_avg:94.60ms
+step:718/1670 train_time:67923ms step_avg:94.60ms
+step:719/1670 train_time:68017ms step_avg:94.60ms
+step:720/1670 train_time:68110ms step_avg:94.60ms
+step:721/1670 train_time:68203ms step_avg:94.60ms
+step:722/1670 train_time:68297ms step_avg:94.59ms
+step:723/1670 train_time:68390ms step_avg:94.59ms
+step:724/1670 train_time:68485ms step_avg:94.59ms
+step:725/1670 train_time:68578ms step_avg:94.59ms
+step:726/1670 train_time:68672ms step_avg:94.59ms
+step:727/1670 train_time:68766ms step_avg:94.59ms
+step:728/1670 train_time:68859ms step_avg:94.59ms
+step:729/1670 train_time:68954ms step_avg:94.59ms
+step:730/1670 train_time:69048ms step_avg:94.59ms
+step:731/1670 train_time:69141ms step_avg:94.58ms
+step:732/1670 train_time:69235ms step_avg:94.58ms
+step:733/1670 train_time:69328ms step_avg:94.58ms
+step:734/1670 train_time:69422ms step_avg:94.58ms
+step:735/1670 train_time:69515ms step_avg:94.58ms
+step:736/1670 train_time:69609ms step_avg:94.58ms
+step:737/1670 train_time:69702ms step_avg:94.58ms
+step:738/1670 train_time:69797ms step_avg:94.58ms
+step:739/1670 train_time:69890ms step_avg:94.57ms
+step:740/1670 train_time:69983ms step_avg:94.57ms
+step:741/1670 train_time:70077ms step_avg:94.57ms
+step:742/1670 train_time:70171ms step_avg:94.57ms
+step:743/1670 train_time:70264ms step_avg:94.57ms
+step:744/1670 train_time:70357ms step_avg:94.57ms
+step:745/1670 train_time:70452ms step_avg:94.57ms
+step:746/1670 train_time:70546ms step_avg:94.57ms
+step:747/1670 train_time:70639ms step_avg:94.56ms
+step:748/1670 train_time:70733ms step_avg:94.56ms
+step:749/1670 train_time:70827ms step_avg:94.56ms
+step:750/1670 train_time:70920ms step_avg:94.56ms
+step:750/1670 val_loss:3.5634 train_time:71012ms step_avg:94.68ms
+step:751/1670 train_time:71037ms step_avg:94.59ms
+step:752/1670 train_time:71114ms step_avg:94.57ms
+step:753/1670 train_time:71216ms step_avg:94.58ms
+step:754/1670 train_time:71311ms step_avg:94.58ms
+step:755/1670 train_time:71404ms step_avg:94.57ms
+step:756/1670 train_time:71496ms step_avg:94.57ms
+step:757/1670 train_time:71589ms step_avg:94.57ms
+step:758/1670 train_time:71681ms step_avg:94.57ms
+step:759/1670 train_time:71774ms step_avg:94.56ms
+step:760/1670 train_time:71866ms step_avg:94.56ms
+step:761/1670 train_time:71959ms step_avg:94.56ms
+step:762/1670 train_time:72054ms step_avg:94.56ms
+step:763/1670 train_time:72150ms step_avg:94.56ms
+step:764/1670 train_time:72246ms step_avg:94.56ms
+step:765/1670 train_time:72340ms step_avg:94.56ms
+step:766/1670 train_time:72433ms step_avg:94.56ms
+step:767/1670 train_time:72526ms step_avg:94.56ms
+step:768/1670 train_time:72618ms step_avg:94.55ms
+step:769/1670 train_time:72711ms step_avg:94.55ms
+step:770/1670 train_time:72804ms step_avg:94.55ms
+step:771/1670 train_time:72897ms step_avg:94.55ms
+step:772/1670 train_time:72990ms step_avg:94.55ms
+step:773/1670 train_time:73085ms step_avg:94.55ms
+step:774/1670 train_time:73180ms step_avg:94.55ms
+step:775/1670 train_time:73275ms step_avg:94.55ms
+step:776/1670 train_time:73369ms step_avg:94.55ms
+step:777/1670 train_time:73462ms step_avg:94.55ms
+step:778/1670 train_time:73556ms step_avg:94.55ms
+step:779/1670 train_time:73649ms step_avg:94.54ms
+step:780/1670 train_time:73742ms step_avg:94.54ms
+step:781/1670 train_time:73835ms step_avg:94.54ms
+step:782/1670 train_time:73928ms step_avg:94.54ms
+step:783/1670 train_time:74022ms step_avg:94.54ms
+step:784/1670 train_time:74117ms step_avg:94.54ms
+step:785/1670 train_time:74213ms step_avg:94.54ms
+step:786/1670 train_time:74306ms step_avg:94.54ms
+step:787/1670 train_time:74400ms step_avg:94.54ms
+step:788/1670 train_time:74494ms step_avg:94.53ms
+step:789/1670 train_time:74588ms step_avg:94.53ms
+step:790/1670 train_time:74681ms step_avg:94.53ms
+step:791/1670 train_time:74774ms step_avg:94.53ms
+step:792/1670 train_time:74868ms step_avg:94.53ms
+step:793/1670 train_time:74962ms step_avg:94.53ms
+step:794/1670 train_time:75056ms step_avg:94.53ms
+step:795/1670 train_time:75150ms step_avg:94.53ms
+step:796/1670 train_time:75244ms step_avg:94.53ms
+step:797/1670 train_time:75338ms step_avg:94.53ms
+step:798/1670 train_time:75432ms step_avg:94.53ms
+step:799/1670 train_time:75526ms step_avg:94.53ms
+step:800/1670 train_time:75619ms step_avg:94.52ms
+step:801/1670 train_time:75713ms step_avg:94.52ms
+step:802/1670 train_time:75806ms step_avg:94.52ms
+step:803/1670 train_time:75899ms step_avg:94.52ms
+step:804/1670 train_time:75993ms step_avg:94.52ms
+step:805/1670 train_time:76087ms step_avg:94.52ms
+step:806/1670 train_time:76180ms step_avg:94.52ms
+step:807/1670 train_time:76274ms step_avg:94.52ms
+step:808/1670 train_time:76368ms step_avg:94.51ms
+step:809/1670 train_time:76462ms step_avg:94.51ms
+step:810/1670 train_time:76555ms step_avg:94.51ms
+step:811/1670 train_time:76648ms step_avg:94.51ms
+step:812/1670 train_time:76742ms step_avg:94.51ms
+step:813/1670 train_time:76835ms step_avg:94.51ms
+step:814/1670 train_time:76929ms step_avg:94.51ms
+step:815/1670 train_time:77022ms step_avg:94.51ms
+step:816/1670 train_time:77116ms step_avg:94.50ms
+step:817/1670 train_time:77210ms step_avg:94.50ms
+step:818/1670 train_time:77303ms step_avg:94.50ms
+step:819/1670 train_time:77397ms step_avg:94.50ms
+step:820/1670 train_time:77492ms step_avg:94.50ms
+step:821/1670 train_time:77585ms step_avg:94.50ms
+step:822/1670 train_time:77679ms step_avg:94.50ms
+step:823/1670 train_time:77772ms step_avg:94.50ms
+step:824/1670 train_time:77866ms step_avg:94.50ms
+step:825/1670 train_time:77959ms step_avg:94.50ms
+step:826/1670 train_time:78053ms step_avg:94.49ms
+step:827/1670 train_time:78146ms step_avg:94.49ms
+step:828/1670 train_time:78239ms step_avg:94.49ms
+step:829/1670 train_time:78333ms step_avg:94.49ms
+step:830/1670 train_time:78426ms step_avg:94.49ms
+step:831/1670 train_time:78519ms step_avg:94.49ms
+step:832/1670 train_time:78614ms step_avg:94.49ms
+step:833/1670 train_time:78707ms step_avg:94.49ms
+step:834/1670 train_time:78801ms step_avg:94.49ms
+step:835/1670 train_time:78894ms step_avg:94.48ms
+step:836/1670 train_time:78988ms step_avg:94.48ms
+step:837/1670 train_time:79082ms step_avg:94.48ms
+step:838/1670 train_time:79175ms step_avg:94.48ms
+step:839/1670 train_time:79269ms step_avg:94.48ms
+step:840/1670 train_time:79364ms step_avg:94.48ms
+step:841/1670 train_time:79457ms step_avg:94.48ms
+step:842/1670 train_time:79550ms step_avg:94.48ms
+step:843/1670 train_time:79644ms step_avg:94.48ms
+step:844/1670 train_time:79737ms step_avg:94.48ms
+step:845/1670 train_time:79832ms step_avg:94.48ms
+step:846/1670 train_time:79926ms step_avg:94.48ms
+step:847/1670 train_time:80020ms step_avg:94.47ms
+step:848/1670 train_time:80113ms step_avg:94.47ms
+step:849/1670 train_time:80207ms step_avg:94.47ms
+step:850/1670 train_time:80300ms step_avg:94.47ms
+step:851/1670 train_time:80657ms step_avg:94.78ms
+step:852/1670 train_time:80823ms step_avg:94.86ms
+step:853/1670 train_time:80915ms step_avg:94.86ms
+step:854/1670 train_time:81007ms step_avg:94.86ms
+step:855/1670 train_time:81099ms step_avg:94.85ms
+step:856/1670 train_time:81192ms step_avg:94.85ms
+step:857/1670 train_time:81285ms step_avg:94.85ms
+step:858/1670 train_time:81377ms step_avg:94.85ms
+step:859/1670 train_time:81470ms step_avg:94.84ms
+step:860/1670 train_time:81563ms step_avg:94.84ms
+step:861/1670 train_time:81656ms step_avg:94.84ms
+step:862/1670 train_time:81753ms step_avg:94.84ms
+step:863/1670 train_time:81851ms step_avg:94.84ms
+step:864/1670 train_time:81946ms step_avg:94.84ms
+step:865/1670 train_time:82039ms step_avg:94.84ms
+step:866/1670 train_time:82132ms step_avg:94.84ms
+step:867/1670 train_time:82225ms step_avg:94.84ms
+step:868/1670 train_time:82317ms step_avg:94.84ms
+step:869/1670 train_time:82411ms step_avg:94.83ms
+step:870/1670 train_time:82503ms step_avg:94.83ms
+step:871/1670 train_time:82596ms step_avg:94.83ms
+step:872/1670 train_time:82691ms step_avg:94.83ms
+step:873/1670 train_time:82787ms step_avg:94.83ms
+step:874/1670 train_time:82882ms step_avg:94.83ms
+step:875/1670 train_time:82976ms step_avg:94.83ms
+step:875/1670 val_loss:3.5179 train_time:83069ms step_avg:94.94ms
+step:876/1670 train_time:83094ms step_avg:94.86ms
+step:877/1670 train_time:83171ms step_avg:94.84ms
+step:878/1670 train_time:83269ms step_avg:94.84ms
+step:879/1670 train_time:83364ms step_avg:94.84ms
+step:880/1670 train_time:83457ms step_avg:94.84ms
+step:881/1670 train_time:83550ms step_avg:94.83ms
+step:882/1670 train_time:83642ms step_avg:94.83ms
+step:883/1670 train_time:83735ms step_avg:94.83ms
+step:884/1670 train_time:83828ms step_avg:94.83ms
+step:885/1670 train_time:83920ms step_avg:94.83ms
+step:886/1670 train_time:84013ms step_avg:94.82ms
+step:887/1670 train_time:84110ms step_avg:94.83ms
+step:888/1670 train_time:84207ms step_avg:94.83ms
+step:889/1670 train_time:84303ms step_avg:94.83ms
+step:890/1670 train_time:84396ms step_avg:94.83ms
+step:891/1670 train_time:84490ms step_avg:94.83ms
+step:892/1670 train_time:84583ms step_avg:94.82ms
+step:893/1670 train_time:84676ms step_avg:94.82ms
+step:894/1670 train_time:84769ms step_avg:94.82ms
+step:895/1670 train_time:84862ms step_avg:94.82ms
+step:896/1670 train_time:84956ms step_avg:94.82ms
+step:897/1670 train_time:85050ms step_avg:94.82ms
+step:898/1670 train_time:85146ms step_avg:94.82ms
+step:899/1670 train_time:85240ms step_avg:94.82ms
+step:900/1670 train_time:85334ms step_avg:94.82ms
+step:901/1670 train_time:85428ms step_avg:94.81ms
+step:902/1670 train_time:85521ms step_avg:94.81ms
+step:903/1670 train_time:85614ms step_avg:94.81ms
+step:904/1670 train_time:85707ms step_avg:94.81ms
+step:905/1670 train_time:85800ms step_avg:94.81ms
+step:906/1670 train_time:85892ms step_avg:94.80ms
+step:907/1670 train_time:85985ms step_avg:94.80ms
+step:908/1670 train_time:86079ms step_avg:94.80ms
+step:909/1670 train_time:86173ms step_avg:94.80ms
+step:910/1670 train_time:86268ms step_avg:94.80ms
+step:911/1670 train_time:86363ms step_avg:94.80ms
+step:912/1670 train_time:86457ms step_avg:94.80ms
+step:913/1670 train_time:86551ms step_avg:94.80ms
+step:914/1670 train_time:86644ms step_avg:94.80ms
+step:915/1670 train_time:86737ms step_avg:94.80ms
+step:916/1670 train_time:86831ms step_avg:94.79ms
+step:917/1670 train_time:86925ms step_avg:94.79ms
+step:918/1670 train_time:87019ms step_avg:94.79ms
+step:919/1670 train_time:87113ms step_avg:94.79ms
+step:920/1670 train_time:87206ms step_avg:94.79ms
+step:921/1670 train_time:87300ms step_avg:94.79ms
+step:922/1670 train_time:87395ms step_avg:94.79ms
+step:923/1670 train_time:87489ms step_avg:94.79ms
+step:924/1670 train_time:87582ms step_avg:94.79ms
+step:925/1670 train_time:87676ms step_avg:94.78ms
+step:926/1670 train_time:87769ms step_avg:94.78ms
+step:927/1670 train_time:87863ms step_avg:94.78ms
+step:928/1670 train_time:87956ms step_avg:94.78ms
+step:929/1670 train_time:88051ms step_avg:94.78ms
+step:930/1670 train_time:88145ms step_avg:94.78ms
+step:931/1670 train_time:88238ms step_avg:94.78ms
+step:932/1670 train_time:88332ms step_avg:94.78ms
+step:933/1670 train_time:88427ms step_avg:94.78ms
+step:934/1670 train_time:88520ms step_avg:94.78ms
+step:935/1670 train_time:88613ms step_avg:94.77ms
+step:936/1670 train_time:88707ms step_avg:94.77ms
+step:937/1670 train_time:88800ms step_avg:94.77ms
+step:938/1670 train_time:88893ms step_avg:94.77ms
+step:939/1670 train_time:88987ms step_avg:94.77ms
+step:940/1670 train_time:89081ms step_avg:94.77ms
+step:941/1670 train_time:89175ms step_avg:94.77ms
+step:942/1670 train_time:89268ms step_avg:94.76ms
+step:943/1670 train_time:89362ms step_avg:94.76ms
+step:944/1670 train_time:89456ms step_avg:94.76ms
+step:945/1670 train_time:89549ms step_avg:94.76ms
+step:946/1670 train_time:89643ms step_avg:94.76ms
+step:947/1670 train_time:89736ms step_avg:94.76ms
+step:948/1670 train_time:89830ms step_avg:94.76ms
+step:949/1670 train_time:89925ms step_avg:94.76ms
+step:950/1670 train_time:90019ms step_avg:94.76ms
+step:951/1670 train_time:90112ms step_avg:94.75ms
+step:952/1670 train_time:90205ms step_avg:94.75ms
+step:953/1670 train_time:90300ms step_avg:94.75ms
+step:954/1670 train_time:90393ms step_avg:94.75ms
+step:955/1670 train_time:90487ms step_avg:94.75ms
+step:956/1670 train_time:90580ms step_avg:94.75ms
+step:957/1670 train_time:90673ms step_avg:94.75ms
+step:958/1670 train_time:90767ms step_avg:94.75ms
+step:959/1670 train_time:90861ms step_avg:94.75ms
+step:960/1670 train_time:90955ms step_avg:94.74ms
+step:961/1670 train_time:91049ms step_avg:94.74ms
+step:962/1670 train_time:91143ms step_avg:94.74ms
+step:963/1670 train_time:91236ms step_avg:94.74ms
+step:964/1670 train_time:91331ms step_avg:94.74ms
+step:965/1670 train_time:91424ms step_avg:94.74ms
+step:966/1670 train_time:91518ms step_avg:94.74ms
+step:967/1670 train_time:91612ms step_avg:94.74ms
+step:968/1670 train_time:91705ms step_avg:94.74ms
+step:969/1670 train_time:91798ms step_avg:94.73ms
+step:970/1670 train_time:91892ms step_avg:94.73ms
+step:971/1670 train_time:91986ms step_avg:94.73ms
+step:972/1670 train_time:92080ms step_avg:94.73ms
+step:973/1670 train_time:92173ms step_avg:94.73ms
+step:974/1670 train_time:92266ms step_avg:94.73ms
+step:975/1670 train_time:92359ms step_avg:94.73ms
+step:976/1670 train_time:92454ms step_avg:94.73ms
+step:977/1670 train_time:92548ms step_avg:94.73ms
+step:978/1670 train_time:92641ms step_avg:94.73ms
+step:979/1670 train_time:92735ms step_avg:94.72ms
+step:980/1670 train_time:92829ms step_avg:94.72ms
+step:981/1670 train_time:92923ms step_avg:94.72ms
+step:982/1670 train_time:93017ms step_avg:94.72ms
+step:983/1670 train_time:93111ms step_avg:94.72ms
+step:984/1670 train_time:93204ms step_avg:94.72ms
+step:985/1670 train_time:93297ms step_avg:94.72ms
+step:986/1670 train_time:93390ms step_avg:94.72ms
+step:987/1670 train_time:93484ms step_avg:94.72ms
+step:988/1670 train_time:93577ms step_avg:94.71ms
+step:989/1670 train_time:93671ms step_avg:94.71ms
+step:990/1670 train_time:93764ms step_avg:94.71ms
+step:991/1670 train_time:93859ms step_avg:94.71ms
+step:992/1670 train_time:93953ms step_avg:94.71ms
+step:993/1670 train_time:94048ms step_avg:94.71ms
+step:994/1670 train_time:94142ms step_avg:94.71ms
+step:995/1670 train_time:94235ms step_avg:94.71ms
+step:996/1670 train_time:94329ms step_avg:94.71ms
+step:997/1670 train_time:94423ms step_avg:94.71ms
+step:998/1670 train_time:94516ms step_avg:94.71ms
+step:999/1670 train_time:94609ms step_avg:94.70ms
+step:1000/1670 train_time:94703ms step_avg:94.70ms
+step:1000/1670 val_loss:3.4686 train_time:94795ms step_avg:94.79ms
+step:1001/1670 train_time:94820ms step_avg:94.73ms
+step:1002/1670 train_time:94896ms step_avg:94.71ms
+step:1003/1670 train_time:94996ms step_avg:94.71ms
+step:1004/1670 train_time:95091ms step_avg:94.71ms
+step:1005/1670 train_time:95183ms step_avg:94.71ms
+step:1006/1670 train_time:95276ms step_avg:94.71ms
+step:1007/1670 train_time:95368ms step_avg:94.71ms
+step:1008/1670 train_time:95461ms step_avg:94.70ms
+step:1009/1670 train_time:95553ms step_avg:94.70ms
+step:1010/1670 train_time:95646ms step_avg:94.70ms
+step:1011/1670 train_time:95740ms step_avg:94.70ms
+step:1012/1670 train_time:95834ms step_avg:94.70ms
+step:1013/1670 train_time:95929ms step_avg:94.70ms
+step:1014/1670 train_time:96025ms step_avg:94.70ms
+step:1015/1670 train_time:96119ms step_avg:94.70ms
+step:1016/1670 train_time:96212ms step_avg:94.70ms
+step:1017/1670 train_time:96305ms step_avg:94.70ms
+step:1018/1670 train_time:96398ms step_avg:94.69ms
+step:1019/1670 train_time:96490ms step_avg:94.69ms
+step:1020/1670 train_time:96584ms step_avg:94.69ms
+step:1021/1670 train_time:96678ms step_avg:94.69ms
+step:1022/1670 train_time:96772ms step_avg:94.69ms
+step:1023/1670 train_time:96866ms step_avg:94.69ms
+step:1024/1670 train_time:96963ms step_avg:94.69ms
+step:1025/1670 train_time:97057ms step_avg:94.69ms
+step:1026/1670 train_time:97151ms step_avg:94.69ms
+step:1027/1670 train_time:97244ms step_avg:94.69ms
+step:1028/1670 train_time:97338ms step_avg:94.69ms
+step:1029/1670 train_time:97431ms step_avg:94.69ms
+step:1030/1670 train_time:97524ms step_avg:94.68ms
+step:1031/1670 train_time:97617ms step_avg:94.68ms
+step:1032/1670 train_time:97710ms step_avg:94.68ms
+step:1033/1670 train_time:97805ms step_avg:94.68ms
+step:1034/1670 train_time:97900ms step_avg:94.68ms
+step:1035/1670 train_time:97994ms step_avg:94.68ms
+step:1036/1670 train_time:98088ms step_avg:94.68ms
+step:1037/1670 train_time:98182ms step_avg:94.68ms
+step:1038/1670 train_time:98276ms step_avg:94.68ms
+step:1039/1670 train_time:98368ms step_avg:94.68ms
+step:1040/1670 train_time:98462ms step_avg:94.67ms
+step:1041/1670 train_time:98555ms step_avg:94.67ms
+step:1042/1670 train_time:98648ms step_avg:94.67ms
+step:1043/1670 train_time:98742ms step_avg:94.67ms
+step:1044/1670 train_time:98835ms step_avg:94.67ms
+step:1045/1670 train_time:98929ms step_avg:94.67ms
+step:1046/1670 train_time:99024ms step_avg:94.67ms
+step:1047/1670 train_time:99117ms step_avg:94.67ms
+step:1048/1670 train_time:99210ms step_avg:94.67ms
+step:1049/1670 train_time:99304ms step_avg:94.67ms
+step:1050/1670 train_time:99398ms step_avg:94.66ms
+step:1051/1670 train_time:99491ms step_avg:94.66ms
+step:1052/1670 train_time:99585ms step_avg:94.66ms
+step:1053/1670 train_time:99678ms step_avg:94.66ms
+step:1054/1670 train_time:99771ms step_avg:94.66ms
+step:1055/1670 train_time:99864ms step_avg:94.66ms
+step:1056/1670 train_time:99959ms step_avg:94.66ms
+step:1057/1670 train_time:100054ms step_avg:94.66ms
+step:1058/1670 train_time:100147ms step_avg:94.66ms
+step:1059/1670 train_time:100241ms step_avg:94.66ms
+step:1060/1670 train_time:100335ms step_avg:94.66ms
+step:1061/1670 train_time:100428ms step_avg:94.65ms
+step:1062/1670 train_time:100757ms step_avg:94.87ms
+step:1063/1670 train_time:100947ms step_avg:94.96ms
+step:1064/1670 train_time:101038ms step_avg:94.96ms
+step:1065/1670 train_time:101131ms step_avg:94.96ms
+step:1066/1670 train_time:101223ms step_avg:94.96ms
+step:1067/1670 train_time:101315ms step_avg:94.95ms
+step:1068/1670 train_time:101408ms step_avg:94.95ms
+step:1069/1670 train_time:101500ms step_avg:94.95ms
+step:1070/1670 train_time:101592ms step_avg:94.95ms
+step:1071/1670 train_time:101685ms step_avg:94.94ms
+step:1072/1670 train_time:101782ms step_avg:94.95ms
+step:1073/1670 train_time:101880ms step_avg:94.95ms
+step:1074/1670 train_time:101975ms step_avg:94.95ms
+step:1075/1670 train_time:102068ms step_avg:94.95ms
+step:1076/1670 train_time:102162ms step_avg:94.95ms
+step:1077/1670 train_time:102255ms step_avg:94.94ms
+step:1078/1670 train_time:102348ms step_avg:94.94ms
+step:1079/1670 train_time:102441ms step_avg:94.94ms
+step:1080/1670 train_time:102534ms step_avg:94.94ms
+step:1081/1670 train_time:102627ms step_avg:94.94ms
+step:1082/1670 train_time:102720ms step_avg:94.94ms
+step:1083/1670 train_time:102814ms step_avg:94.93ms
+step:1084/1670 train_time:102909ms step_avg:94.93ms
+step:1085/1670 train_time:103003ms step_avg:94.93ms
+step:1086/1670 train_time:103097ms step_avg:94.93ms
+step:1087/1670 train_time:103191ms step_avg:94.93ms
+step:1088/1670 train_time:103284ms step_avg:94.93ms
+step:1089/1670 train_time:103378ms step_avg:94.93ms
+step:1090/1670 train_time:103470ms step_avg:94.93ms
+step:1091/1670 train_time:103564ms step_avg:94.93ms
+step:1092/1670 train_time:103657ms step_avg:94.92ms
+step:1093/1670 train_time:103750ms step_avg:94.92ms
+step:1094/1670 train_time:103845ms step_avg:94.92ms
+step:1095/1670 train_time:103939ms step_avg:94.92ms
+step:1096/1670 train_time:104032ms step_avg:94.92ms
+step:1097/1670 train_time:104126ms step_avg:94.92ms
+step:1098/1670 train_time:104221ms step_avg:94.92ms
+step:1099/1670 train_time:104314ms step_avg:94.92ms
+step:1100/1670 train_time:104407ms step_avg:94.92ms
+step:1101/1670 train_time:104500ms step_avg:94.91ms
+step:1102/1670 train_time:104593ms step_avg:94.91ms
+step:1103/1670 train_time:104686ms step_avg:94.91ms
+step:1104/1670 train_time:104780ms step_avg:94.91ms
+step:1105/1670 train_time:104874ms step_avg:94.91ms
+step:1106/1670 train_time:104967ms step_avg:94.91ms
+step:1107/1670 train_time:105062ms step_avg:94.91ms
+step:1108/1670 train_time:105156ms step_avg:94.91ms
+step:1109/1670 train_time:105250ms step_avg:94.90ms
+step:1110/1670 train_time:105344ms step_avg:94.90ms
+step:1111/1670 train_time:105437ms step_avg:94.90ms
+step:1112/1670 train_time:105531ms step_avg:94.90ms
+step:1113/1670 train_time:105624ms step_avg:94.90ms
+step:1114/1670 train_time:105718ms step_avg:94.90ms
+step:1115/1670 train_time:105922ms step_avg:95.00ms
+step:1116/1670 train_time:105990ms step_avg:94.97ms
+step:1117/1670 train_time:106084ms step_avg:94.97ms
+step:1118/1670 train_time:106177ms step_avg:94.97ms
+step:1119/1670 train_time:106269ms step_avg:94.97ms
+step:1120/1670 train_time:106363ms step_avg:94.97ms
+step:1121/1670 train_time:106456ms step_avg:94.96ms
+step:1122/1670 train_time:106549ms step_avg:94.96ms
+step:1123/1670 train_time:106642ms step_avg:94.96ms
+step:1124/1670 train_time:106735ms step_avg:94.96ms
+step:1125/1670 train_time:106833ms step_avg:94.96ms
+step:1125/1670 val_loss:3.4160 train_time:106930ms step_avg:95.05ms
+step:1126/1670 train_time:106955ms step_avg:94.99ms
+step:1127/1670 train_time:107041ms step_avg:94.98ms
+step:1128/1670 train_time:107142ms step_avg:94.98ms
+step:1129/1670 train_time:107236ms step_avg:94.98ms
+step:1130/1670 train_time:107328ms step_avg:94.98ms
+step:1131/1670 train_time:107422ms step_avg:94.98ms
+step:1132/1670 train_time:107515ms step_avg:94.98ms
+step:1133/1670 train_time:107608ms step_avg:94.98ms
+step:1134/1670 train_time:107702ms step_avg:94.98ms
+step:1135/1670 train_time:107795ms step_avg:94.97ms
+step:1136/1670 train_time:107888ms step_avg:94.97ms
+step:1137/1670 train_time:107986ms step_avg:94.97ms
+step:1138/1670 train_time:108084ms step_avg:94.98ms
+step:1139/1670 train_time:108181ms step_avg:94.98ms
+step:1140/1670 train_time:108276ms step_avg:94.98ms
+step:1141/1670 train_time:108369ms step_avg:94.98ms
+step:1142/1670 train_time:108462ms step_avg:94.98ms
+step:1143/1670 train_time:108556ms step_avg:94.97ms
+step:1144/1670 train_time:108650ms step_avg:94.97ms
+step:1145/1670 train_time:108743ms step_avg:94.97ms
+step:1146/1670 train_time:108836ms step_avg:94.97ms
+step:1147/1670 train_time:108930ms step_avg:94.97ms
+step:1148/1670 train_time:109026ms step_avg:94.97ms
+step:1149/1670 train_time:109122ms step_avg:94.97ms
+step:1150/1670 train_time:109219ms step_avg:94.97ms
+step:1151/1670 train_time:109313ms step_avg:94.97ms
+step:1152/1670 train_time:109407ms step_avg:94.97ms
+step:1153/1670 train_time:109501ms step_avg:94.97ms
+step:1154/1670 train_time:109595ms step_avg:94.97ms
+step:1155/1670 train_time:109689ms step_avg:94.97ms
+step:1156/1670 train_time:109783ms step_avg:94.97ms
+step:1157/1670 train_time:109876ms step_avg:94.97ms
+step:1158/1670 train_time:109971ms step_avg:94.97ms
+step:1159/1670 train_time:110066ms step_avg:94.97ms
+step:1160/1670 train_time:110162ms step_avg:94.97ms
+step:1161/1670 train_time:110258ms step_avg:94.97ms
+step:1162/1670 train_time:110353ms step_avg:94.97ms
+step:1163/1670 train_time:110447ms step_avg:94.97ms
+step:1164/1670 train_time:110541ms step_avg:94.97ms
+step:1165/1670 train_time:110635ms step_avg:94.97ms
+step:1166/1670 train_time:110728ms step_avg:94.96ms
+step:1167/1670 train_time:110822ms step_avg:94.96ms
+step:1168/1670 train_time:110916ms step_avg:94.96ms
+step:1169/1670 train_time:111009ms step_avg:94.96ms
+step:1170/1670 train_time:111105ms step_avg:94.96ms
+step:1171/1670 train_time:111200ms step_avg:94.96ms
+step:1172/1670 train_time:111296ms step_avg:94.96ms
+step:1173/1670 train_time:111391ms step_avg:94.96ms
+step:1174/1670 train_time:111485ms step_avg:94.96ms
+step:1175/1670 train_time:111579ms step_avg:94.96ms
+step:1176/1670 train_time:111673ms step_avg:94.96ms
+step:1177/1670 train_time:111767ms step_avg:94.96ms
+step:1178/1670 train_time:111861ms step_avg:94.96ms
+step:1179/1670 train_time:111955ms step_avg:94.96ms
+step:1180/1670 train_time:112050ms step_avg:94.96ms
+step:1181/1670 train_time:112145ms step_avg:94.96ms
+step:1182/1670 train_time:112240ms step_avg:94.96ms
+step:1183/1670 train_time:112336ms step_avg:94.96ms
+step:1184/1670 train_time:112430ms step_avg:94.96ms
+step:1185/1670 train_time:112524ms step_avg:94.96ms
+step:1186/1670 train_time:112619ms step_avg:94.96ms
+step:1187/1670 train_time:112713ms step_avg:94.96ms
+step:1188/1670 train_time:112808ms step_avg:94.96ms
+step:1189/1670 train_time:112902ms step_avg:94.96ms
+step:1190/1670 train_time:112996ms step_avg:94.95ms
+step:1191/1670 train_time:113091ms step_avg:94.95ms
+step:1192/1670 train_time:113186ms step_avg:94.95ms
+step:1193/1670 train_time:113282ms step_avg:94.96ms
+step:1194/1670 train_time:113377ms step_avg:94.96ms
+step:1195/1670 train_time:113471ms step_avg:94.95ms
+step:1196/1670 train_time:113565ms step_avg:94.95ms
+step:1197/1670 train_time:113660ms step_avg:94.95ms
+step:1198/1670 train_time:113755ms step_avg:94.95ms
+step:1199/1670 train_time:113848ms step_avg:94.95ms
+step:1200/1670 train_time:113943ms step_avg:94.95ms
+step:1201/1670 train_time:114037ms step_avg:94.95ms
+step:1202/1670 train_time:114131ms step_avg:94.95ms
+step:1203/1670 train_time:114226ms step_avg:94.95ms
+step:1204/1670 train_time:114321ms step_avg:94.95ms
+step:1205/1670 train_time:114416ms step_avg:94.95ms
+step:1206/1670 train_time:114509ms step_avg:94.95ms
+step:1207/1670 train_time:114604ms step_avg:94.95ms
+step:1208/1670 train_time:114698ms step_avg:94.95ms
+step:1209/1670 train_time:114793ms step_avg:94.95ms
+step:1210/1670 train_time:114887ms step_avg:94.95ms
+step:1211/1670 train_time:114981ms step_avg:94.95ms
+step:1212/1670 train_time:115075ms step_avg:94.95ms
+step:1213/1670 train_time:115169ms step_avg:94.95ms
+step:1214/1670 train_time:115264ms step_avg:94.95ms
+step:1215/1670 train_time:115359ms step_avg:94.95ms
+step:1216/1670 train_time:115453ms step_avg:94.95ms
+step:1217/1670 train_time:115548ms step_avg:94.94ms
+step:1218/1670 train_time:115642ms step_avg:94.94ms
+step:1219/1670 train_time:115736ms step_avg:94.94ms
+step:1220/1670 train_time:115830ms step_avg:94.94ms
+step:1221/1670 train_time:115924ms step_avg:94.94ms
+step:1222/1670 train_time:116018ms step_avg:94.94ms
+step:1223/1670 train_time:116113ms step_avg:94.94ms
+step:1224/1670 train_time:116207ms step_avg:94.94ms
+step:1225/1670 train_time:116302ms step_avg:94.94ms
+step:1226/1670 train_time:116397ms step_avg:94.94ms
+step:1227/1670 train_time:116490ms step_avg:94.94ms
+step:1228/1670 train_time:116585ms step_avg:94.94ms
+step:1229/1670 train_time:116679ms step_avg:94.94ms
+step:1230/1670 train_time:116774ms step_avg:94.94ms
+step:1231/1670 train_time:116867ms step_avg:94.94ms
+step:1232/1670 train_time:116962ms step_avg:94.94ms
+step:1233/1670 train_time:117057ms step_avg:94.94ms
+step:1234/1670 train_time:117151ms step_avg:94.94ms
+step:1235/1670 train_time:117246ms step_avg:94.94ms
+step:1236/1670 train_time:117341ms step_avg:94.94ms
+step:1237/1670 train_time:117436ms step_avg:94.94ms
+step:1238/1670 train_time:117530ms step_avg:94.94ms
+step:1239/1670 train_time:117625ms step_avg:94.94ms
+step:1240/1670 train_time:117719ms step_avg:94.93ms
+step:1241/1670 train_time:117813ms step_avg:94.93ms
+step:1242/1670 train_time:117907ms step_avg:94.93ms
+step:1243/1670 train_time:118002ms step_avg:94.93ms
+step:1244/1670 train_time:118096ms step_avg:94.93ms
+step:1245/1670 train_time:118190ms step_avg:94.93ms
+step:1246/1670 train_time:118285ms step_avg:94.93ms
+step:1247/1670 train_time:118379ms step_avg:94.93ms
+step:1248/1670 train_time:118473ms step_avg:94.93ms
+step:1249/1670 train_time:118567ms step_avg:94.93ms
+step:1250/1670 train_time:118662ms step_avg:94.93ms
+step:1250/1670 val_loss:3.3767 train_time:118755ms step_avg:95.00ms
+step:1251/1670 train_time:118781ms step_avg:94.95ms
+step:1252/1670 train_time:118858ms step_avg:94.93ms
+step:1253/1670 train_time:118958ms step_avg:94.94ms
+step:1254/1670 train_time:119053ms step_avg:94.94ms
+step:1255/1670 train_time:119147ms step_avg:94.94ms
+step:1256/1670 train_time:119240ms step_avg:94.94ms
+step:1257/1670 train_time:119334ms step_avg:94.94ms
+step:1258/1670 train_time:119427ms step_avg:94.93ms
+step:1259/1670 train_time:119520ms step_avg:94.93ms
+step:1260/1670 train_time:119613ms step_avg:94.93ms
+step:1261/1670 train_time:119707ms step_avg:94.93ms
+step:1262/1670 train_time:119804ms step_avg:94.93ms
+step:1263/1670 train_time:119900ms step_avg:94.93ms
+step:1264/1670 train_time:119995ms step_avg:94.93ms
+step:1265/1670 train_time:120089ms step_avg:94.93ms
+step:1266/1670 train_time:120185ms step_avg:94.93ms
+step:1267/1670 train_time:120279ms step_avg:94.93ms
+step:1268/1670 train_time:120373ms step_avg:94.93ms
+step:1269/1670 train_time:120467ms step_avg:94.93ms
+step:1270/1670 train_time:120561ms step_avg:94.93ms
+step:1271/1670 train_time:120654ms step_avg:94.93ms
+step:1272/1670 train_time:120749ms step_avg:94.93ms
+step:1273/1670 train_time:120846ms step_avg:94.93ms
+step:1274/1670 train_time:121303ms step_avg:95.21ms
+step:1275/1670 train_time:121372ms step_avg:95.19ms
+step:1276/1670 train_time:121465ms step_avg:95.19ms
+step:1277/1670 train_time:121559ms step_avg:95.19ms
+step:1278/1670 train_time:121652ms step_avg:95.19ms
+step:1279/1670 train_time:121745ms step_avg:95.19ms
+step:1280/1670 train_time:121839ms step_avg:95.19ms
+step:1281/1670 train_time:121932ms step_avg:95.19ms
+step:1282/1670 train_time:122025ms step_avg:95.18ms
+step:1283/1670 train_time:122118ms step_avg:95.18ms
+step:1284/1670 train_time:122215ms step_avg:95.18ms
+step:1285/1670 train_time:122312ms step_avg:95.18ms
+step:1286/1670 train_time:122408ms step_avg:95.18ms
+step:1287/1670 train_time:122502ms step_avg:95.18ms
+step:1288/1670 train_time:122596ms step_avg:95.18ms
+step:1289/1670 train_time:122691ms step_avg:95.18ms
+step:1290/1670 train_time:122786ms step_avg:95.18ms
+step:1291/1670 train_time:122879ms step_avg:95.18ms
+step:1292/1670 train_time:122973ms step_avg:95.18ms
+step:1293/1670 train_time:123067ms step_avg:95.18ms
+step:1294/1670 train_time:123162ms step_avg:95.18ms
+step:1295/1670 train_time:123259ms step_avg:95.18ms
+step:1296/1670 train_time:123353ms step_avg:95.18ms
+step:1297/1670 train_time:123449ms step_avg:95.18ms
+step:1298/1670 train_time:123544ms step_avg:95.18ms
+step:1299/1670 train_time:123639ms step_avg:95.18ms
+step:1300/1670 train_time:123734ms step_avg:95.18ms
+step:1301/1670 train_time:123828ms step_avg:95.18ms
+step:1302/1670 train_time:123921ms step_avg:95.18ms
+step:1303/1670 train_time:124015ms step_avg:95.18ms
+step:1304/1670 train_time:124109ms step_avg:95.18ms
+step:1305/1670 train_time:124205ms step_avg:95.18ms
+step:1306/1670 train_time:124299ms step_avg:95.18ms
+step:1307/1670 train_time:124393ms step_avg:95.17ms
+step:1308/1670 train_time:124488ms step_avg:95.17ms
+step:1309/1670 train_time:124583ms step_avg:95.17ms
+step:1310/1670 train_time:124679ms step_avg:95.17ms
+step:1311/1670 train_time:124772ms step_avg:95.17ms
+step:1312/1670 train_time:124867ms step_avg:95.17ms
+step:1313/1670 train_time:124960ms step_avg:95.17ms
+step:1314/1670 train_time:125054ms step_avg:95.17ms
+step:1315/1670 train_time:125149ms step_avg:95.17ms
+step:1316/1670 train_time:125245ms step_avg:95.17ms
+step:1317/1670 train_time:125339ms step_avg:95.17ms
+step:1318/1670 train_time:125434ms step_avg:95.17ms
+step:1319/1670 train_time:125529ms step_avg:95.17ms
+step:1320/1670 train_time:125624ms step_avg:95.17ms
+step:1321/1670 train_time:125718ms step_avg:95.17ms
+step:1322/1670 train_time:125812ms step_avg:95.17ms
+step:1323/1670 train_time:125907ms step_avg:95.17ms
+step:1324/1670 train_time:126001ms step_avg:95.17ms
+step:1325/1670 train_time:126095ms step_avg:95.17ms
+step:1326/1670 train_time:126190ms step_avg:95.17ms
+step:1327/1670 train_time:126285ms step_avg:95.17ms
+step:1328/1670 train_time:126380ms step_avg:95.17ms
+step:1329/1670 train_time:126475ms step_avg:95.17ms
+step:1330/1670 train_time:126570ms step_avg:95.17ms
+step:1331/1670 train_time:126665ms step_avg:95.17ms
+step:1332/1670 train_time:126760ms step_avg:95.17ms
+step:1333/1670 train_time:126854ms step_avg:95.16ms
+step:1334/1670 train_time:126949ms step_avg:95.16ms
+step:1335/1670 train_time:127043ms step_avg:95.16ms
+step:1336/1670 train_time:127138ms step_avg:95.16ms
+step:1337/1670 train_time:127232ms step_avg:95.16ms
+step:1338/1670 train_time:127326ms step_avg:95.16ms
+step:1339/1670 train_time:127421ms step_avg:95.16ms
+step:1340/1670 train_time:127515ms step_avg:95.16ms
+step:1341/1670 train_time:127611ms step_avg:95.16ms
+step:1342/1670 train_time:127705ms step_avg:95.16ms
+step:1343/1670 train_time:127800ms step_avg:95.16ms
+step:1344/1670 train_time:127894ms step_avg:95.16ms
+step:1345/1670 train_time:127988ms step_avg:95.16ms
+step:1346/1670 train_time:128084ms step_avg:95.16ms
+step:1347/1670 train_time:128178ms step_avg:95.16ms
+step:1348/1670 train_time:128272ms step_avg:95.16ms
+step:1349/1670 train_time:128366ms step_avg:95.16ms
+step:1350/1670 train_time:128461ms step_avg:95.16ms
+step:1351/1670 train_time:128555ms step_avg:95.16ms
+step:1352/1670 train_time:128650ms step_avg:95.16ms
+step:1353/1670 train_time:128746ms step_avg:95.16ms
+step:1354/1670 train_time:128840ms step_avg:95.16ms
+step:1355/1670 train_time:128933ms step_avg:95.15ms
+step:1356/1670 train_time:129028ms step_avg:95.15ms
+step:1357/1670 train_time:129123ms step_avg:95.15ms
+step:1358/1670 train_time:129217ms step_avg:95.15ms
+step:1359/1670 train_time:129311ms step_avg:95.15ms
+step:1360/1670 train_time:129405ms step_avg:95.15ms
+step:1361/1670 train_time:129499ms step_avg:95.15ms
+step:1362/1670 train_time:129593ms step_avg:95.15ms
+step:1363/1670 train_time:129688ms step_avg:95.15ms
+step:1364/1670 train_time:129783ms step_avg:95.15ms
+step:1365/1670 train_time:129877ms step_avg:95.15ms
+step:1366/1670 train_time:129971ms step_avg:95.15ms
+step:1367/1670 train_time:130066ms step_avg:95.15ms
+step:1368/1670 train_time:130161ms step_avg:95.15ms
+step:1369/1670 train_time:130255ms step_avg:95.15ms
+step:1370/1670 train_time:130350ms step_avg:95.15ms
+step:1371/1670 train_time:130445ms step_avg:95.15ms
+step:1372/1670 train_time:130539ms step_avg:95.14ms
+step:1373/1670 train_time:130633ms step_avg:95.14ms
+step:1374/1670 train_time:130728ms step_avg:95.14ms
+step:1375/1670 train_time:130823ms step_avg:95.14ms
+step:1375/1670 val_loss:3.3425 train_time:130915ms step_avg:95.21ms
+step:1376/1670 train_time:130940ms step_avg:95.16ms
+step:1377/1670 train_time:131019ms step_avg:95.15ms
+step:1378/1670 train_time:131117ms step_avg:95.15ms
+step:1379/1670 train_time:131210ms step_avg:95.15ms
+step:1380/1670 train_time:131303ms step_avg:95.15ms
+step:1381/1670 train_time:131397ms step_avg:95.15ms
+step:1382/1670 train_time:131491ms step_avg:95.15ms
+step:1383/1670 train_time:131586ms step_avg:95.15ms
+step:1384/1670 train_time:131680ms step_avg:95.14ms
+step:1385/1670 train_time:131773ms step_avg:95.14ms
+step:1386/1670 train_time:131868ms step_avg:95.14ms
+step:1387/1670 train_time:131964ms step_avg:95.14ms
+step:1388/1670 train_time:132061ms step_avg:95.14ms
+step:1389/1670 train_time:132157ms step_avg:95.15ms
+step:1390/1670 train_time:132252ms step_avg:95.15ms
+step:1391/1670 train_time:132345ms step_avg:95.14ms
+step:1392/1670 train_time:132439ms step_avg:95.14ms
+step:1393/1670 train_time:132533ms step_avg:95.14ms
+step:1394/1670 train_time:132626ms step_avg:95.14ms
+step:1395/1670 train_time:132720ms step_avg:95.14ms
+step:1396/1670 train_time:132814ms step_avg:95.14ms
+step:1397/1670 train_time:132910ms step_avg:95.14ms
+step:1398/1670 train_time:133005ms step_avg:95.14ms
+step:1399/1670 train_time:133101ms step_avg:95.14ms
+step:1400/1670 train_time:133197ms step_avg:95.14ms
+step:1401/1670 train_time:133291ms step_avg:95.14ms
+step:1402/1670 train_time:133384ms step_avg:95.14ms
+step:1403/1670 train_time:133480ms step_avg:95.14ms
+step:1404/1670 train_time:133574ms step_avg:95.14ms
+step:1405/1670 train_time:133667ms step_avg:95.14ms
+step:1406/1670 train_time:133762ms step_avg:95.14ms
+step:1407/1670 train_time:133856ms step_avg:95.14ms
+step:1408/1670 train_time:133951ms step_avg:95.14ms
+step:1409/1670 train_time:134046ms step_avg:95.14ms
+step:1410/1670 train_time:134142ms step_avg:95.14ms
+step:1411/1670 train_time:134237ms step_avg:95.14ms
+step:1412/1670 train_time:134331ms step_avg:95.13ms
+step:1413/1670 train_time:134425ms step_avg:95.13ms
+step:1414/1670 train_time:134519ms step_avg:95.13ms
+step:1415/1670 train_time:134614ms step_avg:95.13ms
+step:1416/1670 train_time:134708ms step_avg:95.13ms
+step:1417/1670 train_time:134802ms step_avg:95.13ms
+step:1418/1670 train_time:134897ms step_avg:95.13ms
+step:1419/1670 train_time:134993ms step_avg:95.13ms
+step:1420/1670 train_time:135088ms step_avg:95.13ms
+step:1421/1670 train_time:135183ms step_avg:95.13ms
+step:1422/1670 train_time:135277ms step_avg:95.13ms
+step:1423/1670 train_time:135371ms step_avg:95.13ms
+step:1424/1670 train_time:135465ms step_avg:95.13ms
+step:1425/1670 train_time:135560ms step_avg:95.13ms
+step:1426/1670 train_time:135655ms step_avg:95.13ms
+step:1427/1670 train_time:135749ms step_avg:95.13ms
+step:1428/1670 train_time:135843ms step_avg:95.13ms
+step:1429/1670 train_time:135939ms step_avg:95.13ms
+step:1430/1670 train_time:136035ms step_avg:95.13ms
+step:1431/1670 train_time:136130ms step_avg:95.13ms
+step:1432/1670 train_time:136224ms step_avg:95.13ms
+step:1433/1670 train_time:136319ms step_avg:95.13ms
+step:1434/1670 train_time:136413ms step_avg:95.13ms
+step:1435/1670 train_time:136508ms step_avg:95.13ms
+step:1436/1670 train_time:136602ms step_avg:95.13ms
+step:1437/1670 train_time:136697ms step_avg:95.13ms
+step:1438/1670 train_time:136791ms step_avg:95.13ms
+step:1439/1670 train_time:136886ms step_avg:95.13ms
+step:1440/1670 train_time:136981ms step_avg:95.13ms
+step:1441/1670 train_time:137077ms step_avg:95.13ms
+step:1442/1670 train_time:137172ms step_avg:95.13ms
+step:1443/1670 train_time:137265ms step_avg:95.13ms
+step:1444/1670 train_time:137360ms step_avg:95.12ms
+step:1445/1670 train_time:137454ms step_avg:95.12ms
+step:1446/1670 train_time:137548ms step_avg:95.12ms
+step:1447/1670 train_time:137643ms step_avg:95.12ms
+step:1448/1670 train_time:137738ms step_avg:95.12ms
+step:1449/1670 train_time:137832ms step_avg:95.12ms
+step:1450/1670 train_time:137927ms step_avg:95.12ms
+step:1451/1670 train_time:138022ms step_avg:95.12ms
+step:1452/1670 train_time:138118ms step_avg:95.12ms
+step:1453/1670 train_time:138213ms step_avg:95.12ms
+step:1454/1670 train_time:138307ms step_avg:95.12ms
+step:1455/1670 train_time:138401ms step_avg:95.12ms
+step:1456/1670 train_time:138496ms step_avg:95.12ms
+step:1457/1670 train_time:138591ms step_avg:95.12ms
+step:1458/1670 train_time:138684ms step_avg:95.12ms
+step:1459/1670 train_time:138779ms step_avg:95.12ms
+step:1460/1670 train_time:138874ms step_avg:95.12ms
+step:1461/1670 train_time:138968ms step_avg:95.12ms
+step:1462/1670 train_time:139063ms step_avg:95.12ms
+step:1463/1670 train_time:139158ms step_avg:95.12ms
+step:1464/1670 train_time:139253ms step_avg:95.12ms
+step:1465/1670 train_time:139347ms step_avg:95.12ms
+step:1466/1670 train_time:139442ms step_avg:95.12ms
+step:1467/1670 train_time:139537ms step_avg:95.12ms
+step:1468/1670 train_time:139632ms step_avg:95.12ms
+step:1469/1670 train_time:139725ms step_avg:95.12ms
+step:1470/1670 train_time:139820ms step_avg:95.12ms
+step:1471/1670 train_time:139914ms step_avg:95.12ms
+step:1472/1670 train_time:140009ms step_avg:95.11ms
+step:1473/1670 train_time:140104ms step_avg:95.11ms
+step:1474/1670 train_time:140199ms step_avg:95.11ms
+step:1475/1670 train_time:140294ms step_avg:95.11ms
+step:1476/1670 train_time:140388ms step_avg:95.11ms
+step:1477/1670 train_time:140482ms step_avg:95.11ms
+step:1478/1670 train_time:140577ms step_avg:95.11ms
+step:1479/1670 train_time:140671ms step_avg:95.11ms
+step:1480/1670 train_time:140765ms step_avg:95.11ms
+step:1481/1670 train_time:140860ms step_avg:95.11ms
+step:1482/1670 train_time:140955ms step_avg:95.11ms
+step:1483/1670 train_time:141049ms step_avg:95.11ms
+step:1484/1670 train_time:141144ms step_avg:95.11ms
+step:1485/1670 train_time:141581ms step_avg:95.34ms
+step:1486/1670 train_time:141650ms step_avg:95.32ms
+step:1487/1670 train_time:141742ms step_avg:95.32ms
+step:1488/1670 train_time:141835ms step_avg:95.32ms
+step:1489/1670 train_time:141929ms step_avg:95.32ms
+step:1490/1670 train_time:142022ms step_avg:95.32ms
+step:1491/1670 train_time:142116ms step_avg:95.32ms
+step:1492/1670 train_time:142209ms step_avg:95.31ms
+step:1493/1670 train_time:142303ms step_avg:95.31ms
+step:1494/1670 train_time:142397ms step_avg:95.31ms
+step:1495/1670 train_time:142494ms step_avg:95.31ms
+step:1496/1670 train_time:142590ms step_avg:95.31ms
+step:1497/1670 train_time:142687ms step_avg:95.32ms
+step:1498/1670 train_time:142783ms step_avg:95.32ms
+step:1499/1670 train_time:142877ms step_avg:95.31ms
+step:1500/1670 train_time:142970ms step_avg:95.31ms
+step:1500/1670 val_loss:3.3124 train_time:143061ms step_avg:95.37ms
+step:1501/1670 train_time:143087ms step_avg:95.33ms
+step:1502/1670 train_time:143166ms step_avg:95.32ms
+step:1503/1670 train_time:143269ms step_avg:95.32ms
+step:1504/1670 train_time:143364ms step_avg:95.32ms
+step:1505/1670 train_time:143458ms step_avg:95.32ms
+step:1506/1670 train_time:143551ms step_avg:95.32ms
+step:1507/1670 train_time:143644ms step_avg:95.32ms
+step:1508/1670 train_time:143737ms step_avg:95.32ms +step:1509/1670 train_time:143830ms step_avg:95.31ms +step:1510/1670 train_time:143924ms step_avg:95.31ms +step:1511/1670 train_time:144018ms step_avg:95.31ms +step:1512/1670 train_time:144113ms step_avg:95.31ms +step:1513/1670 train_time:144211ms step_avg:95.31ms +step:1514/1670 train_time:144307ms step_avg:95.32ms +step:1515/1670 train_time:144405ms step_avg:95.32ms +step:1516/1670 train_time:144499ms step_avg:95.32ms +step:1517/1670 train_time:144592ms step_avg:95.31ms +step:1518/1670 train_time:144685ms step_avg:95.31ms +step:1519/1670 train_time:144778ms step_avg:95.31ms +step:1520/1670 train_time:144872ms step_avg:95.31ms +step:1521/1670 train_time:144966ms step_avg:95.31ms +step:1522/1670 train_time:145060ms step_avg:95.31ms +step:1523/1670 train_time:145154ms step_avg:95.31ms +step:1524/1670 train_time:145250ms step_avg:95.31ms +step:1525/1670 train_time:145347ms step_avg:95.31ms +step:1526/1670 train_time:145444ms step_avg:95.31ms +step:1527/1670 train_time:145538ms step_avg:95.31ms +step:1528/1670 train_time:145631ms step_avg:95.31ms +step:1529/1670 train_time:145725ms step_avg:95.31ms +step:1530/1670 train_time:145819ms step_avg:95.31ms +step:1531/1670 train_time:145912ms step_avg:95.31ms +step:1532/1670 train_time:146006ms step_avg:95.30ms +step:1533/1670 train_time:146101ms step_avg:95.30ms +step:1534/1670 train_time:146196ms step_avg:95.30ms +step:1535/1670 train_time:146291ms step_avg:95.30ms +step:1536/1670 train_time:146387ms step_avg:95.30ms +step:1537/1670 train_time:146482ms step_avg:95.30ms +step:1538/1670 train_time:146576ms step_avg:95.30ms +step:1539/1670 train_time:146670ms step_avg:95.30ms +step:1540/1670 train_time:146765ms step_avg:95.30ms +step:1541/1670 train_time:146859ms step_avg:95.30ms +step:1542/1670 train_time:146953ms step_avg:95.30ms +step:1543/1670 train_time:147047ms step_avg:95.30ms +step:1544/1670 train_time:147142ms step_avg:95.30ms +step:1545/1670 train_time:147237ms step_avg:95.30ms +step:1546/1670 train_time:147332ms step_avg:95.30ms +step:1547/1670 train_time:147427ms step_avg:95.30ms +step:1548/1670 train_time:147523ms step_avg:95.30ms +step:1549/1670 train_time:147617ms step_avg:95.30ms +step:1550/1670 train_time:147710ms step_avg:95.30ms +step:1551/1670 train_time:147805ms step_avg:95.30ms +step:1552/1670 train_time:147900ms step_avg:95.30ms +step:1553/1670 train_time:147993ms step_avg:95.30ms +step:1554/1670 train_time:148088ms step_avg:95.29ms +step:1555/1670 train_time:148183ms step_avg:95.29ms +step:1556/1670 train_time:148278ms step_avg:95.29ms +step:1557/1670 train_time:148373ms step_avg:95.29ms +step:1558/1670 train_time:148469ms step_avg:95.29ms +step:1559/1670 train_time:148564ms step_avg:95.29ms +step:1560/1670 train_time:148658ms step_avg:95.29ms +step:1561/1670 train_time:148752ms step_avg:95.29ms +step:1562/1670 train_time:148847ms step_avg:95.29ms +step:1563/1670 train_time:148940ms step_avg:95.29ms +step:1564/1670 train_time:149034ms step_avg:95.29ms +step:1565/1670 train_time:149129ms step_avg:95.29ms +step:1566/1670 train_time:149224ms step_avg:95.29ms +step:1567/1670 train_time:149319ms step_avg:95.29ms +step:1568/1670 train_time:149413ms step_avg:95.29ms +step:1569/1670 train_time:149508ms step_avg:95.29ms +step:1570/1670 train_time:149603ms step_avg:95.29ms +step:1571/1670 train_time:149697ms step_avg:95.29ms +step:1572/1670 train_time:149791ms step_avg:95.29ms +step:1573/1670 train_time:149885ms step_avg:95.29ms +step:1574/1670 train_time:149980ms step_avg:95.29ms 
+step:1575/1670 train_time:150074ms step_avg:95.29ms +step:1576/1670 train_time:150170ms step_avg:95.29ms +step:1577/1670 train_time:150265ms step_avg:95.29ms +step:1578/1670 train_time:150360ms step_avg:95.28ms +step:1579/1670 train_time:150454ms step_avg:95.28ms +step:1580/1670 train_time:150549ms step_avg:95.28ms +step:1581/1670 train_time:150644ms step_avg:95.28ms +step:1582/1670 train_time:150739ms step_avg:95.28ms +step:1583/1670 train_time:150832ms step_avg:95.28ms +step:1584/1670 train_time:150927ms step_avg:95.28ms +step:1585/1670 train_time:151021ms step_avg:95.28ms +step:1586/1670 train_time:151115ms step_avg:95.28ms +step:1587/1670 train_time:151210ms step_avg:95.28ms +step:1588/1670 train_time:151305ms step_avg:95.28ms +step:1589/1670 train_time:151399ms step_avg:95.28ms +step:1590/1670 train_time:151493ms step_avg:95.28ms +step:1591/1670 train_time:151589ms step_avg:95.28ms +step:1592/1670 train_time:151683ms step_avg:95.28ms +step:1593/1670 train_time:151778ms step_avg:95.28ms +step:1594/1670 train_time:151872ms step_avg:95.28ms +step:1595/1670 train_time:151967ms step_avg:95.28ms +step:1596/1670 train_time:152061ms step_avg:95.28ms +step:1597/1670 train_time:152155ms step_avg:95.28ms +step:1598/1670 train_time:152249ms step_avg:95.28ms +step:1599/1670 train_time:152344ms step_avg:95.27ms +step:1600/1670 train_time:152439ms step_avg:95.27ms +step:1601/1670 train_time:152533ms step_avg:95.27ms +step:1602/1670 train_time:152627ms step_avg:95.27ms +step:1603/1670 train_time:152722ms step_avg:95.27ms +step:1604/1670 train_time:152817ms step_avg:95.27ms +step:1605/1670 train_time:152911ms step_avg:95.27ms +step:1606/1670 train_time:153006ms step_avg:95.27ms +step:1607/1670 train_time:153101ms step_avg:95.27ms +step:1608/1670 train_time:153195ms step_avg:95.27ms +step:1609/1670 train_time:153290ms step_avg:95.27ms +step:1610/1670 train_time:153384ms step_avg:95.27ms +step:1611/1670 train_time:153478ms step_avg:95.27ms +step:1612/1670 train_time:153572ms step_avg:95.27ms +step:1613/1670 train_time:153667ms step_avg:95.27ms +step:1614/1670 train_time:153762ms step_avg:95.27ms +step:1615/1670 train_time:153857ms step_avg:95.27ms +step:1616/1670 train_time:153951ms step_avg:95.27ms +step:1617/1670 train_time:154046ms step_avg:95.27ms +step:1618/1670 train_time:154140ms step_avg:95.27ms +step:1619/1670 train_time:154234ms step_avg:95.27ms +step:1620/1670 train_time:154329ms step_avg:95.26ms +step:1621/1670 train_time:154424ms step_avg:95.26ms +step:1622/1670 train_time:154519ms step_avg:95.26ms +step:1623/1670 train_time:154613ms step_avg:95.26ms +step:1624/1670 train_time:154708ms step_avg:95.26ms +step:1625/1670 train_time:154803ms step_avg:95.26ms +step:1625/1670 val_loss:3.2875 train_time:154895ms step_avg:95.32ms +step:1626/1670 train_time:154920ms step_avg:95.28ms +step:1627/1670 train_time:154998ms step_avg:95.27ms +step:1628/1670 train_time:155103ms step_avg:95.27ms +step:1629/1670 train_time:155199ms step_avg:95.27ms +step:1630/1670 train_time:155294ms step_avg:95.27ms +step:1631/1670 train_time:155389ms step_avg:95.27ms +step:1632/1670 train_time:155482ms step_avg:95.27ms +step:1633/1670 train_time:155576ms step_avg:95.27ms +step:1634/1670 train_time:155669ms step_avg:95.27ms +step:1635/1670 train_time:155762ms step_avg:95.27ms +step:1636/1670 train_time:155856ms step_avg:95.27ms +step:1637/1670 train_time:155950ms step_avg:95.27ms +step:1638/1670 train_time:156047ms step_avg:95.27ms +step:1639/1670 train_time:156145ms step_avg:95.27ms +step:1640/1670 train_time:156240ms 
step_avg:95.27ms +step:1641/1670 train_time:156335ms step_avg:95.27ms +step:1642/1670 train_time:156430ms step_avg:95.27ms +step:1643/1670 train_time:156524ms step_avg:95.27ms +step:1644/1670 train_time:156618ms step_avg:95.27ms +step:1645/1670 train_time:156712ms step_avg:95.27ms +step:1646/1670 train_time:156805ms step_avg:95.26ms +step:1647/1670 train_time:156899ms step_avg:95.26ms +step:1648/1670 train_time:156996ms step_avg:95.26ms +step:1649/1670 train_time:157094ms step_avg:95.27ms +step:1650/1670 train_time:157190ms step_avg:95.27ms +step:1651/1670 train_time:157285ms step_avg:95.27ms +step:1652/1670 train_time:157380ms step_avg:95.27ms +step:1653/1670 train_time:157474ms step_avg:95.27ms +step:1654/1670 train_time:157568ms step_avg:95.26ms +step:1655/1670 train_time:157662ms step_avg:95.26ms +step:1656/1670 train_time:157756ms step_avg:95.26ms +step:1657/1670 train_time:157849ms step_avg:95.26ms +step:1658/1670 train_time:157944ms step_avg:95.26ms +step:1659/1670 train_time:158040ms step_avg:95.26ms +step:1660/1670 train_time:158135ms step_avg:95.26ms +step:1661/1670 train_time:158230ms step_avg:95.26ms +step:1662/1670 train_time:158324ms step_avg:95.26ms +step:1663/1670 train_time:158419ms step_avg:95.26ms +step:1664/1670 train_time:158513ms step_avg:95.26ms +step:1665/1670 train_time:158607ms step_avg:95.26ms +step:1666/1670 train_time:158702ms step_avg:95.26ms +step:1667/1670 train_time:158796ms step_avg:95.26ms +step:1668/1670 train_time:158891ms step_avg:95.26ms +step:1669/1670 train_time:158985ms step_avg:95.26ms +step:1670/1670 train_time:159082ms step_avg:95.26ms +step:1670/1670 val_loss:3.2785 train_time:159259ms step_avg:95.36ms +peak memory allocated: 32470 MiB reserved: 47536 MiB diff --git a/records/091025_Yarn/132fe599-bc5a-4237-ad14-ee33cbbd5fc0.txt b/records/091025_Yarn/132fe599-bc5a-4237-ad14-ee33cbbd5fc0.txt new file mode 100644 index 000000000..41ab4a052 --- /dev/null +++ b/records/091025_Yarn/132fe599-bc5a-4237-ad14-ee33cbbd5fc0.txt @@ -0,0 +1,2863 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math + +from dataclasses import dataclass +from functools import lru_cache +from itertools import accumulate +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + 
scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, 
c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = 
tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
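+        # A rough sketch of the scheme below (descriptive comment, not executed
+        # logic): params within a group share one shape, and rank r "owns"
+        # params[i] with i % world_size == r. Each rank then
+        #   1. reduce-scatters gradients, ending up with the cross-rank-averaged
+        #      gradient for exactly the params it owns,
+        #   2. applies momentum and Newton-Schulz locally to only those params,
+        #   3. all-gathers the updated params so every rank sees every weight.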
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
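+                    # note: Adam state is allocated per-slice, so each rank holds
+                    # only 1/world_size of the exp_avg / exp_avg_sq moments for p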
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor): + assert cos.size(0) >= x_BTHD.size(-3) + cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +@dataclass +class AttnArgs: + ve: torch.Tensor + sa_lambdas: torch.Tensor + seqlens: torch.Tensor + bm_size: int + rotary_cos: torch.Tensor + rotary_sin: torch.Tensor + attn_scale: float + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate = CastedLinear(12, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, attn_args): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + # unpack attention args + rotary_cos, rotary_sin = attn_args.rotary_cos, attn_args.rotary_sin + ve, 
sa_lambdas = attn_args.ve, attn_args.sa_lambdas + seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin) + if ve is not None: + v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @ KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = sa_lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None + # skip MLP blocks for first MLP layer by @EmelyanenkoK + self.mlp = MLP(dim) if layer_idx != 0 else None + + def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), attn_args) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + 
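+        # in short: three extra token-embedding tables whose outputs are blended
+        # into the attention values via the learnable sa_lambdas (see
+        # CausalSelfAttention.forward); only the first three and last three
+        # blocks receive a value embedding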
self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + self.max_seq_len = max_seq_len + self.setup_yarn(head_dim) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def setup_yarn(self, head_dim: int): + # store single copy of rotary tensors + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32) + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)]) + t = torch.arange(self.max_seq_len, dtype=torch.float32) + theta = torch.outer(t, angular_freq) + self.rotary_cos = nn.Buffer(theta.cos(), persistent=False) + self.rotary_sin = nn.Buffer(theta.sin(), persistent=False) + self.angular_freq = angular_freq + + # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd + windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate])) + scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])] + # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor)) + self.attn_scales = dict(zip(windows, attn_scales)) + + def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32): + rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi) + scaling_factor = old_window / new_window + interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1) + self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor) + t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device) + theta = torch.outer(t, self.angular_freq) + self.rotary_cos.copy_(theta.cos()) + self.rotary_sin.copy_(theta.sin()) + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + attn_args = AttnArgs( + ve=ve[i], + sa_lambdas=sa_lambdas[i], + seqlens=seqlens, + bm_size=bm_sizes[i], + rotary_cos=self.rotary_cos, + rotary_sin=self.rotary_sin, + attn_scale=self.attn_scales[ws] + ) + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, x0, lambdas[i], attn_args) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of position {cur}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + 
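+# Illustrative use of BOSFinder (hypothetical numbers, not from this run):
+#   finder = BOSFinder(tokens, world_size=8)
+#   starts, ends = finder.next_batch(num_tokens_local=49152, max_seq_len=2048)
+# For each rank r, every slice tokens[starts[r][k]:ends[r][k]] begins at a BOS
+# token and is capped at max_seq_len; a rank's slices concatenate to exactly
+# num_tokens_local + 1 tokens (the +1 covers the input/target shift).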
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1670 # number of iterations to run + cooldown_frac: int = 0.5 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = f"yarn/{uuid.uuid4()}" + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + ws_validate: int = 13 # increase final validation ws @classiclarryd + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + head_dim=128, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + if step == args.num_iterations: + return args.ws_validate + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +ws = get_ws(0) +for step in range(train_steps + 1): + last_step = (step == train_steps) + new_ws = get_ws(step) + if new_ws != ws: + model.apply_yarn(ws, new_ws) + ws=new_ws + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 
1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Thu Sep 11 04:13:30 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | Off | +| N/A 37C P0 120W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | Off | +| N/A 42C P0 125W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | Off | +| N/A 44C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | Off | +| N/A 36C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | Off | +| N/A 36C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | Off | +| N/A 43C P0 126W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | Off | +| N/A 41C P0 125W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | Off | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 67852 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 67853 C /usr/bin/python3 614MiB | +| 0 N/A N/A 67854 C /usr/bin/python3 614MiB | +| 0 N/A N/A 67855 C /usr/bin/python3 614MiB | +| 0 N/A N/A 67856 C /usr/bin/python3 614MiB | +| 0 N/A N/A 67857 C /usr/bin/python3 614MiB | +| 0 N/A N/A 67858 C /usr/bin/python3 614MiB | +| 0 N/A N/A 67859 C /usr/bin/python3 614MiB | +| 1 N/A N/A 67853 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 67854 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 67855 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 67856 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 67857 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 67858 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 67859 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1670 train_time:464ms step_avg:464.33ms +step:2/1670 train_time:488ms step_avg:244.05ms +step:3/1670 train_time:557ms step_avg:185.53ms +step:4/1670 train_time:647ms step_avg:161.72ms +step:5/1670 train_time:738ms step_avg:147.57ms +step:6/1670 train_time:830ms step_avg:138.30ms +step:7/1670 train_time:921ms step_avg:131.60ms 
+step:8/1670 train_time:1013ms step_avg:126.58ms +step:9/1670 train_time:1104ms step_avg:122.67ms +step:10/1670 train_time:1196ms step_avg:119.56ms +step:11/1670 train_time:1288ms step_avg:117.05ms +step:12/1670 train_time:1381ms step_avg:115.12ms +step:13/1670 train_time:1477ms step_avg:113.60ms +step:14/1670 train_time:1571ms step_avg:112.20ms +step:15/1670 train_time:1664ms step_avg:110.91ms +step:16/1670 train_time:1756ms step_avg:109.77ms +step:17/1670 train_time:1849ms step_avg:108.74ms +step:18/1670 train_time:1941ms step_avg:107.86ms +step:19/1670 train_time:2034ms step_avg:107.03ms +step:20/1670 train_time:2126ms step_avg:106.28ms +step:21/1670 train_time:2218ms step_avg:105.61ms +step:22/1670 train_time:2310ms step_avg:105.02ms +step:23/1670 train_time:2403ms step_avg:104.49ms +step:24/1670 train_time:2497ms step_avg:104.05ms +step:25/1670 train_time:2591ms step_avg:103.62ms +step:26/1670 train_time:2684ms step_avg:103.21ms +step:27/1670 train_time:2776ms step_avg:102.81ms +step:28/1670 train_time:2869ms step_avg:102.46ms +step:29/1670 train_time:2962ms step_avg:102.13ms +step:30/1670 train_time:3054ms step_avg:101.81ms +step:31/1670 train_time:3146ms step_avg:101.49ms +step:32/1670 train_time:3238ms step_avg:101.20ms +step:33/1670 train_time:3331ms step_avg:100.93ms +step:34/1670 train_time:3424ms step_avg:100.70ms +step:35/1670 train_time:3517ms step_avg:100.48ms +step:36/1670 train_time:3610ms step_avg:100.28ms +step:37/1670 train_time:3703ms step_avg:100.07ms +step:38/1670 train_time:3796ms step_avg:99.88ms +step:39/1670 train_time:3888ms step_avg:99.70ms +step:40/1670 train_time:3980ms step_avg:99.50ms +step:41/1670 train_time:4073ms step_avg:99.33ms +step:42/1670 train_time:4165ms step_avg:99.17ms +step:43/1670 train_time:4258ms step_avg:99.03ms +step:44/1670 train_time:4351ms step_avg:98.88ms +step:45/1670 train_time:4444ms step_avg:98.75ms +step:46/1670 train_time:4537ms step_avg:98.62ms +step:47/1670 train_time:4629ms step_avg:98.49ms +step:48/1670 train_time:4722ms step_avg:98.38ms +step:49/1670 train_time:4815ms step_avg:98.27ms +step:50/1670 train_time:4908ms step_avg:98.16ms +step:51/1670 train_time:5000ms step_avg:98.05ms +step:52/1670 train_time:5093ms step_avg:97.93ms +step:53/1670 train_time:5185ms step_avg:97.82ms +step:54/1670 train_time:5277ms step_avg:97.73ms +step:55/1670 train_time:5370ms step_avg:97.63ms +step:56/1670 train_time:5462ms step_avg:97.54ms +step:57/1670 train_time:5555ms step_avg:97.45ms +step:58/1670 train_time:5648ms step_avg:97.38ms +step:59/1670 train_time:5742ms step_avg:97.32ms +step:60/1670 train_time:5833ms step_avg:97.22ms +step:61/1670 train_time:5926ms step_avg:97.15ms +step:62/1670 train_time:6019ms step_avg:97.08ms +step:63/1670 train_time:6112ms step_avg:97.01ms +step:64/1670 train_time:6204ms step_avg:96.94ms +step:65/1670 train_time:6297ms step_avg:96.87ms +step:66/1670 train_time:6389ms step_avg:96.80ms +step:67/1670 train_time:6482ms step_avg:96.74ms +step:68/1670 train_time:6575ms step_avg:96.70ms +step:69/1670 train_time:6668ms step_avg:96.63ms +step:70/1670 train_time:6760ms step_avg:96.57ms +step:71/1670 train_time:6853ms step_avg:96.52ms +step:72/1670 train_time:6946ms step_avg:96.47ms +step:73/1670 train_time:7039ms step_avg:96.43ms +step:74/1670 train_time:7132ms step_avg:96.37ms +step:75/1670 train_time:7223ms step_avg:96.31ms +step:76/1670 train_time:7316ms step_avg:96.26ms +step:77/1670 train_time:7408ms step_avg:96.21ms +step:78/1670 train_time:7501ms step_avg:96.17ms +step:79/1670 train_time:7594ms 
step_avg:96.13ms +step:80/1670 train_time:7686ms step_avg:96.08ms +step:81/1670 train_time:7778ms step_avg:96.03ms +step:82/1670 train_time:7871ms step_avg:95.99ms +step:83/1670 train_time:7963ms step_avg:95.94ms +step:84/1670 train_time:8056ms step_avg:95.91ms +step:85/1670 train_time:8149ms step_avg:95.87ms +step:86/1670 train_time:8242ms step_avg:95.84ms +step:87/1670 train_time:8334ms step_avg:95.79ms +step:88/1670 train_time:8427ms step_avg:95.76ms +step:89/1670 train_time:8520ms step_avg:95.73ms +step:90/1670 train_time:8613ms step_avg:95.69ms +step:91/1670 train_time:8705ms step_avg:95.66ms +step:92/1670 train_time:8797ms step_avg:95.62ms +step:93/1670 train_time:8890ms step_avg:95.59ms +step:94/1670 train_time:8982ms step_avg:95.55ms +step:95/1670 train_time:9075ms step_avg:95.52ms +step:96/1670 train_time:9167ms step_avg:95.49ms +step:97/1670 train_time:9259ms step_avg:95.46ms +step:98/1670 train_time:9352ms step_avg:95.43ms +step:99/1670 train_time:9445ms step_avg:95.40ms +step:100/1670 train_time:9537ms step_avg:95.37ms +step:101/1670 train_time:9630ms step_avg:95.35ms +step:102/1670 train_time:9723ms step_avg:95.32ms +step:103/1670 train_time:9816ms step_avg:95.30ms +step:104/1670 train_time:9908ms step_avg:95.27ms +step:105/1670 train_time:10000ms step_avg:95.24ms +step:106/1670 train_time:10094ms step_avg:95.22ms +step:107/1670 train_time:10186ms step_avg:95.20ms +step:108/1670 train_time:10279ms step_avg:95.17ms +step:109/1670 train_time:10371ms step_avg:95.15ms +step:110/1670 train_time:10463ms step_avg:95.12ms +step:111/1670 train_time:10555ms step_avg:95.09ms +step:112/1670 train_time:10648ms step_avg:95.07ms +step:113/1670 train_time:10741ms step_avg:95.05ms +step:114/1670 train_time:10834ms step_avg:95.03ms +step:115/1670 train_time:10926ms step_avg:95.01ms +step:116/1670 train_time:11018ms step_avg:94.98ms +step:117/1670 train_time:11111ms step_avg:94.96ms +step:118/1670 train_time:11204ms step_avg:94.95ms +step:119/1670 train_time:11296ms step_avg:94.93ms +step:120/1670 train_time:11388ms step_avg:94.90ms +step:121/1670 train_time:11481ms step_avg:94.88ms +step:122/1670 train_time:11573ms step_avg:94.86ms +step:123/1670 train_time:11665ms step_avg:94.84ms +step:124/1670 train_time:11758ms step_avg:94.83ms +step:125/1670 train_time:11852ms step_avg:94.81ms +step:125/1670 val_loss:4.2892 train_time:11942ms step_avg:95.53ms +step:126/1670 train_time:11968ms step_avg:94.99ms +step:127/1670 train_time:12040ms step_avg:94.81ms +step:128/1670 train_time:12145ms step_avg:94.89ms +step:129/1670 train_time:12240ms step_avg:94.88ms +step:130/1670 train_time:12332ms step_avg:94.86ms +step:131/1670 train_time:12424ms step_avg:94.84ms +step:132/1670 train_time:12516ms step_avg:94.82ms +step:133/1670 train_time:12608ms step_avg:94.79ms +step:134/1670 train_time:12699ms step_avg:94.77ms +step:135/1670 train_time:12791ms step_avg:94.74ms +step:136/1670 train_time:12882ms step_avg:94.72ms +step:137/1670 train_time:12974ms step_avg:94.70ms +step:138/1670 train_time:13067ms step_avg:94.69ms +step:139/1670 train_time:13161ms step_avg:94.69ms +step:140/1670 train_time:13256ms step_avg:94.69ms +step:141/1670 train_time:13349ms step_avg:94.67ms +step:142/1670 train_time:13442ms step_avg:94.66ms +step:143/1670 train_time:13534ms step_avg:94.64ms +step:144/1670 train_time:13626ms step_avg:94.63ms +step:145/1670 train_time:13719ms step_avg:94.61ms +step:146/1670 train_time:13811ms step_avg:94.59ms +step:147/1670 train_time:13903ms step_avg:94.58ms +step:148/1670 train_time:13995ms 
step_avg:94.56ms +step:149/1670 train_time:14089ms step_avg:94.56ms +step:150/1670 train_time:14183ms step_avg:94.55ms +step:151/1670 train_time:14276ms step_avg:94.55ms +step:152/1670 train_time:14369ms step_avg:94.53ms +step:153/1670 train_time:14462ms step_avg:94.52ms +step:154/1670 train_time:14555ms step_avg:94.51ms +step:155/1670 train_time:14648ms step_avg:94.50ms +step:156/1670 train_time:14740ms step_avg:94.49ms +step:157/1670 train_time:14832ms step_avg:94.47ms +step:158/1670 train_time:14924ms step_avg:94.46ms +step:159/1670 train_time:15016ms step_avg:94.44ms +step:160/1670 train_time:15110ms step_avg:94.44ms +step:161/1670 train_time:15203ms step_avg:94.43ms +step:162/1670 train_time:15296ms step_avg:94.42ms +step:163/1670 train_time:15389ms step_avg:94.41ms +step:164/1670 train_time:15481ms step_avg:94.40ms +step:165/1670 train_time:15574ms step_avg:94.39ms +step:166/1670 train_time:15666ms step_avg:94.37ms +step:167/1670 train_time:15758ms step_avg:94.36ms +step:168/1670 train_time:15850ms step_avg:94.35ms +step:169/1670 train_time:15942ms step_avg:94.33ms +step:170/1670 train_time:16035ms step_avg:94.32ms +step:171/1670 train_time:16128ms step_avg:94.31ms +step:172/1670 train_time:16220ms step_avg:94.30ms +step:173/1670 train_time:16314ms step_avg:94.30ms +step:174/1670 train_time:16407ms step_avg:94.29ms +step:175/1670 train_time:16500ms step_avg:94.28ms +step:176/1670 train_time:16593ms step_avg:94.28ms +step:177/1670 train_time:16686ms step_avg:94.27ms +step:178/1670 train_time:16779ms step_avg:94.26ms +step:179/1670 train_time:16871ms step_avg:94.25ms +step:180/1670 train_time:16963ms step_avg:94.24ms +step:181/1670 train_time:17056ms step_avg:94.23ms +step:182/1670 train_time:17148ms step_avg:94.22ms +step:183/1670 train_time:17240ms step_avg:94.21ms +step:184/1670 train_time:17333ms step_avg:94.20ms +step:185/1670 train_time:17426ms step_avg:94.19ms +step:186/1670 train_time:17519ms step_avg:94.19ms +step:187/1670 train_time:17612ms step_avg:94.18ms +step:188/1670 train_time:17705ms step_avg:94.17ms +step:189/1670 train_time:17798ms step_avg:94.17ms +step:190/1670 train_time:17890ms step_avg:94.16ms +step:191/1670 train_time:17982ms step_avg:94.15ms +step:192/1670 train_time:18074ms step_avg:94.14ms +step:193/1670 train_time:18166ms step_avg:94.13ms +step:194/1670 train_time:18259ms step_avg:94.12ms +step:195/1670 train_time:18351ms step_avg:94.11ms +step:196/1670 train_time:18444ms step_avg:94.10ms +step:197/1670 train_time:18537ms step_avg:94.10ms +step:198/1670 train_time:18630ms step_avg:94.09ms +step:199/1670 train_time:18723ms step_avg:94.08ms +step:200/1670 train_time:18815ms step_avg:94.07ms +step:201/1670 train_time:18908ms step_avg:94.07ms +step:202/1670 train_time:19000ms step_avg:94.06ms +step:203/1670 train_time:19093ms step_avg:94.05ms +step:204/1670 train_time:19185ms step_avg:94.05ms +step:205/1670 train_time:19278ms step_avg:94.04ms +step:206/1670 train_time:19371ms step_avg:94.03ms +step:207/1670 train_time:19463ms step_avg:94.03ms +step:208/1670 train_time:19556ms step_avg:94.02ms +step:209/1670 train_time:19649ms step_avg:94.01ms +step:210/1670 train_time:19742ms step_avg:94.01ms +step:211/1670 train_time:19835ms step_avg:94.00ms +step:212/1670 train_time:19928ms step_avg:94.00ms +step:213/1670 train_time:20289ms step_avg:95.25ms +step:214/1670 train_time:20401ms step_avg:95.33ms +step:215/1670 train_time:20492ms step_avg:95.31ms +step:216/1670 train_time:20583ms step_avg:95.29ms +step:217/1670 train_time:20674ms step_avg:95.27ms +step:218/1670 
train_time:20766ms step_avg:95.26ms +step:219/1670 train_time:20858ms step_avg:95.24ms +step:220/1670 train_time:20949ms step_avg:95.22ms +step:221/1670 train_time:21041ms step_avg:95.21ms +step:222/1670 train_time:21133ms step_avg:95.19ms +step:223/1670 train_time:21224ms step_avg:95.18ms +step:224/1670 train_time:21320ms step_avg:95.18ms +step:225/1670 train_time:21417ms step_avg:95.18ms +step:226/1670 train_time:21510ms step_avg:95.18ms +step:227/1670 train_time:21603ms step_avg:95.17ms +step:228/1670 train_time:21694ms step_avg:95.15ms +step:229/1670 train_time:21787ms step_avg:95.14ms +step:230/1670 train_time:21878ms step_avg:95.12ms +step:231/1670 train_time:21970ms step_avg:95.11ms +step:232/1670 train_time:22061ms step_avg:95.09ms +step:233/1670 train_time:22153ms step_avg:95.08ms +step:234/1670 train_time:22246ms step_avg:95.07ms +step:235/1670 train_time:22340ms step_avg:95.06ms +step:236/1670 train_time:22433ms step_avg:95.06ms +step:237/1670 train_time:22527ms step_avg:95.05ms +step:238/1670 train_time:22620ms step_avg:95.04ms +step:239/1670 train_time:22712ms step_avg:95.03ms +step:240/1670 train_time:22804ms step_avg:95.02ms +step:241/1670 train_time:22896ms step_avg:95.01ms +step:242/1670 train_time:22988ms step_avg:94.99ms +step:243/1670 train_time:23081ms step_avg:94.98ms +step:244/1670 train_time:23173ms step_avg:94.97ms +step:245/1670 train_time:23265ms step_avg:94.96ms +step:246/1670 train_time:23358ms step_avg:94.95ms +step:247/1670 train_time:23452ms step_avg:94.95ms +step:248/1670 train_time:23545ms step_avg:94.94ms +step:249/1670 train_time:23638ms step_avg:94.93ms +step:250/1670 train_time:23730ms step_avg:94.92ms +step:250/1670 val_loss:3.9642 train_time:23821ms step_avg:95.28ms +step:251/1670 train_time:23845ms step_avg:95.00ms +step:252/1670 train_time:23918ms step_avg:94.91ms +step:253/1670 train_time:24018ms step_avg:94.93ms +step:254/1670 train_time:24115ms step_avg:94.94ms +step:255/1670 train_time:24207ms step_avg:94.93ms +step:256/1670 train_time:24299ms step_avg:94.92ms +step:257/1670 train_time:24391ms step_avg:94.91ms +step:258/1670 train_time:24482ms step_avg:94.89ms +step:259/1670 train_time:24573ms step_avg:94.88ms +step:260/1670 train_time:24666ms step_avg:94.87ms +step:261/1670 train_time:24758ms step_avg:94.86ms +step:262/1670 train_time:24850ms step_avg:94.85ms +step:263/1670 train_time:24944ms step_avg:94.84ms +step:264/1670 train_time:25039ms step_avg:94.85ms +step:265/1670 train_time:25133ms step_avg:94.84ms +step:266/1670 train_time:25226ms step_avg:94.83ms +step:267/1670 train_time:25318ms step_avg:94.82ms +step:268/1670 train_time:25410ms step_avg:94.81ms +step:269/1670 train_time:25502ms step_avg:94.80ms +step:270/1670 train_time:25593ms step_avg:94.79ms +step:271/1670 train_time:25685ms step_avg:94.78ms +step:272/1670 train_time:25777ms step_avg:94.77ms +step:273/1670 train_time:25869ms step_avg:94.76ms +step:274/1670 train_time:25963ms step_avg:94.75ms +step:275/1670 train_time:26056ms step_avg:94.75ms +step:276/1670 train_time:26149ms step_avg:94.74ms +step:277/1670 train_time:26242ms step_avg:94.73ms +step:278/1670 train_time:26335ms step_avg:94.73ms +step:279/1670 train_time:26427ms step_avg:94.72ms +step:280/1670 train_time:26519ms step_avg:94.71ms +step:281/1670 train_time:26611ms step_avg:94.70ms +step:282/1670 train_time:26704ms step_avg:94.69ms +step:283/1670 train_time:26796ms step_avg:94.68ms +step:284/1670 train_time:26888ms step_avg:94.68ms +step:285/1670 train_time:26981ms step_avg:94.67ms +step:286/1670 train_time:27074ms 
step_avg:94.66ms +step:287/1670 train_time:27167ms step_avg:94.66ms +step:288/1670 train_time:27261ms step_avg:94.66ms +step:289/1670 train_time:27353ms step_avg:94.65ms +step:290/1670 train_time:27445ms step_avg:94.64ms +step:291/1670 train_time:27538ms step_avg:94.63ms +step:292/1670 train_time:27630ms step_avg:94.62ms +step:293/1670 train_time:27722ms step_avg:94.61ms +step:294/1670 train_time:27814ms step_avg:94.61ms +step:295/1670 train_time:27907ms step_avg:94.60ms +step:296/1670 train_time:28000ms step_avg:94.59ms +step:297/1670 train_time:28092ms step_avg:94.59ms +step:298/1670 train_time:28185ms step_avg:94.58ms +step:299/1670 train_time:28278ms step_avg:94.58ms +step:300/1670 train_time:28371ms step_avg:94.57ms +step:301/1670 train_time:28463ms step_avg:94.56ms +step:302/1670 train_time:28555ms step_avg:94.55ms +step:303/1670 train_time:28648ms step_avg:94.55ms +step:304/1670 train_time:28740ms step_avg:94.54ms +step:305/1670 train_time:28833ms step_avg:94.53ms +step:306/1670 train_time:28926ms step_avg:94.53ms +step:307/1670 train_time:29018ms step_avg:94.52ms +step:308/1670 train_time:29111ms step_avg:94.52ms +step:309/1670 train_time:29205ms step_avg:94.51ms +step:310/1670 train_time:29298ms step_avg:94.51ms +step:311/1670 train_time:29390ms step_avg:94.50ms +step:312/1670 train_time:29483ms step_avg:94.50ms +step:313/1670 train_time:29575ms step_avg:94.49ms +step:314/1670 train_time:29668ms step_avg:94.48ms +step:315/1670 train_time:29760ms step_avg:94.48ms +step:316/1670 train_time:29853ms step_avg:94.47ms +step:317/1670 train_time:29946ms step_avg:94.47ms +step:318/1670 train_time:30039ms step_avg:94.46ms +step:319/1670 train_time:30131ms step_avg:94.46ms +step:320/1670 train_time:30223ms step_avg:94.45ms +step:321/1670 train_time:30316ms step_avg:94.44ms +step:322/1670 train_time:30409ms step_avg:94.44ms +step:323/1670 train_time:30502ms step_avg:94.43ms +step:324/1670 train_time:30594ms step_avg:94.43ms +step:325/1670 train_time:30687ms step_avg:94.42ms +step:326/1670 train_time:30779ms step_avg:94.41ms +step:327/1670 train_time:30871ms step_avg:94.41ms +step:328/1670 train_time:30965ms step_avg:94.41ms +step:329/1670 train_time:31057ms step_avg:94.40ms +step:330/1670 train_time:31149ms step_avg:94.39ms +step:331/1670 train_time:31242ms step_avg:94.39ms +step:332/1670 train_time:31334ms step_avg:94.38ms +step:333/1670 train_time:31427ms step_avg:94.38ms +step:334/1670 train_time:31520ms step_avg:94.37ms +step:335/1670 train_time:31612ms step_avg:94.36ms +step:336/1670 train_time:31705ms step_avg:94.36ms +step:337/1670 train_time:31797ms step_avg:94.35ms +step:338/1670 train_time:31890ms step_avg:94.35ms +step:339/1670 train_time:31982ms step_avg:94.34ms +step:340/1670 train_time:32074ms step_avg:94.34ms +step:341/1670 train_time:32168ms step_avg:94.33ms +step:342/1670 train_time:32260ms step_avg:94.33ms +step:343/1670 train_time:32353ms step_avg:94.32ms +step:344/1670 train_time:32446ms step_avg:94.32ms +step:345/1670 train_time:32538ms step_avg:94.31ms +step:346/1670 train_time:32631ms step_avg:94.31ms +step:347/1670 train_time:32724ms step_avg:94.30ms +step:348/1670 train_time:32816ms step_avg:94.30ms +step:349/1670 train_time:32909ms step_avg:94.30ms +step:350/1670 train_time:33002ms step_avg:94.29ms +step:351/1670 train_time:33094ms step_avg:94.29ms +step:352/1670 train_time:33187ms step_avg:94.28ms +step:353/1670 train_time:33280ms step_avg:94.28ms +step:354/1670 train_time:33372ms step_avg:94.27ms +step:355/1670 train_time:33465ms step_avg:94.27ms +step:356/1670 
train_time:33557ms step_avg:94.26ms +step:357/1670 train_time:33649ms step_avg:94.25ms +step:358/1670 train_time:33742ms step_avg:94.25ms +step:359/1670 train_time:33834ms step_avg:94.25ms +step:360/1670 train_time:33927ms step_avg:94.24ms +step:361/1670 train_time:34019ms step_avg:94.24ms +step:362/1670 train_time:34112ms step_avg:94.23ms +step:363/1670 train_time:34205ms step_avg:94.23ms +step:364/1670 train_time:34298ms step_avg:94.22ms +step:365/1670 train_time:34390ms step_avg:94.22ms +step:366/1670 train_time:34483ms step_avg:94.22ms +step:367/1670 train_time:34576ms step_avg:94.21ms +step:368/1670 train_time:34668ms step_avg:94.21ms +step:369/1670 train_time:34760ms step_avg:94.20ms +step:370/1670 train_time:34852ms step_avg:94.20ms +step:371/1670 train_time:34945ms step_avg:94.19ms +step:372/1670 train_time:35038ms step_avg:94.19ms +step:373/1670 train_time:35130ms step_avg:94.18ms +step:374/1670 train_time:35223ms step_avg:94.18ms +step:375/1670 train_time:35315ms step_avg:94.17ms +step:375/1670 val_loss:3.8157 train_time:35407ms step_avg:94.42ms +step:376/1670 train_time:35432ms step_avg:94.23ms +step:377/1670 train_time:35508ms step_avg:94.18ms +step:378/1670 train_time:35608ms step_avg:94.20ms +step:379/1670 train_time:35706ms step_avg:94.21ms +step:380/1670 train_time:35798ms step_avg:94.21ms +step:381/1670 train_time:35890ms step_avg:94.20ms +step:382/1670 train_time:35981ms step_avg:94.19ms +step:383/1670 train_time:36073ms step_avg:94.19ms +step:384/1670 train_time:36164ms step_avg:94.18ms +step:385/1670 train_time:36256ms step_avg:94.17ms +step:386/1670 train_time:36348ms step_avg:94.16ms +step:387/1670 train_time:36440ms step_avg:94.16ms +step:388/1670 train_time:36535ms step_avg:94.16ms +step:389/1670 train_time:36630ms step_avg:94.16ms +step:390/1670 train_time:36724ms step_avg:94.16ms +step:391/1670 train_time:36816ms step_avg:94.16ms +step:392/1670 train_time:36908ms step_avg:94.15ms +step:393/1670 train_time:37000ms step_avg:94.15ms +step:394/1670 train_time:37092ms step_avg:94.14ms +step:395/1670 train_time:37183ms step_avg:94.13ms +step:396/1670 train_time:37275ms step_avg:94.13ms +step:397/1670 train_time:37367ms step_avg:94.12ms +step:398/1670 train_time:37459ms step_avg:94.12ms +step:399/1670 train_time:37553ms step_avg:94.12ms +step:400/1670 train_time:37648ms step_avg:94.12ms +step:401/1670 train_time:37740ms step_avg:94.12ms +step:402/1670 train_time:37834ms step_avg:94.11ms +step:403/1670 train_time:37926ms step_avg:94.11ms +step:404/1670 train_time:38018ms step_avg:94.10ms +step:405/1670 train_time:38110ms step_avg:94.10ms +step:406/1670 train_time:38201ms step_avg:94.09ms +step:407/1670 train_time:38294ms step_avg:94.09ms +step:408/1670 train_time:38386ms step_avg:94.08ms +step:409/1670 train_time:38478ms step_avg:94.08ms +step:410/1670 train_time:38571ms step_avg:94.08ms +step:411/1670 train_time:38664ms step_avg:94.07ms +step:412/1670 train_time:38757ms step_avg:94.07ms +step:413/1670 train_time:38851ms step_avg:94.07ms +step:414/1670 train_time:38944ms step_avg:94.07ms +step:415/1670 train_time:39036ms step_avg:94.06ms +step:416/1670 train_time:39129ms step_avg:94.06ms +step:417/1670 train_time:39221ms step_avg:94.05ms +step:418/1670 train_time:39313ms step_avg:94.05ms +step:419/1670 train_time:39405ms step_avg:94.04ms +step:420/1670 train_time:39497ms step_avg:94.04ms +step:421/1670 train_time:39589ms step_avg:94.04ms +step:422/1670 train_time:39683ms step_avg:94.04ms +step:423/1670 train_time:39777ms step_avg:94.04ms +step:424/1670 train_time:39869ms 
step_avg:94.03ms +step:425/1670 train_time:40193ms step_avg:94.57ms +step:426/1670 train_time:40386ms step_avg:94.80ms +step:427/1670 train_time:40477ms step_avg:94.79ms +step:428/1670 train_time:40567ms step_avg:94.78ms +step:429/1670 train_time:40659ms step_avg:94.78ms +step:430/1670 train_time:40750ms step_avg:94.77ms +step:431/1670 train_time:40842ms step_avg:94.76ms +step:432/1670 train_time:40934ms step_avg:94.75ms +step:433/1670 train_time:41025ms step_avg:94.75ms +step:434/1670 train_time:41117ms step_avg:94.74ms +step:435/1670 train_time:41209ms step_avg:94.73ms +step:436/1670 train_time:41303ms step_avg:94.73ms +step:437/1670 train_time:41399ms step_avg:94.73ms +step:438/1670 train_time:41494ms step_avg:94.74ms +step:439/1670 train_time:41587ms step_avg:94.73ms +step:440/1670 train_time:41679ms step_avg:94.73ms +step:441/1670 train_time:41771ms step_avg:94.72ms +step:442/1670 train_time:41863ms step_avg:94.71ms +step:443/1670 train_time:41955ms step_avg:94.71ms +step:444/1670 train_time:42047ms step_avg:94.70ms +step:445/1670 train_time:42139ms step_avg:94.69ms +step:446/1670 train_time:42232ms step_avg:94.69ms +step:447/1670 train_time:42326ms step_avg:94.69ms +step:448/1670 train_time:42420ms step_avg:94.69ms +step:449/1670 train_time:42514ms step_avg:94.68ms +step:450/1670 train_time:42607ms step_avg:94.68ms +step:451/1670 train_time:42699ms step_avg:94.68ms +step:452/1670 train_time:42791ms step_avg:94.67ms +step:453/1670 train_time:42884ms step_avg:94.67ms +step:454/1670 train_time:42976ms step_avg:94.66ms +step:455/1670 train_time:43067ms step_avg:94.65ms +step:456/1670 train_time:43159ms step_avg:94.65ms +step:457/1670 train_time:43252ms step_avg:94.64ms +step:458/1670 train_time:43346ms step_avg:94.64ms +step:459/1670 train_time:43438ms step_avg:94.64ms +step:460/1670 train_time:43532ms step_avg:94.63ms +step:461/1670 train_time:43624ms step_avg:94.63ms +step:462/1670 train_time:43716ms step_avg:94.62ms +step:463/1670 train_time:43808ms step_avg:94.62ms +step:464/1670 train_time:43901ms step_avg:94.61ms +step:465/1670 train_time:43993ms step_avg:94.61ms +step:466/1670 train_time:44085ms step_avg:94.60ms +step:467/1670 train_time:44177ms step_avg:94.60ms +step:468/1670 train_time:44270ms step_avg:94.59ms +step:469/1670 train_time:44363ms step_avg:94.59ms +step:470/1670 train_time:44456ms step_avg:94.59ms +step:471/1670 train_time:44549ms step_avg:94.58ms +step:472/1670 train_time:44642ms step_avg:94.58ms +step:473/1670 train_time:44735ms step_avg:94.58ms +step:474/1670 train_time:44827ms step_avg:94.57ms +step:475/1670 train_time:44918ms step_avg:94.57ms +step:476/1670 train_time:45011ms step_avg:94.56ms +step:477/1670 train_time:45103ms step_avg:94.56ms +step:478/1670 train_time:45196ms step_avg:94.55ms +step:479/1670 train_time:45288ms step_avg:94.55ms +step:480/1670 train_time:45381ms step_avg:94.54ms +step:481/1670 train_time:45474ms step_avg:94.54ms +step:482/1670 train_time:45567ms step_avg:94.54ms +step:483/1670 train_time:45660ms step_avg:94.53ms +step:484/1670 train_time:45753ms step_avg:94.53ms +step:485/1670 train_time:45845ms step_avg:94.53ms +step:486/1670 train_time:45938ms step_avg:94.52ms +step:487/1670 train_time:46030ms step_avg:94.52ms +step:488/1670 train_time:46123ms step_avg:94.51ms +step:489/1670 train_time:46215ms step_avg:94.51ms +step:490/1670 train_time:46307ms step_avg:94.50ms +step:491/1670 train_time:46399ms step_avg:94.50ms +step:492/1670 train_time:46492ms step_avg:94.50ms +step:493/1670 train_time:46584ms step_avg:94.49ms +step:494/1670 
train_time:46677ms step_avg:94.49ms +step:495/1670 train_time:46770ms step_avg:94.48ms +step:496/1670 train_time:46862ms step_avg:94.48ms +step:497/1670 train_time:46955ms step_avg:94.48ms +step:498/1670 train_time:47047ms step_avg:94.47ms +step:499/1670 train_time:47140ms step_avg:94.47ms +step:500/1670 train_time:47232ms step_avg:94.46ms +step:500/1670 val_loss:3.7170 train_time:47323ms step_avg:94.65ms +step:501/1670 train_time:47348ms step_avg:94.51ms +step:502/1670 train_time:47420ms step_avg:94.46ms +step:503/1670 train_time:47520ms step_avg:94.47ms +step:504/1670 train_time:47615ms step_avg:94.47ms +step:505/1670 train_time:47707ms step_avg:94.47ms +step:506/1670 train_time:47799ms step_avg:94.46ms +step:507/1670 train_time:47891ms step_avg:94.46ms +step:508/1670 train_time:47982ms step_avg:94.45ms +step:509/1670 train_time:48074ms step_avg:94.45ms +step:510/1670 train_time:48165ms step_avg:94.44ms +step:511/1670 train_time:48256ms step_avg:94.43ms +step:512/1670 train_time:48348ms step_avg:94.43ms +step:513/1670 train_time:48443ms step_avg:94.43ms +step:514/1670 train_time:48537ms step_avg:94.43ms +step:515/1670 train_time:48631ms step_avg:94.43ms +step:516/1670 train_time:48724ms step_avg:94.43ms +step:517/1670 train_time:48816ms step_avg:94.42ms +step:518/1670 train_time:48908ms step_avg:94.42ms +step:519/1670 train_time:49000ms step_avg:94.41ms +step:520/1670 train_time:49093ms step_avg:94.41ms +step:521/1670 train_time:49184ms step_avg:94.40ms +step:522/1670 train_time:49276ms step_avg:94.40ms +step:523/1670 train_time:49368ms step_avg:94.39ms +step:524/1670 train_time:49462ms step_avg:94.39ms +step:525/1670 train_time:49555ms step_avg:94.39ms +step:526/1670 train_time:49648ms step_avg:94.39ms +step:527/1670 train_time:49741ms step_avg:94.38ms +step:528/1670 train_time:49833ms step_avg:94.38ms +step:529/1670 train_time:49925ms step_avg:94.38ms +step:530/1670 train_time:50018ms step_avg:94.37ms +step:531/1670 train_time:50110ms step_avg:94.37ms +step:532/1670 train_time:50203ms step_avg:94.37ms +step:533/1670 train_time:50296ms step_avg:94.36ms +step:534/1670 train_time:50388ms step_avg:94.36ms +step:535/1670 train_time:50481ms step_avg:94.36ms +step:536/1670 train_time:50574ms step_avg:94.35ms +step:537/1670 train_time:50666ms step_avg:94.35ms +step:538/1670 train_time:50760ms step_avg:94.35ms +step:539/1670 train_time:50853ms step_avg:94.35ms +step:540/1670 train_time:50945ms step_avg:94.34ms +step:541/1670 train_time:51037ms step_avg:94.34ms +step:542/1670 train_time:51129ms step_avg:94.33ms +step:543/1670 train_time:51221ms step_avg:94.33ms +step:544/1670 train_time:51313ms step_avg:94.33ms +step:545/1670 train_time:51406ms step_avg:94.32ms +step:546/1670 train_time:51499ms step_avg:94.32ms +step:547/1670 train_time:51595ms step_avg:94.32ms +step:548/1670 train_time:51687ms step_avg:94.32ms +step:549/1670 train_time:51778ms step_avg:94.31ms +step:550/1670 train_time:51871ms step_avg:94.31ms +step:551/1670 train_time:51964ms step_avg:94.31ms +step:552/1670 train_time:52056ms step_avg:94.30ms +step:553/1670 train_time:52148ms step_avg:94.30ms +step:554/1670 train_time:52242ms step_avg:94.30ms +step:555/1670 train_time:52334ms step_avg:94.30ms +step:556/1670 train_time:52427ms step_avg:94.29ms +step:557/1670 train_time:52520ms step_avg:94.29ms +step:558/1670 train_time:52713ms step_avg:94.47ms +step:559/1670 train_time:52790ms step_avg:94.44ms +step:560/1670 train_time:52882ms step_avg:94.43ms +step:561/1670 train_time:52975ms step_avg:94.43ms +step:562/1670 train_time:53068ms 
step_avg:94.43ms +step:563/1670 train_time:53161ms step_avg:94.42ms +step:564/1670 train_time:53253ms step_avg:94.42ms +step:565/1670 train_time:53346ms step_avg:94.42ms +step:566/1670 train_time:53438ms step_avg:94.41ms +step:567/1670 train_time:53531ms step_avg:94.41ms +step:568/1670 train_time:53629ms step_avg:94.42ms +step:569/1670 train_time:53727ms step_avg:94.42ms +step:570/1670 train_time:53822ms step_avg:94.43ms +step:571/1670 train_time:53916ms step_avg:94.42ms +step:572/1670 train_time:54009ms step_avg:94.42ms +step:573/1670 train_time:54102ms step_avg:94.42ms +step:574/1670 train_time:54196ms step_avg:94.42ms +step:575/1670 train_time:54289ms step_avg:94.42ms +step:576/1670 train_time:54382ms step_avg:94.41ms +step:577/1670 train_time:54475ms step_avg:94.41ms +step:578/1670 train_time:54568ms step_avg:94.41ms +step:579/1670 train_time:54663ms step_avg:94.41ms +step:580/1670 train_time:54758ms step_avg:94.41ms +step:581/1670 train_time:54852ms step_avg:94.41ms +step:582/1670 train_time:54946ms step_avg:94.41ms +step:583/1670 train_time:55039ms step_avg:94.41ms +step:584/1670 train_time:55133ms step_avg:94.41ms +step:585/1670 train_time:55228ms step_avg:94.41ms +step:586/1670 train_time:55321ms step_avg:94.40ms +step:587/1670 train_time:55413ms step_avg:94.40ms +step:588/1670 train_time:55507ms step_avg:94.40ms +step:589/1670 train_time:55602ms step_avg:94.40ms +step:590/1670 train_time:55697ms step_avg:94.40ms +step:591/1670 train_time:55791ms step_avg:94.40ms +step:592/1670 train_time:55885ms step_avg:94.40ms +step:593/1670 train_time:55979ms step_avg:94.40ms +step:594/1670 train_time:56073ms step_avg:94.40ms +step:595/1670 train_time:56167ms step_avg:94.40ms +step:596/1670 train_time:56260ms step_avg:94.40ms +step:597/1670 train_time:56353ms step_avg:94.39ms +step:598/1670 train_time:56446ms step_avg:94.39ms +step:599/1670 train_time:56539ms step_avg:94.39ms +step:600/1670 train_time:56634ms step_avg:94.39ms +step:601/1670 train_time:56728ms step_avg:94.39ms +step:602/1670 train_time:56822ms step_avg:94.39ms +step:603/1670 train_time:56916ms step_avg:94.39ms +step:604/1670 train_time:57010ms step_avg:94.39ms +step:605/1670 train_time:57105ms step_avg:94.39ms +step:606/1670 train_time:57198ms step_avg:94.39ms +step:607/1670 train_time:57292ms step_avg:94.39ms +step:608/1670 train_time:57385ms step_avg:94.38ms +step:609/1670 train_time:57479ms step_avg:94.38ms +step:610/1670 train_time:57572ms step_avg:94.38ms +step:611/1670 train_time:57666ms step_avg:94.38ms +step:612/1670 train_time:57761ms step_avg:94.38ms +step:613/1670 train_time:57855ms step_avg:94.38ms +step:614/1670 train_time:57949ms step_avg:94.38ms +step:615/1670 train_time:58043ms step_avg:94.38ms +step:616/1670 train_time:58137ms step_avg:94.38ms +step:617/1670 train_time:58232ms step_avg:94.38ms +step:618/1670 train_time:58325ms step_avg:94.38ms +step:619/1670 train_time:58418ms step_avg:94.37ms +step:620/1670 train_time:58511ms step_avg:94.37ms +step:621/1670 train_time:58605ms step_avg:94.37ms +step:622/1670 train_time:58700ms step_avg:94.37ms +step:623/1670 train_time:58793ms step_avg:94.37ms +step:624/1670 train_time:58888ms step_avg:94.37ms +step:625/1670 train_time:58982ms step_avg:94.37ms +step:625/1670 val_loss:3.6148 train_time:59074ms step_avg:94.52ms +step:626/1670 train_time:59099ms step_avg:94.41ms +step:627/1670 train_time:59181ms step_avg:94.39ms +step:628/1670 train_time:59281ms step_avg:94.40ms +step:629/1670 train_time:59376ms step_avg:94.40ms +step:630/1670 train_time:59468ms step_avg:94.39ms 
+step:631/1670 train_time:59561ms step_avg:94.39ms +step:632/1670 train_time:59654ms step_avg:94.39ms +step:633/1670 train_time:59747ms step_avg:94.39ms +step:634/1670 train_time:59840ms step_avg:94.38ms +step:635/1670 train_time:59932ms step_avg:94.38ms +step:636/1670 train_time:60024ms step_avg:94.38ms +step:637/1670 train_time:60120ms step_avg:94.38ms +step:638/1670 train_time:60216ms step_avg:94.38ms +step:639/1670 train_time:60666ms step_avg:94.94ms +step:640/1670 train_time:60738ms step_avg:94.90ms +step:641/1670 train_time:60830ms step_avg:94.90ms +step:642/1670 train_time:60923ms step_avg:94.90ms +step:643/1670 train_time:61016ms step_avg:94.89ms +step:644/1670 train_time:61108ms step_avg:94.89ms +step:645/1670 train_time:61201ms step_avg:94.88ms +step:646/1670 train_time:61293ms step_avg:94.88ms +step:647/1670 train_time:61386ms step_avg:94.88ms +step:648/1670 train_time:61478ms step_avg:94.87ms +step:649/1670 train_time:61574ms step_avg:94.88ms +step:650/1670 train_time:61676ms step_avg:94.89ms +step:651/1670 train_time:61773ms step_avg:94.89ms +step:652/1670 train_time:61867ms step_avg:94.89ms +step:653/1670 train_time:61960ms step_avg:94.88ms +step:654/1670 train_time:62053ms step_avg:94.88ms +step:655/1670 train_time:62145ms step_avg:94.88ms +step:656/1670 train_time:62238ms step_avg:94.87ms +step:657/1670 train_time:62331ms step_avg:94.87ms +step:658/1670 train_time:62423ms step_avg:94.87ms +step:659/1670 train_time:62517ms step_avg:94.87ms +step:660/1670 train_time:62612ms step_avg:94.87ms +step:661/1670 train_time:62706ms step_avg:94.87ms +step:662/1670 train_time:62801ms step_avg:94.87ms +step:663/1670 train_time:62896ms step_avg:94.87ms +step:664/1670 train_time:62990ms step_avg:94.86ms +step:665/1670 train_time:63082ms step_avg:94.86ms +step:666/1670 train_time:63176ms step_avg:94.86ms +step:667/1670 train_time:63268ms step_avg:94.85ms +step:668/1670 train_time:63361ms step_avg:94.85ms +step:669/1670 train_time:63454ms step_avg:94.85ms +step:670/1670 train_time:63548ms step_avg:94.85ms +step:671/1670 train_time:63643ms step_avg:94.85ms +step:672/1670 train_time:63737ms step_avg:94.85ms +step:673/1670 train_time:63832ms step_avg:94.85ms +step:674/1670 train_time:63926ms step_avg:94.85ms +step:675/1670 train_time:64020ms step_avg:94.85ms +step:676/1670 train_time:64114ms step_avg:94.84ms +step:677/1670 train_time:64207ms step_avg:94.84ms +step:678/1670 train_time:64301ms step_avg:94.84ms +step:679/1670 train_time:64393ms step_avg:94.84ms +step:680/1670 train_time:64486ms step_avg:94.83ms +step:681/1670 train_time:64580ms step_avg:94.83ms +step:682/1670 train_time:64674ms step_avg:94.83ms +step:683/1670 train_time:64768ms step_avg:94.83ms +step:684/1670 train_time:64863ms step_avg:94.83ms +step:685/1670 train_time:64957ms step_avg:94.83ms +step:686/1670 train_time:65051ms step_avg:94.83ms +step:687/1670 train_time:65144ms step_avg:94.82ms +step:688/1670 train_time:65237ms step_avg:94.82ms +step:689/1670 train_time:65331ms step_avg:94.82ms +step:690/1670 train_time:65424ms step_avg:94.82ms +step:691/1670 train_time:65517ms step_avg:94.82ms +step:692/1670 train_time:65611ms step_avg:94.81ms +step:693/1670 train_time:65705ms step_avg:94.81ms +step:694/1670 train_time:65799ms step_avg:94.81ms +step:695/1670 train_time:65893ms step_avg:94.81ms +step:696/1670 train_time:65987ms step_avg:94.81ms +step:697/1670 train_time:66081ms step_avg:94.81ms +step:698/1670 train_time:66175ms step_avg:94.81ms +step:699/1670 train_time:66269ms step_avg:94.81ms +step:700/1670 train_time:66362ms 
step_avg:94.80ms +step:701/1670 train_time:66455ms step_avg:94.80ms +step:702/1670 train_time:66549ms step_avg:94.80ms +step:703/1670 train_time:66642ms step_avg:94.80ms +step:704/1670 train_time:66736ms step_avg:94.80ms +step:705/1670 train_time:66831ms step_avg:94.80ms +step:706/1670 train_time:66925ms step_avg:94.79ms +step:707/1670 train_time:67019ms step_avg:94.79ms +step:708/1670 train_time:67113ms step_avg:94.79ms +step:709/1670 train_time:67206ms step_avg:94.79ms +step:710/1670 train_time:67300ms step_avg:94.79ms +step:711/1670 train_time:67393ms step_avg:94.79ms +step:712/1670 train_time:67487ms step_avg:94.78ms +step:713/1670 train_time:67581ms step_avg:94.78ms +step:714/1670 train_time:67675ms step_avg:94.78ms +step:715/1670 train_time:67768ms step_avg:94.78ms +step:716/1670 train_time:67862ms step_avg:94.78ms +step:717/1670 train_time:67956ms step_avg:94.78ms +step:718/1670 train_time:68049ms step_avg:94.78ms +step:719/1670 train_time:68143ms step_avg:94.77ms +step:720/1670 train_time:68237ms step_avg:94.77ms +step:721/1670 train_time:68331ms step_avg:94.77ms +step:722/1670 train_time:68424ms step_avg:94.77ms +step:723/1670 train_time:68518ms step_avg:94.77ms +step:724/1670 train_time:68612ms step_avg:94.77ms +step:725/1670 train_time:68706ms step_avg:94.77ms +step:726/1670 train_time:68799ms step_avg:94.77ms +step:727/1670 train_time:68893ms step_avg:94.76ms +step:728/1670 train_time:68987ms step_avg:94.76ms +step:729/1670 train_time:69081ms step_avg:94.76ms +step:730/1670 train_time:69175ms step_avg:94.76ms +step:731/1670 train_time:69268ms step_avg:94.76ms +step:732/1670 train_time:69362ms step_avg:94.76ms +step:733/1670 train_time:69456ms step_avg:94.76ms +step:734/1670 train_time:69550ms step_avg:94.75ms +step:735/1670 train_time:69643ms step_avg:94.75ms +step:736/1670 train_time:69737ms step_avg:94.75ms +step:737/1670 train_time:69831ms step_avg:94.75ms +step:738/1670 train_time:69926ms step_avg:94.75ms +step:739/1670 train_time:70019ms step_avg:94.75ms +step:740/1670 train_time:70112ms step_avg:94.75ms +step:741/1670 train_time:70206ms step_avg:94.74ms +step:742/1670 train_time:70299ms step_avg:94.74ms +step:743/1670 train_time:70393ms step_avg:94.74ms +step:744/1670 train_time:70487ms step_avg:94.74ms +step:745/1670 train_time:70580ms step_avg:94.74ms +step:746/1670 train_time:70673ms step_avg:94.74ms +step:747/1670 train_time:70767ms step_avg:94.73ms +step:748/1670 train_time:70861ms step_avg:94.73ms +step:749/1670 train_time:70956ms step_avg:94.73ms +step:750/1670 train_time:71049ms step_avg:94.73ms +step:750/1670 val_loss:3.5637 train_time:71141ms step_avg:94.85ms +step:751/1670 train_time:71167ms step_avg:94.76ms +step:752/1670 train_time:71242ms step_avg:94.74ms +step:753/1670 train_time:71342ms step_avg:94.74ms +step:754/1670 train_time:71439ms step_avg:94.75ms +step:755/1670 train_time:71532ms step_avg:94.74ms +step:756/1670 train_time:71625ms step_avg:94.74ms +step:757/1670 train_time:71717ms step_avg:94.74ms +step:758/1670 train_time:71810ms step_avg:94.74ms +step:759/1670 train_time:71902ms step_avg:94.73ms +step:760/1670 train_time:71995ms step_avg:94.73ms +step:761/1670 train_time:72088ms step_avg:94.73ms +step:762/1670 train_time:72182ms step_avg:94.73ms +step:763/1670 train_time:72277ms step_avg:94.73ms +step:764/1670 train_time:72374ms step_avg:94.73ms +step:765/1670 train_time:72468ms step_avg:94.73ms +step:766/1670 train_time:72563ms step_avg:94.73ms +step:767/1670 train_time:72656ms step_avg:94.73ms +step:768/1670 train_time:72749ms step_avg:94.72ms 
+step:769/1670 train_time:72841ms step_avg:94.72ms +step:770/1670 train_time:72934ms step_avg:94.72ms +step:771/1670 train_time:73027ms step_avg:94.72ms +step:772/1670 train_time:73120ms step_avg:94.72ms +step:773/1670 train_time:73214ms step_avg:94.71ms +step:774/1670 train_time:73309ms step_avg:94.71ms +step:775/1670 train_time:73404ms step_avg:94.71ms +step:776/1670 train_time:73499ms step_avg:94.72ms +step:777/1670 train_time:73593ms step_avg:94.71ms +step:778/1670 train_time:73686ms step_avg:94.71ms +step:779/1670 train_time:73779ms step_avg:94.71ms +step:780/1670 train_time:73872ms step_avg:94.71ms +step:781/1670 train_time:73966ms step_avg:94.71ms +step:782/1670 train_time:74059ms step_avg:94.70ms +step:783/1670 train_time:74153ms step_avg:94.70ms +step:784/1670 train_time:74246ms step_avg:94.70ms +step:785/1670 train_time:74342ms step_avg:94.70ms +step:786/1670 train_time:74437ms step_avg:94.70ms +step:787/1670 train_time:74531ms step_avg:94.70ms +step:788/1670 train_time:74624ms step_avg:94.70ms +step:789/1670 train_time:74719ms step_avg:94.70ms +step:790/1670 train_time:74811ms step_avg:94.70ms +step:791/1670 train_time:74905ms step_avg:94.70ms +step:792/1670 train_time:74999ms step_avg:94.70ms +step:793/1670 train_time:75092ms step_avg:94.69ms +step:794/1670 train_time:75186ms step_avg:94.69ms +step:795/1670 train_time:75279ms step_avg:94.69ms +step:796/1670 train_time:75374ms step_avg:94.69ms +step:797/1670 train_time:75469ms step_avg:94.69ms +step:798/1670 train_time:75562ms step_avg:94.69ms +step:799/1670 train_time:75656ms step_avg:94.69ms +step:800/1670 train_time:75750ms step_avg:94.69ms +step:801/1670 train_time:75843ms step_avg:94.69ms +step:802/1670 train_time:75936ms step_avg:94.68ms +step:803/1670 train_time:76030ms step_avg:94.68ms +step:804/1670 train_time:76124ms step_avg:94.68ms +step:805/1670 train_time:76217ms step_avg:94.68ms +step:806/1670 train_time:76311ms step_avg:94.68ms +step:807/1670 train_time:76405ms step_avg:94.68ms +step:808/1670 train_time:76500ms step_avg:94.68ms +step:809/1670 train_time:76594ms step_avg:94.68ms +step:810/1670 train_time:76688ms step_avg:94.68ms +step:811/1670 train_time:76781ms step_avg:94.67ms +step:812/1670 train_time:76875ms step_avg:94.67ms +step:813/1670 train_time:76969ms step_avg:94.67ms +step:814/1670 train_time:77061ms step_avg:94.67ms +step:815/1670 train_time:77155ms step_avg:94.67ms +step:816/1670 train_time:77249ms step_avg:94.67ms +step:817/1670 train_time:77343ms step_avg:94.67ms +step:818/1670 train_time:77437ms step_avg:94.67ms +step:819/1670 train_time:77532ms step_avg:94.67ms +step:820/1670 train_time:77625ms step_avg:94.67ms +step:821/1670 train_time:77719ms step_avg:94.66ms +step:822/1670 train_time:77813ms step_avg:94.66ms +step:823/1670 train_time:77906ms step_avg:94.66ms +step:824/1670 train_time:78000ms step_avg:94.66ms +step:825/1670 train_time:78093ms step_avg:94.66ms +step:826/1670 train_time:78186ms step_avg:94.66ms +step:827/1670 train_time:78280ms step_avg:94.65ms +step:828/1670 train_time:78374ms step_avg:94.65ms +step:829/1670 train_time:78467ms step_avg:94.65ms +step:830/1670 train_time:78561ms step_avg:94.65ms +step:831/1670 train_time:78654ms step_avg:94.65ms +step:832/1670 train_time:78748ms step_avg:94.65ms +step:833/1670 train_time:78842ms step_avg:94.65ms +step:834/1670 train_time:78936ms step_avg:94.65ms +step:835/1670 train_time:79030ms step_avg:94.65ms +step:836/1670 train_time:79124ms step_avg:94.65ms +step:837/1670 train_time:79217ms step_avg:94.64ms +step:838/1670 train_time:79312ms 
step_avg:94.64ms +step:839/1670 train_time:79406ms step_avg:94.64ms +step:840/1670 train_time:79501ms step_avg:94.64ms +step:841/1670 train_time:79594ms step_avg:94.64ms +step:842/1670 train_time:79688ms step_avg:94.64ms +step:843/1670 train_time:79782ms step_avg:94.64ms +step:844/1670 train_time:79876ms step_avg:94.64ms +step:845/1670 train_time:79970ms step_avg:94.64ms +step:846/1670 train_time:80064ms step_avg:94.64ms +step:847/1670 train_time:80157ms step_avg:94.64ms +step:848/1670 train_time:80251ms step_avg:94.64ms +step:849/1670 train_time:80344ms step_avg:94.63ms +step:850/1670 train_time:80439ms step_avg:94.63ms +step:851/1670 train_time:80791ms step_avg:94.94ms +step:852/1670 train_time:80960ms step_avg:95.02ms +step:853/1670 train_time:81052ms step_avg:95.02ms +step:854/1670 train_time:81144ms step_avg:95.02ms +step:855/1670 train_time:81237ms step_avg:95.01ms +step:856/1670 train_time:81330ms step_avg:95.01ms +step:857/1670 train_time:81423ms step_avg:95.01ms +step:858/1670 train_time:81516ms step_avg:95.01ms +step:859/1670 train_time:81608ms step_avg:95.00ms +step:860/1670 train_time:81701ms step_avg:95.00ms +step:861/1670 train_time:81799ms step_avg:95.00ms +step:862/1670 train_time:81896ms step_avg:95.01ms +step:863/1670 train_time:81992ms step_avg:95.01ms +step:864/1670 train_time:82086ms step_avg:95.01ms +step:865/1670 train_time:82179ms step_avg:95.00ms +step:866/1670 train_time:82271ms step_avg:95.00ms +step:867/1670 train_time:82365ms step_avg:95.00ms +step:868/1670 train_time:82458ms step_avg:95.00ms +step:869/1670 train_time:82550ms step_avg:94.99ms +step:870/1670 train_time:82643ms step_avg:94.99ms +step:871/1670 train_time:82736ms step_avg:94.99ms +step:872/1670 train_time:82832ms step_avg:94.99ms +step:873/1670 train_time:82927ms step_avg:94.99ms +step:874/1670 train_time:83021ms step_avg:94.99ms +step:875/1670 train_time:83114ms step_avg:94.99ms +step:875/1670 val_loss:3.5185 train_time:83206ms step_avg:95.09ms +step:876/1670 train_time:83232ms step_avg:95.01ms +step:877/1670 train_time:83306ms step_avg:94.99ms +step:878/1670 train_time:83409ms step_avg:95.00ms +step:879/1670 train_time:83503ms step_avg:95.00ms +step:880/1670 train_time:83596ms step_avg:95.00ms +step:881/1670 train_time:83689ms step_avg:94.99ms +step:882/1670 train_time:83781ms step_avg:94.99ms +step:883/1670 train_time:83873ms step_avg:94.99ms +step:884/1670 train_time:83966ms step_avg:94.98ms +step:885/1670 train_time:84059ms step_avg:94.98ms +step:886/1670 train_time:84152ms step_avg:94.98ms +step:887/1670 train_time:84247ms step_avg:94.98ms +step:888/1670 train_time:84344ms step_avg:94.98ms +step:889/1670 train_time:84439ms step_avg:94.98ms +step:890/1670 train_time:84534ms step_avg:94.98ms +step:891/1670 train_time:84628ms step_avg:94.98ms +step:892/1670 train_time:84721ms step_avg:94.98ms +step:893/1670 train_time:84813ms step_avg:94.98ms +step:894/1670 train_time:84906ms step_avg:94.97ms +step:895/1670 train_time:84999ms step_avg:94.97ms +step:896/1670 train_time:85093ms step_avg:94.97ms +step:897/1670 train_time:85187ms step_avg:94.97ms +step:898/1670 train_time:85281ms step_avg:94.97ms +step:899/1670 train_time:85377ms step_avg:94.97ms +step:900/1670 train_time:85473ms step_avg:94.97ms +step:901/1670 train_time:85568ms step_avg:94.97ms +step:902/1670 train_time:85661ms step_avg:94.97ms +step:903/1670 train_time:85754ms step_avg:94.97ms +step:904/1670 train_time:85847ms step_avg:94.96ms +step:905/1670 train_time:85940ms step_avg:94.96ms +step:906/1670 train_time:86033ms step_avg:94.96ms 
+step:907/1670 train_time:86126ms step_avg:94.96ms +step:908/1670 train_time:86219ms step_avg:94.96ms +step:909/1670 train_time:86314ms step_avg:94.96ms +step:910/1670 train_time:86409ms step_avg:94.96ms +step:911/1670 train_time:86504ms step_avg:94.96ms +step:912/1670 train_time:86598ms step_avg:94.95ms +step:913/1670 train_time:86691ms step_avg:94.95ms +step:914/1670 train_time:86785ms step_avg:94.95ms +step:915/1670 train_time:86878ms step_avg:94.95ms +step:916/1670 train_time:86971ms step_avg:94.95ms +step:917/1670 train_time:87065ms step_avg:94.95ms +step:918/1670 train_time:87157ms step_avg:94.94ms +step:919/1670 train_time:87251ms step_avg:94.94ms +step:920/1670 train_time:87346ms step_avg:94.94ms +step:921/1670 train_time:87440ms step_avg:94.94ms +step:922/1670 train_time:87535ms step_avg:94.94ms +step:923/1670 train_time:87629ms step_avg:94.94ms +step:924/1670 train_time:87722ms step_avg:94.94ms +step:925/1670 train_time:87816ms step_avg:94.94ms +step:926/1670 train_time:87909ms step_avg:94.93ms +step:927/1670 train_time:88002ms step_avg:94.93ms +step:928/1670 train_time:88096ms step_avg:94.93ms +step:929/1670 train_time:88189ms step_avg:94.93ms +step:930/1670 train_time:88283ms step_avg:94.93ms +step:931/1670 train_time:88377ms step_avg:94.93ms +step:932/1670 train_time:88471ms step_avg:94.93ms +step:933/1670 train_time:88566ms step_avg:94.93ms +step:934/1670 train_time:88660ms step_avg:94.93ms +step:935/1670 train_time:88753ms step_avg:94.92ms +step:936/1670 train_time:88847ms step_avg:94.92ms +step:937/1670 train_time:88940ms step_avg:94.92ms +step:938/1670 train_time:89033ms step_avg:94.92ms +step:939/1670 train_time:89128ms step_avg:94.92ms +step:940/1670 train_time:89221ms step_avg:94.92ms +step:941/1670 train_time:89315ms step_avg:94.91ms +step:942/1670 train_time:89409ms step_avg:94.91ms +step:943/1670 train_time:89504ms step_avg:94.91ms +step:944/1670 train_time:89597ms step_avg:94.91ms +step:945/1670 train_time:89691ms step_avg:94.91ms +step:946/1670 train_time:89784ms step_avg:94.91ms +step:947/1670 train_time:89879ms step_avg:94.91ms +step:948/1670 train_time:89972ms step_avg:94.91ms +step:949/1670 train_time:90066ms step_avg:94.91ms +step:950/1670 train_time:90159ms step_avg:94.90ms +step:951/1670 train_time:90253ms step_avg:94.90ms +step:952/1670 train_time:90347ms step_avg:94.90ms +step:953/1670 train_time:90441ms step_avg:94.90ms +step:954/1670 train_time:90535ms step_avg:94.90ms +step:955/1670 train_time:90629ms step_avg:94.90ms +step:956/1670 train_time:90722ms step_avg:94.90ms +step:957/1670 train_time:90815ms step_avg:94.90ms +step:958/1670 train_time:90909ms step_avg:94.89ms +step:959/1670 train_time:91002ms step_avg:94.89ms +step:960/1670 train_time:91096ms step_avg:94.89ms +step:961/1670 train_time:91189ms step_avg:94.89ms +step:962/1670 train_time:91283ms step_avg:94.89ms +step:963/1670 train_time:91377ms step_avg:94.89ms +step:964/1670 train_time:91472ms step_avg:94.89ms +step:965/1670 train_time:91565ms step_avg:94.89ms +step:966/1670 train_time:91659ms step_avg:94.88ms +step:967/1670 train_time:91752ms step_avg:94.88ms +step:968/1670 train_time:91846ms step_avg:94.88ms +step:969/1670 train_time:91940ms step_avg:94.88ms +step:970/1670 train_time:92034ms step_avg:94.88ms +step:971/1670 train_time:92127ms step_avg:94.88ms +step:972/1670 train_time:92220ms step_avg:94.88ms +step:973/1670 train_time:92315ms step_avg:94.88ms +step:974/1670 train_time:92409ms step_avg:94.88ms +step:975/1670 train_time:92503ms step_avg:94.87ms +step:976/1670 train_time:92597ms 
step_avg:94.87ms +step:977/1670 train_time:92690ms step_avg:94.87ms +step:978/1670 train_time:92784ms step_avg:94.87ms +step:979/1670 train_time:92878ms step_avg:94.87ms +step:980/1670 train_time:92971ms step_avg:94.87ms +step:981/1670 train_time:93065ms step_avg:94.87ms +step:982/1670 train_time:93158ms step_avg:94.87ms +step:983/1670 train_time:93252ms step_avg:94.86ms +step:984/1670 train_time:93346ms step_avg:94.86ms +step:985/1670 train_time:93439ms step_avg:94.86ms +step:986/1670 train_time:93533ms step_avg:94.86ms +step:987/1670 train_time:93626ms step_avg:94.86ms +step:988/1670 train_time:93720ms step_avg:94.86ms +step:989/1670 train_time:93814ms step_avg:94.86ms +step:990/1670 train_time:93908ms step_avg:94.86ms +step:991/1670 train_time:94002ms step_avg:94.86ms +step:992/1670 train_time:94095ms step_avg:94.85ms +step:993/1670 train_time:94189ms step_avg:94.85ms +step:994/1670 train_time:94283ms step_avg:94.85ms +step:995/1670 train_time:94377ms step_avg:94.85ms +step:996/1670 train_time:94472ms step_avg:94.85ms +step:997/1670 train_time:94566ms step_avg:94.85ms +step:998/1670 train_time:94659ms step_avg:94.85ms +step:999/1670 train_time:94753ms step_avg:94.85ms +step:1000/1670 train_time:94846ms step_avg:94.85ms +step:1000/1670 val_loss:3.4706 train_time:94939ms step_avg:94.94ms +step:1001/1670 train_time:94965ms step_avg:94.87ms +step:1002/1670 train_time:95040ms step_avg:94.85ms +step:1003/1670 train_time:95141ms step_avg:94.86ms +step:1004/1670 train_time:95236ms step_avg:94.86ms +step:1005/1670 train_time:95329ms step_avg:94.85ms +step:1006/1670 train_time:95422ms step_avg:94.85ms +step:1007/1670 train_time:95515ms step_avg:94.85ms +step:1008/1670 train_time:95607ms step_avg:94.85ms +step:1009/1670 train_time:95700ms step_avg:94.85ms +step:1010/1670 train_time:95793ms step_avg:94.84ms +step:1011/1670 train_time:95885ms step_avg:94.84ms +step:1012/1670 train_time:95980ms step_avg:94.84ms +step:1013/1670 train_time:96077ms step_avg:94.84ms +step:1014/1670 train_time:96172ms step_avg:94.84ms +step:1015/1670 train_time:96267ms step_avg:94.84ms +step:1016/1670 train_time:96360ms step_avg:94.84ms +step:1017/1670 train_time:96454ms step_avg:94.84ms +step:1018/1670 train_time:96548ms step_avg:94.84ms +step:1019/1670 train_time:96641ms step_avg:94.84ms +step:1020/1670 train_time:96734ms step_avg:94.84ms +step:1021/1670 train_time:96827ms step_avg:94.84ms +step:1022/1670 train_time:96919ms step_avg:94.83ms +step:1023/1670 train_time:97014ms step_avg:94.83ms +step:1024/1670 train_time:97109ms step_avg:94.83ms +step:1025/1670 train_time:97204ms step_avg:94.83ms +step:1026/1670 train_time:97299ms step_avg:94.83ms +step:1027/1670 train_time:97392ms step_avg:94.83ms +step:1028/1670 train_time:97486ms step_avg:94.83ms +step:1029/1670 train_time:97579ms step_avg:94.83ms +step:1030/1670 train_time:97672ms step_avg:94.83ms +step:1031/1670 train_time:97764ms step_avg:94.82ms +step:1032/1670 train_time:97857ms step_avg:94.82ms +step:1033/1670 train_time:97951ms step_avg:94.82ms +step:1034/1670 train_time:98046ms step_avg:94.82ms +step:1035/1670 train_time:98140ms step_avg:94.82ms +step:1036/1670 train_time:98235ms step_avg:94.82ms +step:1037/1670 train_time:98329ms step_avg:94.82ms +step:1038/1670 train_time:98422ms step_avg:94.82ms +step:1039/1670 train_time:98517ms step_avg:94.82ms +step:1040/1670 train_time:98610ms step_avg:94.82ms +step:1041/1670 train_time:98703ms step_avg:94.82ms +step:1042/1670 train_time:98796ms step_avg:94.81ms +step:1043/1670 train_time:98890ms step_avg:94.81ms 
+step:1044/1670 train_time:98984ms step_avg:94.81ms +step:1045/1670 train_time:99078ms step_avg:94.81ms +step:1046/1670 train_time:99172ms step_avg:94.81ms +step:1047/1670 train_time:99266ms step_avg:94.81ms +step:1048/1670 train_time:99360ms step_avg:94.81ms +step:1049/1670 train_time:99455ms step_avg:94.81ms +step:1050/1670 train_time:99549ms step_avg:94.81ms +step:1051/1670 train_time:99642ms step_avg:94.81ms +step:1052/1670 train_time:99735ms step_avg:94.81ms +step:1053/1670 train_time:99829ms step_avg:94.80ms +step:1054/1670 train_time:99921ms step_avg:94.80ms +step:1055/1670 train_time:100015ms step_avg:94.80ms +step:1056/1670 train_time:100109ms step_avg:94.80ms +step:1057/1670 train_time:100203ms step_avg:94.80ms +step:1058/1670 train_time:100297ms step_avg:94.80ms +step:1059/1670 train_time:100392ms step_avg:94.80ms +step:1060/1670 train_time:100486ms step_avg:94.80ms +step:1061/1670 train_time:100580ms step_avg:94.80ms +step:1062/1670 train_time:100905ms step_avg:95.01ms +step:1063/1670 train_time:101101ms step_avg:95.11ms +step:1064/1670 train_time:101193ms step_avg:95.11ms +step:1065/1670 train_time:101286ms step_avg:95.10ms +step:1066/1670 train_time:101379ms step_avg:95.10ms +step:1067/1670 train_time:101471ms step_avg:95.10ms +step:1068/1670 train_time:101564ms step_avg:95.10ms +step:1069/1670 train_time:101656ms step_avg:95.09ms +step:1070/1670 train_time:101749ms step_avg:95.09ms +step:1071/1670 train_time:101841ms step_avg:95.09ms +step:1072/1670 train_time:101938ms step_avg:95.09ms +step:1073/1670 train_time:102037ms step_avg:95.09ms +step:1074/1670 train_time:102132ms step_avg:95.09ms +step:1075/1670 train_time:102226ms step_avg:95.09ms +step:1076/1670 train_time:102319ms step_avg:95.09ms +step:1077/1670 train_time:102412ms step_avg:95.09ms +step:1078/1670 train_time:102505ms step_avg:95.09ms +step:1079/1670 train_time:102598ms step_avg:95.09ms +step:1080/1670 train_time:102691ms step_avg:95.08ms +step:1081/1670 train_time:102783ms step_avg:95.08ms +step:1082/1670 train_time:102876ms step_avg:95.08ms +step:1083/1670 train_time:102971ms step_avg:95.08ms +step:1084/1670 train_time:103067ms step_avg:95.08ms +step:1085/1670 train_time:103162ms step_avg:95.08ms +step:1086/1670 train_time:103256ms step_avg:95.08ms +step:1087/1670 train_time:103350ms step_avg:95.08ms +step:1088/1670 train_time:103443ms step_avg:95.08ms +step:1089/1670 train_time:103536ms step_avg:95.07ms +step:1090/1670 train_time:103629ms step_avg:95.07ms +step:1091/1670 train_time:103723ms step_avg:95.07ms +step:1092/1670 train_time:103816ms step_avg:95.07ms +step:1093/1670 train_time:103910ms step_avg:95.07ms +step:1094/1670 train_time:104005ms step_avg:95.07ms +step:1095/1670 train_time:104099ms step_avg:95.07ms +step:1096/1670 train_time:104193ms step_avg:95.07ms +step:1097/1670 train_time:104287ms step_avg:95.07ms +step:1098/1670 train_time:104381ms step_avg:95.06ms +step:1099/1670 train_time:104475ms step_avg:95.06ms +step:1100/1670 train_time:104568ms step_avg:95.06ms +step:1101/1670 train_time:104661ms step_avg:95.06ms +step:1102/1670 train_time:104755ms step_avg:95.06ms +step:1103/1670 train_time:104849ms step_avg:95.06ms +step:1104/1670 train_time:104943ms step_avg:95.06ms +step:1105/1670 train_time:105037ms step_avg:95.06ms +step:1106/1670 train_time:105130ms step_avg:95.05ms +step:1107/1670 train_time:105224ms step_avg:95.05ms +step:1108/1670 train_time:105318ms step_avg:95.05ms +step:1109/1670 train_time:105412ms step_avg:95.05ms +step:1110/1670 train_time:105506ms step_avg:95.05ms 
+step:1111/1670 train_time:105599ms step_avg:95.05ms +step:1112/1670 train_time:105693ms step_avg:95.05ms +step:1113/1670 train_time:105786ms step_avg:95.05ms +step:1114/1670 train_time:105879ms step_avg:95.04ms +step:1115/1670 train_time:106074ms step_avg:95.13ms +step:1116/1670 train_time:106152ms step_avg:95.12ms +step:1117/1670 train_time:106245ms step_avg:95.12ms +step:1118/1670 train_time:106338ms step_avg:95.11ms +step:1119/1670 train_time:106431ms step_avg:95.11ms +step:1120/1670 train_time:106524ms step_avg:95.11ms +step:1121/1670 train_time:106618ms step_avg:95.11ms +step:1122/1670 train_time:106711ms step_avg:95.11ms +step:1123/1670 train_time:106804ms step_avg:95.11ms +step:1124/1670 train_time:106897ms step_avg:95.10ms +step:1125/1670 train_time:106995ms step_avg:95.11ms +step:1125/1670 val_loss:3.4180 train_time:107091ms step_avg:95.19ms +step:1126/1670 train_time:107115ms step_avg:95.13ms +step:1127/1670 train_time:107195ms step_avg:95.12ms +step:1128/1670 train_time:107296ms step_avg:95.12ms +step:1129/1670 train_time:107391ms step_avg:95.12ms +step:1130/1670 train_time:107484ms step_avg:95.12ms +step:1131/1670 train_time:107578ms step_avg:95.12ms +step:1132/1670 train_time:107671ms step_avg:95.12ms +step:1133/1670 train_time:107764ms step_avg:95.11ms +step:1134/1670 train_time:107858ms step_avg:95.11ms +step:1135/1670 train_time:107951ms step_avg:95.11ms +step:1136/1670 train_time:108045ms step_avg:95.11ms +step:1137/1670 train_time:108140ms step_avg:95.11ms +step:1138/1670 train_time:108238ms step_avg:95.11ms +step:1139/1670 train_time:108335ms step_avg:95.11ms +step:1140/1670 train_time:108430ms step_avg:95.11ms +step:1141/1670 train_time:108524ms step_avg:95.11ms +step:1142/1670 train_time:108618ms step_avg:95.11ms +step:1143/1670 train_time:108712ms step_avg:95.11ms +step:1144/1670 train_time:108805ms step_avg:95.11ms +step:1145/1670 train_time:108898ms step_avg:95.11ms +step:1146/1670 train_time:108993ms step_avg:95.11ms +step:1147/1670 train_time:109087ms step_avg:95.11ms +step:1148/1670 train_time:109183ms step_avg:95.11ms +step:1149/1670 train_time:109278ms step_avg:95.11ms +step:1150/1670 train_time:109374ms step_avg:95.11ms +step:1151/1670 train_time:109469ms step_avg:95.11ms +step:1152/1670 train_time:109563ms step_avg:95.11ms +step:1153/1670 train_time:109657ms step_avg:95.11ms +step:1154/1670 train_time:109750ms step_avg:95.10ms +step:1155/1670 train_time:109844ms step_avg:95.10ms +step:1156/1670 train_time:109938ms step_avg:95.10ms +step:1157/1670 train_time:110032ms step_avg:95.10ms +step:1158/1670 train_time:110128ms step_avg:95.10ms +step:1159/1670 train_time:110223ms step_avg:95.10ms +step:1160/1670 train_time:110318ms step_avg:95.10ms +step:1161/1670 train_time:110414ms step_avg:95.10ms +step:1162/1670 train_time:110509ms step_avg:95.10ms +step:1163/1670 train_time:110603ms step_avg:95.10ms +step:1164/1670 train_time:110698ms step_avg:95.10ms +step:1165/1670 train_time:110791ms step_avg:95.10ms +step:1166/1670 train_time:110886ms step_avg:95.10ms +step:1167/1670 train_time:110979ms step_avg:95.10ms +step:1168/1670 train_time:111073ms step_avg:95.10ms +step:1169/1670 train_time:111168ms step_avg:95.10ms +step:1170/1670 train_time:111263ms step_avg:95.10ms +step:1171/1670 train_time:111358ms step_avg:95.10ms +step:1172/1670 train_time:111453ms step_avg:95.10ms +step:1173/1670 train_time:111548ms step_avg:95.10ms +step:1174/1670 train_time:111642ms step_avg:95.10ms +step:1175/1670 train_time:111736ms step_avg:95.09ms +step:1176/1670 train_time:111830ms 
step_avg:95.09ms +step:1177/1670 train_time:111924ms step_avg:95.09ms +step:1178/1670 train_time:112018ms step_avg:95.09ms +step:1179/1670 train_time:112113ms step_avg:95.09ms +step:1180/1670 train_time:112208ms step_avg:95.09ms +step:1181/1670 train_time:112303ms step_avg:95.09ms +step:1182/1670 train_time:112398ms step_avg:95.09ms +step:1183/1670 train_time:112493ms step_avg:95.09ms +step:1184/1670 train_time:112587ms step_avg:95.09ms +step:1185/1670 train_time:112682ms step_avg:95.09ms +step:1186/1670 train_time:112777ms step_avg:95.09ms +step:1187/1670 train_time:112870ms step_avg:95.09ms +step:1188/1670 train_time:112964ms step_avg:95.09ms +step:1189/1670 train_time:113059ms step_avg:95.09ms +step:1190/1670 train_time:113153ms step_avg:95.09ms +step:1191/1670 train_time:113248ms step_avg:95.09ms +step:1192/1670 train_time:113344ms step_avg:95.09ms +step:1193/1670 train_time:113439ms step_avg:95.09ms +step:1194/1670 train_time:113534ms step_avg:95.09ms +step:1195/1670 train_time:113629ms step_avg:95.09ms +step:1196/1670 train_time:113724ms step_avg:95.09ms +step:1197/1670 train_time:113819ms step_avg:95.09ms +step:1198/1670 train_time:113913ms step_avg:95.09ms +step:1199/1670 train_time:114007ms step_avg:95.09ms +step:1200/1670 train_time:114102ms step_avg:95.09ms +step:1201/1670 train_time:114196ms step_avg:95.08ms +step:1202/1670 train_time:114292ms step_avg:95.08ms +step:1203/1670 train_time:114387ms step_avg:95.08ms +step:1204/1670 train_time:114481ms step_avg:95.08ms +step:1205/1670 train_time:114576ms step_avg:95.08ms +step:1206/1670 train_time:114670ms step_avg:95.08ms +step:1207/1670 train_time:114765ms step_avg:95.08ms +step:1208/1670 train_time:114859ms step_avg:95.08ms +step:1209/1670 train_time:114954ms step_avg:95.08ms +step:1210/1670 train_time:115048ms step_avg:95.08ms +step:1211/1670 train_time:115142ms step_avg:95.08ms +step:1212/1670 train_time:115237ms step_avg:95.08ms +step:1213/1670 train_time:115332ms step_avg:95.08ms +step:1214/1670 train_time:115427ms step_avg:95.08ms +step:1215/1670 train_time:115520ms step_avg:95.08ms +step:1216/1670 train_time:115614ms step_avg:95.08ms +step:1217/1670 train_time:115708ms step_avg:95.08ms +step:1218/1670 train_time:115804ms step_avg:95.08ms +step:1219/1670 train_time:115899ms step_avg:95.08ms +step:1220/1670 train_time:115993ms step_avg:95.08ms +step:1221/1670 train_time:116088ms step_avg:95.08ms +step:1222/1670 train_time:116183ms step_avg:95.08ms +step:1223/1670 train_time:116277ms step_avg:95.07ms +step:1224/1670 train_time:116372ms step_avg:95.08ms +step:1225/1670 train_time:116467ms step_avg:95.08ms +step:1226/1670 train_time:116561ms step_avg:95.07ms +step:1227/1670 train_time:116655ms step_avg:95.07ms +step:1228/1670 train_time:116750ms step_avg:95.07ms +step:1229/1670 train_time:116845ms step_avg:95.07ms +step:1230/1670 train_time:116939ms step_avg:95.07ms +step:1231/1670 train_time:117034ms step_avg:95.07ms +step:1232/1670 train_time:117129ms step_avg:95.07ms +step:1233/1670 train_time:117224ms step_avg:95.07ms +step:1234/1670 train_time:117319ms step_avg:95.07ms +step:1235/1670 train_time:117414ms step_avg:95.07ms +step:1236/1670 train_time:117508ms step_avg:95.07ms +step:1237/1670 train_time:117602ms step_avg:95.07ms +step:1238/1670 train_time:117697ms step_avg:95.07ms +step:1239/1670 train_time:117792ms step_avg:95.07ms +step:1240/1670 train_time:117887ms step_avg:95.07ms +step:1241/1670 train_time:117982ms step_avg:95.07ms +step:1242/1670 train_time:118077ms step_avg:95.07ms +step:1243/1670 train_time:118172ms 
step_avg:95.07ms +step:1244/1670 train_time:118266ms step_avg:95.07ms +step:1245/1670 train_time:118362ms step_avg:95.07ms +step:1246/1670 train_time:118456ms step_avg:95.07ms +step:1247/1670 train_time:118550ms step_avg:95.07ms +step:1248/1670 train_time:118646ms step_avg:95.07ms +step:1249/1670 train_time:118740ms step_avg:95.07ms +step:1250/1670 train_time:118834ms step_avg:95.07ms +step:1250/1670 val_loss:3.3783 train_time:118926ms step_avg:95.14ms +step:1251/1670 train_time:118951ms step_avg:95.08ms +step:1252/1670 train_time:119028ms step_avg:95.07ms +step:1253/1670 train_time:119129ms step_avg:95.07ms +step:1254/1670 train_time:119225ms step_avg:95.08ms +step:1255/1670 train_time:119319ms step_avg:95.07ms +step:1256/1670 train_time:119413ms step_avg:95.07ms +step:1257/1670 train_time:119505ms step_avg:95.07ms +step:1258/1670 train_time:119599ms step_avg:95.07ms +step:1259/1670 train_time:119693ms step_avg:95.07ms +step:1260/1670 train_time:119786ms step_avg:95.07ms +step:1261/1670 train_time:119879ms step_avg:95.07ms +step:1262/1670 train_time:119974ms step_avg:95.07ms +step:1263/1670 train_time:120071ms step_avg:95.07ms +step:1264/1670 train_time:120167ms step_avg:95.07ms +step:1265/1670 train_time:120262ms step_avg:95.07ms +step:1266/1670 train_time:120356ms step_avg:95.07ms +step:1267/1670 train_time:120451ms step_avg:95.07ms +step:1268/1670 train_time:120545ms step_avg:95.07ms +step:1269/1670 train_time:120638ms step_avg:95.07ms +step:1270/1670 train_time:120732ms step_avg:95.06ms +step:1271/1670 train_time:120825ms step_avg:95.06ms +step:1272/1670 train_time:120919ms step_avg:95.06ms +step:1273/1670 train_time:121013ms step_avg:95.06ms +step:1274/1670 train_time:121465ms step_avg:95.34ms +step:1275/1670 train_time:121536ms step_avg:95.32ms +step:1276/1670 train_time:121629ms step_avg:95.32ms +step:1277/1670 train_time:121722ms step_avg:95.32ms +step:1278/1670 train_time:121815ms step_avg:95.32ms +step:1279/1670 train_time:121909ms step_avg:95.32ms +step:1280/1670 train_time:122002ms step_avg:95.31ms +step:1281/1670 train_time:122096ms step_avg:95.31ms +step:1282/1670 train_time:122190ms step_avg:95.31ms +step:1283/1670 train_time:122283ms step_avg:95.31ms +step:1284/1670 train_time:122380ms step_avg:95.31ms +step:1285/1670 train_time:122478ms step_avg:95.31ms +step:1286/1670 train_time:122576ms step_avg:95.32ms +step:1287/1670 train_time:122670ms step_avg:95.31ms +step:1288/1670 train_time:122765ms step_avg:95.31ms +step:1289/1670 train_time:122858ms step_avg:95.31ms +step:1290/1670 train_time:122952ms step_avg:95.31ms +step:1291/1670 train_time:123047ms step_avg:95.31ms +step:1292/1670 train_time:123140ms step_avg:95.31ms +step:1293/1670 train_time:123233ms step_avg:95.31ms +step:1294/1670 train_time:123329ms step_avg:95.31ms +step:1295/1670 train_time:123425ms step_avg:95.31ms +step:1296/1670 train_time:123522ms step_avg:95.31ms +step:1297/1670 train_time:123617ms step_avg:95.31ms +step:1298/1670 train_time:123712ms step_avg:95.31ms +step:1299/1670 train_time:123807ms step_avg:95.31ms +step:1300/1670 train_time:123901ms step_avg:95.31ms +step:1301/1670 train_time:123995ms step_avg:95.31ms +step:1302/1670 train_time:124089ms step_avg:95.31ms +step:1303/1670 train_time:124182ms step_avg:95.31ms +step:1304/1670 train_time:124277ms step_avg:95.30ms +step:1305/1670 train_time:124373ms step_avg:95.30ms +step:1306/1670 train_time:124468ms step_avg:95.31ms +step:1307/1670 train_time:124564ms step_avg:95.30ms +step:1308/1670 train_time:124659ms step_avg:95.30ms +step:1309/1670 
train_time:124754ms step_avg:95.30ms +step:1310/1670 train_time:124849ms step_avg:95.30ms +step:1311/1670 train_time:124942ms step_avg:95.30ms +step:1312/1670 train_time:125037ms step_avg:95.30ms +step:1313/1670 train_time:125131ms step_avg:95.30ms +step:1314/1670 train_time:125225ms step_avg:95.30ms +step:1315/1670 train_time:125320ms step_avg:95.30ms +step:1316/1670 train_time:125415ms step_avg:95.30ms +step:1317/1670 train_time:125510ms step_avg:95.30ms +step:1318/1670 train_time:125605ms step_avg:95.30ms +step:1319/1670 train_time:125700ms step_avg:95.30ms +step:1320/1670 train_time:125795ms step_avg:95.30ms +step:1321/1670 train_time:125890ms step_avg:95.30ms +step:1322/1670 train_time:125984ms step_avg:95.30ms +step:1323/1670 train_time:126078ms step_avg:95.30ms +step:1324/1670 train_time:126171ms step_avg:95.30ms +step:1325/1670 train_time:126266ms step_avg:95.29ms +step:1326/1670 train_time:126361ms step_avg:95.29ms +step:1327/1670 train_time:126455ms step_avg:95.29ms +step:1328/1670 train_time:126551ms step_avg:95.29ms +step:1329/1670 train_time:126645ms step_avg:95.29ms +step:1330/1670 train_time:126741ms step_avg:95.29ms +step:1331/1670 train_time:126835ms step_avg:95.29ms +step:1332/1670 train_time:126930ms step_avg:95.29ms +step:1333/1670 train_time:127024ms step_avg:95.29ms +step:1334/1670 train_time:127118ms step_avg:95.29ms +step:1335/1670 train_time:127212ms step_avg:95.29ms +step:1336/1670 train_time:127307ms step_avg:95.29ms +step:1337/1670 train_time:127403ms step_avg:95.29ms +step:1338/1670 train_time:127497ms step_avg:95.29ms +step:1339/1670 train_time:127593ms step_avg:95.29ms +step:1340/1670 train_time:127688ms step_avg:95.29ms +step:1341/1670 train_time:127782ms step_avg:95.29ms +step:1342/1670 train_time:127877ms step_avg:95.29ms +step:1343/1670 train_time:127972ms step_avg:95.29ms +step:1344/1670 train_time:128066ms step_avg:95.29ms +step:1345/1670 train_time:128161ms step_avg:95.29ms +step:1346/1670 train_time:128255ms step_avg:95.29ms +step:1347/1670 train_time:128349ms step_avg:95.29ms +step:1348/1670 train_time:128444ms step_avg:95.28ms +step:1349/1670 train_time:128538ms step_avg:95.28ms +step:1350/1670 train_time:128633ms step_avg:95.28ms +step:1351/1670 train_time:128728ms step_avg:95.28ms +step:1352/1670 train_time:128822ms step_avg:95.28ms +step:1353/1670 train_time:128917ms step_avg:95.28ms +step:1354/1670 train_time:129012ms step_avg:95.28ms +step:1355/1670 train_time:129105ms step_avg:95.28ms +step:1356/1670 train_time:129200ms step_avg:95.28ms +step:1357/1670 train_time:129295ms step_avg:95.28ms +step:1358/1670 train_time:129388ms step_avg:95.28ms +step:1359/1670 train_time:129482ms step_avg:95.28ms +step:1360/1670 train_time:129577ms step_avg:95.28ms +step:1361/1670 train_time:129672ms step_avg:95.28ms +step:1362/1670 train_time:129766ms step_avg:95.28ms +step:1363/1670 train_time:129861ms step_avg:95.28ms +step:1364/1670 train_time:129955ms step_avg:95.28ms +step:1365/1670 train_time:130049ms step_avg:95.27ms +step:1366/1670 train_time:130144ms step_avg:95.27ms +step:1367/1670 train_time:130238ms step_avg:95.27ms +step:1368/1670 train_time:130333ms step_avg:95.27ms +step:1369/1670 train_time:130427ms step_avg:95.27ms +step:1370/1670 train_time:130523ms step_avg:95.27ms +step:1371/1670 train_time:130618ms step_avg:95.27ms +step:1372/1670 train_time:130713ms step_avg:95.27ms +step:1373/1670 train_time:130808ms step_avg:95.27ms +step:1374/1670 train_time:130902ms step_avg:95.27ms +step:1375/1670 train_time:130997ms step_avg:95.27ms +step:1375/1670 
val_loss:3.3447 train_time:131091ms step_avg:95.34ms +step:1376/1670 train_time:131116ms step_avg:95.29ms +step:1377/1670 train_time:131193ms step_avg:95.27ms +step:1378/1670 train_time:131295ms step_avg:95.28ms +step:1379/1670 train_time:131392ms step_avg:95.28ms +step:1380/1670 train_time:131486ms step_avg:95.28ms +step:1381/1670 train_time:131580ms step_avg:95.28ms +step:1382/1670 train_time:131672ms step_avg:95.28ms +step:1383/1670 train_time:131767ms step_avg:95.28ms +step:1384/1670 train_time:131862ms step_avg:95.28ms +step:1385/1670 train_time:131955ms step_avg:95.27ms +step:1386/1670 train_time:132050ms step_avg:95.27ms +step:1387/1670 train_time:132149ms step_avg:95.28ms +step:1388/1670 train_time:132249ms step_avg:95.28ms +step:1389/1670 train_time:132345ms step_avg:95.28ms +step:1390/1670 train_time:132439ms step_avg:95.28ms +step:1391/1670 train_time:132532ms step_avg:95.28ms +step:1392/1670 train_time:132626ms step_avg:95.28ms +step:1393/1670 train_time:132719ms step_avg:95.28ms +step:1394/1670 train_time:132813ms step_avg:95.27ms +step:1395/1670 train_time:132906ms step_avg:95.27ms +step:1396/1670 train_time:133001ms step_avg:95.27ms +step:1397/1670 train_time:133096ms step_avg:95.27ms +step:1398/1670 train_time:133192ms step_avg:95.27ms +step:1399/1670 train_time:133288ms step_avg:95.27ms +step:1400/1670 train_time:133382ms step_avg:95.27ms +step:1401/1670 train_time:133477ms step_avg:95.27ms +step:1402/1670 train_time:133571ms step_avg:95.27ms +step:1403/1670 train_time:133666ms step_avg:95.27ms +step:1404/1670 train_time:133760ms step_avg:95.27ms +step:1405/1670 train_time:133854ms step_avg:95.27ms +step:1406/1670 train_time:133948ms step_avg:95.27ms +step:1407/1670 train_time:134043ms step_avg:95.27ms +step:1408/1670 train_time:134138ms step_avg:95.27ms +step:1409/1670 train_time:134235ms step_avg:95.27ms +step:1410/1670 train_time:134330ms step_avg:95.27ms +step:1411/1670 train_time:134425ms step_avg:95.27ms +step:1412/1670 train_time:134519ms step_avg:95.27ms +step:1413/1670 train_time:134614ms step_avg:95.27ms +step:1414/1670 train_time:134708ms step_avg:95.27ms +step:1415/1670 train_time:134802ms step_avg:95.27ms +step:1416/1670 train_time:134897ms step_avg:95.27ms +step:1417/1670 train_time:134991ms step_avg:95.27ms +step:1418/1670 train_time:135086ms step_avg:95.26ms +step:1419/1670 train_time:135181ms step_avg:95.26ms +step:1420/1670 train_time:135276ms step_avg:95.26ms +step:1421/1670 train_time:135372ms step_avg:95.27ms +step:1422/1670 train_time:135466ms step_avg:95.26ms +step:1423/1670 train_time:135561ms step_avg:95.26ms +step:1424/1670 train_time:135656ms step_avg:95.26ms +step:1425/1670 train_time:135750ms step_avg:95.26ms +step:1426/1670 train_time:135844ms step_avg:95.26ms +step:1427/1670 train_time:135939ms step_avg:95.26ms +step:1428/1670 train_time:136033ms step_avg:95.26ms +step:1429/1670 train_time:136128ms step_avg:95.26ms +step:1430/1670 train_time:136223ms step_avg:95.26ms +step:1431/1670 train_time:136318ms step_avg:95.26ms +step:1432/1670 train_time:136413ms step_avg:95.26ms +step:1433/1670 train_time:136507ms step_avg:95.26ms +step:1434/1670 train_time:136602ms step_avg:95.26ms +step:1435/1670 train_time:136697ms step_avg:95.26ms +step:1436/1670 train_time:136792ms step_avg:95.26ms +step:1437/1670 train_time:136886ms step_avg:95.26ms +step:1438/1670 train_time:136980ms step_avg:95.26ms +step:1439/1670 train_time:137075ms step_avg:95.26ms +step:1440/1670 train_time:137169ms step_avg:95.26ms +step:1441/1670 train_time:137264ms step_avg:95.26ms 
+step:1442/1670 train_time:137358ms step_avg:95.26ms +step:1443/1670 train_time:137452ms step_avg:95.25ms +step:1444/1670 train_time:137547ms step_avg:95.25ms +step:1445/1670 train_time:137642ms step_avg:95.25ms +step:1446/1670 train_time:137736ms step_avg:95.25ms +step:1447/1670 train_time:137831ms step_avg:95.25ms +step:1448/1670 train_time:137927ms step_avg:95.25ms +step:1449/1670 train_time:138021ms step_avg:95.25ms +step:1450/1670 train_time:138115ms step_avg:95.25ms +step:1451/1670 train_time:138209ms step_avg:95.25ms +step:1452/1670 train_time:138304ms step_avg:95.25ms +step:1453/1670 train_time:138400ms step_avg:95.25ms +step:1454/1670 train_time:138495ms step_avg:95.25ms +step:1455/1670 train_time:138589ms step_avg:95.25ms +step:1456/1670 train_time:138684ms step_avg:95.25ms +step:1457/1670 train_time:138779ms step_avg:95.25ms +step:1458/1670 train_time:138873ms step_avg:95.25ms +step:1459/1670 train_time:138968ms step_avg:95.25ms +step:1460/1670 train_time:139063ms step_avg:95.25ms +step:1461/1670 train_time:139158ms step_avg:95.25ms +step:1462/1670 train_time:139252ms step_avg:95.25ms +step:1463/1670 train_time:139347ms step_avg:95.25ms +step:1464/1670 train_time:139442ms step_avg:95.25ms +step:1465/1670 train_time:139537ms step_avg:95.25ms +step:1466/1670 train_time:139632ms step_avg:95.25ms +step:1467/1670 train_time:139727ms step_avg:95.25ms +step:1468/1670 train_time:139822ms step_avg:95.25ms +step:1469/1670 train_time:139916ms step_avg:95.25ms +step:1470/1670 train_time:140011ms step_avg:95.25ms +step:1471/1670 train_time:140105ms step_avg:95.24ms +step:1472/1670 train_time:140200ms step_avg:95.24ms +step:1473/1670 train_time:140295ms step_avg:95.24ms +step:1474/1670 train_time:140390ms step_avg:95.24ms +step:1475/1670 train_time:140485ms step_avg:95.24ms +step:1476/1670 train_time:140579ms step_avg:95.24ms +step:1477/1670 train_time:140673ms step_avg:95.24ms +step:1478/1670 train_time:140768ms step_avg:95.24ms +step:1479/1670 train_time:140863ms step_avg:95.24ms +step:1480/1670 train_time:140958ms step_avg:95.24ms +step:1481/1670 train_time:141053ms step_avg:95.24ms +step:1482/1670 train_time:141148ms step_avg:95.24ms +step:1483/1670 train_time:141242ms step_avg:95.24ms +step:1484/1670 train_time:141336ms step_avg:95.24ms +step:1485/1670 train_time:141661ms step_avg:95.39ms +step:1486/1670 train_time:141856ms step_avg:95.46ms +step:1487/1670 train_time:141948ms step_avg:95.46ms +step:1488/1670 train_time:142042ms step_avg:95.46ms +step:1489/1670 train_time:142135ms step_avg:95.46ms +step:1490/1670 train_time:142229ms step_avg:95.46ms +step:1491/1670 train_time:142323ms step_avg:95.45ms +step:1492/1670 train_time:142416ms step_avg:95.45ms +step:1493/1670 train_time:142510ms step_avg:95.45ms +step:1494/1670 train_time:142604ms step_avg:95.45ms +step:1495/1670 train_time:142701ms step_avg:95.45ms +step:1496/1670 train_time:142798ms step_avg:95.45ms +step:1497/1670 train_time:142897ms step_avg:95.46ms +step:1498/1670 train_time:142992ms step_avg:95.45ms +step:1499/1670 train_time:143085ms step_avg:95.45ms +step:1500/1670 train_time:143179ms step_avg:95.45ms +step:1500/1670 val_loss:3.3142 train_time:143271ms step_avg:95.51ms +step:1501/1670 train_time:143295ms step_avg:95.47ms +step:1502/1670 train_time:143372ms step_avg:95.45ms +step:1503/1670 train_time:143470ms step_avg:95.46ms +step:1504/1670 train_time:143565ms step_avg:95.46ms +step:1505/1670 train_time:143660ms step_avg:95.46ms +step:1506/1670 train_time:143753ms step_avg:95.45ms +step:1507/1670 train_time:143846ms 
step_avg:95.45ms +step:1508/1670 train_time:143940ms step_avg:95.45ms +step:1509/1670 train_time:144034ms step_avg:95.45ms +step:1510/1670 train_time:144128ms step_avg:95.45ms +step:1511/1670 train_time:144223ms step_avg:95.45ms +step:1512/1670 train_time:144321ms step_avg:95.45ms +step:1513/1670 train_time:144419ms step_avg:95.45ms +step:1514/1670 train_time:144516ms step_avg:95.45ms +step:1515/1670 train_time:144611ms step_avg:95.45ms +step:1516/1670 train_time:144705ms step_avg:95.45ms +step:1517/1670 train_time:144798ms step_avg:95.45ms +step:1518/1670 train_time:144892ms step_avg:95.45ms +step:1519/1670 train_time:144985ms step_avg:95.45ms +step:1520/1670 train_time:145078ms step_avg:95.45ms +step:1521/1670 train_time:145173ms step_avg:95.45ms +step:1522/1670 train_time:145268ms step_avg:95.45ms +step:1523/1670 train_time:145364ms step_avg:95.45ms +step:1524/1670 train_time:145461ms step_avg:95.45ms +step:1525/1670 train_time:145556ms step_avg:95.45ms +step:1526/1670 train_time:145651ms step_avg:95.45ms +step:1527/1670 train_time:145745ms step_avg:95.45ms +step:1528/1670 train_time:145839ms step_avg:95.44ms +step:1529/1670 train_time:145933ms step_avg:95.44ms +step:1530/1670 train_time:146026ms step_avg:95.44ms +step:1531/1670 train_time:146120ms step_avg:95.44ms +step:1532/1670 train_time:146215ms step_avg:95.44ms +step:1533/1670 train_time:146312ms step_avg:95.44ms +step:1534/1670 train_time:146407ms step_avg:95.44ms +step:1535/1670 train_time:146504ms step_avg:95.44ms +step:1536/1670 train_time:146599ms step_avg:95.44ms +step:1537/1670 train_time:146694ms step_avg:95.44ms +step:1538/1670 train_time:146789ms step_avg:95.44ms +step:1539/1670 train_time:146883ms step_avg:95.44ms +step:1540/1670 train_time:146976ms step_avg:95.44ms +step:1541/1670 train_time:147070ms step_avg:95.44ms +step:1542/1670 train_time:147165ms step_avg:95.44ms +step:1543/1670 train_time:147260ms step_avg:95.44ms +step:1544/1670 train_time:147355ms step_avg:95.44ms +step:1545/1670 train_time:147451ms step_avg:95.44ms +step:1546/1670 train_time:147547ms step_avg:95.44ms +step:1547/1670 train_time:147642ms step_avg:95.44ms +step:1548/1670 train_time:147737ms step_avg:95.44ms +step:1549/1670 train_time:147832ms step_avg:95.44ms +step:1550/1670 train_time:147925ms step_avg:95.44ms +step:1551/1670 train_time:148019ms step_avg:95.43ms +step:1552/1670 train_time:148113ms step_avg:95.43ms +step:1553/1670 train_time:148208ms step_avg:95.43ms +step:1554/1670 train_time:148303ms step_avg:95.43ms +step:1555/1670 train_time:148398ms step_avg:95.43ms +step:1556/1670 train_time:148493ms step_avg:95.43ms +step:1557/1670 train_time:148588ms step_avg:95.43ms +step:1558/1670 train_time:148683ms step_avg:95.43ms +step:1559/1670 train_time:148777ms step_avg:95.43ms +step:1560/1670 train_time:148872ms step_avg:95.43ms +step:1561/1670 train_time:148966ms step_avg:95.43ms +step:1562/1670 train_time:149061ms step_avg:95.43ms +step:1563/1670 train_time:149155ms step_avg:95.43ms +step:1564/1670 train_time:149251ms step_avg:95.43ms +step:1565/1670 train_time:149346ms step_avg:95.43ms +step:1566/1670 train_time:149441ms step_avg:95.43ms +step:1567/1670 train_time:149536ms step_avg:95.43ms +step:1568/1670 train_time:149630ms step_avg:95.43ms +step:1569/1670 train_time:149725ms step_avg:95.43ms +step:1570/1670 train_time:149820ms step_avg:95.43ms +step:1571/1670 train_time:149914ms step_avg:95.43ms +step:1572/1670 train_time:150008ms step_avg:95.42ms +step:1573/1670 train_time:150103ms step_avg:95.42ms +step:1574/1670 train_time:150197ms 
step_avg:95.42ms +step:1575/1670 train_time:150291ms step_avg:95.42ms +step:1576/1670 train_time:150386ms step_avg:95.42ms +step:1577/1670 train_time:150481ms step_avg:95.42ms +step:1578/1670 train_time:150576ms step_avg:95.42ms +step:1579/1670 train_time:150671ms step_avg:95.42ms +step:1580/1670 train_time:150766ms step_avg:95.42ms +step:1581/1670 train_time:150861ms step_avg:95.42ms +step:1582/1670 train_time:150955ms step_avg:95.42ms +step:1583/1670 train_time:151049ms step_avg:95.42ms +step:1584/1670 train_time:151143ms step_avg:95.42ms +step:1585/1670 train_time:151238ms step_avg:95.42ms +step:1586/1670 train_time:151332ms step_avg:95.42ms +step:1587/1670 train_time:151427ms step_avg:95.42ms +step:1588/1670 train_time:151522ms step_avg:95.42ms +step:1589/1670 train_time:151617ms step_avg:95.42ms +step:1590/1670 train_time:151712ms step_avg:95.42ms +step:1591/1670 train_time:151806ms step_avg:95.42ms +step:1592/1670 train_time:151900ms step_avg:95.41ms +step:1593/1670 train_time:151994ms step_avg:95.41ms +step:1594/1670 train_time:152089ms step_avg:95.41ms +step:1595/1670 train_time:152183ms step_avg:95.41ms +step:1596/1670 train_time:152279ms step_avg:95.41ms +step:1597/1670 train_time:152373ms step_avg:95.41ms +step:1598/1670 train_time:152468ms step_avg:95.41ms +step:1599/1670 train_time:152565ms step_avg:95.41ms +step:1600/1670 train_time:152659ms step_avg:95.41ms +step:1601/1670 train_time:152753ms step_avg:95.41ms +step:1602/1670 train_time:152847ms step_avg:95.41ms +step:1603/1670 train_time:152942ms step_avg:95.41ms +step:1604/1670 train_time:153036ms step_avg:95.41ms +step:1605/1670 train_time:153131ms step_avg:95.41ms +step:1606/1670 train_time:153225ms step_avg:95.41ms +step:1607/1670 train_time:153320ms step_avg:95.41ms +step:1608/1670 train_time:153415ms step_avg:95.41ms +step:1609/1670 train_time:153511ms step_avg:95.41ms +step:1610/1670 train_time:153605ms step_avg:95.41ms +step:1611/1670 train_time:153701ms step_avg:95.41ms +step:1612/1670 train_time:153796ms step_avg:95.41ms +step:1613/1670 train_time:153891ms step_avg:95.41ms +step:1614/1670 train_time:153986ms step_avg:95.41ms +step:1615/1670 train_time:154081ms step_avg:95.41ms +step:1616/1670 train_time:154175ms step_avg:95.41ms +step:1617/1670 train_time:154269ms step_avg:95.40ms +step:1618/1670 train_time:154363ms step_avg:95.40ms +step:1619/1670 train_time:154458ms step_avg:95.40ms +step:1620/1670 train_time:154554ms step_avg:95.40ms +step:1621/1670 train_time:154650ms step_avg:95.40ms +step:1622/1670 train_time:154745ms step_avg:95.40ms +step:1623/1670 train_time:154839ms step_avg:95.40ms +step:1624/1670 train_time:154933ms step_avg:95.40ms +step:1625/1670 train_time:155027ms step_avg:95.40ms +step:1625/1670 val_loss:3.2894 train_time:155120ms step_avg:95.46ms +step:1626/1670 train_time:155144ms step_avg:95.41ms +step:1627/1670 train_time:155222ms step_avg:95.40ms +step:1628/1670 train_time:155323ms step_avg:95.41ms +step:1629/1670 train_time:155421ms step_avg:95.41ms +step:1630/1670 train_time:155517ms step_avg:95.41ms +step:1631/1670 train_time:155610ms step_avg:95.41ms +step:1632/1670 train_time:155703ms step_avg:95.41ms +step:1633/1670 train_time:155797ms step_avg:95.41ms +step:1634/1670 train_time:155890ms step_avg:95.40ms +step:1635/1670 train_time:155984ms step_avg:95.40ms +step:1636/1670 train_time:156078ms step_avg:95.40ms +step:1637/1670 train_time:156173ms step_avg:95.40ms +step:1638/1670 train_time:156269ms step_avg:95.40ms +step:1639/1670 train_time:156366ms step_avg:95.40ms +step:1640/1670 
train_time:156462ms step_avg:95.40ms +step:1641/1670 train_time:156558ms step_avg:95.40ms +step:1642/1670 train_time:156653ms step_avg:95.40ms +step:1643/1670 train_time:156746ms step_avg:95.40ms +step:1644/1670 train_time:156840ms step_avg:95.40ms +step:1645/1670 train_time:156933ms step_avg:95.40ms +step:1646/1670 train_time:157027ms step_avg:95.40ms +step:1647/1670 train_time:157121ms step_avg:95.40ms +step:1648/1670 train_time:157218ms step_avg:95.40ms +step:1649/1670 train_time:157314ms step_avg:95.40ms +step:1650/1670 train_time:157410ms step_avg:95.40ms +step:1651/1670 train_time:157505ms step_avg:95.40ms +step:1652/1670 train_time:157600ms step_avg:95.40ms +step:1653/1670 train_time:157695ms step_avg:95.40ms +step:1654/1670 train_time:157790ms step_avg:95.40ms +step:1655/1670 train_time:157884ms step_avg:95.40ms +step:1656/1670 train_time:157979ms step_avg:95.40ms +step:1657/1670 train_time:158072ms step_avg:95.40ms +step:1658/1670 train_time:158167ms step_avg:95.40ms +step:1659/1670 train_time:158261ms step_avg:95.40ms +step:1660/1670 train_time:158356ms step_avg:95.40ms +step:1661/1670 train_time:158451ms step_avg:95.40ms +step:1662/1670 train_time:158546ms step_avg:95.39ms +step:1663/1670 train_time:158640ms step_avg:95.39ms +step:1664/1670 train_time:158734ms step_avg:95.39ms +step:1665/1670 train_time:158828ms step_avg:95.39ms +step:1666/1670 train_time:158922ms step_avg:95.39ms +step:1667/1670 train_time:159017ms step_avg:95.39ms +step:1668/1670 train_time:159111ms step_avg:95.39ms +step:1669/1670 train_time:159205ms step_avg:95.39ms +step:1670/1670 train_time:159300ms step_avg:95.39ms +step:1670/1670 val_loss:3.2806 train_time:159468ms step_avg:95.49ms +peak memory allocated: 32712 MiB reserved: 47656 MiB diff --git a/records/091025_Yarn/61b04c65-2c0f-4d24-83e2-6035dfea1582.txt b/records/091025_Yarn/61b04c65-2c0f-4d24-83e2-6035dfea1582.txt new file mode 100644 index 000000000..4c448dd7c --- /dev/null +++ b/records/091025_Yarn/61b04c65-2c0f-4d24-83e2-6035dfea1582.txt @@ -0,0 +1,2863 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math + +from dataclasses import dataclass +from functools import lru_cache +from itertools import accumulate +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, 
dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + 
c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels 
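+ # A here is X @ X.mT from ns_line_1, which is symmetric, so
+ # C = alpha * A @ A.T + beta * A is symmetric as well: each program fills
+ # one triangular block of C and mirrors it across the diagonal, returning
+ # early for blocks strictly on the skipped side.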
+ pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
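+ # Sharding pattern: parameters of equal shape are dealt out round-robin, so
+ # rank r owns params[base_i + r] of each group. Gradients are averaged into
+ # the owning rank via async reduce_scatter; the owner applies momentum,
+ # Newton-Schulz orthogonalization and an aspect-ratio-scaled step, and
+ # all_gather then rebroadcasts the updated parameters to every rank.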
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
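# exp_avg / exp_avg_sq are Adam's first and second moment estimates,
+ # allocated lazily and only for this rank's shard of the parameter +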
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor): + assert cos.size(0) >= x_BTHD.size(-3) + cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +@dataclass +class AttnArgs: + ve: torch.Tensor + sa_lambdas: torch.Tensor + seqlens: torch.Tensor + bm_size: int + rotary_cos: torch.Tensor + rotary_sin: torch.Tensor + attn_scale: float + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate = CastedLinear(12, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, attn_args): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + # unpack attention args + rotary_cos, rotary_sin = attn_args.rotary_cos, attn_args.rotary_sin + ve, 
sa_lambdas = attn_args.ve, attn_args.sa_lambdas + seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin) + if ve is not None: + v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @ KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = sa_lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None + # skip MLP blocks for first MLP layer by @EmelyanenkoK + self.mlp = MLP(dim) if layer_idx != 0 else None + + def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), attn_args) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + 
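# three extra per-token embedding tables; each selected block mixes its table
+ # into the attention values as v = sa_lambdas[0] * v + sa_lambdas[1] * ve
+ # (see CausalSelfAttention.forward), reused in the 0,1,2 ... 0,1,2 pattern built in forward() +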
+        self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)])
+        self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)])
+        # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency.
+        # suggested to me by @Grad62304977. this originates from Karpathy's experiments.
+        use_fp8 = not os.environ.get("DISABLE_FP8", False)
+        self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448)
+        self.lm_head.weight.detach().zero_() # @Grad62304977
+        # Add learnable skip connection weights for decoder layers
+        assert num_layers % 2 == 0
+        pad = (-num_layers * 5) % dist.get_world_size()
+        self.scalars = nn.Parameter(torch.cat([
+            torch.ones(num_layers), # skip_weights
+            *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas
+            *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas
+            torch.ones(pad),
+        ]))
+        self.max_seq_len = max_seq_len
+        self.setup_yarn(head_dim)
+        # set learning rates
+        for param in self.embed.parameters():
+            param.lr_mul = 75.
+        for param in self.value_embeds.parameters():
+            param.lr_mul = 75.
+        self.lm_head.weight.lr_mul = 1.0
+        self.scalars.lr_mul = 5.0
+
+    def setup_yarn(self, head_dim: int):
+        # store single copy of rotary tensors
+        angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32)
+        # half-truncate RoPE by @YouJiacheng (w/ base freq tuning)
+        angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)])
+        t = torch.arange(self.max_seq_len, dtype=torch.float32)
+        theta = torch.outer(t, angular_freq)
+        self.rotary_cos = nn.Buffer(theta.cos(), persistent=False)
+        self.rotary_sin = nn.Buffer(theta.sin(), persistent=False)
+        self.angular_freq = angular_freq
+
+        # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd
+        windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate]))
+        scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])]
+        # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
+        attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor))
+        self.attn_scales = dict(zip(windows, attn_scales))
+
+    def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32):
+        rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi)
+        scaling_factor = old_window / new_window
+        interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1)
+        self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor)
+        t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device)
+        theta = torch.outer(t, self.angular_freq)
+        self.rotary_cos.copy_(theta.cos())
+        self.rotary_sin.copy_(theta.sin())
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int):
+        assert input_seq.ndim == 1
+
+        ve = [value_embed(input_seq) for value_embed in self.value_embeds]
+        # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
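+        # [editor note, illustrative] for num_layers=12 the line below produces
+        # [ve0, ve1, ve2, None, None, None, None, None, None, ve0, ve1, ve2],
+        # i.e. only the first three and last three blocks receive a token value embedding.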
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            attn_args = AttnArgs(
+                ve=ve[i],
+                sa_lambdas=sa_lambdas[i],
+                seqlens=seqlens,
+                bm_size=bm_sizes[i],
+                rotary_cos=self.rotary_cos,
+                rotary_sin=self.rotary_sin,
+                attn_scale=self.attn_scales[ws]
+            )
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, x0, lambdas[i], attn_args)
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    # use idx here: cur is unbound if the shard tail is hit on the first iteration
+                    raise StopIteration(f"Insufficient BOS ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
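+# [editor sketch, never called during the run] How BOSFinder output is consumed downstream:
+# each rank receives document-aligned (start, end) index pairs whose spans concatenate to
+# exactly num_tokens_local + 1 tokens, so inputs and targets can be offset by one position.
+def _bos_batch_sketch(tokens: Tensor, num_tokens_local: int, max_seq_len: int):
+    finder = BOSFinder(tokens, world_size=1)
+    starts, ends = finder.next_batch(num_tokens_local, max_seq_len)
+    buf = torch.cat([tokens[i:j] for i, j in zip(starts[0], ends[0])]) # num_tokens_local + 1 tokens
+    return buf[:-1], buf[1:] # (inputs, targets), shifted by one token
+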
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning of Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # trim the last document by one token to account for the _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
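+# [editor sketch, never called during the run] the generator above supports reconfiguration
+# mid-training via .send(): the yield expression returns the sent tuple, which updates
+# num_tokens, max_seq_len and grad_accum_steps, and .send() itself returns the first batch
+# built under the new settings. The loader must already have been started with next().
+def _resize_loader_sketch(loader, num_tokens: int, max_seq_len: int, grad_accum_steps: int):
+    inputs, targets, cum_seqlens = loader.send((num_tokens, max_seq_len, grad_accum_steps))
+    return inputs, targets, cum_seqlens
+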
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.5 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = f"yarn/{uuid.uuid4()}"
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+    ws_validate: int = 13 # increase final validation ws @classiclarryd
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    logfile = f"logs/{run_id}.txt"
+    os.makedirs(os.path.dirname(logfile), exist_ok=True) # run_id contains a subdirectory, so create the full path
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    head_dim=128,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
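+# [editor sketch, never called during the run] sanity check that the four groups above
+# partition the model's parameters: pairwise disjoint and jointly exhaustive.
+def _check_param_partition():
+    groups = [hidden_matrix_params, embed_params, scalar_params, head_params]
+    ids = [id(p) for group in groups for p in group]
+    assert len(ids) == len(set(ids)) == sum(1 for _ in model.parameters())
+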
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr(step: int):
+    x = step / args.num_iterations
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    return lr
+
+def get_ws(step: int):
+    if step == args.num_iterations:
+        return args.ws_validate
+    x = step / (1 + args.num_iterations)
+    assert 0 <= x < 1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return args.ws_schedule[ws_idx]
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 30
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+for step in range(warmup_steps):
+    inputs, targets, cum_seqlens = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, cum_seqlens, ws).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+#       Training and validation        #
+########################################
+
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+training_time_ms = 0
+# start the clock
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+# begin training
+train_steps = args.num_iterations
+ws = get_ws(0)
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+    new_ws = get_ws(step)
+    if new_ws != ws:
+        model.apply_yarn(ws, new_ws)
+        ws = new_ws
+
+    # --------------- VALIDATION SECTION -----------------
+    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        # stop the clock
+        torch.cuda.synchronize()
+        training_time_ms += 1000 * (time.perf_counter() - t0)
+        model.eval()
+        assert args.val_tokens % args.val_batch_size == 0
+        val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size
+        val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False)
+        val_loss = 0
+        with torch.no_grad():
+            for _ in range(val_steps):
+                inputs, targets, cum_seqlens = next(val_loader)
+                val_loss += model(inputs, targets, cum_seqlens, ws)
+        val_loss /= val_steps
+        del val_loader
+        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
+        print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 
1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Thu Sep 11 03:57:09 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | Off | +| N/A 32C P0 118W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | Off | +| N/A 33C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | Off | +| N/A 34C P0 117W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | Off | +| N/A 31C P0 117W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | Off | +| N/A 31C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | Off | +| N/A 34C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | Off | +| N/A 33C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | Off | +| N/A 32C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 61015 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 61016 C /usr/bin/python3 614MiB | +| 0 N/A N/A 61017 C /usr/bin/python3 614MiB | +| 0 N/A N/A 61018 C /usr/bin/python3 614MiB | +| 0 N/A N/A 61019 C /usr/bin/python3 614MiB | +| 0 N/A N/A 61020 C /usr/bin/python3 614MiB | +| 0 N/A N/A 61021 C /usr/bin/python3 614MiB | +| 0 N/A N/A 61022 C /usr/bin/python3 614MiB | +| 1 N/A N/A 61016 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 61017 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 61018 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 61019 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 61020 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 61021 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 61022 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:453ms step_avg:453.25ms +step:2/1670 train_time:477ms step_avg:238.73ms +step:3/1670 train_time:545ms step_avg:181.74ms +step:4/1670 train_time:636ms step_avg:158.90ms +step:5/1670 train_time:727ms step_avg:145.42ms +step:6/1670 train_time:818ms step_avg:136.41ms +step:7/1670 train_time:910ms step_avg:130.05ms 
+step:8/1670 train_time:1002ms step_avg:125.23ms +step:9/1670 train_time:1094ms step_avg:121.53ms +step:10/1670 train_time:1185ms step_avg:118.54ms +step:11/1670 train_time:1277ms step_avg:116.13ms +step:12/1670 train_time:1371ms step_avg:114.23ms +step:13/1670 train_time:1468ms step_avg:112.93ms +step:14/1670 train_time:1561ms step_avg:111.48ms +step:15/1670 train_time:1654ms step_avg:110.26ms +step:16/1670 train_time:1746ms step_avg:109.13ms +step:17/1670 train_time:1838ms step_avg:108.10ms +step:18/1670 train_time:1930ms step_avg:107.23ms +step:19/1670 train_time:2022ms step_avg:106.45ms +step:20/1670 train_time:2115ms step_avg:105.77ms +step:21/1670 train_time:2208ms step_avg:105.12ms +step:22/1670 train_time:2300ms step_avg:104.57ms +step:23/1670 train_time:2394ms step_avg:104.07ms +step:24/1670 train_time:2487ms step_avg:103.62ms +step:25/1670 train_time:2580ms step_avg:103.20ms +step:26/1670 train_time:2674ms step_avg:102.84ms +step:27/1670 train_time:2767ms step_avg:102.50ms +step:28/1670 train_time:2859ms step_avg:102.12ms +step:29/1670 train_time:2952ms step_avg:101.79ms +step:30/1670 train_time:3045ms step_avg:101.49ms +step:31/1670 train_time:3137ms step_avg:101.19ms +step:32/1670 train_time:3230ms step_avg:100.95ms +step:33/1670 train_time:3324ms step_avg:100.72ms +step:34/1670 train_time:3417ms step_avg:100.50ms +step:35/1670 train_time:3510ms step_avg:100.29ms +step:36/1670 train_time:3604ms step_avg:100.11ms +step:37/1670 train_time:3696ms step_avg:99.90ms +step:38/1670 train_time:3789ms step_avg:99.72ms +step:39/1670 train_time:3881ms step_avg:99.53ms +step:40/1670 train_time:3974ms step_avg:99.36ms +step:41/1670 train_time:4067ms step_avg:99.20ms +step:42/1670 train_time:4160ms step_avg:99.05ms +step:43/1670 train_time:4253ms step_avg:98.90ms +step:44/1670 train_time:4345ms step_avg:98.75ms +step:45/1670 train_time:4437ms step_avg:98.60ms +step:46/1670 train_time:4531ms step_avg:98.49ms +step:47/1670 train_time:4624ms step_avg:98.39ms +step:48/1670 train_time:4716ms step_avg:98.25ms +step:49/1670 train_time:4808ms step_avg:98.13ms +step:50/1670 train_time:4901ms step_avg:98.02ms +step:51/1670 train_time:4993ms step_avg:97.91ms +step:52/1670 train_time:5086ms step_avg:97.81ms +step:53/1670 train_time:5179ms step_avg:97.71ms +step:54/1670 train_time:5272ms step_avg:97.62ms +step:55/1670 train_time:5364ms step_avg:97.52ms +step:56/1670 train_time:5456ms step_avg:97.43ms +step:57/1670 train_time:5550ms step_avg:97.37ms +step:58/1670 train_time:5642ms step_avg:97.28ms +step:59/1670 train_time:5735ms step_avg:97.20ms +step:60/1670 train_time:5828ms step_avg:97.13ms +step:61/1670 train_time:5920ms step_avg:97.06ms +step:62/1670 train_time:6012ms step_avg:96.97ms +step:63/1670 train_time:6105ms step_avg:96.91ms +step:64/1670 train_time:6198ms step_avg:96.84ms +step:65/1670 train_time:6291ms step_avg:96.78ms +step:66/1670 train_time:6383ms step_avg:96.71ms +step:67/1670 train_time:6475ms step_avg:96.65ms +step:68/1670 train_time:6568ms step_avg:96.59ms +step:69/1670 train_time:6661ms step_avg:96.53ms +step:70/1670 train_time:6753ms step_avg:96.48ms +step:71/1670 train_time:6846ms step_avg:96.42ms +step:72/1670 train_time:6939ms step_avg:96.37ms +step:73/1670 train_time:7031ms step_avg:96.32ms +step:74/1670 train_time:7125ms step_avg:96.28ms +step:75/1670 train_time:7217ms step_avg:96.22ms +step:76/1670 train_time:7309ms step_avg:96.17ms +step:77/1670 train_time:7402ms step_avg:96.12ms +step:78/1670 train_time:7493ms step_avg:96.07ms +step:79/1670 train_time:7586ms step_avg:96.02ms 
+step:80/1670 train_time:7679ms step_avg:95.99ms +step:81/1670 train_time:7772ms step_avg:95.95ms +step:82/1670 train_time:7864ms step_avg:95.91ms +step:83/1670 train_time:7956ms step_avg:95.86ms +step:84/1670 train_time:8050ms step_avg:95.83ms +step:85/1670 train_time:8143ms step_avg:95.80ms +step:86/1670 train_time:8235ms step_avg:95.76ms +step:87/1670 train_time:8328ms step_avg:95.73ms +step:88/1670 train_time:8422ms step_avg:95.71ms +step:89/1670 train_time:8514ms step_avg:95.66ms +step:90/1670 train_time:8607ms step_avg:95.63ms +step:91/1670 train_time:8699ms step_avg:95.60ms +step:92/1670 train_time:8792ms step_avg:95.57ms +step:93/1670 train_time:8885ms step_avg:95.54ms +step:94/1670 train_time:8978ms step_avg:95.51ms +step:95/1670 train_time:9070ms step_avg:95.47ms +step:96/1670 train_time:9162ms step_avg:95.44ms +step:97/1670 train_time:9255ms step_avg:95.41ms +step:98/1670 train_time:9348ms step_avg:95.39ms +step:99/1670 train_time:9442ms step_avg:95.37ms +step:100/1670 train_time:9533ms step_avg:95.33ms +step:101/1670 train_time:9625ms step_avg:95.30ms +step:102/1670 train_time:9717ms step_avg:95.27ms +step:103/1670 train_time:9809ms step_avg:95.24ms +step:104/1670 train_time:9903ms step_avg:95.22ms +step:105/1670 train_time:9995ms step_avg:95.19ms +step:106/1670 train_time:10087ms step_avg:95.16ms +step:107/1670 train_time:10180ms step_avg:95.14ms +step:108/1670 train_time:10272ms step_avg:95.12ms +step:109/1670 train_time:10365ms step_avg:95.09ms +step:110/1670 train_time:10458ms step_avg:95.07ms +step:111/1670 train_time:10550ms step_avg:95.05ms +step:112/1670 train_time:10643ms step_avg:95.02ms +step:113/1670 train_time:10735ms step_avg:95.00ms +step:114/1670 train_time:10827ms step_avg:94.97ms +step:115/1670 train_time:10921ms step_avg:94.97ms +step:116/1670 train_time:11015ms step_avg:94.95ms +step:117/1670 train_time:11107ms step_avg:94.93ms +step:118/1670 train_time:11198ms step_avg:94.90ms +step:119/1670 train_time:11291ms step_avg:94.88ms +step:120/1670 train_time:11383ms step_avg:94.86ms +step:121/1670 train_time:11476ms step_avg:94.84ms +step:122/1670 train_time:11568ms step_avg:94.82ms +step:123/1670 train_time:11660ms step_avg:94.80ms +step:124/1670 train_time:11753ms step_avg:94.78ms +step:125/1670 train_time:11845ms step_avg:94.76ms +step:125/1670 val_loss:4.2943 train_time:11936ms step_avg:95.49ms +step:126/1670 train_time:11961ms step_avg:94.93ms +step:127/1670 train_time:12034ms step_avg:94.75ms +step:128/1670 train_time:12137ms step_avg:94.82ms +step:129/1670 train_time:12235ms step_avg:94.84ms +step:130/1670 train_time:12327ms step_avg:94.82ms +step:131/1670 train_time:12419ms step_avg:94.80ms +step:132/1670 train_time:12510ms step_avg:94.78ms +step:133/1670 train_time:12602ms step_avg:94.75ms +step:134/1670 train_time:12693ms step_avg:94.73ms +step:135/1670 train_time:12785ms step_avg:94.71ms +step:136/1670 train_time:12877ms step_avg:94.68ms +step:137/1670 train_time:12968ms step_avg:94.66ms +step:138/1670 train_time:13062ms step_avg:94.65ms +step:139/1670 train_time:13156ms step_avg:94.65ms +step:140/1670 train_time:13249ms step_avg:94.64ms +step:141/1670 train_time:13343ms step_avg:94.63ms +step:142/1670 train_time:13435ms step_avg:94.61ms +step:143/1670 train_time:13528ms step_avg:94.60ms +step:144/1670 train_time:13620ms step_avg:94.58ms +step:145/1670 train_time:13711ms step_avg:94.56ms +step:146/1670 train_time:13803ms step_avg:94.54ms +step:147/1670 train_time:13895ms step_avg:94.52ms +step:148/1670 train_time:13987ms step_avg:94.50ms +step:149/1670 
train_time:14079ms step_avg:94.49ms +step:150/1670 train_time:14172ms step_avg:94.48ms +step:151/1670 train_time:14266ms step_avg:94.47ms +step:152/1670 train_time:14359ms step_avg:94.47ms +step:153/1670 train_time:14451ms step_avg:94.45ms +step:154/1670 train_time:14543ms step_avg:94.44ms +step:155/1670 train_time:14635ms step_avg:94.42ms +step:156/1670 train_time:14728ms step_avg:94.41ms +step:157/1670 train_time:14820ms step_avg:94.40ms +step:158/1670 train_time:14912ms step_avg:94.38ms +step:159/1670 train_time:15006ms step_avg:94.38ms +step:160/1670 train_time:15099ms step_avg:94.37ms +step:161/1670 train_time:15192ms step_avg:94.36ms +step:162/1670 train_time:15284ms step_avg:94.35ms +step:163/1670 train_time:15377ms step_avg:94.34ms +step:164/1670 train_time:15470ms step_avg:94.33ms +step:165/1670 train_time:15562ms step_avg:94.32ms +step:166/1670 train_time:15656ms step_avg:94.31ms +step:167/1670 train_time:15748ms step_avg:94.30ms +step:168/1670 train_time:15840ms step_avg:94.29ms +step:169/1670 train_time:15932ms step_avg:94.27ms +step:170/1670 train_time:16025ms step_avg:94.27ms +step:171/1670 train_time:16117ms step_avg:94.25ms +step:172/1670 train_time:16209ms step_avg:94.24ms +step:173/1670 train_time:16302ms step_avg:94.23ms +step:174/1670 train_time:16395ms step_avg:94.22ms +step:175/1670 train_time:16488ms step_avg:94.22ms +step:176/1670 train_time:16581ms step_avg:94.21ms +step:177/1670 train_time:16673ms step_avg:94.20ms +step:178/1670 train_time:16765ms step_avg:94.18ms +step:179/1670 train_time:16857ms step_avg:94.18ms +step:180/1670 train_time:16949ms step_avg:94.16ms +step:181/1670 train_time:17042ms step_avg:94.15ms +step:182/1670 train_time:17135ms step_avg:94.15ms +step:183/1670 train_time:17226ms step_avg:94.13ms +step:184/1670 train_time:17319ms step_avg:94.12ms +step:185/1670 train_time:17411ms step_avg:94.12ms +step:186/1670 train_time:17504ms step_avg:94.11ms +step:187/1670 train_time:17597ms step_avg:94.10ms +step:188/1670 train_time:17689ms step_avg:94.09ms +step:189/1670 train_time:17782ms step_avg:94.08ms +step:190/1670 train_time:17874ms step_avg:94.07ms +step:191/1670 train_time:17967ms step_avg:94.07ms +step:192/1670 train_time:18059ms step_avg:94.06ms +step:193/1670 train_time:18152ms step_avg:94.05ms +step:194/1670 train_time:18245ms step_avg:94.05ms +step:195/1670 train_time:18338ms step_avg:94.04ms +step:196/1670 train_time:18430ms step_avg:94.03ms +step:197/1670 train_time:18523ms step_avg:94.02ms +step:198/1670 train_time:18615ms step_avg:94.02ms +step:199/1670 train_time:18708ms step_avg:94.01ms +step:200/1670 train_time:18800ms step_avg:94.00ms +step:201/1670 train_time:18892ms step_avg:93.99ms +step:202/1670 train_time:18985ms step_avg:93.98ms +step:203/1670 train_time:19078ms step_avg:93.98ms +step:204/1670 train_time:19170ms step_avg:93.97ms +step:205/1670 train_time:19262ms step_avg:93.96ms +step:206/1670 train_time:19354ms step_avg:93.95ms +step:207/1670 train_time:19446ms step_avg:93.94ms +step:208/1670 train_time:19540ms step_avg:93.94ms +step:209/1670 train_time:19632ms step_avg:93.93ms +step:210/1670 train_time:19724ms step_avg:93.93ms +step:211/1670 train_time:19817ms step_avg:93.92ms +step:212/1670 train_time:19909ms step_avg:93.91ms +step:213/1670 train_time:20263ms step_avg:95.13ms +step:214/1670 train_time:20379ms step_avg:95.23ms +step:215/1670 train_time:20469ms step_avg:95.21ms +step:216/1670 train_time:20561ms step_avg:95.19ms +step:217/1670 train_time:20652ms step_avg:95.17ms +step:218/1670 train_time:20744ms step_avg:95.16ms 
+step:219/1670 train_time:20835ms step_avg:95.14ms +step:220/1670 train_time:20926ms step_avg:95.12ms +step:221/1670 train_time:21018ms step_avg:95.10ms +step:222/1670 train_time:21110ms step_avg:95.09ms +step:223/1670 train_time:21203ms step_avg:95.08ms +step:224/1670 train_time:21298ms step_avg:95.08ms +step:225/1670 train_time:21391ms step_avg:95.07ms +step:226/1670 train_time:21484ms step_avg:95.06ms +step:227/1670 train_time:21577ms step_avg:95.05ms +step:228/1670 train_time:21669ms step_avg:95.04ms +step:229/1670 train_time:21760ms step_avg:95.02ms +step:230/1670 train_time:21852ms step_avg:95.01ms +step:231/1670 train_time:21944ms step_avg:95.00ms +step:232/1670 train_time:22035ms step_avg:94.98ms +step:233/1670 train_time:22127ms step_avg:94.97ms +step:234/1670 train_time:22220ms step_avg:94.96ms +step:235/1670 train_time:22313ms step_avg:94.95ms +step:236/1670 train_time:22406ms step_avg:94.94ms +step:237/1670 train_time:22500ms step_avg:94.94ms +step:238/1670 train_time:22593ms step_avg:94.93ms +step:239/1670 train_time:22685ms step_avg:94.92ms +step:240/1670 train_time:22778ms step_avg:94.91ms +step:241/1670 train_time:22870ms step_avg:94.90ms +step:242/1670 train_time:22962ms step_avg:94.88ms +step:243/1670 train_time:23053ms step_avg:94.87ms +step:244/1670 train_time:23146ms step_avg:94.86ms +step:245/1670 train_time:23239ms step_avg:94.85ms +step:246/1670 train_time:23331ms step_avg:94.84ms +step:247/1670 train_time:23423ms step_avg:94.83ms +step:248/1670 train_time:23517ms step_avg:94.83ms +step:249/1670 train_time:23609ms step_avg:94.82ms +step:250/1670 train_time:23702ms step_avg:94.81ms +step:250/1670 val_loss:3.9692 train_time:23793ms step_avg:95.17ms +step:251/1670 train_time:23819ms step_avg:94.90ms +step:252/1670 train_time:23894ms step_avg:94.82ms +step:253/1670 train_time:23995ms step_avg:94.84ms +step:254/1670 train_time:24089ms step_avg:94.84ms +step:255/1670 train_time:24182ms step_avg:94.83ms +step:256/1670 train_time:24273ms step_avg:94.82ms +step:257/1670 train_time:24364ms step_avg:94.80ms +step:258/1670 train_time:24456ms step_avg:94.79ms +step:259/1670 train_time:24547ms step_avg:94.78ms +step:260/1670 train_time:24639ms step_avg:94.76ms +step:261/1670 train_time:24730ms step_avg:94.75ms +step:262/1670 train_time:24823ms step_avg:94.74ms +step:263/1670 train_time:24917ms step_avg:94.74ms +step:264/1670 train_time:25011ms step_avg:94.74ms +step:265/1670 train_time:25104ms step_avg:94.73ms +step:266/1670 train_time:25197ms step_avg:94.73ms +step:267/1670 train_time:25290ms step_avg:94.72ms +step:268/1670 train_time:25382ms step_avg:94.71ms +step:269/1670 train_time:25474ms step_avg:94.70ms +step:270/1670 train_time:25565ms step_avg:94.69ms +step:271/1670 train_time:25658ms step_avg:94.68ms +step:272/1670 train_time:25749ms step_avg:94.67ms +step:273/1670 train_time:25842ms step_avg:94.66ms +step:274/1670 train_time:25935ms step_avg:94.65ms +step:275/1670 train_time:26028ms step_avg:94.65ms +step:276/1670 train_time:26121ms step_avg:94.64ms +step:277/1670 train_time:26214ms step_avg:94.64ms +step:278/1670 train_time:26308ms step_avg:94.63ms +step:279/1670 train_time:26400ms step_avg:94.62ms +step:280/1670 train_time:26492ms step_avg:94.61ms +step:281/1670 train_time:26584ms step_avg:94.61ms +step:282/1670 train_time:26676ms step_avg:94.59ms +step:283/1670 train_time:26768ms step_avg:94.59ms +step:284/1670 train_time:26861ms step_avg:94.58ms +step:285/1670 train_time:26954ms step_avg:94.58ms +step:286/1670 train_time:27047ms step_avg:94.57ms +step:287/1670 
train_time:27139ms step_avg:94.56ms +step:288/1670 train_time:27232ms step_avg:94.55ms +step:289/1670 train_time:27324ms step_avg:94.55ms +step:290/1670 train_time:27416ms step_avg:94.54ms +step:291/1670 train_time:27509ms step_avg:94.53ms +step:292/1670 train_time:27601ms step_avg:94.52ms +step:293/1670 train_time:27693ms step_avg:94.52ms +step:294/1670 train_time:27786ms step_avg:94.51ms +step:295/1670 train_time:27879ms step_avg:94.50ms +step:296/1670 train_time:27972ms step_avg:94.50ms +step:297/1670 train_time:28065ms step_avg:94.49ms +step:298/1670 train_time:28157ms step_avg:94.49ms +step:299/1670 train_time:28250ms step_avg:94.48ms +step:300/1670 train_time:28343ms step_avg:94.48ms +step:301/1670 train_time:28435ms step_avg:94.47ms +step:302/1670 train_time:28527ms step_avg:94.46ms +step:303/1670 train_time:28619ms step_avg:94.45ms +step:304/1670 train_time:28712ms step_avg:94.45ms +step:305/1670 train_time:28804ms step_avg:94.44ms +step:306/1670 train_time:28896ms step_avg:94.43ms +step:307/1670 train_time:28989ms step_avg:94.43ms +step:308/1670 train_time:29081ms step_avg:94.42ms +step:309/1670 train_time:29174ms step_avg:94.41ms +step:310/1670 train_time:29267ms step_avg:94.41ms +step:311/1670 train_time:29359ms step_avg:94.40ms +step:312/1670 train_time:29451ms step_avg:94.40ms +step:313/1670 train_time:29544ms step_avg:94.39ms +step:314/1670 train_time:29636ms step_avg:94.38ms +step:315/1670 train_time:29730ms step_avg:94.38ms +step:316/1670 train_time:29823ms step_avg:94.38ms +step:317/1670 train_time:29915ms step_avg:94.37ms +step:318/1670 train_time:30006ms step_avg:94.36ms +step:319/1670 train_time:30099ms step_avg:94.35ms +step:320/1670 train_time:30191ms step_avg:94.35ms +step:321/1670 train_time:30284ms step_avg:94.34ms +step:322/1670 train_time:30376ms step_avg:94.34ms +step:323/1670 train_time:30468ms step_avg:94.33ms +step:324/1670 train_time:30560ms step_avg:94.32ms +step:325/1670 train_time:30653ms step_avg:94.32ms +step:326/1670 train_time:30746ms step_avg:94.31ms +step:327/1670 train_time:30839ms step_avg:94.31ms +step:328/1670 train_time:30931ms step_avg:94.30ms +step:329/1670 train_time:31024ms step_avg:94.30ms +step:330/1670 train_time:31116ms step_avg:94.29ms +step:331/1670 train_time:31209ms step_avg:94.29ms +step:332/1670 train_time:31301ms step_avg:94.28ms +step:333/1670 train_time:31394ms step_avg:94.28ms +step:334/1670 train_time:31487ms step_avg:94.27ms +step:335/1670 train_time:31579ms step_avg:94.26ms +step:336/1670 train_time:31671ms step_avg:94.26ms +step:337/1670 train_time:31763ms step_avg:94.25ms +step:338/1670 train_time:31856ms step_avg:94.25ms +step:339/1670 train_time:31948ms step_avg:94.24ms +step:340/1670 train_time:32041ms step_avg:94.24ms +step:341/1670 train_time:32133ms step_avg:94.23ms +step:342/1670 train_time:32226ms step_avg:94.23ms +step:343/1670 train_time:32318ms step_avg:94.22ms +step:344/1670 train_time:32411ms step_avg:94.22ms +step:345/1670 train_time:32502ms step_avg:94.21ms +step:346/1670 train_time:32595ms step_avg:94.20ms +step:347/1670 train_time:32687ms step_avg:94.20ms +step:348/1670 train_time:32779ms step_avg:94.19ms +step:349/1670 train_time:32872ms step_avg:94.19ms +step:350/1670 train_time:32964ms step_avg:94.18ms +step:351/1670 train_time:33056ms step_avg:94.18ms +step:352/1670 train_time:33148ms step_avg:94.17ms +step:353/1670 train_time:33241ms step_avg:94.17ms +step:354/1670 train_time:33334ms step_avg:94.16ms +step:355/1670 train_time:33426ms step_avg:94.16ms +step:356/1670 train_time:33518ms step_avg:94.15ms 
+step:357/1670 train_time:33611ms step_avg:94.15ms +step:358/1670 train_time:33702ms step_avg:94.14ms +step:359/1670 train_time:33795ms step_avg:94.14ms +step:360/1670 train_time:33887ms step_avg:94.13ms +step:361/1670 train_time:33980ms step_avg:94.13ms +step:362/1670 train_time:34073ms step_avg:94.12ms +step:363/1670 train_time:34164ms step_avg:94.12ms +step:364/1670 train_time:34258ms step_avg:94.11ms +step:365/1670 train_time:34350ms step_avg:94.11ms +step:366/1670 train_time:34443ms step_avg:94.11ms +step:367/1670 train_time:34535ms step_avg:94.10ms +step:368/1670 train_time:34628ms step_avg:94.10ms +step:369/1670 train_time:34720ms step_avg:94.09ms +step:370/1670 train_time:34812ms step_avg:94.09ms +step:371/1670 train_time:34905ms step_avg:94.08ms +step:372/1670 train_time:34997ms step_avg:94.08ms +step:373/1670 train_time:35090ms step_avg:94.07ms +step:374/1670 train_time:35182ms step_avg:94.07ms +step:375/1670 train_time:35274ms step_avg:94.06ms +step:375/1670 val_loss:3.8146 train_time:35365ms step_avg:94.31ms +step:376/1670 train_time:35391ms step_avg:94.12ms +step:377/1670 train_time:35464ms step_avg:94.07ms +step:378/1670 train_time:35562ms step_avg:94.08ms +step:379/1670 train_time:35655ms step_avg:94.08ms +step:380/1670 train_time:35747ms step_avg:94.07ms +step:381/1670 train_time:35838ms step_avg:94.06ms +step:382/1670 train_time:35929ms step_avg:94.06ms +step:383/1670 train_time:36021ms step_avg:94.05ms +step:384/1670 train_time:36113ms step_avg:94.04ms +step:385/1670 train_time:36204ms step_avg:94.04ms +step:386/1670 train_time:36297ms step_avg:94.03ms +step:387/1670 train_time:36389ms step_avg:94.03ms +step:388/1670 train_time:36484ms step_avg:94.03ms +step:389/1670 train_time:36578ms step_avg:94.03ms +step:390/1670 train_time:36670ms step_avg:94.03ms +step:391/1670 train_time:36763ms step_avg:94.02ms +step:392/1670 train_time:36855ms step_avg:94.02ms +step:393/1670 train_time:36947ms step_avg:94.01ms +step:394/1670 train_time:37040ms step_avg:94.01ms +step:395/1670 train_time:37131ms step_avg:94.00ms +step:396/1670 train_time:37223ms step_avg:94.00ms +step:397/1670 train_time:37315ms step_avg:93.99ms +step:398/1670 train_time:37409ms step_avg:93.99ms +step:399/1670 train_time:37503ms step_avg:93.99ms +step:400/1670 train_time:37596ms step_avg:93.99ms +step:401/1670 train_time:37689ms step_avg:93.99ms +step:402/1670 train_time:37783ms step_avg:93.99ms +step:403/1670 train_time:37875ms step_avg:93.98ms +step:404/1670 train_time:37967ms step_avg:93.98ms +step:405/1670 train_time:38059ms step_avg:93.97ms +step:406/1670 train_time:38151ms step_avg:93.97ms +step:407/1670 train_time:38243ms step_avg:93.96ms +step:408/1670 train_time:38337ms step_avg:93.96ms +step:409/1670 train_time:38429ms step_avg:93.96ms +step:410/1670 train_time:38522ms step_avg:93.96ms +step:411/1670 train_time:38615ms step_avg:93.95ms +step:412/1670 train_time:38709ms step_avg:93.95ms +step:413/1670 train_time:38802ms step_avg:93.95ms +step:414/1670 train_time:38893ms step_avg:93.95ms +step:415/1670 train_time:38986ms step_avg:93.94ms +step:416/1670 train_time:39078ms step_avg:93.94ms +step:417/1670 train_time:39170ms step_avg:93.93ms +step:418/1670 train_time:39262ms step_avg:93.93ms +step:419/1670 train_time:39354ms step_avg:93.92ms +step:420/1670 train_time:39447ms step_avg:93.92ms +step:421/1670 train_time:39539ms step_avg:93.92ms +step:422/1670 train_time:39632ms step_avg:93.92ms +step:423/1670 train_time:39725ms step_avg:93.91ms +step:424/1670 train_time:39818ms step_avg:93.91ms +step:425/1670 
train_time:40145ms step_avg:94.46ms +step:426/1670 train_time:40336ms step_avg:94.68ms +step:427/1670 train_time:40426ms step_avg:94.67ms +step:428/1670 train_time:40517ms step_avg:94.67ms +step:429/1670 train_time:40609ms step_avg:94.66ms +step:430/1670 train_time:40700ms step_avg:94.65ms +step:431/1670 train_time:40792ms step_avg:94.64ms +step:432/1670 train_time:40884ms step_avg:94.64ms +step:433/1670 train_time:40975ms step_avg:94.63ms +step:434/1670 train_time:41067ms step_avg:94.62ms +step:435/1670 train_time:41158ms step_avg:94.62ms +step:436/1670 train_time:41253ms step_avg:94.62ms +step:437/1670 train_time:41349ms step_avg:94.62ms +step:438/1670 train_time:41442ms step_avg:94.62ms +step:439/1670 train_time:41535ms step_avg:94.61ms +step:440/1670 train_time:41627ms step_avg:94.61ms +step:441/1670 train_time:41719ms step_avg:94.60ms +step:442/1670 train_time:41812ms step_avg:94.60ms +step:443/1670 train_time:41903ms step_avg:94.59ms +step:444/1670 train_time:41995ms step_avg:94.58ms +step:445/1670 train_time:42088ms step_avg:94.58ms +step:446/1670 train_time:42180ms step_avg:94.57ms +step:447/1670 train_time:42273ms step_avg:94.57ms +step:448/1670 train_time:42367ms step_avg:94.57ms +step:449/1670 train_time:42460ms step_avg:94.57ms +step:450/1670 train_time:42553ms step_avg:94.56ms +step:451/1670 train_time:42645ms step_avg:94.56ms +step:452/1670 train_time:42738ms step_avg:94.55ms +step:453/1670 train_time:42831ms step_avg:94.55ms +step:454/1670 train_time:42923ms step_avg:94.54ms +step:455/1670 train_time:43014ms step_avg:94.54ms +step:456/1670 train_time:43106ms step_avg:94.53ms +step:457/1670 train_time:43199ms step_avg:94.53ms +step:458/1670 train_time:43292ms step_avg:94.52ms +step:459/1670 train_time:43384ms step_avg:94.52ms +step:460/1670 train_time:43477ms step_avg:94.52ms +step:461/1670 train_time:43569ms step_avg:94.51ms +step:462/1670 train_time:43662ms step_avg:94.51ms +step:463/1670 train_time:43755ms step_avg:94.50ms +step:464/1670 train_time:43847ms step_avg:94.50ms +step:465/1670 train_time:43940ms step_avg:94.49ms +step:466/1670 train_time:44032ms step_avg:94.49ms +step:467/1670 train_time:44125ms step_avg:94.49ms +step:468/1670 train_time:44217ms step_avg:94.48ms +step:469/1670 train_time:44310ms step_avg:94.48ms +step:470/1670 train_time:44402ms step_avg:94.47ms +step:471/1670 train_time:44495ms step_avg:94.47ms +step:472/1670 train_time:44587ms step_avg:94.46ms +step:473/1670 train_time:44680ms step_avg:94.46ms +step:474/1670 train_time:44773ms step_avg:94.46ms +step:475/1670 train_time:44865ms step_avg:94.45ms +step:476/1670 train_time:44957ms step_avg:94.45ms +step:477/1670 train_time:45049ms step_avg:94.44ms +step:478/1670 train_time:45141ms step_avg:94.44ms +step:479/1670 train_time:45233ms step_avg:94.43ms +step:480/1670 train_time:45326ms step_avg:94.43ms +step:481/1670 train_time:45418ms step_avg:94.42ms +step:482/1670 train_time:45511ms step_avg:94.42ms +step:483/1670 train_time:45603ms step_avg:94.42ms +step:484/1670 train_time:45696ms step_avg:94.41ms +step:485/1670 train_time:45788ms step_avg:94.41ms +step:486/1670 train_time:45881ms step_avg:94.40ms +step:487/1670 train_time:45973ms step_avg:94.40ms +step:488/1670 train_time:46065ms step_avg:94.40ms +step:489/1670 train_time:46158ms step_avg:94.39ms +step:490/1670 train_time:46251ms step_avg:94.39ms +step:491/1670 train_time:46343ms step_avg:94.38ms +step:492/1670 train_time:46435ms step_avg:94.38ms +step:493/1670 train_time:46528ms step_avg:94.38ms +step:494/1670 train_time:46620ms step_avg:94.37ms 
+step:495/1670 train_time:46712ms step_avg:94.37ms +step:496/1670 train_time:46805ms step_avg:94.37ms +step:497/1670 train_time:46898ms step_avg:94.36ms +step:498/1670 train_time:46990ms step_avg:94.36ms +step:499/1670 train_time:47082ms step_avg:94.35ms +step:500/1670 train_time:47175ms step_avg:94.35ms +step:500/1670 val_loss:3.7137 train_time:47265ms step_avg:94.53ms +step:501/1670 train_time:47290ms step_avg:94.39ms +step:502/1670 train_time:47365ms step_avg:94.35ms +step:503/1670 train_time:47462ms step_avg:94.36ms +step:504/1670 train_time:47556ms step_avg:94.36ms +step:505/1670 train_time:47648ms step_avg:94.35ms +step:506/1670 train_time:47739ms step_avg:94.35ms +step:507/1670 train_time:47831ms step_avg:94.34ms +step:508/1670 train_time:47923ms step_avg:94.34ms +step:509/1670 train_time:48015ms step_avg:94.33ms +step:510/1670 train_time:48107ms step_avg:94.33ms +step:511/1670 train_time:48198ms step_avg:94.32ms +step:512/1670 train_time:48292ms step_avg:94.32ms +step:513/1670 train_time:48387ms step_avg:94.32ms +step:514/1670 train_time:48483ms step_avg:94.33ms +step:515/1670 train_time:48576ms step_avg:94.32ms +step:516/1670 train_time:48668ms step_avg:94.32ms +step:517/1670 train_time:48759ms step_avg:94.31ms +step:518/1670 train_time:48852ms step_avg:94.31ms +step:519/1670 train_time:48943ms step_avg:94.30ms +step:520/1670 train_time:49035ms step_avg:94.30ms +step:521/1670 train_time:49127ms step_avg:94.29ms +step:522/1670 train_time:49219ms step_avg:94.29ms +step:523/1670 train_time:49313ms step_avg:94.29ms +step:524/1670 train_time:49407ms step_avg:94.29ms +step:525/1670 train_time:49499ms step_avg:94.28ms +step:526/1670 train_time:49592ms step_avg:94.28ms +step:527/1670 train_time:49685ms step_avg:94.28ms +step:528/1670 train_time:49778ms step_avg:94.28ms +step:529/1670 train_time:49869ms step_avg:94.27ms +step:530/1670 train_time:49961ms step_avg:94.27ms +step:531/1670 train_time:50053ms step_avg:94.26ms +step:532/1670 train_time:50144ms step_avg:94.26ms +step:533/1670 train_time:50236ms step_avg:94.25ms +step:534/1670 train_time:50329ms step_avg:94.25ms +step:535/1670 train_time:50422ms step_avg:94.25ms +step:536/1670 train_time:50515ms step_avg:94.24ms +step:537/1670 train_time:50608ms step_avg:94.24ms +step:538/1670 train_time:50699ms step_avg:94.24ms +step:539/1670 train_time:50792ms step_avg:94.23ms +step:540/1670 train_time:50884ms step_avg:94.23ms +step:541/1670 train_time:50976ms step_avg:94.23ms +step:542/1670 train_time:51068ms step_avg:94.22ms +step:543/1670 train_time:51160ms step_avg:94.22ms +step:544/1670 train_time:51253ms step_avg:94.22ms +step:545/1670 train_time:51346ms step_avg:94.21ms +step:546/1670 train_time:51439ms step_avg:94.21ms +step:547/1670 train_time:51531ms step_avg:94.21ms +step:548/1670 train_time:51624ms step_avg:94.20ms +step:549/1670 train_time:51716ms step_avg:94.20ms +step:550/1670 train_time:51809ms step_avg:94.20ms +step:551/1670 train_time:51902ms step_avg:94.20ms +step:552/1670 train_time:51994ms step_avg:94.19ms +step:553/1670 train_time:52086ms step_avg:94.19ms +step:554/1670 train_time:52178ms step_avg:94.18ms +step:555/1670 train_time:52271ms step_avg:94.18ms +step:556/1670 train_time:52363ms step_avg:94.18ms +step:557/1670 train_time:52456ms step_avg:94.18ms +step:558/1670 train_time:52659ms step_avg:94.37ms +step:559/1670 train_time:52726ms step_avg:94.32ms +step:560/1670 train_time:52819ms step_avg:94.32ms +step:561/1670 train_time:52912ms step_avg:94.32ms +step:562/1670 train_time:53005ms step_avg:94.31ms +step:563/1670 
train_time:53097ms step_avg:94.31ms +step:564/1670 train_time:53190ms step_avg:94.31ms +step:565/1670 train_time:53283ms step_avg:94.31ms +step:566/1670 train_time:53376ms step_avg:94.30ms +step:567/1670 train_time:53469ms step_avg:94.30ms +step:568/1670 train_time:53568ms step_avg:94.31ms +step:569/1670 train_time:53665ms step_avg:94.31ms +step:570/1670 train_time:53759ms step_avg:94.31ms +step:571/1670 train_time:53853ms step_avg:94.31ms +step:572/1670 train_time:53945ms step_avg:94.31ms +step:573/1670 train_time:54038ms step_avg:94.31ms +step:574/1670 train_time:54131ms step_avg:94.31ms +step:575/1670 train_time:54224ms step_avg:94.30ms +step:576/1670 train_time:54317ms step_avg:94.30ms +step:577/1670 train_time:54410ms step_avg:94.30ms +step:578/1670 train_time:54505ms step_avg:94.30ms +step:579/1670 train_time:54600ms step_avg:94.30ms +step:580/1670 train_time:54695ms step_avg:94.30ms +step:581/1670 train_time:54790ms step_avg:94.30ms +step:582/1670 train_time:54884ms step_avg:94.30ms +step:583/1670 train_time:54977ms step_avg:94.30ms +step:584/1670 train_time:55070ms step_avg:94.30ms +step:585/1670 train_time:55164ms step_avg:94.30ms +step:586/1670 train_time:55256ms step_avg:94.29ms +step:587/1670 train_time:55349ms step_avg:94.29ms +step:588/1670 train_time:55443ms step_avg:94.29ms +step:589/1670 train_time:55537ms step_avg:94.29ms +step:590/1670 train_time:55631ms step_avg:94.29ms +step:591/1670 train_time:55726ms step_avg:94.29ms +step:592/1670 train_time:55820ms step_avg:94.29ms +step:593/1670 train_time:55914ms step_avg:94.29ms +step:594/1670 train_time:56008ms step_avg:94.29ms +step:595/1670 train_time:56101ms step_avg:94.29ms +step:596/1670 train_time:56194ms step_avg:94.29ms +step:597/1670 train_time:56287ms step_avg:94.28ms +step:598/1670 train_time:56380ms step_avg:94.28ms +step:599/1670 train_time:56474ms step_avg:94.28ms +step:600/1670 train_time:56569ms step_avg:94.28ms +step:601/1670 train_time:56663ms step_avg:94.28ms +step:602/1670 train_time:56758ms step_avg:94.28ms +step:603/1670 train_time:56853ms step_avg:94.28ms +step:604/1670 train_time:56947ms step_avg:94.28ms +step:605/1670 train_time:57040ms step_avg:94.28ms +step:606/1670 train_time:57133ms step_avg:94.28ms +step:607/1670 train_time:57226ms step_avg:94.28ms +step:608/1670 train_time:57319ms step_avg:94.27ms +step:609/1670 train_time:57412ms step_avg:94.27ms +step:610/1670 train_time:57506ms step_avg:94.27ms +step:611/1670 train_time:57600ms step_avg:94.27ms +step:612/1670 train_time:57695ms step_avg:94.27ms +step:613/1670 train_time:57790ms step_avg:94.27ms +step:614/1670 train_time:57885ms step_avg:94.28ms +step:615/1670 train_time:57980ms step_avg:94.28ms +step:616/1670 train_time:58073ms step_avg:94.27ms +step:617/1670 train_time:58167ms step_avg:94.27ms +step:618/1670 train_time:58259ms step_avg:94.27ms +step:619/1670 train_time:58354ms step_avg:94.27ms +step:620/1670 train_time:58448ms step_avg:94.27ms +step:621/1670 train_time:58541ms step_avg:94.27ms +step:622/1670 train_time:58635ms step_avg:94.27ms +step:623/1670 train_time:58729ms step_avg:94.27ms +step:624/1670 train_time:58824ms step_avg:94.27ms +step:625/1670 train_time:58918ms step_avg:94.27ms +step:625/1670 val_loss:3.6129 train_time:59010ms step_avg:94.42ms +step:626/1670 train_time:59036ms step_avg:94.31ms +step:627/1670 train_time:59113ms step_avg:94.28ms +step:628/1670 train_time:59213ms step_avg:94.29ms +step:629/1670 train_time:59307ms step_avg:94.29ms +step:630/1670 train_time:59400ms step_avg:94.29ms +step:631/1670 train_time:59492ms 
step_avg:94.28ms +step:632/1670 train_time:59584ms step_avg:94.28ms +step:633/1670 train_time:59676ms step_avg:94.28ms +step:634/1670 train_time:59769ms step_avg:94.27ms +step:635/1670 train_time:59862ms step_avg:94.27ms +step:636/1670 train_time:59957ms step_avg:94.27ms +step:637/1670 train_time:60051ms step_avg:94.27ms +step:638/1670 train_time:60146ms step_avg:94.27ms +step:639/1670 train_time:60541ms step_avg:94.74ms +step:640/1670 train_time:60608ms step_avg:94.70ms +step:641/1670 train_time:60700ms step_avg:94.70ms +step:642/1670 train_time:60793ms step_avg:94.69ms +step:643/1670 train_time:60885ms step_avg:94.69ms +step:644/1670 train_time:60978ms step_avg:94.69ms +step:645/1670 train_time:61070ms step_avg:94.68ms +step:646/1670 train_time:61163ms step_avg:94.68ms +step:647/1670 train_time:61255ms step_avg:94.68ms +step:648/1670 train_time:61348ms step_avg:94.67ms +step:649/1670 train_time:61441ms step_avg:94.67ms +step:650/1670 train_time:61539ms step_avg:94.67ms +step:651/1670 train_time:61634ms step_avg:94.68ms +step:652/1670 train_time:61727ms step_avg:94.67ms +step:653/1670 train_time:61820ms step_avg:94.67ms +step:654/1670 train_time:61914ms step_avg:94.67ms +step:655/1670 train_time:62006ms step_avg:94.67ms +step:656/1670 train_time:62099ms step_avg:94.66ms +step:657/1670 train_time:62192ms step_avg:94.66ms +step:658/1670 train_time:62284ms step_avg:94.66ms +step:659/1670 train_time:62377ms step_avg:94.65ms +step:660/1670 train_time:62471ms step_avg:94.65ms +step:661/1670 train_time:62565ms step_avg:94.65ms +step:662/1670 train_time:62659ms step_avg:94.65ms +step:663/1670 train_time:62753ms step_avg:94.65ms +step:664/1670 train_time:62847ms step_avg:94.65ms +step:665/1670 train_time:62941ms step_avg:94.65ms +step:666/1670 train_time:63035ms step_avg:94.65ms +step:667/1670 train_time:63127ms step_avg:94.64ms +step:668/1670 train_time:63221ms step_avg:94.64ms +step:669/1670 train_time:63314ms step_avg:94.64ms +step:670/1670 train_time:63409ms step_avg:94.64ms +step:671/1670 train_time:63500ms step_avg:94.63ms +step:672/1670 train_time:63594ms step_avg:94.63ms +step:673/1670 train_time:63688ms step_avg:94.63ms +step:674/1670 train_time:63782ms step_avg:94.63ms +step:675/1670 train_time:63876ms step_avg:94.63ms +step:676/1670 train_time:63969ms step_avg:94.63ms +step:677/1670 train_time:64062ms step_avg:94.63ms +step:678/1670 train_time:64155ms step_avg:94.62ms +step:679/1670 train_time:64248ms step_avg:94.62ms +step:680/1670 train_time:64343ms step_avg:94.62ms +step:681/1670 train_time:64437ms step_avg:94.62ms +step:682/1670 train_time:64530ms step_avg:94.62ms +step:683/1670 train_time:64623ms step_avg:94.62ms +step:684/1670 train_time:64716ms step_avg:94.61ms +step:685/1670 train_time:64810ms step_avg:94.61ms +step:686/1670 train_time:64904ms step_avg:94.61ms +step:687/1670 train_time:64998ms step_avg:94.61ms +step:688/1670 train_time:65093ms step_avg:94.61ms +step:689/1670 train_time:65185ms step_avg:94.61ms +step:690/1670 train_time:65279ms step_avg:94.61ms +step:691/1670 train_time:65372ms step_avg:94.60ms +step:692/1670 train_time:65465ms step_avg:94.60ms +step:693/1670 train_time:65559ms step_avg:94.60ms +step:694/1670 train_time:65653ms step_avg:94.60ms +step:695/1670 train_time:65746ms step_avg:94.60ms +step:696/1670 train_time:65840ms step_avg:94.60ms +step:697/1670 train_time:65933ms step_avg:94.60ms +step:698/1670 train_time:66027ms step_avg:94.59ms +step:699/1670 train_time:66120ms step_avg:94.59ms +step:700/1670 train_time:66214ms step_avg:94.59ms +step:701/1670 
train_time:66307ms step_avg:94.59ms +step:702/1670 train_time:66401ms step_avg:94.59ms +step:703/1670 train_time:66494ms step_avg:94.59ms +step:704/1670 train_time:66587ms step_avg:94.58ms +step:705/1670 train_time:66681ms step_avg:94.58ms +step:706/1670 train_time:66775ms step_avg:94.58ms +step:707/1670 train_time:66868ms step_avg:94.58ms +step:708/1670 train_time:66961ms step_avg:94.58ms +step:709/1670 train_time:67055ms step_avg:94.58ms +step:710/1670 train_time:67148ms step_avg:94.57ms +step:711/1670 train_time:67243ms step_avg:94.58ms +step:712/1670 train_time:67337ms step_avg:94.57ms +step:713/1670 train_time:67430ms step_avg:94.57ms +step:714/1670 train_time:67523ms step_avg:94.57ms +step:715/1670 train_time:67616ms step_avg:94.57ms +step:716/1670 train_time:67709ms step_avg:94.57ms +step:717/1670 train_time:67803ms step_avg:94.56ms +step:718/1670 train_time:67896ms step_avg:94.56ms +step:719/1670 train_time:67990ms step_avg:94.56ms +step:720/1670 train_time:68084ms step_avg:94.56ms +step:721/1670 train_time:68177ms step_avg:94.56ms +step:722/1670 train_time:68271ms step_avg:94.56ms +step:723/1670 train_time:68365ms step_avg:94.56ms +step:724/1670 train_time:68459ms step_avg:94.56ms +step:725/1670 train_time:68553ms step_avg:94.56ms +step:726/1670 train_time:68646ms step_avg:94.55ms +step:727/1670 train_time:68739ms step_avg:94.55ms +step:728/1670 train_time:68832ms step_avg:94.55ms +step:729/1670 train_time:68926ms step_avg:94.55ms +step:730/1670 train_time:69019ms step_avg:94.55ms +step:731/1670 train_time:69113ms step_avg:94.55ms +step:732/1670 train_time:69206ms step_avg:94.54ms +step:733/1670 train_time:69300ms step_avg:94.54ms +step:734/1670 train_time:69394ms step_avg:94.54ms +step:735/1670 train_time:69487ms step_avg:94.54ms +step:736/1670 train_time:69581ms step_avg:94.54ms +step:737/1670 train_time:69674ms step_avg:94.54ms +step:738/1670 train_time:69768ms step_avg:94.54ms +step:739/1670 train_time:69862ms step_avg:94.54ms +step:740/1670 train_time:69955ms step_avg:94.53ms +step:741/1670 train_time:70049ms step_avg:94.53ms +step:742/1670 train_time:70142ms step_avg:94.53ms +step:743/1670 train_time:70237ms step_avg:94.53ms +step:744/1670 train_time:70330ms step_avg:94.53ms +step:745/1670 train_time:70424ms step_avg:94.53ms +step:746/1670 train_time:70518ms step_avg:94.53ms +step:747/1670 train_time:70611ms step_avg:94.53ms +step:748/1670 train_time:70704ms step_avg:94.52ms +step:749/1670 train_time:70798ms step_avg:94.52ms +step:750/1670 train_time:70892ms step_avg:94.52ms +step:750/1670 val_loss:3.5617 train_time:70983ms step_avg:94.64ms +step:751/1670 train_time:71009ms step_avg:94.55ms +step:752/1670 train_time:71088ms step_avg:94.53ms +step:753/1670 train_time:71187ms step_avg:94.54ms +step:754/1670 train_time:71280ms step_avg:94.54ms +step:755/1670 train_time:71373ms step_avg:94.53ms +step:756/1670 train_time:71466ms step_avg:94.53ms +step:757/1670 train_time:71558ms step_avg:94.53ms +step:758/1670 train_time:71651ms step_avg:94.53ms +step:759/1670 train_time:71743ms step_avg:94.52ms +step:760/1670 train_time:71836ms step_avg:94.52ms +step:761/1670 train_time:71929ms step_avg:94.52ms +step:762/1670 train_time:72024ms step_avg:94.52ms +step:763/1670 train_time:72119ms step_avg:94.52ms +step:764/1670 train_time:72215ms step_avg:94.52ms +step:765/1670 train_time:72308ms step_avg:94.52ms +step:766/1670 train_time:72402ms step_avg:94.52ms +step:767/1670 train_time:72495ms step_avg:94.52ms +step:768/1670 train_time:72589ms step_avg:94.52ms +step:769/1670 train_time:72681ms 
step_avg:94.51ms +step:770/1670 train_time:72774ms step_avg:94.51ms +step:771/1670 train_time:72867ms step_avg:94.51ms +step:772/1670 train_time:72960ms step_avg:94.51ms +step:773/1670 train_time:73055ms step_avg:94.51ms +step:774/1670 train_time:73149ms step_avg:94.51ms +step:775/1670 train_time:73244ms step_avg:94.51ms +step:776/1670 train_time:73338ms step_avg:94.51ms +step:777/1670 train_time:73431ms step_avg:94.51ms +step:778/1670 train_time:73524ms step_avg:94.50ms +step:779/1670 train_time:73617ms step_avg:94.50ms +step:780/1670 train_time:73710ms step_avg:94.50ms +step:781/1670 train_time:73804ms step_avg:94.50ms +step:782/1670 train_time:73897ms step_avg:94.50ms +step:783/1670 train_time:73991ms step_avg:94.50ms +step:784/1670 train_time:74085ms step_avg:94.50ms +step:785/1670 train_time:74179ms step_avg:94.50ms +step:786/1670 train_time:74274ms step_avg:94.50ms +step:787/1670 train_time:74368ms step_avg:94.50ms +step:788/1670 train_time:74462ms step_avg:94.49ms +step:789/1670 train_time:74555ms step_avg:94.49ms +step:790/1670 train_time:74648ms step_avg:94.49ms +step:791/1670 train_time:74740ms step_avg:94.49ms +step:792/1670 train_time:74834ms step_avg:94.49ms +step:793/1670 train_time:74927ms step_avg:94.49ms +step:794/1670 train_time:75022ms step_avg:94.49ms +step:795/1670 train_time:75116ms step_avg:94.49ms +step:796/1670 train_time:75210ms step_avg:94.49ms +step:797/1670 train_time:75304ms step_avg:94.48ms +step:798/1670 train_time:75398ms step_avg:94.48ms +step:799/1670 train_time:75491ms step_avg:94.48ms +step:800/1670 train_time:75585ms step_avg:94.48ms +step:801/1670 train_time:75679ms step_avg:94.48ms +step:802/1670 train_time:75772ms step_avg:94.48ms +step:803/1670 train_time:75865ms step_avg:94.48ms +step:804/1670 train_time:75958ms step_avg:94.47ms +step:805/1670 train_time:76051ms step_avg:94.47ms +step:806/1670 train_time:76144ms step_avg:94.47ms +step:807/1670 train_time:76239ms step_avg:94.47ms +step:808/1670 train_time:76333ms step_avg:94.47ms +step:809/1670 train_time:76427ms step_avg:94.47ms +step:810/1670 train_time:76521ms step_avg:94.47ms +step:811/1670 train_time:76614ms step_avg:94.47ms +step:812/1670 train_time:76707ms step_avg:94.47ms +step:813/1670 train_time:76801ms step_avg:94.47ms +step:814/1670 train_time:76894ms step_avg:94.46ms +step:815/1670 train_time:76987ms step_avg:94.46ms +step:816/1670 train_time:77081ms step_avg:94.46ms +step:817/1670 train_time:77176ms step_avg:94.46ms +step:818/1670 train_time:77270ms step_avg:94.46ms +step:819/1670 train_time:77364ms step_avg:94.46ms +step:820/1670 train_time:77457ms step_avg:94.46ms +step:821/1670 train_time:77550ms step_avg:94.46ms +step:822/1670 train_time:77645ms step_avg:94.46ms +step:823/1670 train_time:77739ms step_avg:94.46ms +step:824/1670 train_time:77833ms step_avg:94.46ms +step:825/1670 train_time:77926ms step_avg:94.46ms +step:826/1670 train_time:78020ms step_avg:94.45ms +step:827/1670 train_time:78113ms step_avg:94.45ms +step:828/1670 train_time:78207ms step_avg:94.45ms +step:829/1670 train_time:78302ms step_avg:94.45ms +step:830/1670 train_time:78396ms step_avg:94.45ms +step:831/1670 train_time:78489ms step_avg:94.45ms +step:832/1670 train_time:78583ms step_avg:94.45ms +step:833/1670 train_time:78677ms step_avg:94.45ms +step:834/1670 train_time:78771ms step_avg:94.45ms +step:835/1670 train_time:78864ms step_avg:94.45ms +step:836/1670 train_time:78958ms step_avg:94.45ms +step:837/1670 train_time:79051ms step_avg:94.45ms +step:838/1670 train_time:79145ms step_avg:94.44ms +step:839/1670 
train_time:79239ms step_avg:94.45ms +step:840/1670 train_time:79334ms step_avg:94.44ms +step:841/1670 train_time:79427ms step_avg:94.44ms +step:842/1670 train_time:79521ms step_avg:94.44ms +step:843/1670 train_time:79615ms step_avg:94.44ms +step:844/1670 train_time:79709ms step_avg:94.44ms +step:845/1670 train_time:79803ms step_avg:94.44ms +step:846/1670 train_time:79896ms step_avg:94.44ms +step:847/1670 train_time:79990ms step_avg:94.44ms +step:848/1670 train_time:80083ms step_avg:94.44ms +step:849/1670 train_time:80176ms step_avg:94.44ms +step:850/1670 train_time:80270ms step_avg:94.44ms +step:851/1670 train_time:80629ms step_avg:94.75ms +step:852/1670 train_time:80744ms step_avg:94.77ms +step:853/1670 train_time:80836ms step_avg:94.77ms +step:854/1670 train_time:80928ms step_avg:94.76ms +step:855/1670 train_time:81021ms step_avg:94.76ms +step:856/1670 train_time:81114ms step_avg:94.76ms +step:857/1670 train_time:81207ms step_avg:94.76ms +step:858/1670 train_time:81299ms step_avg:94.75ms +step:859/1670 train_time:81391ms step_avg:94.75ms +step:860/1670 train_time:81484ms step_avg:94.75ms +step:861/1670 train_time:81579ms step_avg:94.75ms +step:862/1670 train_time:81678ms step_avg:94.75ms +step:863/1670 train_time:81775ms step_avg:94.76ms +step:864/1670 train_time:81869ms step_avg:94.76ms +step:865/1670 train_time:81962ms step_avg:94.75ms +step:866/1670 train_time:82055ms step_avg:94.75ms +step:867/1670 train_time:82147ms step_avg:94.75ms +step:868/1670 train_time:82240ms step_avg:94.75ms +step:869/1670 train_time:82332ms step_avg:94.74ms +step:870/1670 train_time:82425ms step_avg:94.74ms +step:871/1670 train_time:82518ms step_avg:94.74ms +step:872/1670 train_time:82613ms step_avg:94.74ms +step:873/1670 train_time:82710ms step_avg:94.74ms +step:874/1670 train_time:82804ms step_avg:94.74ms +step:875/1670 train_time:82898ms step_avg:94.74ms +step:875/1670 val_loss:3.5164 train_time:82990ms step_avg:94.85ms +step:876/1670 train_time:83015ms step_avg:94.77ms +step:877/1670 train_time:83092ms step_avg:94.75ms +step:878/1670 train_time:83192ms step_avg:94.75ms +step:879/1670 train_time:83285ms step_avg:94.75ms +step:880/1670 train_time:83379ms step_avg:94.75ms +step:881/1670 train_time:83472ms step_avg:94.75ms +step:882/1670 train_time:83564ms step_avg:94.74ms +step:883/1670 train_time:83657ms step_avg:94.74ms +step:884/1670 train_time:83750ms step_avg:94.74ms +step:885/1670 train_time:83842ms step_avg:94.74ms +step:886/1670 train_time:83936ms step_avg:94.74ms +step:887/1670 train_time:84031ms step_avg:94.74ms +step:888/1670 train_time:84127ms step_avg:94.74ms +step:889/1670 train_time:84222ms step_avg:94.74ms +step:890/1670 train_time:84316ms step_avg:94.74ms +step:891/1670 train_time:84409ms step_avg:94.74ms +step:892/1670 train_time:84502ms step_avg:94.73ms +step:893/1670 train_time:84596ms step_avg:94.73ms +step:894/1670 train_time:84688ms step_avg:94.73ms +step:895/1670 train_time:84781ms step_avg:94.73ms +step:896/1670 train_time:84875ms step_avg:94.73ms +step:897/1670 train_time:84969ms step_avg:94.73ms +step:898/1670 train_time:85063ms step_avg:94.72ms +step:899/1670 train_time:85158ms step_avg:94.72ms +step:900/1670 train_time:85252ms step_avg:94.72ms +step:901/1670 train_time:85346ms step_avg:94.72ms +step:902/1670 train_time:85440ms step_avg:94.72ms +step:903/1670 train_time:85533ms step_avg:94.72ms +step:904/1670 train_time:85626ms step_avg:94.72ms +step:905/1670 train_time:85720ms step_avg:94.72ms +step:906/1670 train_time:85813ms step_avg:94.72ms +step:907/1670 train_time:85906ms 
step_avg:94.71ms +step:908/1670 train_time:86000ms step_avg:94.71ms +step:909/1670 train_time:86094ms step_avg:94.71ms +step:910/1670 train_time:86189ms step_avg:94.71ms +step:911/1670 train_time:86282ms step_avg:94.71ms +step:912/1670 train_time:86377ms step_avg:94.71ms +step:913/1670 train_time:86471ms step_avg:94.71ms +step:914/1670 train_time:86563ms step_avg:94.71ms +step:915/1670 train_time:86656ms step_avg:94.71ms +step:916/1670 train_time:86750ms step_avg:94.71ms +step:917/1670 train_time:86843ms step_avg:94.70ms +step:918/1670 train_time:86937ms step_avg:94.70ms +step:919/1670 train_time:87031ms step_avg:94.70ms +step:920/1670 train_time:87125ms step_avg:94.70ms +step:921/1670 train_time:87219ms step_avg:94.70ms +step:922/1670 train_time:87313ms step_avg:94.70ms +step:923/1670 train_time:87407ms step_avg:94.70ms +step:924/1670 train_time:87500ms step_avg:94.70ms +step:925/1670 train_time:87594ms step_avg:94.70ms +step:926/1670 train_time:87687ms step_avg:94.69ms +step:927/1670 train_time:87780ms step_avg:94.69ms +step:928/1670 train_time:87874ms step_avg:94.69ms +step:929/1670 train_time:87967ms step_avg:94.69ms +step:930/1670 train_time:88060ms step_avg:94.69ms +step:931/1670 train_time:88155ms step_avg:94.69ms +step:932/1670 train_time:88248ms step_avg:94.69ms +step:933/1670 train_time:88343ms step_avg:94.69ms +step:934/1670 train_time:88437ms step_avg:94.69ms +step:935/1670 train_time:88530ms step_avg:94.68ms +step:936/1670 train_time:88623ms step_avg:94.68ms +step:937/1670 train_time:88716ms step_avg:94.68ms +step:938/1670 train_time:88810ms step_avg:94.68ms +step:939/1670 train_time:88904ms step_avg:94.68ms +step:940/1670 train_time:88998ms step_avg:94.68ms +step:941/1670 train_time:89091ms step_avg:94.68ms +step:942/1670 train_time:89185ms step_avg:94.68ms +step:943/1670 train_time:89278ms step_avg:94.67ms +step:944/1670 train_time:89373ms step_avg:94.67ms +step:945/1670 train_time:89467ms step_avg:94.67ms +step:946/1670 train_time:89561ms step_avg:94.67ms +step:947/1670 train_time:89654ms step_avg:94.67ms +step:948/1670 train_time:89748ms step_avg:94.67ms +step:949/1670 train_time:89841ms step_avg:94.67ms +step:950/1670 train_time:89935ms step_avg:94.67ms +step:951/1670 train_time:90028ms step_avg:94.67ms +step:952/1670 train_time:90121ms step_avg:94.67ms +step:953/1670 train_time:90215ms step_avg:94.66ms +step:954/1670 train_time:90309ms step_avg:94.66ms +step:955/1670 train_time:90403ms step_avg:94.66ms +step:956/1670 train_time:90497ms step_avg:94.66ms +step:957/1670 train_time:90591ms step_avg:94.66ms +step:958/1670 train_time:90684ms step_avg:94.66ms +step:959/1670 train_time:90777ms step_avg:94.66ms +step:960/1670 train_time:90871ms step_avg:94.66ms +step:961/1670 train_time:90964ms step_avg:94.66ms +step:962/1670 train_time:91057ms step_avg:94.65ms +step:963/1670 train_time:91152ms step_avg:94.65ms +step:964/1670 train_time:91246ms step_avg:94.65ms +step:965/1670 train_time:91340ms step_avg:94.65ms +step:966/1670 train_time:91435ms step_avg:94.65ms +step:967/1670 train_time:91528ms step_avg:94.65ms +step:968/1670 train_time:91621ms step_avg:94.65ms +step:969/1670 train_time:91714ms step_avg:94.65ms +step:970/1670 train_time:91809ms step_avg:94.65ms +step:971/1670 train_time:91902ms step_avg:94.65ms +step:972/1670 train_time:91996ms step_avg:94.65ms +step:973/1670 train_time:92089ms step_avg:94.64ms +step:974/1670 train_time:92182ms step_avg:94.64ms +step:975/1670 train_time:92278ms step_avg:94.64ms +step:976/1670 train_time:92373ms step_avg:94.64ms +step:977/1670 
train_time:92467ms step_avg:94.64ms +step:978/1670 train_time:92560ms step_avg:94.64ms +step:979/1670 train_time:92654ms step_avg:94.64ms +step:980/1670 train_time:92748ms step_avg:94.64ms +step:981/1670 train_time:92841ms step_avg:94.64ms +step:982/1670 train_time:92935ms step_avg:94.64ms +step:983/1670 train_time:93029ms step_avg:94.64ms +step:984/1670 train_time:93122ms step_avg:94.64ms +step:985/1670 train_time:93215ms step_avg:94.63ms +step:986/1670 train_time:93310ms step_avg:94.63ms +step:987/1670 train_time:93403ms step_avg:94.63ms +step:988/1670 train_time:93497ms step_avg:94.63ms +step:989/1670 train_time:93591ms step_avg:94.63ms +step:990/1670 train_time:93684ms step_avg:94.63ms +step:991/1670 train_time:93778ms step_avg:94.63ms +step:992/1670 train_time:93871ms step_avg:94.63ms +step:993/1670 train_time:93966ms step_avg:94.63ms +step:994/1670 train_time:94058ms step_avg:94.63ms +step:995/1670 train_time:94151ms step_avg:94.62ms +step:996/1670 train_time:94245ms step_avg:94.62ms +step:997/1670 train_time:94339ms step_avg:94.62ms +step:998/1670 train_time:94432ms step_avg:94.62ms +step:999/1670 train_time:94527ms step_avg:94.62ms +step:1000/1670 train_time:94620ms step_avg:94.62ms +step:1000/1670 val_loss:3.4679 train_time:94712ms step_avg:94.71ms +step:1001/1670 train_time:94738ms step_avg:94.64ms +step:1002/1670 train_time:94812ms step_avg:94.62ms +step:1003/1670 train_time:94914ms step_avg:94.63ms +step:1004/1670 train_time:95010ms step_avg:94.63ms +step:1005/1670 train_time:95103ms step_avg:94.63ms +step:1006/1670 train_time:95196ms step_avg:94.63ms +step:1007/1670 train_time:95288ms step_avg:94.63ms +step:1008/1670 train_time:95381ms step_avg:94.62ms +step:1009/1670 train_time:95474ms step_avg:94.62ms +step:1010/1670 train_time:95566ms step_avg:94.62ms +step:1011/1670 train_time:95659ms step_avg:94.62ms +step:1012/1670 train_time:95754ms step_avg:94.62ms +step:1013/1670 train_time:95850ms step_avg:94.62ms +step:1014/1670 train_time:95946ms step_avg:94.62ms +step:1015/1670 train_time:96041ms step_avg:94.62ms +step:1016/1670 train_time:96134ms step_avg:94.62ms +step:1017/1670 train_time:96227ms step_avg:94.62ms +step:1018/1670 train_time:96320ms step_avg:94.62ms +step:1019/1670 train_time:96413ms step_avg:94.62ms +step:1020/1670 train_time:96507ms step_avg:94.61ms +step:1021/1670 train_time:96599ms step_avg:94.61ms +step:1022/1670 train_time:96692ms step_avg:94.61ms +step:1023/1670 train_time:96786ms step_avg:94.61ms +step:1024/1670 train_time:96882ms step_avg:94.61ms +step:1025/1670 train_time:96976ms step_avg:94.61ms +step:1026/1670 train_time:97071ms step_avg:94.61ms +step:1027/1670 train_time:97165ms step_avg:94.61ms +step:1028/1670 train_time:97258ms step_avg:94.61ms +step:1029/1670 train_time:97351ms step_avg:94.61ms +step:1030/1670 train_time:97445ms step_avg:94.61ms +step:1031/1670 train_time:97539ms step_avg:94.61ms +step:1032/1670 train_time:97632ms step_avg:94.60ms +step:1033/1670 train_time:97726ms step_avg:94.60ms +step:1034/1670 train_time:97819ms step_avg:94.60ms +step:1035/1670 train_time:97915ms step_avg:94.60ms +step:1036/1670 train_time:98010ms step_avg:94.60ms +step:1037/1670 train_time:98103ms step_avg:94.60ms +step:1038/1670 train_time:98196ms step_avg:94.60ms +step:1039/1670 train_time:98290ms step_avg:94.60ms +step:1040/1670 train_time:98384ms step_avg:94.60ms +step:1041/1670 train_time:98477ms step_avg:94.60ms +step:1042/1670 train_time:98570ms step_avg:94.60ms +step:1043/1670 train_time:98663ms step_avg:94.60ms +step:1044/1670 train_time:98757ms 
step_avg:94.59ms +step:1045/1670 train_time:98851ms step_avg:94.59ms +step:1046/1670 train_time:98947ms step_avg:94.60ms +step:1047/1670 train_time:99040ms step_avg:94.59ms +step:1048/1670 train_time:99135ms step_avg:94.59ms +step:1049/1670 train_time:99229ms step_avg:94.59ms +step:1050/1670 train_time:99322ms step_avg:94.59ms +step:1051/1670 train_time:99416ms step_avg:94.59ms +step:1052/1670 train_time:99509ms step_avg:94.59ms +step:1053/1670 train_time:99604ms step_avg:94.59ms +step:1054/1670 train_time:99696ms step_avg:94.59ms +step:1055/1670 train_time:99790ms step_avg:94.59ms +step:1056/1670 train_time:99885ms step_avg:94.59ms +step:1057/1670 train_time:99980ms step_avg:94.59ms +step:1058/1670 train_time:100074ms step_avg:94.59ms +step:1059/1670 train_time:100167ms step_avg:94.59ms +step:1060/1670 train_time:100261ms step_avg:94.59ms +step:1061/1670 train_time:100355ms step_avg:94.59ms +step:1062/1670 train_time:100784ms step_avg:94.90ms +step:1063/1670 train_time:100852ms step_avg:94.88ms +step:1064/1670 train_time:100944ms step_avg:94.87ms +step:1065/1670 train_time:101036ms step_avg:94.87ms +step:1066/1670 train_time:101129ms step_avg:94.87ms +step:1067/1670 train_time:101221ms step_avg:94.87ms +step:1068/1670 train_time:101314ms step_avg:94.86ms +step:1069/1670 train_time:101407ms step_avg:94.86ms +step:1070/1670 train_time:101499ms step_avg:94.86ms +step:1071/1670 train_time:101592ms step_avg:94.86ms +step:1072/1670 train_time:101687ms step_avg:94.86ms +step:1073/1670 train_time:101782ms step_avg:94.86ms +step:1074/1670 train_time:101880ms step_avg:94.86ms +step:1075/1670 train_time:101974ms step_avg:94.86ms +step:1076/1670 train_time:102067ms step_avg:94.86ms +step:1077/1670 train_time:102160ms step_avg:94.86ms +step:1078/1670 train_time:102252ms step_avg:94.85ms +step:1079/1670 train_time:102346ms step_avg:94.85ms +step:1080/1670 train_time:102439ms step_avg:94.85ms +step:1081/1670 train_time:102532ms step_avg:94.85ms +step:1082/1670 train_time:102625ms step_avg:94.85ms +step:1083/1670 train_time:102720ms step_avg:94.85ms +step:1084/1670 train_time:102816ms step_avg:94.85ms +step:1085/1670 train_time:102912ms step_avg:94.85ms +step:1086/1670 train_time:103007ms step_avg:94.85ms +step:1087/1670 train_time:103099ms step_avg:94.85ms +step:1088/1670 train_time:103193ms step_avg:94.85ms +step:1089/1670 train_time:103286ms step_avg:94.84ms +step:1090/1670 train_time:103379ms step_avg:94.84ms +step:1091/1670 train_time:103472ms step_avg:94.84ms +step:1092/1670 train_time:103566ms step_avg:94.84ms +step:1093/1670 train_time:103659ms step_avg:94.84ms +step:1094/1670 train_time:103753ms step_avg:94.84ms +step:1095/1670 train_time:103848ms step_avg:94.84ms +step:1096/1670 train_time:103942ms step_avg:94.84ms +step:1097/1670 train_time:104036ms step_avg:94.84ms +step:1098/1670 train_time:104130ms step_avg:94.84ms +step:1099/1670 train_time:104224ms step_avg:94.84ms +step:1100/1670 train_time:104317ms step_avg:94.83ms +step:1101/1670 train_time:104411ms step_avg:94.83ms +step:1102/1670 train_time:104504ms step_avg:94.83ms +step:1103/1670 train_time:104597ms step_avg:94.83ms +step:1104/1670 train_time:104690ms step_avg:94.83ms +step:1105/1670 train_time:104784ms step_avg:94.83ms +step:1106/1670 train_time:104878ms step_avg:94.83ms +step:1107/1670 train_time:104973ms step_avg:94.83ms +step:1108/1670 train_time:105066ms step_avg:94.83ms +step:1109/1670 train_time:105160ms step_avg:94.82ms +step:1110/1670 train_time:105253ms step_avg:94.82ms +step:1111/1670 train_time:105347ms step_avg:94.82ms 
+step:1112/1670 train_time:105440ms step_avg:94.82ms +step:1113/1670 train_time:105534ms step_avg:94.82ms +step:1114/1670 train_time:105628ms step_avg:94.82ms +step:1115/1670 train_time:105830ms step_avg:94.91ms +step:1116/1670 train_time:105899ms step_avg:94.89ms +step:1117/1670 train_time:105992ms step_avg:94.89ms +step:1118/1670 train_time:106085ms step_avg:94.89ms +step:1119/1670 train_time:106178ms step_avg:94.89ms +step:1120/1670 train_time:106272ms step_avg:94.89ms +step:1121/1670 train_time:106366ms step_avg:94.88ms +step:1122/1670 train_time:106459ms step_avg:94.88ms +step:1123/1670 train_time:106553ms step_avg:94.88ms +step:1124/1670 train_time:106646ms step_avg:94.88ms +step:1125/1670 train_time:106745ms step_avg:94.88ms +step:1125/1670 val_loss:3.4148 train_time:106842ms step_avg:94.97ms +step:1126/1670 train_time:106868ms step_avg:94.91ms +step:1127/1670 train_time:106942ms step_avg:94.89ms +step:1128/1670 train_time:107042ms step_avg:94.90ms +step:1129/1670 train_time:107137ms step_avg:94.90ms +step:1130/1670 train_time:107230ms step_avg:94.89ms +step:1131/1670 train_time:107323ms step_avg:94.89ms +step:1132/1670 train_time:107417ms step_avg:94.89ms +step:1133/1670 train_time:107510ms step_avg:94.89ms +step:1134/1670 train_time:107604ms step_avg:94.89ms +step:1135/1670 train_time:107697ms step_avg:94.89ms +step:1136/1670 train_time:107792ms step_avg:94.89ms +step:1137/1670 train_time:107888ms step_avg:94.89ms +step:1138/1670 train_time:107983ms step_avg:94.89ms +step:1139/1670 train_time:108077ms step_avg:94.89ms +step:1140/1670 train_time:108171ms step_avg:94.89ms +step:1141/1670 train_time:108265ms step_avg:94.89ms +step:1142/1670 train_time:108359ms step_avg:94.89ms +step:1143/1670 train_time:108453ms step_avg:94.88ms +step:1144/1670 train_time:108547ms step_avg:94.88ms +step:1145/1670 train_time:108640ms step_avg:94.88ms +step:1146/1670 train_time:108734ms step_avg:94.88ms +step:1147/1670 train_time:108829ms step_avg:94.88ms +step:1148/1670 train_time:108924ms step_avg:94.88ms +step:1149/1670 train_time:109019ms step_avg:94.88ms +step:1150/1670 train_time:109113ms step_avg:94.88ms +step:1151/1670 train_time:109208ms step_avg:94.88ms +step:1152/1670 train_time:109301ms step_avg:94.88ms +step:1153/1670 train_time:109395ms step_avg:94.88ms +step:1154/1670 train_time:109489ms step_avg:94.88ms +step:1155/1670 train_time:109583ms step_avg:94.88ms +step:1156/1670 train_time:109677ms step_avg:94.88ms +step:1157/1670 train_time:109771ms step_avg:94.88ms +step:1158/1670 train_time:109865ms step_avg:94.88ms +step:1159/1670 train_time:109960ms step_avg:94.87ms +step:1160/1670 train_time:110054ms step_avg:94.87ms +step:1161/1670 train_time:110148ms step_avg:94.87ms +step:1162/1670 train_time:110242ms step_avg:94.87ms +step:1163/1670 train_time:110336ms step_avg:94.87ms +step:1164/1670 train_time:110430ms step_avg:94.87ms +step:1165/1670 train_time:110524ms step_avg:94.87ms +step:1166/1670 train_time:110617ms step_avg:94.87ms +step:1167/1670 train_time:110712ms step_avg:94.87ms +step:1168/1670 train_time:110806ms step_avg:94.87ms +step:1169/1670 train_time:110900ms step_avg:94.87ms +step:1170/1670 train_time:110994ms step_avg:94.87ms +step:1171/1670 train_time:111088ms step_avg:94.87ms +step:1172/1670 train_time:111184ms step_avg:94.87ms +step:1173/1670 train_time:111277ms step_avg:94.87ms +step:1174/1670 train_time:111371ms step_avg:94.86ms +step:1175/1670 train_time:111466ms step_avg:94.86ms +step:1176/1670 train_time:111561ms step_avg:94.86ms +step:1177/1670 train_time:111654ms 
step_avg:94.86ms +step:1178/1670 train_time:111747ms step_avg:94.86ms +step:1179/1670 train_time:111842ms step_avg:94.86ms +step:1180/1670 train_time:111936ms step_avg:94.86ms +step:1181/1670 train_time:112030ms step_avg:94.86ms +step:1182/1670 train_time:112124ms step_avg:94.86ms +step:1183/1670 train_time:112218ms step_avg:94.86ms +step:1184/1670 train_time:112312ms step_avg:94.86ms +step:1185/1670 train_time:112406ms step_avg:94.86ms +step:1186/1670 train_time:112500ms step_avg:94.86ms +step:1187/1670 train_time:112594ms step_avg:94.86ms +step:1188/1670 train_time:112688ms step_avg:94.86ms +step:1189/1670 train_time:112783ms step_avg:94.86ms +step:1190/1670 train_time:112876ms step_avg:94.85ms +step:1191/1670 train_time:112970ms step_avg:94.85ms +step:1192/1670 train_time:113065ms step_avg:94.85ms +step:1193/1670 train_time:113159ms step_avg:94.85ms +step:1194/1670 train_time:113254ms step_avg:94.85ms +step:1195/1670 train_time:113349ms step_avg:94.85ms +step:1196/1670 train_time:113443ms step_avg:94.85ms +step:1197/1670 train_time:113538ms step_avg:94.85ms +step:1198/1670 train_time:113633ms step_avg:94.85ms +step:1199/1670 train_time:113727ms step_avg:94.85ms +step:1200/1670 train_time:113821ms step_avg:94.85ms +step:1201/1670 train_time:113915ms step_avg:94.85ms +step:1202/1670 train_time:114009ms step_avg:94.85ms +step:1203/1670 train_time:114103ms step_avg:94.85ms +step:1204/1670 train_time:114197ms step_avg:94.85ms +step:1205/1670 train_time:114292ms step_avg:94.85ms +step:1206/1670 train_time:114387ms step_avg:94.85ms +step:1207/1670 train_time:114481ms step_avg:94.85ms +step:1208/1670 train_time:114575ms step_avg:94.85ms +step:1209/1670 train_time:114670ms step_avg:94.85ms +step:1210/1670 train_time:114765ms step_avg:94.85ms +step:1211/1670 train_time:114859ms step_avg:94.85ms +step:1212/1670 train_time:114954ms step_avg:94.85ms +step:1213/1670 train_time:115048ms step_avg:94.85ms +step:1214/1670 train_time:115143ms step_avg:94.85ms +step:1215/1670 train_time:115237ms step_avg:94.85ms +step:1216/1670 train_time:115332ms step_avg:94.85ms +step:1217/1670 train_time:115427ms step_avg:94.85ms +step:1218/1670 train_time:115521ms step_avg:94.85ms +step:1219/1670 train_time:115615ms step_avg:94.84ms +step:1220/1670 train_time:115711ms step_avg:94.85ms +step:1221/1670 train_time:115806ms step_avg:94.85ms +step:1222/1670 train_time:115900ms step_avg:94.84ms +step:1223/1670 train_time:115994ms step_avg:94.84ms +step:1224/1670 train_time:116089ms step_avg:94.84ms +step:1225/1670 train_time:116182ms step_avg:94.84ms +step:1226/1670 train_time:116277ms step_avg:94.84ms +step:1227/1670 train_time:116373ms step_avg:94.84ms +step:1228/1670 train_time:116467ms step_avg:94.84ms +step:1229/1670 train_time:116561ms step_avg:94.84ms +step:1230/1670 train_time:116656ms step_avg:94.84ms +step:1231/1670 train_time:116751ms step_avg:94.84ms +step:1232/1670 train_time:116847ms step_avg:94.84ms +step:1233/1670 train_time:116941ms step_avg:94.84ms +step:1234/1670 train_time:117035ms step_avg:94.84ms +step:1235/1670 train_time:117129ms step_avg:94.84ms +step:1236/1670 train_time:117223ms step_avg:94.84ms +step:1237/1670 train_time:117316ms step_avg:94.84ms +step:1238/1670 train_time:117411ms step_avg:94.84ms +step:1239/1670 train_time:117505ms step_avg:94.84ms +step:1240/1670 train_time:117601ms step_avg:94.84ms +step:1241/1670 train_time:117696ms step_avg:94.84ms +step:1242/1670 train_time:117790ms step_avg:94.84ms +step:1243/1670 train_time:117885ms step_avg:94.84ms +step:1244/1670 train_time:117979ms 
step_avg:94.84ms +step:1245/1670 train_time:118073ms step_avg:94.84ms +step:1246/1670 train_time:118169ms step_avg:94.84ms +step:1247/1670 train_time:118262ms step_avg:94.84ms +step:1248/1670 train_time:118357ms step_avg:94.84ms +step:1249/1670 train_time:118451ms step_avg:94.84ms +step:1250/1670 train_time:118546ms step_avg:94.84ms +step:1250/1670 val_loss:3.3758 train_time:118638ms step_avg:94.91ms +step:1251/1670 train_time:118665ms step_avg:94.86ms +step:1252/1670 train_time:118745ms step_avg:94.84ms +step:1253/1670 train_time:118845ms step_avg:94.85ms +step:1254/1670 train_time:118941ms step_avg:94.85ms +step:1255/1670 train_time:119035ms step_avg:94.85ms +step:1256/1670 train_time:119128ms step_avg:94.85ms +step:1257/1670 train_time:119221ms step_avg:94.85ms +step:1258/1670 train_time:119314ms step_avg:94.84ms +step:1259/1670 train_time:119407ms step_avg:94.84ms +step:1260/1670 train_time:119501ms step_avg:94.84ms +step:1261/1670 train_time:119594ms step_avg:94.84ms +step:1262/1670 train_time:119691ms step_avg:94.84ms +step:1263/1670 train_time:119788ms step_avg:94.84ms +step:1264/1670 train_time:119885ms step_avg:94.85ms +step:1265/1670 train_time:119980ms step_avg:94.85ms +step:1266/1670 train_time:120074ms step_avg:94.85ms +step:1267/1670 train_time:120169ms step_avg:94.85ms +step:1268/1670 train_time:120263ms step_avg:94.84ms +step:1269/1670 train_time:120356ms step_avg:94.84ms +step:1270/1670 train_time:120450ms step_avg:94.84ms +step:1271/1670 train_time:120544ms step_avg:94.84ms +step:1272/1670 train_time:120639ms step_avg:94.84ms +step:1273/1670 train_time:120733ms step_avg:94.84ms +step:1274/1670 train_time:121100ms step_avg:95.06ms +step:1275/1670 train_time:121188ms step_avg:95.05ms +step:1276/1670 train_time:121281ms step_avg:95.05ms +step:1277/1670 train_time:121374ms step_avg:95.05ms +step:1278/1670 train_time:121467ms step_avg:95.04ms +step:1279/1670 train_time:121560ms step_avg:95.04ms +step:1280/1670 train_time:121653ms step_avg:95.04ms +step:1281/1670 train_time:121746ms step_avg:95.04ms +step:1282/1670 train_time:121840ms step_avg:95.04ms +step:1283/1670 train_time:121932ms step_avg:95.04ms +step:1284/1670 train_time:122027ms step_avg:95.04ms +step:1285/1670 train_time:122126ms step_avg:95.04ms +step:1286/1670 train_time:122223ms step_avg:95.04ms +step:1287/1670 train_time:122317ms step_avg:95.04ms +step:1288/1670 train_time:122411ms step_avg:95.04ms +step:1289/1670 train_time:122505ms step_avg:95.04ms +step:1290/1670 train_time:122598ms step_avg:95.04ms +step:1291/1670 train_time:122692ms step_avg:95.04ms +step:1292/1670 train_time:122786ms step_avg:95.04ms +step:1293/1670 train_time:122880ms step_avg:95.03ms +step:1294/1670 train_time:122973ms step_avg:95.03ms +step:1295/1670 train_time:123069ms step_avg:95.03ms +step:1296/1670 train_time:123165ms step_avg:95.03ms +step:1297/1670 train_time:123261ms step_avg:95.04ms +step:1298/1670 train_time:123356ms step_avg:95.04ms +step:1299/1670 train_time:123451ms step_avg:95.04ms +step:1300/1670 train_time:123545ms step_avg:95.03ms +step:1301/1670 train_time:123639ms step_avg:95.03ms +step:1302/1670 train_time:123733ms step_avg:95.03ms +step:1303/1670 train_time:123826ms step_avg:95.03ms +step:1304/1670 train_time:123920ms step_avg:95.03ms +step:1305/1670 train_time:124013ms step_avg:95.03ms +step:1306/1670 train_time:124109ms step_avg:95.03ms +step:1307/1670 train_time:124206ms step_avg:95.03ms +step:1308/1670 train_time:124301ms step_avg:95.03ms +step:1309/1670 train_time:124396ms step_avg:95.03ms +step:1310/1670 
train_time:124491ms step_avg:95.03ms +step:1311/1670 train_time:124586ms step_avg:95.03ms +step:1312/1670 train_time:124681ms step_avg:95.03ms +step:1313/1670 train_time:124775ms step_avg:95.03ms +step:1314/1670 train_time:124870ms step_avg:95.03ms +step:1315/1670 train_time:124964ms step_avg:95.03ms +step:1316/1670 train_time:125059ms step_avg:95.03ms +step:1317/1670 train_time:125153ms step_avg:95.03ms +step:1318/1670 train_time:125248ms step_avg:95.03ms +step:1319/1670 train_time:125344ms step_avg:95.03ms +step:1320/1670 train_time:125439ms step_avg:95.03ms +step:1321/1670 train_time:125533ms step_avg:95.03ms +step:1322/1670 train_time:125627ms step_avg:95.03ms +step:1323/1670 train_time:125722ms step_avg:95.03ms +step:1324/1670 train_time:125816ms step_avg:95.03ms +step:1325/1670 train_time:125910ms step_avg:95.03ms +step:1326/1670 train_time:126004ms step_avg:95.03ms +step:1327/1670 train_time:126098ms step_avg:95.02ms +step:1328/1670 train_time:126192ms step_avg:95.02ms +step:1329/1670 train_time:126288ms step_avg:95.03ms +step:1330/1670 train_time:126384ms step_avg:95.03ms +step:1331/1670 train_time:126479ms step_avg:95.03ms +step:1332/1670 train_time:126573ms step_avg:95.02ms +step:1333/1670 train_time:126667ms step_avg:95.02ms +step:1334/1670 train_time:126762ms step_avg:95.02ms +step:1335/1670 train_time:126857ms step_avg:95.02ms +step:1336/1670 train_time:126952ms step_avg:95.02ms +step:1337/1670 train_time:127045ms step_avg:95.02ms +step:1338/1670 train_time:127140ms step_avg:95.02ms +step:1339/1670 train_time:127235ms step_avg:95.02ms +step:1340/1670 train_time:127329ms step_avg:95.02ms +step:1341/1670 train_time:127423ms step_avg:95.02ms +step:1342/1670 train_time:127517ms step_avg:95.02ms +step:1343/1670 train_time:127612ms step_avg:95.02ms +step:1344/1670 train_time:127705ms step_avg:95.02ms +step:1345/1670 train_time:127799ms step_avg:95.02ms +step:1346/1670 train_time:127893ms step_avg:95.02ms +step:1347/1670 train_time:127988ms step_avg:95.02ms +step:1348/1670 train_time:128082ms step_avg:95.02ms +step:1349/1670 train_time:128177ms step_avg:95.02ms +step:1350/1670 train_time:128271ms step_avg:95.02ms +step:1351/1670 train_time:128367ms step_avg:95.02ms +step:1352/1670 train_time:128462ms step_avg:95.02ms +step:1353/1670 train_time:128556ms step_avg:95.02ms +step:1354/1670 train_time:128651ms step_avg:95.02ms +step:1355/1670 train_time:128745ms step_avg:95.01ms +step:1356/1670 train_time:128840ms step_avg:95.01ms +step:1357/1670 train_time:128933ms step_avg:95.01ms +step:1358/1670 train_time:129027ms step_avg:95.01ms +step:1359/1670 train_time:129122ms step_avg:95.01ms +step:1360/1670 train_time:129216ms step_avg:95.01ms +step:1361/1670 train_time:129311ms step_avg:95.01ms +step:1362/1670 train_time:129406ms step_avg:95.01ms +step:1363/1670 train_time:129500ms step_avg:95.01ms +step:1364/1670 train_time:129595ms step_avg:95.01ms +step:1365/1670 train_time:129689ms step_avg:95.01ms +step:1366/1670 train_time:129784ms step_avg:95.01ms +step:1367/1670 train_time:129878ms step_avg:95.01ms +step:1368/1670 train_time:129973ms step_avg:95.01ms +step:1369/1670 train_time:130068ms step_avg:95.01ms +step:1370/1670 train_time:130163ms step_avg:95.01ms +step:1371/1670 train_time:130258ms step_avg:95.01ms +step:1372/1670 train_time:130351ms step_avg:95.01ms +step:1373/1670 train_time:130446ms step_avg:95.01ms +step:1374/1670 train_time:130541ms step_avg:95.01ms +step:1375/1670 train_time:130635ms step_avg:95.01ms +step:1375/1670 val_loss:3.3413 train_time:130728ms step_avg:95.07ms 
+step:1376/1670 train_time:130754ms step_avg:95.02ms +step:1377/1670 train_time:130832ms step_avg:95.01ms +step:1378/1670 train_time:130935ms step_avg:95.02ms +step:1379/1670 train_time:131030ms step_avg:95.02ms +step:1380/1670 train_time:131123ms step_avg:95.02ms +step:1381/1670 train_time:131218ms step_avg:95.02ms +step:1382/1670 train_time:131311ms step_avg:95.02ms +step:1383/1670 train_time:131404ms step_avg:95.01ms +step:1384/1670 train_time:131498ms step_avg:95.01ms +step:1385/1670 train_time:131592ms step_avg:95.01ms +step:1386/1670 train_time:131685ms step_avg:95.01ms +step:1387/1670 train_time:131783ms step_avg:95.01ms +step:1388/1670 train_time:131881ms step_avg:95.01ms +step:1389/1670 train_time:131978ms step_avg:95.02ms +step:1390/1670 train_time:132074ms step_avg:95.02ms +step:1391/1670 train_time:132167ms step_avg:95.02ms +step:1392/1670 train_time:132261ms step_avg:95.01ms +step:1393/1670 train_time:132354ms step_avg:95.01ms +step:1394/1670 train_time:132448ms step_avg:95.01ms +step:1395/1670 train_time:132542ms step_avg:95.01ms +step:1396/1670 train_time:132635ms step_avg:95.01ms +step:1397/1670 train_time:132730ms step_avg:95.01ms +step:1398/1670 train_time:132826ms step_avg:95.01ms +step:1399/1670 train_time:132922ms step_avg:95.01ms +step:1400/1670 train_time:133019ms step_avg:95.01ms +step:1401/1670 train_time:133114ms step_avg:95.01ms +step:1402/1670 train_time:133208ms step_avg:95.01ms +step:1403/1670 train_time:133302ms step_avg:95.01ms +step:1404/1670 train_time:133395ms step_avg:95.01ms +step:1405/1670 train_time:133489ms step_avg:95.01ms +step:1406/1670 train_time:133583ms step_avg:95.01ms +step:1407/1670 train_time:133677ms step_avg:95.01ms +step:1408/1670 train_time:133773ms step_avg:95.01ms +step:1409/1670 train_time:133869ms step_avg:95.01ms +step:1410/1670 train_time:133963ms step_avg:95.01ms +step:1411/1670 train_time:134059ms step_avg:95.01ms +step:1412/1670 train_time:134154ms step_avg:95.01ms +step:1413/1670 train_time:134248ms step_avg:95.01ms +step:1414/1670 train_time:134342ms step_avg:95.01ms +step:1415/1670 train_time:134436ms step_avg:95.01ms +step:1416/1670 train_time:134531ms step_avg:95.01ms +step:1417/1670 train_time:134625ms step_avg:95.01ms +step:1418/1670 train_time:134719ms step_avg:95.01ms +step:1419/1670 train_time:134814ms step_avg:95.01ms +step:1420/1670 train_time:134909ms step_avg:95.01ms +step:1421/1670 train_time:135004ms step_avg:95.01ms +step:1422/1670 train_time:135100ms step_avg:95.01ms +step:1423/1670 train_time:135194ms step_avg:95.01ms +step:1424/1670 train_time:135288ms step_avg:95.01ms +step:1425/1670 train_time:135382ms step_avg:95.00ms +step:1426/1670 train_time:135476ms step_avg:95.00ms +step:1427/1670 train_time:135570ms step_avg:95.00ms +step:1428/1670 train_time:135664ms step_avg:95.00ms +step:1429/1670 train_time:135759ms step_avg:95.00ms +step:1430/1670 train_time:135854ms step_avg:95.00ms +step:1431/1670 train_time:135948ms step_avg:95.00ms +step:1432/1670 train_time:136043ms step_avg:95.00ms +step:1433/1670 train_time:136138ms step_avg:95.00ms +step:1434/1670 train_time:136233ms step_avg:95.00ms +step:1435/1670 train_time:136327ms step_avg:95.00ms +step:1436/1670 train_time:136422ms step_avg:95.00ms +step:1437/1670 train_time:136516ms step_avg:95.00ms +step:1438/1670 train_time:136610ms step_avg:95.00ms +step:1439/1670 train_time:136704ms step_avg:95.00ms +step:1440/1670 train_time:136800ms step_avg:95.00ms +step:1441/1670 train_time:136894ms step_avg:95.00ms +step:1442/1670 train_time:136991ms step_avg:95.00ms 
+step:1443/1670 train_time:137085ms step_avg:95.00ms +step:1444/1670 train_time:137180ms step_avg:95.00ms +step:1445/1670 train_time:137275ms step_avg:95.00ms +step:1446/1670 train_time:137369ms step_avg:95.00ms +step:1447/1670 train_time:137464ms step_avg:95.00ms +step:1448/1670 train_time:137559ms step_avg:95.00ms +step:1449/1670 train_time:137653ms step_avg:95.00ms +step:1450/1670 train_time:137748ms step_avg:95.00ms +step:1451/1670 train_time:137842ms step_avg:95.00ms +step:1452/1670 train_time:137937ms step_avg:95.00ms +step:1453/1670 train_time:138032ms step_avg:95.00ms +step:1454/1670 train_time:138127ms step_avg:95.00ms +step:1455/1670 train_time:138222ms step_avg:95.00ms +step:1456/1670 train_time:138317ms step_avg:95.00ms +step:1457/1670 train_time:138412ms step_avg:95.00ms +step:1458/1670 train_time:138506ms step_avg:95.00ms +step:1459/1670 train_time:138601ms step_avg:95.00ms +step:1460/1670 train_time:138696ms step_avg:95.00ms +step:1461/1670 train_time:138790ms step_avg:95.00ms +step:1462/1670 train_time:138884ms step_avg:95.00ms +step:1463/1670 train_time:138978ms step_avg:95.00ms +step:1464/1670 train_time:139074ms step_avg:95.00ms +step:1465/1670 train_time:139169ms step_avg:95.00ms +step:1466/1670 train_time:139263ms step_avg:95.00ms +step:1467/1670 train_time:139358ms step_avg:95.00ms +step:1468/1670 train_time:139453ms step_avg:95.00ms +step:1469/1670 train_time:139547ms step_avg:94.99ms +step:1470/1670 train_time:139642ms step_avg:94.99ms +step:1471/1670 train_time:139736ms step_avg:94.99ms +step:1472/1670 train_time:139831ms step_avg:94.99ms +step:1473/1670 train_time:139925ms step_avg:94.99ms +step:1474/1670 train_time:140020ms step_avg:94.99ms +step:1475/1670 train_time:140115ms step_avg:94.99ms +step:1476/1670 train_time:140211ms step_avg:94.99ms +step:1477/1670 train_time:140306ms step_avg:94.99ms +step:1478/1670 train_time:140400ms step_avg:94.99ms +step:1479/1670 train_time:140496ms step_avg:94.99ms +step:1480/1670 train_time:140590ms step_avg:94.99ms +step:1481/1670 train_time:140685ms step_avg:94.99ms +step:1482/1670 train_time:140780ms step_avg:94.99ms +step:1483/1670 train_time:140874ms step_avg:94.99ms +step:1484/1670 train_time:140968ms step_avg:94.99ms +step:1485/1670 train_time:141304ms step_avg:95.15ms +step:1486/1670 train_time:141379ms step_avg:95.14ms +step:1487/1670 train_time:141472ms step_avg:95.14ms +step:1488/1670 train_time:141565ms step_avg:95.14ms +step:1489/1670 train_time:141659ms step_avg:95.14ms +step:1490/1670 train_time:141753ms step_avg:95.14ms +step:1491/1670 train_time:141845ms step_avg:95.13ms +step:1492/1670 train_time:141939ms step_avg:95.13ms +step:1493/1670 train_time:142032ms step_avg:95.13ms +step:1494/1670 train_time:142126ms step_avg:95.13ms +step:1495/1670 train_time:142226ms step_avg:95.13ms +step:1496/1670 train_time:142326ms step_avg:95.14ms +step:1497/1670 train_time:142422ms step_avg:95.14ms +step:1498/1670 train_time:142516ms step_avg:95.14ms +step:1499/1670 train_time:142610ms step_avg:95.14ms +step:1500/1670 train_time:142703ms step_avg:95.14ms +step:1500/1670 val_loss:3.3112 train_time:142795ms step_avg:95.20ms +step:1501/1670 train_time:142821ms step_avg:95.15ms +step:1502/1670 train_time:142901ms step_avg:95.14ms +step:1503/1670 train_time:143000ms step_avg:95.14ms +step:1504/1670 train_time:143095ms step_avg:95.14ms +step:1505/1670 train_time:143188ms step_avg:95.14ms +step:1506/1670 train_time:143281ms step_avg:95.14ms +step:1507/1670 train_time:143374ms step_avg:95.14ms +step:1508/1670 train_time:143467ms 
step_avg:95.14ms +step:1509/1670 train_time:143560ms step_avg:95.14ms +step:1510/1670 train_time:143654ms step_avg:95.13ms +step:1511/1670 train_time:143748ms step_avg:95.13ms +step:1512/1670 train_time:143845ms step_avg:95.14ms +step:1513/1670 train_time:143941ms step_avg:95.14ms +step:1514/1670 train_time:144037ms step_avg:95.14ms +step:1515/1670 train_time:144132ms step_avg:95.14ms +step:1516/1670 train_time:144227ms step_avg:95.14ms +step:1517/1670 train_time:144322ms step_avg:95.14ms +step:1518/1670 train_time:144415ms step_avg:95.14ms +step:1519/1670 train_time:144509ms step_avg:95.13ms +step:1520/1670 train_time:144603ms step_avg:95.13ms +step:1521/1670 train_time:144697ms step_avg:95.13ms +step:1522/1670 train_time:144792ms step_avg:95.13ms +step:1523/1670 train_time:144887ms step_avg:95.13ms +step:1524/1670 train_time:144982ms step_avg:95.13ms +step:1525/1670 train_time:145078ms step_avg:95.13ms +step:1526/1670 train_time:145173ms step_avg:95.13ms +step:1527/1670 train_time:145267ms step_avg:95.13ms +step:1528/1670 train_time:145361ms step_avg:95.13ms +step:1529/1670 train_time:145455ms step_avg:95.13ms +step:1530/1670 train_time:145550ms step_avg:95.13ms +step:1531/1670 train_time:145643ms step_avg:95.13ms +step:1532/1670 train_time:145737ms step_avg:95.13ms +step:1533/1670 train_time:145832ms step_avg:95.13ms +step:1534/1670 train_time:145928ms step_avg:95.13ms +step:1535/1670 train_time:146023ms step_avg:95.13ms +step:1536/1670 train_time:146118ms step_avg:95.13ms +step:1537/1670 train_time:146213ms step_avg:95.13ms +step:1538/1670 train_time:146308ms step_avg:95.13ms +step:1539/1670 train_time:146403ms step_avg:95.13ms +step:1540/1670 train_time:146497ms step_avg:95.13ms +step:1541/1670 train_time:146591ms step_avg:95.13ms +step:1542/1670 train_time:146686ms step_avg:95.13ms +step:1543/1670 train_time:146780ms step_avg:95.13ms +step:1544/1670 train_time:146874ms step_avg:95.13ms +step:1545/1670 train_time:146971ms step_avg:95.13ms +step:1546/1670 train_time:147067ms step_avg:95.13ms +step:1547/1670 train_time:147161ms step_avg:95.13ms +step:1548/1670 train_time:147255ms step_avg:95.13ms +step:1549/1670 train_time:147350ms step_avg:95.13ms +step:1550/1670 train_time:147444ms step_avg:95.13ms +step:1551/1670 train_time:147539ms step_avg:95.12ms +step:1552/1670 train_time:147633ms step_avg:95.12ms +step:1553/1670 train_time:147728ms step_avg:95.12ms +step:1554/1670 train_time:147822ms step_avg:95.12ms +step:1555/1670 train_time:147917ms step_avg:95.12ms +step:1556/1670 train_time:148012ms step_avg:95.12ms +step:1557/1670 train_time:148108ms step_avg:95.12ms +step:1558/1670 train_time:148203ms step_avg:95.12ms +step:1559/1670 train_time:148298ms step_avg:95.12ms +step:1560/1670 train_time:148392ms step_avg:95.12ms +step:1561/1670 train_time:148487ms step_avg:95.12ms +step:1562/1670 train_time:148582ms step_avg:95.12ms +step:1563/1670 train_time:148677ms step_avg:95.12ms +step:1564/1670 train_time:148772ms step_avg:95.12ms +step:1565/1670 train_time:148867ms step_avg:95.12ms +step:1566/1670 train_time:148963ms step_avg:95.12ms +step:1567/1670 train_time:149058ms step_avg:95.12ms +step:1568/1670 train_time:149153ms step_avg:95.12ms +step:1569/1670 train_time:149248ms step_avg:95.12ms +step:1570/1670 train_time:149343ms step_avg:95.12ms +step:1571/1670 train_time:149437ms step_avg:95.12ms +step:1572/1670 train_time:149533ms step_avg:95.12ms +step:1573/1670 train_time:149628ms step_avg:95.12ms +step:1574/1670 train_time:149722ms step_avg:95.12ms +step:1575/1670 train_time:149817ms 
step_avg:95.12ms +step:1576/1670 train_time:149911ms step_avg:95.12ms +step:1577/1670 train_time:150006ms step_avg:95.12ms +step:1578/1670 train_time:150102ms step_avg:95.12ms +step:1579/1670 train_time:150196ms step_avg:95.12ms +step:1580/1670 train_time:150292ms step_avg:95.12ms +step:1581/1670 train_time:150386ms step_avg:95.12ms +step:1582/1670 train_time:150481ms step_avg:95.12ms +step:1583/1670 train_time:150576ms step_avg:95.12ms +step:1584/1670 train_time:150670ms step_avg:95.12ms +step:1585/1670 train_time:150764ms step_avg:95.12ms +step:1586/1670 train_time:150859ms step_avg:95.12ms +step:1587/1670 train_time:150954ms step_avg:95.12ms +step:1588/1670 train_time:151049ms step_avg:95.12ms +step:1589/1670 train_time:151143ms step_avg:95.12ms +step:1590/1670 train_time:151238ms step_avg:95.12ms +step:1591/1670 train_time:151333ms step_avg:95.12ms +step:1592/1670 train_time:151427ms step_avg:95.12ms +step:1593/1670 train_time:151522ms step_avg:95.12ms +step:1594/1670 train_time:151616ms step_avg:95.12ms +step:1595/1670 train_time:151712ms step_avg:95.12ms +step:1596/1670 train_time:151806ms step_avg:95.12ms +step:1597/1670 train_time:151901ms step_avg:95.12ms +step:1598/1670 train_time:151995ms step_avg:95.12ms +step:1599/1670 train_time:152089ms step_avg:95.12ms +step:1600/1670 train_time:152184ms step_avg:95.12ms +step:1601/1670 train_time:152279ms step_avg:95.12ms +step:1602/1670 train_time:152374ms step_avg:95.11ms +step:1603/1670 train_time:152468ms step_avg:95.11ms +step:1604/1670 train_time:152564ms step_avg:95.11ms +step:1605/1670 train_time:152659ms step_avg:95.11ms +step:1606/1670 train_time:152752ms step_avg:95.11ms +step:1607/1670 train_time:152847ms step_avg:95.11ms +step:1608/1670 train_time:152942ms step_avg:95.11ms +step:1609/1670 train_time:153037ms step_avg:95.11ms +step:1610/1670 train_time:153132ms step_avg:95.11ms +step:1611/1670 train_time:153227ms step_avg:95.11ms +step:1612/1670 train_time:153322ms step_avg:95.11ms +step:1613/1670 train_time:153416ms step_avg:95.11ms +step:1614/1670 train_time:153511ms step_avg:95.11ms +step:1615/1670 train_time:153607ms step_avg:95.11ms +step:1616/1670 train_time:153701ms step_avg:95.11ms +step:1617/1670 train_time:153795ms step_avg:95.11ms +step:1618/1670 train_time:153890ms step_avg:95.11ms +step:1619/1670 train_time:153985ms step_avg:95.11ms +step:1620/1670 train_time:154079ms step_avg:95.11ms +step:1621/1670 train_time:154173ms step_avg:95.11ms +step:1622/1670 train_time:154268ms step_avg:95.11ms +step:1623/1670 train_time:154363ms step_avg:95.11ms +step:1624/1670 train_time:154458ms step_avg:95.11ms +step:1625/1670 train_time:154553ms step_avg:95.11ms +step:1625/1670 val_loss:3.2865 train_time:154646ms step_avg:95.17ms +step:1626/1670 train_time:154672ms step_avg:95.12ms +step:1627/1670 train_time:154749ms step_avg:95.11ms +step:1628/1670 train_time:154849ms step_avg:95.12ms +step:1629/1670 train_time:154944ms step_avg:95.12ms +step:1630/1670 train_time:155038ms step_avg:95.12ms +step:1631/1670 train_time:155132ms step_avg:95.11ms +step:1632/1670 train_time:155226ms step_avg:95.11ms +step:1633/1670 train_time:155319ms step_avg:95.11ms +step:1634/1670 train_time:155413ms step_avg:95.11ms +step:1635/1670 train_time:155506ms step_avg:95.11ms +step:1636/1670 train_time:155600ms step_avg:95.11ms +step:1637/1670 train_time:155696ms step_avg:95.11ms +step:1638/1670 train_time:155793ms step_avg:95.11ms +step:1639/1670 train_time:155890ms step_avg:95.11ms +step:1640/1670 train_time:155985ms step_avg:95.11ms +step:1641/1670 
train_time:156079ms step_avg:95.11ms +step:1642/1670 train_time:156173ms step_avg:95.11ms +step:1643/1670 train_time:156268ms step_avg:95.11ms +step:1644/1670 train_time:156361ms step_avg:95.11ms +step:1645/1670 train_time:156455ms step_avg:95.11ms +step:1646/1670 train_time:156549ms step_avg:95.11ms +step:1647/1670 train_time:156643ms step_avg:95.11ms +step:1648/1670 train_time:156739ms step_avg:95.11ms +step:1649/1670 train_time:156836ms step_avg:95.11ms +step:1650/1670 train_time:156932ms step_avg:95.11ms +step:1651/1670 train_time:157027ms step_avg:95.11ms +step:1652/1670 train_time:157121ms step_avg:95.11ms +step:1653/1670 train_time:157215ms step_avg:95.11ms +step:1654/1670 train_time:157310ms step_avg:95.11ms +step:1655/1670 train_time:157405ms step_avg:95.11ms +step:1656/1670 train_time:157499ms step_avg:95.11ms +step:1657/1670 train_time:157592ms step_avg:95.11ms +step:1658/1670 train_time:157686ms step_avg:95.11ms +step:1659/1670 train_time:157781ms step_avg:95.11ms +step:1660/1670 train_time:157876ms step_avg:95.11ms +step:1661/1670 train_time:157972ms step_avg:95.11ms +step:1662/1670 train_time:158066ms step_avg:95.11ms +step:1663/1670 train_time:158161ms step_avg:95.11ms +step:1664/1670 train_time:158255ms step_avg:95.11ms +step:1665/1670 train_time:158350ms step_avg:95.10ms +step:1666/1670 train_time:158444ms step_avg:95.10ms +step:1667/1670 train_time:158538ms step_avg:95.10ms +step:1668/1670 train_time:158632ms step_avg:95.10ms +step:1669/1670 train_time:158727ms step_avg:95.10ms +step:1670/1670 train_time:158822ms step_avg:95.10ms +step:1670/1670 val_loss:3.2779 train_time:158998ms step_avg:95.21ms +peak memory allocated: 32304 MiB reserved: 47696 MiB diff --git a/records/091025_Yarn/6297777d-03bd-4955-9c3a-c854246b928a.txt b/records/091025_Yarn/6297777d-03bd-4955-9c3a-c854246b928a.txt new file mode 100644 index 000000000..f506379be --- /dev/null +++ b/records/091025_Yarn/6297777d-03bd-4955-9c3a-c854246b928a.txt @@ -0,0 +1,2863 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math + +from dataclasses import dataclass +from functools import lru_cache +from itertools import accumulate +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, 
dtype=torch.float32),
+            use_fast_accum=True,
+        )
+        return out, x_f8, w_f8
+
+    return impl(x, w)
+
+@mm_op.register_fake
+def _(x: Tensor, w: Tensor, *_):
+    assert x.ndim == w.ndim == 2
+    assert x.shape[1] == w.shape[1]
+    assert x.device == w.device
+    assert x.is_contiguous() and w.is_contiguous()
+    return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn)
+
+@torch.library.custom_op("nanogpt::mm_backward", mutates_args=())
+def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]:
+    @torch.compile
+    def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor):
+        assert grad.is_contiguous()
+        x_inv_s = grad.new_tensor(x_s, dtype=torch.float32)
+        w_inv_s = grad.new_tensor(w_s, dtype=torch.float32)
+        grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32)
+        grad_f8 = grad.div(grad_s).to(torch.float8_e5m2)
+        grad_x = torch._scaled_mm(
+            grad_f8,
+            w_f8.T.contiguous().T,
+            out_dtype=torch.bfloat16,
+            scale_a=grad_inv_s,
+            scale_b=w_inv_s,
+            use_fast_accum=False,
+        )
+        # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768)
+        grad_w = torch._scaled_mm(
+            x_f8.T.contiguous(),
+            grad_f8.T.contiguous().T,
+            out_dtype=torch.float32,
+            scale_a=x_inv_s,
+            scale_b=grad_inv_s,
+            use_fast_accum=False,
+        ).T
+        return grad_x, grad_w
+
+    return impl(g, x_f8, w_f8)
+
+@mm_backward_op.register_fake
+def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_):
+    return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32)
+
+def backward(ctx, grad_out: Tensor, *_):
+    x_f8, w_f8 = ctx.saved_tensors
+    x_s, w_s, grad_s = ctx.scales
+    grad_x, grad_w = torch.ops.nanogpt.mm_backward(
+        grad_out, x_f8, w_f8, x_s, w_s, grad_s
+    )
+    return grad_x, grad_w, None, None, None
+
+def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output):
+    *_, x_s, w_s, grad_s = inputs
+    _, x_f8, w_f8 = output
+    ctx.save_for_backward(x_f8, w_f8)
+    ctx.scales = x_s, w_s, grad_s
+    ctx.set_materialize_grads(False)
+
+mm_op.register_autograd(backward, setup_context=setup_context)
+
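+# Usage sketch for the custom FP8 op above (illustrative only, not executed by
+# this script): the scale factors are placeholder values, and _scaled_mm with
+# FP8 inputs requires hardware support (e.g. H100).
+#
+#   x = torch.randn(128, 768, device="cuda", dtype=torch.bfloat16)
+#   w = torch.randn(50304, 768, device="cuda", dtype=torch.bfloat16)
+#   out, x_f8, w_f8 = torch.ops.nanogpt.mm(x, w, 2.0, 32.0, 2.0**29)
+#   # out approximates x @ w.T in bfloat16; x_f8/w_f8 are the e4m3 operands
+#   # saved for the custom backward pass registered above.
+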
+# -----------------------------------------------------------------------------
+# Triton kernel for symmetric matrix multiplication by @byronxu99
+
+def _get_autotune_configs():
+    return [
+        triton.Config(
+            {
+                "BLOCK_SIZE_M": bm,
+                "BLOCK_SIZE_N": bn,
+                "BLOCK_SIZE_K": bk,
+                "GROUP_SIZE_M": 8,
+                "LOWER_UPPER": 1,
+            },
+            num_stages=stages,
+            num_warps=warps,
+        )
+        for bm in [64, 128]
+        for bn in [64, 128, 256]
+        for bk in [64, 128]
+        for stages, warps in [(3, 4), (3, 8), (4, 4)]
+        if bm // bn <= 2 and bn // bm <= 2
+    ]
+
+@triton.jit
+def _pid_to_block(
+    pid,
+    M,
+    BLOCK_SIZE_M: tl.constexpr,
+    BLOCK_SIZE_N: tl.constexpr,
+    GROUP_SIZE_M: tl.constexpr,
+):
+    # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N)
+    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
+    num_pid_n = tl.cdiv(M, BLOCK_SIZE_N)
+
+    # Map PID to a single matrix in batch
+    batch_idx = pid // (num_pid_m * num_pid_n)
+    pid = pid % (num_pid_m * num_pid_n)
+
+    # Map PID to 2D grid of blocks
+    pid_m = pid // num_pid_n
+    pid_n = pid % num_pid_n
+    pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M)
+
+    m_idx = pid_m * BLOCK_SIZE_M
+    n_idx = pid_n * BLOCK_SIZE_N
+    return batch_idx, m_idx, n_idx
+
+@triton.autotune(
+    configs=_get_autotune_configs(),
+    key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"],
+)
+@triton.jit
+def ns_line_1_kernel(
+    A_ptr, C_ptr,
+    M, K,
+    a_stride_b, a_stride_r, a_stride_c,
+    c_stride_b, c_stride_r, c_stride_c,
+    BLOCK_SIZE_M: tl.constexpr,
+    BLOCK_SIZE_N: tl.constexpr,
+    BLOCK_SIZE_K: tl.constexpr,
+    GROUP_SIZE_M: tl.constexpr,
+    LOWER_UPPER: tl.constexpr,
+):
+    pid = tl.program_id(axis=0)
+    batch_idx, m_idx, n_idx = _pid_to_block(
+        pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M
+    )
+
+    # Skip blocks that don't need to be computed
+    skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx)
+    skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx)
+    if skip_block_below_diag or skip_block_above_diag:
+        return
+
+    # Index into one matrix of batch
+    A_ptr += batch_idx * a_stride_b
+    C_ptr += batch_idx * c_stride_b
+
+    # Create pointer arrays for A and A.T
+    offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M
+    offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M
+    offs_k = tl.arange(0, BLOCK_SIZE_K)
+    a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c)
+    at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r)
+
+    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
+
+    # Accumulate over blocks of K
+    for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)):
+        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
+        at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
+        accumulator = tl.dot(a, at, accumulator)
+        a_ptrs += BLOCK_SIZE_K * a_stride_c
+        at_ptrs += BLOCK_SIZE_K * a_stride_c
+
+    out_dtype = C_ptr.dtype.element_ty
+    output = accumulator.to(out_dtype)
+
+    # Store block of C
+    offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M)
+    offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N)
+    c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c)
+    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M)
+    tl.store(c_ptrs, output, mask=c_mask)
+
+    # Store block of C mirrored across the diagonal
+    c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c)
+    c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M)
+    tl.store(c_ptrs_t, output.T, mask=c_mask_t)
+
+def ns_line_1(A: torch.Tensor, out: torch.Tensor):
+    """
+    Launch Triton kernel to compute C = A @ A.T
+    """
+    assert A.ndim == 2 or A.ndim == 3
+    M, K = A.shape[-2:]
+    assert out.size(-2) == M, "Output matrix has incorrect shape"
+    assert out.size(-1) == M, "Output matrix has incorrect shape"
+
+    batch_size = A.size(0) if A.ndim == 3 else 1
+    input_batch_stride = A.stride(0) if A.ndim == 3 else 0
+    output_batch_stride = out.stride(0) if out.ndim == 3 else 0
+
+    grid = lambda meta: (
+        batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]),
+    )
+    ns_line_1_kernel[grid](
+        A_ptr=A,
+        C_ptr=out,
+        M=M,
+        K=K,
+        a_stride_b=input_batch_stride,
+        a_stride_r=A.stride(-2),
+        a_stride_c=A.stride(-1),
+        c_stride_b=output_batch_stride,
+        c_stride_r=out.stride(-2),
+        c_stride_c=out.stride(-1),
+    )
+    return out
+
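+# Correctness sketch for ns_line_1 (illustrative only, not executed by this
+# script; shapes and tolerances are arbitrary). The kernel computes tiles on
+# one side of the diagonal and mirrors them, so C is symmetric by construction:
+#
+#   A = torch.randn(8, 256, 512, device="cuda", dtype=torch.bfloat16)
+#   C = torch.empty(8, 256, 256, device="cuda", dtype=torch.bfloat16)
+#   ns_line_1(A, C)
+#   torch.testing.assert_close(C.float(), A.float() @ A.float().mT, rtol=1e-2, atol=1e-1)
+#
+# ns_line_2 below fuses the same symmetric product with a scaled add. Since the
+# Newton-Schulz iteration feeds it the symmetric matrix A = X @ X.T, the result
+# alpha * A @ A.T + beta * A is the polynomial term beta * A + alpha * A^2.
+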
n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # 
Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
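+        # Sharding scheme (descriptive note): params within a group share one shape, so
+        # ranks round-robin ownership: rank r reduce-scatters to obtain the averaged
+        # gradient of params[base_i + r], applies momentum and Newton-Schulz
+        # orthogonalization locally, then all ranks all-gather the updated parameters;
+        # async_op=True futures let the collectives overlap with local compute.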
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
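# note: optimizer state is sharded; each rank keeps Adam moments only
+                    # for its own 1/world_size slice of the parameter (p_slice), never the full tensor + 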
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor): + assert cos.size(0) >= x_BTHD.size(-3) + cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +@dataclass +class AttnArgs: + ve: torch.Tensor + sa_lambdas: torch.Tensor + seqlens: torch.Tensor + bm_size: int + rotary_cos: torch.Tensor + rotary_sin: torch.Tensor + attn_scale: float + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate = CastedLinear(12, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, attn_args): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + # unpack attention args + rotary_cos, rotary_sin = attn_args.rotary_cos, attn_args.rotary_sin + ve, 
sa_lambdas = attn_args.ve, attn_args.sa_lambdas + seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin) + if ve is not None: + v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @ KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = sa_lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None + # skip MLP blocks for first MLP layer by @EmelyanenkoK + self.mlp = MLP(dim) if layer_idx != 0 else None + + def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), attn_args) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + 
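# note: three shared value-embedding tables; forward applies them to the
+        # first three and last three blocks (the 012 ... 012 layout) and passes ve=None to middle blocks + 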
self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + self.max_seq_len = max_seq_len + self.setup_yarn(head_dim) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def setup_yarn(self, head_dim: int): + # store single copy of rotary tensors + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32) + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)]) + t = torch.arange(self.max_seq_len, dtype=torch.float32) + theta = torch.outer(t, angular_freq) + self.rotary_cos = nn.Buffer(theta.cos(), persistent=False) + self.rotary_sin = nn.Buffer(theta.sin(), persistent=False) + self.angular_freq = angular_freq + + # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd + windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate])) + scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])] + # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor)) + self.attn_scales = dict(zip(windows, attn_scales)) + + def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32): + rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi) + scaling_factor = old_window / new_window + interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1) + self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor) + t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device) + theta = torch.outer(t, self.angular_freq) + self.rotary_cos.copy_(theta.cos()) + self.rotary_sin.copy_(theta.sin()) + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            attn_args = AttnArgs(
+                ve=ve[i],
+                sa_lambdas=sa_lambdas[i],
+                seqlens=seqlens,
+                bm_size=bm_sizes[i],
+                rotary_cos=self.rotary_cos,
+                rotary_sin=self.rotary_sin,
+                attn_scale=self.attn_scales[ws]
+            )
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, x0, lambdas[i], attn_args)
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    # report idx (always bound) rather than cur, which is unset if we run out before the first document
+                    raise StopIteration(f"Insufficient BOS after index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+            assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
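+# Illustrative sketch (added for exposition; not called by the training path): how
+# BOSFinder carves a token shard into BOS-aligned (start, end) spans. The name
+# _bos_finder_example and its toy inputs are hypothetical.
+def _bos_finder_example():
+    toy = torch.tensor([BOS_ID, 1, 2, 3, BOS_ID, 4, 5, BOS_ID, 6, 7, 8, 9], dtype=torch.int32)
+    finder = BOSFinder(toy, world_size=1)
+    # request 8 tokens (+1 extra for the input/target shift), sequences capped at 6 tokens
+    starts, ends = finder.next_batch(num_tokens_local=8, max_seq_len=6)
+    # -> starts == [[0, 4, 7]], ends == [[4, 7, 9]]: each span begins at a BOS token and
+    #    stops at the next BOS, the max_seq_len cap, or the remaining token budget
+    return starts, ends
+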
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # next_batch fetched one extra token for the _targets offset, so trim the last document's end
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            # mirror the constructor: validate and divide by the *new* grad_accum_steps
+            assert new_num_tokens % (world_size * new_grad_accum_steps) == 0, "num_tokens must be divisible by world_size * grad_accum_steps"
+            num_tokens = new_num_tokens // new_grad_accum_steps
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.5 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = f"yarn/{uuid.uuid4()}"
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+    ws_validate: int = 13 # increase final validation ws @classiclarryd
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    head_dim=128,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + if step == args.num_iterations: + return args.ws_validate + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +ws = get_ws(0) +for step in range(train_steps + 1): + last_step = (step == train_steps) + new_ws = get_ws(step) + if new_ws != ws: + model.apply_yarn(ws, new_ws) + ws=new_ws + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 
1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Thu Sep 11 04:01:30 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | Off | +| N/A 36C P0 120W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | Off | +| N/A 40C P0 124W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | Off | +| N/A 41C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | Off | +| N/A 35C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | Off | +| N/A 35C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | Off | +| N/A 41C P0 124W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | Off | +| N/A 39C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | Off | +| N/A 36C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 63307 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 63308 C /usr/bin/python3 614MiB | +| 0 N/A N/A 63309 C /usr/bin/python3 614MiB | +| 0 N/A N/A 63310 C /usr/bin/python3 614MiB | +| 0 N/A N/A 63311 C /usr/bin/python3 614MiB | +| 0 N/A N/A 63312 C /usr/bin/python3 614MiB | +| 0 N/A N/A 63313 C /usr/bin/python3 614MiB | +| 0 N/A N/A 63314 C /usr/bin/python3 614MiB | +| 1 N/A N/A 63308 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 63309 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 63310 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 63311 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 63312 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 63313 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 63314 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1670 train_time:455ms step_avg:454.74ms +step:2/1670 train_time:481ms step_avg:240.42ms +step:3/1670 train_time:548ms step_avg:182.66ms +step:4/1670 train_time:638ms step_avg:159.62ms +step:5/1670 train_time:730ms step_avg:146.05ms +step:6/1670 train_time:822ms step_avg:136.98ms +step:7/1670 train_time:914ms step_avg:130.63ms 
+step:8/1670 train_time:1006ms step_avg:125.75ms +step:9/1670 train_time:1098ms step_avg:121.97ms +step:10/1670 train_time:1189ms step_avg:118.94ms +step:11/1670 train_time:1281ms step_avg:116.46ms +step:12/1670 train_time:1373ms step_avg:114.43ms +step:13/1670 train_time:1469ms step_avg:113.03ms +step:14/1670 train_time:1565ms step_avg:111.75ms +step:15/1670 train_time:1659ms step_avg:110.57ms +step:16/1670 train_time:1751ms step_avg:109.46ms +step:17/1670 train_time:1844ms step_avg:108.47ms +step:18/1670 train_time:1936ms step_avg:107.55ms +step:19/1670 train_time:2029ms step_avg:106.78ms +step:20/1670 train_time:2120ms step_avg:106.02ms +step:21/1670 train_time:2212ms step_avg:105.35ms +step:22/1670 train_time:2305ms step_avg:104.79ms +step:23/1670 train_time:2398ms step_avg:104.25ms +step:24/1670 train_time:2492ms step_avg:103.81ms +step:25/1670 train_time:2585ms step_avg:103.41ms +step:26/1670 train_time:2679ms step_avg:103.03ms +step:27/1670 train_time:2771ms step_avg:102.65ms +step:28/1670 train_time:2864ms step_avg:102.30ms +step:29/1670 train_time:2957ms step_avg:101.97ms +step:30/1670 train_time:3049ms step_avg:101.64ms +step:31/1670 train_time:3142ms step_avg:101.36ms +step:32/1670 train_time:3235ms step_avg:101.09ms +step:33/1670 train_time:3328ms step_avg:100.84ms +step:34/1670 train_time:3421ms step_avg:100.63ms +step:35/1670 train_time:3515ms step_avg:100.43ms +step:36/1670 train_time:3608ms step_avg:100.23ms +step:37/1670 train_time:3701ms step_avg:100.03ms +step:38/1670 train_time:3794ms step_avg:99.85ms +step:39/1670 train_time:3888ms step_avg:99.69ms +step:40/1670 train_time:3980ms step_avg:99.50ms +step:41/1670 train_time:4073ms step_avg:99.34ms +step:42/1670 train_time:4167ms step_avg:99.20ms +step:43/1670 train_time:4259ms step_avg:99.05ms +step:44/1670 train_time:4352ms step_avg:98.90ms +step:45/1670 train_time:4445ms step_avg:98.79ms +step:46/1670 train_time:4539ms step_avg:98.68ms +step:47/1670 train_time:4633ms step_avg:98.56ms +step:48/1670 train_time:4725ms step_avg:98.43ms +step:49/1670 train_time:4818ms step_avg:98.33ms +step:50/1670 train_time:4911ms step_avg:98.22ms +step:51/1670 train_time:5004ms step_avg:98.12ms +step:52/1670 train_time:5097ms step_avg:98.02ms +step:53/1670 train_time:5190ms step_avg:97.92ms +step:54/1670 train_time:5283ms step_avg:97.83ms +step:55/1670 train_time:5376ms step_avg:97.74ms +step:56/1670 train_time:5469ms step_avg:97.66ms +step:57/1670 train_time:5562ms step_avg:97.58ms +step:58/1670 train_time:5655ms step_avg:97.50ms +step:59/1670 train_time:5748ms step_avg:97.42ms +step:60/1670 train_time:5841ms step_avg:97.34ms +step:61/1670 train_time:5934ms step_avg:97.27ms +step:62/1670 train_time:6027ms step_avg:97.20ms +step:63/1670 train_time:6120ms step_avg:97.14ms +step:64/1670 train_time:6211ms step_avg:97.05ms +step:65/1670 train_time:6304ms step_avg:96.99ms +step:66/1670 train_time:6398ms step_avg:96.93ms +step:67/1670 train_time:6490ms step_avg:96.87ms +step:68/1670 train_time:6584ms step_avg:96.83ms +step:69/1670 train_time:6677ms step_avg:96.77ms +step:70/1670 train_time:6770ms step_avg:96.71ms +step:71/1670 train_time:6864ms step_avg:96.68ms +step:72/1670 train_time:6957ms step_avg:96.63ms +step:73/1670 train_time:7050ms step_avg:96.57ms +step:74/1670 train_time:7143ms step_avg:96.53ms +step:75/1670 train_time:7236ms step_avg:96.48ms +step:76/1670 train_time:7328ms step_avg:96.42ms +step:77/1670 train_time:7420ms step_avg:96.37ms +step:78/1670 train_time:7512ms step_avg:96.31ms +step:79/1670 train_time:7605ms 
step_avg:96.26ms +step:80/1670 train_time:7697ms step_avg:96.22ms +step:81/1670 train_time:7790ms step_avg:96.17ms +step:82/1670 train_time:7883ms step_avg:96.13ms +step:83/1670 train_time:7976ms step_avg:96.10ms +step:84/1670 train_time:8069ms step_avg:96.06ms +step:85/1670 train_time:8161ms step_avg:96.02ms +step:86/1670 train_time:8253ms step_avg:95.97ms +step:87/1670 train_time:8346ms step_avg:95.93ms +step:88/1670 train_time:8439ms step_avg:95.89ms +step:89/1670 train_time:8530ms step_avg:95.85ms +step:90/1670 train_time:8623ms step_avg:95.81ms +step:91/1670 train_time:8716ms step_avg:95.77ms +step:92/1670 train_time:8808ms step_avg:95.74ms +step:93/1670 train_time:8900ms step_avg:95.70ms +step:94/1670 train_time:8992ms step_avg:95.66ms +step:95/1670 train_time:9086ms step_avg:95.65ms +step:96/1670 train_time:9179ms step_avg:95.62ms +step:97/1670 train_time:9272ms step_avg:95.59ms +step:98/1670 train_time:9365ms step_avg:95.56ms +step:99/1670 train_time:9458ms step_avg:95.53ms +step:100/1670 train_time:9549ms step_avg:95.49ms +step:101/1670 train_time:9643ms step_avg:95.47ms +step:102/1670 train_time:9735ms step_avg:95.44ms +step:103/1670 train_time:9827ms step_avg:95.41ms +step:104/1670 train_time:9920ms step_avg:95.38ms +step:105/1670 train_time:10012ms step_avg:95.36ms +step:106/1670 train_time:10105ms step_avg:95.33ms +step:107/1670 train_time:10198ms step_avg:95.31ms +step:108/1670 train_time:10291ms step_avg:95.28ms +step:109/1670 train_time:10384ms step_avg:95.26ms +step:110/1670 train_time:10476ms step_avg:95.23ms +step:111/1670 train_time:10568ms step_avg:95.20ms +step:112/1670 train_time:10660ms step_avg:95.18ms +step:113/1670 train_time:10752ms step_avg:95.15ms +step:114/1670 train_time:10845ms step_avg:95.13ms +step:115/1670 train_time:10937ms step_avg:95.10ms +step:116/1670 train_time:11030ms step_avg:95.08ms +step:117/1670 train_time:11122ms step_avg:95.06ms +step:118/1670 train_time:11214ms step_avg:95.04ms +step:119/1670 train_time:11306ms step_avg:95.01ms +step:120/1670 train_time:11399ms step_avg:94.99ms +step:121/1670 train_time:11491ms step_avg:94.97ms +step:122/1670 train_time:11584ms step_avg:94.95ms +step:123/1670 train_time:11676ms step_avg:94.93ms +step:124/1670 train_time:11768ms step_avg:94.91ms +step:125/1670 train_time:11861ms step_avg:94.89ms +step:125/1670 val_loss:4.3018 train_time:11951ms step_avg:95.61ms +step:126/1670 train_time:11978ms step_avg:95.07ms +step:127/1670 train_time:12049ms step_avg:94.88ms +step:128/1670 train_time:12154ms step_avg:94.95ms +step:129/1670 train_time:12252ms step_avg:94.98ms +step:130/1670 train_time:12346ms step_avg:94.97ms +step:131/1670 train_time:12437ms step_avg:94.94ms +step:132/1670 train_time:12529ms step_avg:94.91ms +step:133/1670 train_time:12620ms step_avg:94.89ms +step:134/1670 train_time:12712ms step_avg:94.86ms +step:135/1670 train_time:12803ms step_avg:94.84ms +step:136/1670 train_time:12894ms step_avg:94.81ms +step:137/1670 train_time:12986ms step_avg:94.79ms +step:138/1670 train_time:13079ms step_avg:94.77ms +step:139/1670 train_time:13173ms step_avg:94.77ms +step:140/1670 train_time:13268ms step_avg:94.77ms +step:141/1670 train_time:13360ms step_avg:94.75ms +step:142/1670 train_time:13453ms step_avg:94.74ms +step:143/1670 train_time:13545ms step_avg:94.72ms +step:144/1670 train_time:13637ms step_avg:94.70ms +step:145/1670 train_time:13728ms step_avg:94.68ms +step:146/1670 train_time:13820ms step_avg:94.66ms +step:147/1670 train_time:13912ms step_avg:94.64ms +step:148/1670 train_time:14005ms 
step_avg:94.63ms +step:149/1670 train_time:14097ms step_avg:94.61ms +step:150/1670 train_time:14190ms step_avg:94.60ms +step:151/1670 train_time:14283ms step_avg:94.59ms +step:152/1670 train_time:14376ms step_avg:94.58ms +step:153/1670 train_time:14469ms step_avg:94.57ms +step:154/1670 train_time:14561ms step_avg:94.55ms +step:155/1670 train_time:14654ms step_avg:94.54ms +step:156/1670 train_time:14746ms step_avg:94.53ms +step:157/1670 train_time:14838ms step_avg:94.51ms +step:158/1670 train_time:14931ms step_avg:94.50ms +step:159/1670 train_time:15023ms step_avg:94.48ms +step:160/1670 train_time:15115ms step_avg:94.47ms +step:161/1670 train_time:15208ms step_avg:94.46ms +step:162/1670 train_time:15301ms step_avg:94.45ms +step:163/1670 train_time:15394ms step_avg:94.44ms +step:164/1670 train_time:15486ms step_avg:94.43ms +step:165/1670 train_time:15578ms step_avg:94.41ms +step:166/1670 train_time:15671ms step_avg:94.40ms +step:167/1670 train_time:15764ms step_avg:94.40ms +step:168/1670 train_time:15856ms step_avg:94.38ms +step:169/1670 train_time:15947ms step_avg:94.36ms +step:170/1670 train_time:16039ms step_avg:94.35ms +step:171/1670 train_time:16132ms step_avg:94.34ms +step:172/1670 train_time:16225ms step_avg:94.33ms +step:173/1670 train_time:16317ms step_avg:94.32ms +step:174/1670 train_time:16411ms step_avg:94.31ms +step:175/1670 train_time:16503ms step_avg:94.30ms +step:176/1670 train_time:16595ms step_avg:94.29ms +step:177/1670 train_time:16688ms step_avg:94.28ms +step:178/1670 train_time:16780ms step_avg:94.27ms +step:179/1670 train_time:16872ms step_avg:94.26ms +step:180/1670 train_time:16965ms step_avg:94.25ms +step:181/1670 train_time:17058ms step_avg:94.24ms +step:182/1670 train_time:17150ms step_avg:94.23ms +step:183/1670 train_time:17243ms step_avg:94.22ms +step:184/1670 train_time:17335ms step_avg:94.21ms +step:185/1670 train_time:17428ms step_avg:94.21ms +step:186/1670 train_time:17521ms step_avg:94.20ms +step:187/1670 train_time:17615ms step_avg:94.20ms +step:188/1670 train_time:17707ms step_avg:94.19ms +step:189/1670 train_time:17799ms step_avg:94.18ms +step:190/1670 train_time:17892ms step_avg:94.17ms +step:191/1670 train_time:17985ms step_avg:94.16ms +step:192/1670 train_time:18078ms step_avg:94.15ms +step:193/1670 train_time:18169ms step_avg:94.14ms +step:194/1670 train_time:18262ms step_avg:94.14ms +step:195/1670 train_time:18355ms step_avg:94.13ms +step:196/1670 train_time:18447ms step_avg:94.12ms +step:197/1670 train_time:18539ms step_avg:94.11ms +step:198/1670 train_time:18633ms step_avg:94.10ms +step:199/1670 train_time:18725ms step_avg:94.09ms +step:200/1670 train_time:18817ms step_avg:94.08ms +step:201/1670 train_time:18909ms step_avg:94.08ms +step:202/1670 train_time:19002ms step_avg:94.07ms +step:203/1670 train_time:19095ms step_avg:94.06ms +step:204/1670 train_time:19188ms step_avg:94.06ms +step:205/1670 train_time:19280ms step_avg:94.05ms +step:206/1670 train_time:19373ms step_avg:94.04ms +step:207/1670 train_time:19466ms step_avg:94.04ms +step:208/1670 train_time:19558ms step_avg:94.03ms +step:209/1670 train_time:19651ms step_avg:94.02ms +step:210/1670 train_time:19743ms step_avg:94.02ms +step:211/1670 train_time:19836ms step_avg:94.01ms +step:212/1670 train_time:19928ms step_avg:94.00ms +step:213/1670 train_time:20267ms step_avg:95.15ms +step:214/1670 train_time:20407ms step_avg:95.36ms +step:215/1670 train_time:20498ms step_avg:95.34ms +step:216/1670 train_time:20590ms step_avg:95.32ms +step:217/1670 train_time:20681ms step_avg:95.30ms +step:218/1670 
train_time:20772ms step_avg:95.29ms +step:219/1670 train_time:20864ms step_avg:95.27ms +step:220/1670 train_time:20956ms step_avg:95.25ms +step:221/1670 train_time:21047ms step_avg:95.23ms +step:222/1670 train_time:21138ms step_avg:95.22ms +step:223/1670 train_time:21230ms step_avg:95.20ms +step:224/1670 train_time:21324ms step_avg:95.20ms +step:225/1670 train_time:21420ms step_avg:95.20ms +step:226/1670 train_time:21513ms step_avg:95.19ms +step:227/1670 train_time:21606ms step_avg:95.18ms +step:228/1670 train_time:21698ms step_avg:95.17ms +step:229/1670 train_time:21790ms step_avg:95.15ms +step:230/1670 train_time:21882ms step_avg:95.14ms +step:231/1670 train_time:21974ms step_avg:95.12ms +step:232/1670 train_time:22066ms step_avg:95.11ms +step:233/1670 train_time:22158ms step_avg:95.10ms +step:234/1670 train_time:22250ms step_avg:95.09ms +step:235/1670 train_time:22343ms step_avg:95.08ms +step:236/1670 train_time:22436ms step_avg:95.07ms +step:237/1670 train_time:22530ms step_avg:95.06ms +step:238/1670 train_time:22623ms step_avg:95.05ms +step:239/1670 train_time:22715ms step_avg:95.04ms +step:240/1670 train_time:22807ms step_avg:95.03ms +step:241/1670 train_time:22899ms step_avg:95.01ms +step:242/1670 train_time:22991ms step_avg:95.00ms +step:243/1670 train_time:23083ms step_avg:94.99ms +step:244/1670 train_time:23175ms step_avg:94.98ms +step:245/1670 train_time:23267ms step_avg:94.97ms +step:246/1670 train_time:23361ms step_avg:94.96ms +step:247/1670 train_time:23455ms step_avg:94.96ms +step:248/1670 train_time:23547ms step_avg:94.95ms +step:249/1670 train_time:23640ms step_avg:94.94ms +step:250/1670 train_time:23732ms step_avg:94.93ms +step:250/1670 val_loss:3.9694 train_time:23822ms step_avg:95.29ms +step:251/1670 train_time:23851ms step_avg:95.02ms +step:252/1670 train_time:23922ms step_avg:94.93ms +step:253/1670 train_time:24021ms step_avg:94.94ms +step:254/1670 train_time:24115ms step_avg:94.94ms +step:255/1670 train_time:24207ms step_avg:94.93ms +step:256/1670 train_time:24299ms step_avg:94.92ms +step:257/1670 train_time:24390ms step_avg:94.90ms +step:258/1670 train_time:24482ms step_avg:94.89ms +step:259/1670 train_time:24573ms step_avg:94.88ms +step:260/1670 train_time:24664ms step_avg:94.86ms +step:261/1670 train_time:24756ms step_avg:94.85ms +step:262/1670 train_time:24849ms step_avg:94.84ms +step:263/1670 train_time:24944ms step_avg:94.84ms +step:264/1670 train_time:25039ms step_avg:94.84ms +step:265/1670 train_time:25132ms step_avg:94.84ms +step:266/1670 train_time:25224ms step_avg:94.83ms +step:267/1670 train_time:25316ms step_avg:94.82ms +step:268/1670 train_time:25409ms step_avg:94.81ms +step:269/1670 train_time:25500ms step_avg:94.80ms +step:270/1670 train_time:25593ms step_avg:94.79ms +step:271/1670 train_time:25684ms step_avg:94.78ms +step:272/1670 train_time:25777ms step_avg:94.77ms +step:273/1670 train_time:25870ms step_avg:94.76ms +step:274/1670 train_time:25962ms step_avg:94.75ms +step:275/1670 train_time:26056ms step_avg:94.75ms +step:276/1670 train_time:26150ms step_avg:94.75ms +step:277/1670 train_time:26243ms step_avg:94.74ms +step:278/1670 train_time:26336ms step_avg:94.73ms +step:279/1670 train_time:26428ms step_avg:94.72ms +step:280/1670 train_time:26520ms step_avg:94.72ms +step:281/1670 train_time:26613ms step_avg:94.71ms +step:282/1670 train_time:26704ms step_avg:94.70ms +step:283/1670 train_time:26797ms step_avg:94.69ms +step:284/1670 train_time:26889ms step_avg:94.68ms +step:285/1670 train_time:26982ms step_avg:94.67ms +step:286/1670 train_time:27076ms 
step_avg:94.67ms +step:287/1670 train_time:27168ms step_avg:94.66ms +step:288/1670 train_time:27261ms step_avg:94.66ms +step:289/1670 train_time:27354ms step_avg:94.65ms +step:290/1670 train_time:27446ms step_avg:94.64ms +step:291/1670 train_time:27539ms step_avg:94.63ms +step:292/1670 train_time:27631ms step_avg:94.63ms +step:293/1670 train_time:27723ms step_avg:94.62ms +step:294/1670 train_time:27815ms step_avg:94.61ms +step:295/1670 train_time:27907ms step_avg:94.60ms +step:296/1670 train_time:28000ms step_avg:94.59ms +step:297/1670 train_time:28093ms step_avg:94.59ms +step:298/1670 train_time:28186ms step_avg:94.58ms +step:299/1670 train_time:28279ms step_avg:94.58ms +step:300/1670 train_time:28371ms step_avg:94.57ms +step:301/1670 train_time:28463ms step_avg:94.56ms +step:302/1670 train_time:28556ms step_avg:94.56ms +step:303/1670 train_time:28648ms step_avg:94.55ms +step:304/1670 train_time:28740ms step_avg:94.54ms +step:305/1670 train_time:28833ms step_avg:94.53ms +step:306/1670 train_time:28925ms step_avg:94.53ms +step:307/1670 train_time:29018ms step_avg:94.52ms +step:308/1670 train_time:29111ms step_avg:94.52ms +step:309/1670 train_time:29203ms step_avg:94.51ms +step:310/1670 train_time:29296ms step_avg:94.50ms +step:311/1670 train_time:29388ms step_avg:94.50ms +step:312/1670 train_time:29482ms step_avg:94.49ms +step:313/1670 train_time:29574ms step_avg:94.49ms +step:314/1670 train_time:29667ms step_avg:94.48ms +step:315/1670 train_time:29759ms step_avg:94.47ms +step:316/1670 train_time:29852ms step_avg:94.47ms +step:317/1670 train_time:29944ms step_avg:94.46ms +step:318/1670 train_time:30037ms step_avg:94.46ms +step:319/1670 train_time:30131ms step_avg:94.45ms +step:320/1670 train_time:30223ms step_avg:94.45ms +step:321/1670 train_time:30315ms step_avg:94.44ms +step:322/1670 train_time:30407ms step_avg:94.43ms +step:323/1670 train_time:30501ms step_avg:94.43ms +step:324/1670 train_time:30594ms step_avg:94.43ms +step:325/1670 train_time:30686ms step_avg:94.42ms +step:326/1670 train_time:30778ms step_avg:94.41ms +step:327/1670 train_time:30871ms step_avg:94.41ms +step:328/1670 train_time:30963ms step_avg:94.40ms +step:329/1670 train_time:31056ms step_avg:94.39ms +step:330/1670 train_time:31148ms step_avg:94.39ms +step:331/1670 train_time:31240ms step_avg:94.38ms +step:332/1670 train_time:31333ms step_avg:94.38ms +step:333/1670 train_time:31426ms step_avg:94.37ms +step:334/1670 train_time:31518ms step_avg:94.37ms +step:335/1670 train_time:31611ms step_avg:94.36ms +step:336/1670 train_time:31703ms step_avg:94.35ms +step:337/1670 train_time:31795ms step_avg:94.35ms +step:338/1670 train_time:31887ms step_avg:94.34ms +step:339/1670 train_time:31979ms step_avg:94.33ms +step:340/1670 train_time:32073ms step_avg:94.33ms +step:341/1670 train_time:32165ms step_avg:94.33ms +step:342/1670 train_time:32258ms step_avg:94.32ms +step:343/1670 train_time:32350ms step_avg:94.32ms +step:344/1670 train_time:32444ms step_avg:94.31ms +step:345/1670 train_time:32537ms step_avg:94.31ms +step:346/1670 train_time:32629ms step_avg:94.30ms +step:347/1670 train_time:32721ms step_avg:94.30ms +step:348/1670 train_time:32815ms step_avg:94.30ms +step:349/1670 train_time:32907ms step_avg:94.29ms +step:350/1670 train_time:32999ms step_avg:94.28ms +step:351/1670 train_time:33092ms step_avg:94.28ms +step:352/1670 train_time:33184ms step_avg:94.27ms +step:353/1670 train_time:33277ms step_avg:94.27ms +step:354/1670 train_time:33369ms step_avg:94.26ms +step:355/1670 train_time:33462ms step_avg:94.26ms +step:356/1670 
train_time:33555ms step_avg:94.25ms +step:357/1670 train_time:33647ms step_avg:94.25ms +step:358/1670 train_time:33740ms step_avg:94.25ms +step:359/1670 train_time:33833ms step_avg:94.24ms +step:360/1670 train_time:33925ms step_avg:94.23ms +step:361/1670 train_time:34018ms step_avg:94.23ms +step:362/1670 train_time:34111ms step_avg:94.23ms +step:363/1670 train_time:34203ms step_avg:94.22ms +step:364/1670 train_time:34295ms step_avg:94.22ms +step:365/1670 train_time:34387ms step_avg:94.21ms +step:366/1670 train_time:34480ms step_avg:94.21ms +step:367/1670 train_time:34573ms step_avg:94.20ms +step:368/1670 train_time:34666ms step_avg:94.20ms +step:369/1670 train_time:34759ms step_avg:94.20ms +step:370/1670 train_time:34851ms step_avg:94.19ms +step:371/1670 train_time:34943ms step_avg:94.19ms +step:372/1670 train_time:35036ms step_avg:94.18ms +step:373/1670 train_time:35128ms step_avg:94.18ms +step:374/1670 train_time:35221ms step_avg:94.17ms +step:375/1670 train_time:35313ms step_avg:94.17ms +step:375/1670 val_loss:3.8201 train_time:35403ms step_avg:94.41ms +step:376/1670 train_time:35431ms step_avg:94.23ms +step:377/1670 train_time:35506ms step_avg:94.18ms +step:378/1670 train_time:35607ms step_avg:94.20ms +step:379/1670 train_time:35704ms step_avg:94.20ms +step:380/1670 train_time:35796ms step_avg:94.20ms +step:381/1670 train_time:35887ms step_avg:94.19ms +step:382/1670 train_time:35978ms step_avg:94.18ms +step:383/1670 train_time:36070ms step_avg:94.18ms +step:384/1670 train_time:36162ms step_avg:94.17ms +step:385/1670 train_time:36253ms step_avg:94.16ms +step:386/1670 train_time:36345ms step_avg:94.16ms +step:387/1670 train_time:36437ms step_avg:94.15ms +step:388/1670 train_time:36533ms step_avg:94.16ms +step:389/1670 train_time:36628ms step_avg:94.16ms +step:390/1670 train_time:36722ms step_avg:94.16ms +step:391/1670 train_time:36814ms step_avg:94.15ms +step:392/1670 train_time:36907ms step_avg:94.15ms +step:393/1670 train_time:36999ms step_avg:94.14ms +step:394/1670 train_time:37090ms step_avg:94.14ms +step:395/1670 train_time:37182ms step_avg:94.13ms +step:396/1670 train_time:37273ms step_avg:94.12ms +step:397/1670 train_time:37365ms step_avg:94.12ms +step:398/1670 train_time:37458ms step_avg:94.12ms +step:399/1670 train_time:37552ms step_avg:94.11ms +step:400/1670 train_time:37645ms step_avg:94.11ms +step:401/1670 train_time:37738ms step_avg:94.11ms +step:402/1670 train_time:37831ms step_avg:94.11ms +step:403/1670 train_time:37924ms step_avg:94.10ms +step:404/1670 train_time:38016ms step_avg:94.10ms +step:405/1670 train_time:38109ms step_avg:94.10ms +step:406/1670 train_time:38200ms step_avg:94.09ms +step:407/1670 train_time:38292ms step_avg:94.08ms +step:408/1670 train_time:38384ms step_avg:94.08ms +step:409/1670 train_time:38478ms step_avg:94.08ms +step:410/1670 train_time:38571ms step_avg:94.08ms +step:411/1670 train_time:38664ms step_avg:94.07ms +step:412/1670 train_time:38758ms step_avg:94.07ms +step:413/1670 train_time:38850ms step_avg:94.07ms +step:414/1670 train_time:38942ms step_avg:94.06ms +step:415/1670 train_time:39035ms step_avg:94.06ms +step:416/1670 train_time:39128ms step_avg:94.06ms +step:417/1670 train_time:39220ms step_avg:94.05ms +step:418/1670 train_time:39312ms step_avg:94.05ms +step:419/1670 train_time:39404ms step_avg:94.04ms +step:420/1670 train_time:39497ms step_avg:94.04ms +step:421/1670 train_time:39591ms step_avg:94.04ms +step:422/1670 train_time:39683ms step_avg:94.04ms +step:423/1670 train_time:39777ms step_avg:94.03ms +step:424/1670 train_time:39870ms 
step_avg:94.03ms +step:425/1670 train_time:40207ms step_avg:94.60ms +step:426/1670 train_time:40381ms step_avg:94.79ms +step:427/1670 train_time:40471ms step_avg:94.78ms +step:428/1670 train_time:40563ms step_avg:94.77ms +step:429/1670 train_time:40655ms step_avg:94.77ms +step:430/1670 train_time:40745ms step_avg:94.76ms +step:431/1670 train_time:40836ms step_avg:94.75ms +step:432/1670 train_time:40928ms step_avg:94.74ms +step:433/1670 train_time:41020ms step_avg:94.73ms +step:434/1670 train_time:41111ms step_avg:94.73ms +step:435/1670 train_time:41205ms step_avg:94.72ms +step:436/1670 train_time:41301ms step_avg:94.73ms +step:437/1670 train_time:41397ms step_avg:94.73ms +step:438/1670 train_time:41491ms step_avg:94.73ms +step:439/1670 train_time:41584ms step_avg:94.72ms +step:440/1670 train_time:41676ms step_avg:94.72ms +step:441/1670 train_time:41768ms step_avg:94.71ms +step:442/1670 train_time:41860ms step_avg:94.71ms +step:443/1670 train_time:41952ms step_avg:94.70ms +step:444/1670 train_time:42043ms step_avg:94.69ms +step:445/1670 train_time:42134ms step_avg:94.68ms +step:446/1670 train_time:42227ms step_avg:94.68ms +step:447/1670 train_time:42320ms step_avg:94.68ms +step:448/1670 train_time:42413ms step_avg:94.67ms +step:449/1670 train_time:42507ms step_avg:94.67ms +step:450/1670 train_time:42600ms step_avg:94.67ms +step:451/1670 train_time:42692ms step_avg:94.66ms +step:452/1670 train_time:42784ms step_avg:94.66ms +step:453/1670 train_time:42877ms step_avg:94.65ms +step:454/1670 train_time:42969ms step_avg:94.65ms +step:455/1670 train_time:43061ms step_avg:94.64ms +step:456/1670 train_time:43153ms step_avg:94.63ms +step:457/1670 train_time:43245ms step_avg:94.63ms +step:458/1670 train_time:43339ms step_avg:94.63ms +step:459/1670 train_time:43433ms step_avg:94.62ms +step:460/1670 train_time:43527ms step_avg:94.62ms +step:461/1670 train_time:43619ms step_avg:94.62ms +step:462/1670 train_time:43712ms step_avg:94.62ms +step:463/1670 train_time:43805ms step_avg:94.61ms +step:464/1670 train_time:43896ms step_avg:94.60ms +step:465/1670 train_time:43989ms step_avg:94.60ms +step:466/1670 train_time:44081ms step_avg:94.59ms +step:467/1670 train_time:44174ms step_avg:94.59ms +step:468/1670 train_time:44266ms step_avg:94.59ms +step:469/1670 train_time:44358ms step_avg:94.58ms +step:470/1670 train_time:44451ms step_avg:94.58ms +step:471/1670 train_time:44543ms step_avg:94.57ms +step:472/1670 train_time:44635ms step_avg:94.57ms +step:473/1670 train_time:44729ms step_avg:94.56ms +step:474/1670 train_time:44821ms step_avg:94.56ms +step:475/1670 train_time:44913ms step_avg:94.55ms +step:476/1670 train_time:45006ms step_avg:94.55ms +step:477/1670 train_time:45098ms step_avg:94.55ms +step:478/1670 train_time:45190ms step_avg:94.54ms +step:479/1670 train_time:45283ms step_avg:94.54ms +step:480/1670 train_time:45376ms step_avg:94.53ms +step:481/1670 train_time:45468ms step_avg:94.53ms +step:482/1670 train_time:45561ms step_avg:94.52ms +step:483/1670 train_time:45654ms step_avg:94.52ms +step:484/1670 train_time:45746ms step_avg:94.52ms +step:485/1670 train_time:45838ms step_avg:94.51ms +step:486/1670 train_time:45931ms step_avg:94.51ms +step:487/1670 train_time:46024ms step_avg:94.51ms +step:488/1670 train_time:46117ms step_avg:94.50ms +step:489/1670 train_time:46209ms step_avg:94.50ms +step:490/1670 train_time:46301ms step_avg:94.49ms +step:491/1670 train_time:46394ms step_avg:94.49ms +step:492/1670 train_time:46487ms step_avg:94.49ms +step:493/1670 train_time:46580ms step_avg:94.48ms +step:494/1670 
train_time:46672ms step_avg:94.48ms +step:495/1670 train_time:46765ms step_avg:94.47ms +step:496/1670 train_time:46858ms step_avg:94.47ms +step:497/1670 train_time:46951ms step_avg:94.47ms +step:498/1670 train_time:47043ms step_avg:94.46ms +step:499/1670 train_time:47135ms step_avg:94.46ms +step:500/1670 train_time:47228ms step_avg:94.46ms +step:500/1670 val_loss:3.7176 train_time:47317ms step_avg:94.63ms +step:501/1670 train_time:47344ms step_avg:94.50ms +step:502/1670 train_time:47417ms step_avg:94.46ms +step:503/1670 train_time:47517ms step_avg:94.47ms +step:504/1670 train_time:47612ms step_avg:94.47ms +step:505/1670 train_time:47703ms step_avg:94.46ms +step:506/1670 train_time:47795ms step_avg:94.46ms +step:507/1670 train_time:47886ms step_avg:94.45ms +step:508/1670 train_time:47977ms step_avg:94.44ms +step:509/1670 train_time:48069ms step_avg:94.44ms +step:510/1670 train_time:48161ms step_avg:94.43ms +step:511/1670 train_time:48253ms step_avg:94.43ms +step:512/1670 train_time:48345ms step_avg:94.42ms +step:513/1670 train_time:48440ms step_avg:94.42ms +step:514/1670 train_time:48535ms step_avg:94.43ms +step:515/1670 train_time:48629ms step_avg:94.42ms +step:516/1670 train_time:48721ms step_avg:94.42ms +step:517/1670 train_time:48813ms step_avg:94.41ms +step:518/1670 train_time:48906ms step_avg:94.41ms +step:519/1670 train_time:48997ms step_avg:94.41ms +step:520/1670 train_time:49088ms step_avg:94.40ms +step:521/1670 train_time:49180ms step_avg:94.40ms +step:522/1670 train_time:49272ms step_avg:94.39ms +step:523/1670 train_time:49365ms step_avg:94.39ms +step:524/1670 train_time:49458ms step_avg:94.39ms +step:525/1670 train_time:49552ms step_avg:94.39ms +step:526/1670 train_time:49645ms step_avg:94.38ms +step:527/1670 train_time:49738ms step_avg:94.38ms +step:528/1670 train_time:49830ms step_avg:94.38ms +step:529/1670 train_time:49923ms step_avg:94.37ms +step:530/1670 train_time:50014ms step_avg:94.37ms +step:531/1670 train_time:50107ms step_avg:94.36ms +step:532/1670 train_time:50199ms step_avg:94.36ms +step:533/1670 train_time:50291ms step_avg:94.35ms +step:534/1670 train_time:50384ms step_avg:94.35ms +step:535/1670 train_time:50478ms step_avg:94.35ms +step:536/1670 train_time:50571ms step_avg:94.35ms +step:537/1670 train_time:50664ms step_avg:94.35ms +step:538/1670 train_time:50757ms step_avg:94.34ms +step:539/1670 train_time:50850ms step_avg:94.34ms +step:540/1670 train_time:50942ms step_avg:94.34ms +step:541/1670 train_time:51035ms step_avg:94.33ms +step:542/1670 train_time:51128ms step_avg:94.33ms +step:543/1670 train_time:51219ms step_avg:94.33ms +step:544/1670 train_time:51312ms step_avg:94.32ms +step:545/1670 train_time:51405ms step_avg:94.32ms +step:546/1670 train_time:51498ms step_avg:94.32ms +step:547/1670 train_time:51591ms step_avg:94.32ms +step:548/1670 train_time:51684ms step_avg:94.31ms +step:549/1670 train_time:51777ms step_avg:94.31ms +step:550/1670 train_time:51870ms step_avg:94.31ms +step:551/1670 train_time:51962ms step_avg:94.30ms +step:552/1670 train_time:52054ms step_avg:94.30ms +step:553/1670 train_time:52147ms step_avg:94.30ms +step:554/1670 train_time:52239ms step_avg:94.29ms +step:555/1670 train_time:52332ms step_avg:94.29ms +step:556/1670 train_time:52425ms step_avg:94.29ms +step:557/1670 train_time:52518ms step_avg:94.29ms +step:558/1670 train_time:52713ms step_avg:94.47ms +step:559/1670 train_time:52789ms step_avg:94.43ms +step:560/1670 train_time:52881ms step_avg:94.43ms +step:561/1670 train_time:52974ms step_avg:94.43ms +step:562/1670 train_time:53066ms 
step_avg:94.42ms +step:563/1670 train_time:53159ms step_avg:94.42ms +step:564/1670 train_time:53252ms step_avg:94.42ms +step:565/1670 train_time:53344ms step_avg:94.41ms +step:566/1670 train_time:53437ms step_avg:94.41ms +step:567/1670 train_time:53530ms step_avg:94.41ms +step:568/1670 train_time:53627ms step_avg:94.41ms +step:569/1670 train_time:53725ms step_avg:94.42ms +step:570/1670 train_time:53819ms step_avg:94.42ms +step:571/1670 train_time:53913ms step_avg:94.42ms +step:572/1670 train_time:54006ms step_avg:94.42ms +step:573/1670 train_time:54099ms step_avg:94.41ms +step:574/1670 train_time:54192ms step_avg:94.41ms +step:575/1670 train_time:54284ms step_avg:94.41ms +step:576/1670 train_time:54377ms step_avg:94.41ms +step:577/1670 train_time:54471ms step_avg:94.40ms +step:578/1670 train_time:54564ms step_avg:94.40ms +step:579/1670 train_time:54660ms step_avg:94.40ms +step:580/1670 train_time:54755ms step_avg:94.41ms +step:581/1670 train_time:54850ms step_avg:94.41ms +step:582/1670 train_time:54945ms step_avg:94.41ms +step:583/1670 train_time:55038ms step_avg:94.41ms +step:584/1670 train_time:55133ms step_avg:94.41ms +step:585/1670 train_time:55225ms step_avg:94.40ms +step:586/1670 train_time:55318ms step_avg:94.40ms +step:587/1670 train_time:55411ms step_avg:94.40ms +step:588/1670 train_time:55504ms step_avg:94.39ms +step:589/1670 train_time:55598ms step_avg:94.39ms +step:590/1670 train_time:55692ms step_avg:94.39ms +step:591/1670 train_time:55786ms step_avg:94.39ms +step:592/1670 train_time:55881ms step_avg:94.39ms +step:593/1670 train_time:55976ms step_avg:94.39ms +step:594/1670 train_time:56070ms step_avg:94.39ms +step:595/1670 train_time:56163ms step_avg:94.39ms +step:596/1670 train_time:56256ms step_avg:94.39ms +step:597/1670 train_time:56350ms step_avg:94.39ms +step:598/1670 train_time:56443ms step_avg:94.39ms +step:599/1670 train_time:56537ms step_avg:94.39ms +step:600/1670 train_time:56631ms step_avg:94.39ms +step:601/1670 train_time:56725ms step_avg:94.38ms +step:602/1670 train_time:56820ms step_avg:94.38ms +step:603/1670 train_time:56914ms step_avg:94.39ms +step:604/1670 train_time:57008ms step_avg:94.38ms +step:605/1670 train_time:57102ms step_avg:94.38ms +step:606/1670 train_time:57195ms step_avg:94.38ms +step:607/1670 train_time:57289ms step_avg:94.38ms +step:608/1670 train_time:57382ms step_avg:94.38ms +step:609/1670 train_time:57476ms step_avg:94.38ms +step:610/1670 train_time:57569ms step_avg:94.38ms +step:611/1670 train_time:57664ms step_avg:94.38ms +step:612/1670 train_time:57758ms step_avg:94.38ms +step:613/1670 train_time:57853ms step_avg:94.38ms +step:614/1670 train_time:57948ms step_avg:94.38ms +step:615/1670 train_time:58042ms step_avg:94.38ms +step:616/1670 train_time:58135ms step_avg:94.38ms +step:617/1670 train_time:58229ms step_avg:94.37ms +step:618/1670 train_time:58321ms step_avg:94.37ms +step:619/1670 train_time:58415ms step_avg:94.37ms +step:620/1670 train_time:58509ms step_avg:94.37ms +step:621/1670 train_time:58602ms step_avg:94.37ms +step:622/1670 train_time:58696ms step_avg:94.37ms +step:623/1670 train_time:58790ms step_avg:94.37ms +step:624/1670 train_time:58885ms step_avg:94.37ms +step:625/1670 train_time:58979ms step_avg:94.37ms +step:625/1670 val_loss:3.6157 train_time:59071ms step_avg:94.51ms +step:626/1670 train_time:59101ms step_avg:94.41ms +step:627/1670 train_time:59176ms step_avg:94.38ms +step:628/1670 train_time:59276ms step_avg:94.39ms +step:629/1670 train_time:59371ms step_avg:94.39ms +step:630/1670 train_time:59463ms step_avg:94.39ms 
+step:631/1670 train_time:59556ms step_avg:94.38ms +step:632/1670 train_time:59649ms step_avg:94.38ms +step:633/1670 train_time:59742ms step_avg:94.38ms +step:634/1670 train_time:59834ms step_avg:94.38ms +step:635/1670 train_time:59927ms step_avg:94.37ms +step:636/1670 train_time:60022ms step_avg:94.37ms +step:637/1670 train_time:60118ms step_avg:94.38ms +step:638/1670 train_time:60213ms step_avg:94.38ms +step:639/1670 train_time:60659ms step_avg:94.93ms +step:640/1670 train_time:60727ms step_avg:94.89ms +step:641/1670 train_time:60820ms step_avg:94.88ms +step:642/1670 train_time:60912ms step_avg:94.88ms +step:643/1670 train_time:61005ms step_avg:94.88ms +step:644/1670 train_time:61098ms step_avg:94.87ms +step:645/1670 train_time:61191ms step_avg:94.87ms +step:646/1670 train_time:61283ms step_avg:94.87ms +step:647/1670 train_time:61376ms step_avg:94.86ms +step:648/1670 train_time:61469ms step_avg:94.86ms +step:649/1670 train_time:61565ms step_avg:94.86ms +step:650/1670 train_time:61661ms step_avg:94.86ms +step:651/1670 train_time:61756ms step_avg:94.86ms +step:652/1670 train_time:61850ms step_avg:94.86ms +step:653/1670 train_time:61943ms step_avg:94.86ms +step:654/1670 train_time:62035ms step_avg:94.86ms +step:655/1670 train_time:62128ms step_avg:94.85ms +step:656/1670 train_time:62222ms step_avg:94.85ms +step:657/1670 train_time:62315ms step_avg:94.85ms +step:658/1670 train_time:62408ms step_avg:94.84ms +step:659/1670 train_time:62502ms step_avg:94.84ms +step:660/1670 train_time:62597ms step_avg:94.84ms +step:661/1670 train_time:62692ms step_avg:94.84ms +step:662/1670 train_time:62786ms step_avg:94.84ms +step:663/1670 train_time:62880ms step_avg:94.84ms +step:664/1670 train_time:62973ms step_avg:94.84ms +step:665/1670 train_time:63066ms step_avg:94.84ms +step:666/1670 train_time:63160ms step_avg:94.83ms +step:667/1670 train_time:63254ms step_avg:94.83ms +step:668/1670 train_time:63347ms step_avg:94.83ms +step:669/1670 train_time:63441ms step_avg:94.83ms +step:670/1670 train_time:63535ms step_avg:94.83ms +step:671/1670 train_time:63629ms step_avg:94.83ms +step:672/1670 train_time:63724ms step_avg:94.83ms +step:673/1670 train_time:63818ms step_avg:94.83ms +step:674/1670 train_time:63911ms step_avg:94.82ms +step:675/1670 train_time:64004ms step_avg:94.82ms +step:676/1670 train_time:64098ms step_avg:94.82ms +step:677/1670 train_time:64191ms step_avg:94.82ms +step:678/1670 train_time:64285ms step_avg:94.82ms +step:679/1670 train_time:64379ms step_avg:94.82ms +step:680/1670 train_time:64473ms step_avg:94.81ms +step:681/1670 train_time:64567ms step_avg:94.81ms +step:682/1670 train_time:64661ms step_avg:94.81ms +step:683/1670 train_time:64755ms step_avg:94.81ms +step:684/1670 train_time:64849ms step_avg:94.81ms +step:685/1670 train_time:64943ms step_avg:94.81ms +step:686/1670 train_time:65036ms step_avg:94.80ms +step:687/1670 train_time:65129ms step_avg:94.80ms +step:688/1670 train_time:65223ms step_avg:94.80ms +step:689/1670 train_time:65317ms step_avg:94.80ms +step:690/1670 train_time:65410ms step_avg:94.80ms +step:691/1670 train_time:65504ms step_avg:94.80ms +step:692/1670 train_time:65598ms step_avg:94.79ms +step:693/1670 train_time:65691ms step_avg:94.79ms +step:694/1670 train_time:65785ms step_avg:94.79ms +step:695/1670 train_time:65879ms step_avg:94.79ms +step:696/1670 train_time:65974ms step_avg:94.79ms +step:697/1670 train_time:66067ms step_avg:94.79ms +step:698/1670 train_time:66160ms step_avg:94.79ms +step:699/1670 train_time:66255ms step_avg:94.79ms +step:700/1670 train_time:66349ms 
step_avg:94.78ms +step:701/1670 train_time:66443ms step_avg:94.78ms +step:702/1670 train_time:66535ms step_avg:94.78ms +step:703/1670 train_time:66628ms step_avg:94.78ms +step:704/1670 train_time:66723ms step_avg:94.78ms +step:705/1670 train_time:66817ms step_avg:94.78ms +step:706/1670 train_time:66910ms step_avg:94.77ms +step:707/1670 train_time:67005ms step_avg:94.77ms +step:708/1670 train_time:67098ms step_avg:94.77ms +step:709/1670 train_time:67191ms step_avg:94.77ms +step:710/1670 train_time:67285ms step_avg:94.77ms +step:711/1670 train_time:67379ms step_avg:94.77ms +step:712/1670 train_time:67472ms step_avg:94.76ms +step:713/1670 train_time:67566ms step_avg:94.76ms +step:714/1670 train_time:67660ms step_avg:94.76ms +step:715/1670 train_time:67754ms step_avg:94.76ms +step:716/1670 train_time:67848ms step_avg:94.76ms +step:717/1670 train_time:67942ms step_avg:94.76ms +step:718/1670 train_time:68037ms step_avg:94.76ms +step:719/1670 train_time:68130ms step_avg:94.76ms +step:720/1670 train_time:68223ms step_avg:94.75ms +step:721/1670 train_time:68317ms step_avg:94.75ms +step:722/1670 train_time:68411ms step_avg:94.75ms +step:723/1670 train_time:68505ms step_avg:94.75ms +step:724/1670 train_time:68599ms step_avg:94.75ms +step:725/1670 train_time:68694ms step_avg:94.75ms +step:726/1670 train_time:68788ms step_avg:94.75ms +step:727/1670 train_time:68882ms step_avg:94.75ms +step:728/1670 train_time:68976ms step_avg:94.75ms +step:729/1670 train_time:69069ms step_avg:94.75ms +step:730/1670 train_time:69163ms step_avg:94.74ms +step:731/1670 train_time:69257ms step_avg:94.74ms +step:732/1670 train_time:69350ms step_avg:94.74ms +step:733/1670 train_time:69444ms step_avg:94.74ms +step:734/1670 train_time:69538ms step_avg:94.74ms +step:735/1670 train_time:69631ms step_avg:94.74ms +step:736/1670 train_time:69725ms step_avg:94.73ms +step:737/1670 train_time:69819ms step_avg:94.73ms +step:738/1670 train_time:69914ms step_avg:94.73ms +step:739/1670 train_time:70008ms step_avg:94.73ms +step:740/1670 train_time:70101ms step_avg:94.73ms +step:741/1670 train_time:70195ms step_avg:94.73ms +step:742/1670 train_time:70288ms step_avg:94.73ms +step:743/1670 train_time:70383ms step_avg:94.73ms +step:744/1670 train_time:70477ms step_avg:94.73ms +step:745/1670 train_time:70570ms step_avg:94.73ms +step:746/1670 train_time:70663ms step_avg:94.72ms +step:747/1670 train_time:70757ms step_avg:94.72ms +step:748/1670 train_time:70851ms step_avg:94.72ms +step:749/1670 train_time:70945ms step_avg:94.72ms +step:750/1670 train_time:71039ms step_avg:94.72ms +step:750/1670 val_loss:3.5631 train_time:71130ms step_avg:94.84ms +step:751/1670 train_time:71157ms step_avg:94.75ms +step:752/1670 train_time:71233ms step_avg:94.72ms +step:753/1670 train_time:71333ms step_avg:94.73ms +step:754/1670 train_time:71428ms step_avg:94.73ms +step:755/1670 train_time:71521ms step_avg:94.73ms +step:756/1670 train_time:71614ms step_avg:94.73ms +step:757/1670 train_time:71707ms step_avg:94.73ms +step:758/1670 train_time:71800ms step_avg:94.72ms +step:759/1670 train_time:71892ms step_avg:94.72ms +step:760/1670 train_time:71985ms step_avg:94.72ms +step:761/1670 train_time:72078ms step_avg:94.72ms +step:762/1670 train_time:72173ms step_avg:94.72ms +step:763/1670 train_time:72271ms step_avg:94.72ms +step:764/1670 train_time:72366ms step_avg:94.72ms +step:765/1670 train_time:72461ms step_avg:94.72ms +step:766/1670 train_time:72555ms step_avg:94.72ms +step:767/1670 train_time:72648ms step_avg:94.72ms +step:768/1670 train_time:72741ms step_avg:94.72ms 
+step:769/1670 train_time:72834ms step_avg:94.71ms +step:770/1670 train_time:72927ms step_avg:94.71ms +step:771/1670 train_time:73020ms step_avg:94.71ms +step:772/1670 train_time:73113ms step_avg:94.71ms +step:773/1670 train_time:73207ms step_avg:94.71ms +step:774/1670 train_time:73303ms step_avg:94.71ms +step:775/1670 train_time:73398ms step_avg:94.71ms +step:776/1670 train_time:73492ms step_avg:94.71ms +step:777/1670 train_time:73586ms step_avg:94.70ms +step:778/1670 train_time:73679ms step_avg:94.70ms +step:779/1670 train_time:73772ms step_avg:94.70ms +step:780/1670 train_time:73865ms step_avg:94.70ms +step:781/1670 train_time:73958ms step_avg:94.70ms +step:782/1670 train_time:74052ms step_avg:94.70ms +step:783/1670 train_time:74145ms step_avg:94.69ms +step:784/1670 train_time:74240ms step_avg:94.69ms +step:785/1670 train_time:74334ms step_avg:94.69ms +step:786/1670 train_time:74428ms step_avg:94.69ms +step:787/1670 train_time:74522ms step_avg:94.69ms +step:788/1670 train_time:74616ms step_avg:94.69ms +step:789/1670 train_time:74709ms step_avg:94.69ms +step:790/1670 train_time:74802ms step_avg:94.69ms +step:791/1670 train_time:74896ms step_avg:94.68ms +step:792/1670 train_time:74989ms step_avg:94.68ms +step:793/1670 train_time:75083ms step_avg:94.68ms +step:794/1670 train_time:75177ms step_avg:94.68ms +step:795/1670 train_time:75271ms step_avg:94.68ms +step:796/1670 train_time:75365ms step_avg:94.68ms +step:797/1670 train_time:75461ms step_avg:94.68ms +step:798/1670 train_time:75554ms step_avg:94.68ms +step:799/1670 train_time:75648ms step_avg:94.68ms +step:800/1670 train_time:75741ms step_avg:94.68ms +step:801/1670 train_time:75835ms step_avg:94.67ms +step:802/1670 train_time:75928ms step_avg:94.67ms +step:803/1670 train_time:76021ms step_avg:94.67ms +step:804/1670 train_time:76114ms step_avg:94.67ms +step:805/1670 train_time:76208ms step_avg:94.67ms +step:806/1670 train_time:76301ms step_avg:94.67ms +step:807/1670 train_time:76395ms step_avg:94.67ms +step:808/1670 train_time:76490ms step_avg:94.67ms +step:809/1670 train_time:76584ms step_avg:94.67ms +step:810/1670 train_time:76678ms step_avg:94.66ms +step:811/1670 train_time:76771ms step_avg:94.66ms +step:812/1670 train_time:76865ms step_avg:94.66ms +step:813/1670 train_time:76958ms step_avg:94.66ms +step:814/1670 train_time:77052ms step_avg:94.66ms +step:815/1670 train_time:77145ms step_avg:94.66ms +step:816/1670 train_time:77239ms step_avg:94.66ms +step:817/1670 train_time:77333ms step_avg:94.66ms +step:818/1670 train_time:77427ms step_avg:94.65ms +step:819/1670 train_time:77522ms step_avg:94.65ms +step:820/1670 train_time:77616ms step_avg:94.65ms +step:821/1670 train_time:77709ms step_avg:94.65ms +step:822/1670 train_time:77802ms step_avg:94.65ms +step:823/1670 train_time:77896ms step_avg:94.65ms +step:824/1670 train_time:77990ms step_avg:94.65ms +step:825/1670 train_time:78085ms step_avg:94.65ms +step:826/1670 train_time:78179ms step_avg:94.65ms +step:827/1670 train_time:78272ms step_avg:94.65ms +step:828/1670 train_time:78367ms step_avg:94.65ms +step:829/1670 train_time:78461ms step_avg:94.65ms +step:830/1670 train_time:78555ms step_avg:94.64ms +step:831/1670 train_time:78648ms step_avg:94.64ms +step:832/1670 train_time:78742ms step_avg:94.64ms +step:833/1670 train_time:78836ms step_avg:94.64ms +step:834/1670 train_time:78929ms step_avg:94.64ms +step:835/1670 train_time:79022ms step_avg:94.64ms +step:836/1670 train_time:79116ms step_avg:94.64ms +step:837/1670 train_time:79210ms step_avg:94.64ms +step:838/1670 train_time:79303ms 
step_avg:94.63ms +step:839/1670 train_time:79397ms step_avg:94.63ms +step:840/1670 train_time:79492ms step_avg:94.63ms +step:841/1670 train_time:79586ms step_avg:94.63ms +step:842/1670 train_time:79679ms step_avg:94.63ms +step:843/1670 train_time:79773ms step_avg:94.63ms +step:844/1670 train_time:79866ms step_avg:94.63ms +step:845/1670 train_time:79961ms step_avg:94.63ms +step:846/1670 train_time:80055ms step_avg:94.63ms +step:847/1670 train_time:80148ms step_avg:94.63ms +step:848/1670 train_time:80241ms step_avg:94.62ms +step:849/1670 train_time:80335ms step_avg:94.62ms +step:850/1670 train_time:80429ms step_avg:94.62ms +step:851/1670 train_time:80850ms step_avg:95.01ms +step:852/1670 train_time:80953ms step_avg:95.02ms +step:853/1670 train_time:81046ms step_avg:95.01ms +step:854/1670 train_time:81139ms step_avg:95.01ms +step:855/1670 train_time:81231ms step_avg:95.01ms +step:856/1670 train_time:81325ms step_avg:95.01ms +step:857/1670 train_time:81417ms step_avg:95.00ms +step:858/1670 train_time:81509ms step_avg:95.00ms +step:859/1670 train_time:81602ms step_avg:95.00ms +step:860/1670 train_time:81695ms step_avg:94.99ms +step:861/1670 train_time:81792ms step_avg:95.00ms +step:862/1670 train_time:81891ms step_avg:95.00ms +step:863/1670 train_time:81987ms step_avg:95.00ms +step:864/1670 train_time:82081ms step_avg:95.00ms +step:865/1670 train_time:82174ms step_avg:95.00ms +step:866/1670 train_time:82266ms step_avg:95.00ms +step:867/1670 train_time:82360ms step_avg:94.99ms +step:868/1670 train_time:82453ms step_avg:94.99ms +step:869/1670 train_time:82546ms step_avg:94.99ms +step:870/1670 train_time:82639ms step_avg:94.99ms +step:871/1670 train_time:82733ms step_avg:94.99ms +step:872/1670 train_time:82828ms step_avg:94.99ms +step:873/1670 train_time:82923ms step_avg:94.99ms +step:874/1670 train_time:83019ms step_avg:94.99ms +step:875/1670 train_time:83113ms step_avg:94.99ms +step:875/1670 val_loss:3.5188 train_time:83204ms step_avg:95.09ms +step:876/1670 train_time:83231ms step_avg:95.01ms +step:877/1670 train_time:83307ms step_avg:94.99ms +step:878/1670 train_time:83407ms step_avg:95.00ms +step:879/1670 train_time:83501ms step_avg:95.00ms +step:880/1670 train_time:83595ms step_avg:94.99ms +step:881/1670 train_time:83687ms step_avg:94.99ms +step:882/1670 train_time:83780ms step_avg:94.99ms +step:883/1670 train_time:83873ms step_avg:94.99ms +step:884/1670 train_time:83965ms step_avg:94.98ms +step:885/1670 train_time:84058ms step_avg:94.98ms +step:886/1670 train_time:84151ms step_avg:94.98ms +step:887/1670 train_time:84245ms step_avg:94.98ms +step:888/1670 train_time:84342ms step_avg:94.98ms +step:889/1670 train_time:84438ms step_avg:94.98ms +step:890/1670 train_time:84532ms step_avg:94.98ms +step:891/1670 train_time:84626ms step_avg:94.98ms +step:892/1670 train_time:84719ms step_avg:94.98ms +step:893/1670 train_time:84811ms step_avg:94.97ms +step:894/1670 train_time:84904ms step_avg:94.97ms +step:895/1670 train_time:84998ms step_avg:94.97ms +step:896/1670 train_time:85092ms step_avg:94.97ms +step:897/1670 train_time:85185ms step_avg:94.97ms +step:898/1670 train_time:85279ms step_avg:94.97ms +step:899/1670 train_time:85373ms step_avg:94.96ms +step:900/1670 train_time:85468ms step_avg:94.96ms +step:901/1670 train_time:85562ms step_avg:94.96ms +step:902/1670 train_time:85655ms step_avg:94.96ms +step:903/1670 train_time:85748ms step_avg:94.96ms +step:904/1670 train_time:85841ms step_avg:94.96ms +step:905/1670 train_time:85935ms step_avg:94.96ms +step:906/1670 train_time:86028ms step_avg:94.95ms 
+step:907/1670 train_time:86122ms step_avg:94.95ms +step:908/1670 train_time:86215ms step_avg:94.95ms +step:909/1670 train_time:86309ms step_avg:94.95ms +step:910/1670 train_time:86404ms step_avg:94.95ms +step:911/1670 train_time:86499ms step_avg:94.95ms +step:912/1670 train_time:86592ms step_avg:94.95ms +step:913/1670 train_time:86686ms step_avg:94.95ms +step:914/1670 train_time:86778ms step_avg:94.94ms +step:915/1670 train_time:86871ms step_avg:94.94ms +step:916/1670 train_time:86964ms step_avg:94.94ms +step:917/1670 train_time:87058ms step_avg:94.94ms +step:918/1670 train_time:87151ms step_avg:94.94ms +step:919/1670 train_time:87245ms step_avg:94.93ms +step:920/1670 train_time:87339ms step_avg:94.93ms +step:921/1670 train_time:87434ms step_avg:94.93ms +step:922/1670 train_time:87528ms step_avg:94.93ms +step:923/1670 train_time:87622ms step_avg:94.93ms +step:924/1670 train_time:87716ms step_avg:94.93ms +step:925/1670 train_time:87809ms step_avg:94.93ms +step:926/1670 train_time:87902ms step_avg:94.93ms +step:927/1670 train_time:87996ms step_avg:94.93ms +step:928/1670 train_time:88090ms step_avg:94.92ms +step:929/1670 train_time:88184ms step_avg:94.92ms +step:930/1670 train_time:88277ms step_avg:94.92ms +step:931/1670 train_time:88372ms step_avg:94.92ms +step:932/1670 train_time:88466ms step_avg:94.92ms +step:933/1670 train_time:88560ms step_avg:94.92ms +step:934/1670 train_time:88654ms step_avg:94.92ms +step:935/1670 train_time:88747ms step_avg:94.92ms +step:936/1670 train_time:88840ms step_avg:94.92ms +step:937/1670 train_time:88934ms step_avg:94.91ms +step:938/1670 train_time:89028ms step_avg:94.91ms +step:939/1670 train_time:89121ms step_avg:94.91ms +step:940/1670 train_time:89215ms step_avg:94.91ms +step:941/1670 train_time:89308ms step_avg:94.91ms +step:942/1670 train_time:89403ms step_avg:94.91ms +step:943/1670 train_time:89497ms step_avg:94.91ms +step:944/1670 train_time:89592ms step_avg:94.91ms +step:945/1670 train_time:89685ms step_avg:94.90ms +step:946/1670 train_time:89779ms step_avg:94.90ms +step:947/1670 train_time:89873ms step_avg:94.90ms +step:948/1670 train_time:89967ms step_avg:94.90ms +step:949/1670 train_time:90061ms step_avg:94.90ms +step:950/1670 train_time:90154ms step_avg:94.90ms +step:951/1670 train_time:90247ms step_avg:94.90ms +step:952/1670 train_time:90341ms step_avg:94.90ms +step:953/1670 train_time:90435ms step_avg:94.90ms +step:954/1670 train_time:90529ms step_avg:94.89ms +step:955/1670 train_time:90623ms step_avg:94.89ms +step:956/1670 train_time:90718ms step_avg:94.89ms +step:957/1670 train_time:90811ms step_avg:94.89ms +step:958/1670 train_time:90905ms step_avg:94.89ms +step:959/1670 train_time:90998ms step_avg:94.89ms +step:960/1670 train_time:91092ms step_avg:94.89ms +step:961/1670 train_time:91185ms step_avg:94.89ms +step:962/1670 train_time:91278ms step_avg:94.88ms +step:963/1670 train_time:91372ms step_avg:94.88ms +step:964/1670 train_time:91466ms step_avg:94.88ms +step:965/1670 train_time:91560ms step_avg:94.88ms +step:966/1670 train_time:91653ms step_avg:94.88ms +step:967/1670 train_time:91747ms step_avg:94.88ms +step:968/1670 train_time:91840ms step_avg:94.88ms +step:969/1670 train_time:91935ms step_avg:94.88ms +step:970/1670 train_time:92028ms step_avg:94.87ms +step:971/1670 train_time:92122ms step_avg:94.87ms +step:972/1670 train_time:92214ms step_avg:94.87ms +step:973/1670 train_time:92308ms step_avg:94.87ms +step:974/1670 train_time:92401ms step_avg:94.87ms +step:975/1670 train_time:92495ms step_avg:94.87ms +step:976/1670 train_time:92590ms 
step_avg:94.87ms +step:977/1670 train_time:92683ms step_avg:94.87ms +step:978/1670 train_time:92777ms step_avg:94.86ms +step:979/1670 train_time:92872ms step_avg:94.86ms +step:980/1670 train_time:92965ms step_avg:94.86ms +step:981/1670 train_time:93058ms step_avg:94.86ms +step:982/1670 train_time:93152ms step_avg:94.86ms +step:983/1670 train_time:93247ms step_avg:94.86ms +step:984/1670 train_time:93340ms step_avg:94.86ms +step:985/1670 train_time:93434ms step_avg:94.86ms +step:986/1670 train_time:93528ms step_avg:94.86ms +step:987/1670 train_time:93621ms step_avg:94.85ms +step:988/1670 train_time:93715ms step_avg:94.85ms +step:989/1670 train_time:93808ms step_avg:94.85ms +step:990/1670 train_time:93902ms step_avg:94.85ms +step:991/1670 train_time:93995ms step_avg:94.85ms +step:992/1670 train_time:94089ms step_avg:94.85ms +step:993/1670 train_time:94182ms step_avg:94.85ms +step:994/1670 train_time:94276ms step_avg:94.84ms +step:995/1670 train_time:94370ms step_avg:94.84ms +step:996/1670 train_time:94463ms step_avg:94.84ms +step:997/1670 train_time:94557ms step_avg:94.84ms +step:998/1670 train_time:94651ms step_avg:94.84ms +step:999/1670 train_time:94745ms step_avg:94.84ms +step:1000/1670 train_time:94839ms step_avg:94.84ms +step:1000/1670 val_loss:3.4686 train_time:94930ms step_avg:94.93ms +step:1001/1670 train_time:94957ms step_avg:94.86ms +step:1002/1670 train_time:95032ms step_avg:94.84ms +step:1003/1670 train_time:95130ms step_avg:94.85ms +step:1004/1670 train_time:95224ms step_avg:94.84ms +step:1005/1670 train_time:95316ms step_avg:94.84ms +step:1006/1670 train_time:95409ms step_avg:94.84ms +step:1007/1670 train_time:95502ms step_avg:94.84ms +step:1008/1670 train_time:95594ms step_avg:94.84ms +step:1009/1670 train_time:95687ms step_avg:94.83ms +step:1010/1670 train_time:95780ms step_avg:94.83ms +step:1011/1670 train_time:95873ms step_avg:94.83ms +step:1012/1670 train_time:95970ms step_avg:94.83ms +step:1013/1670 train_time:96066ms step_avg:94.83ms +step:1014/1670 train_time:96162ms step_avg:94.83ms +step:1015/1670 train_time:96256ms step_avg:94.83ms +step:1016/1670 train_time:96349ms step_avg:94.83ms +step:1017/1670 train_time:96442ms step_avg:94.83ms +step:1018/1670 train_time:96535ms step_avg:94.83ms +step:1019/1670 train_time:96629ms step_avg:94.83ms +step:1020/1670 train_time:96722ms step_avg:94.83ms +step:1021/1670 train_time:96814ms step_avg:94.82ms +step:1022/1670 train_time:96908ms step_avg:94.82ms +step:1023/1670 train_time:97003ms step_avg:94.82ms +step:1024/1670 train_time:97099ms step_avg:94.82ms +step:1025/1670 train_time:97193ms step_avg:94.82ms +step:1026/1670 train_time:97288ms step_avg:94.82ms +step:1027/1670 train_time:97382ms step_avg:94.82ms +step:1028/1670 train_time:97476ms step_avg:94.82ms +step:1029/1670 train_time:97568ms step_avg:94.82ms +step:1030/1670 train_time:97662ms step_avg:94.82ms +step:1031/1670 train_time:97755ms step_avg:94.82ms +step:1032/1670 train_time:97848ms step_avg:94.81ms +step:1033/1670 train_time:97942ms step_avg:94.81ms +step:1034/1670 train_time:98036ms step_avg:94.81ms +step:1035/1670 train_time:98131ms step_avg:94.81ms +step:1036/1670 train_time:98225ms step_avg:94.81ms +step:1037/1670 train_time:98319ms step_avg:94.81ms +step:1038/1670 train_time:98413ms step_avg:94.81ms +step:1039/1670 train_time:98507ms step_avg:94.81ms +step:1040/1670 train_time:98600ms step_avg:94.81ms +step:1041/1670 train_time:98693ms step_avg:94.81ms +step:1042/1670 train_time:98787ms step_avg:94.81ms +step:1043/1670 train_time:98881ms step_avg:94.80ms 
+step:1044/1670 train_time:98975ms step_avg:94.80ms +step:1045/1670 train_time:99069ms step_avg:94.80ms +step:1046/1670 train_time:99163ms step_avg:94.80ms +step:1047/1670 train_time:99257ms step_avg:94.80ms +step:1048/1670 train_time:99351ms step_avg:94.80ms +step:1049/1670 train_time:99445ms step_avg:94.80ms +step:1050/1670 train_time:99539ms step_avg:94.80ms +step:1051/1670 train_time:99632ms step_avg:94.80ms +step:1052/1670 train_time:99726ms step_avg:94.80ms +step:1053/1670 train_time:99820ms step_avg:94.80ms +step:1054/1670 train_time:99914ms step_avg:94.80ms +step:1055/1670 train_time:100008ms step_avg:94.79ms +step:1056/1670 train_time:100102ms step_avg:94.79ms +step:1057/1670 train_time:100196ms step_avg:94.79ms +step:1058/1670 train_time:100290ms step_avg:94.79ms +step:1059/1670 train_time:100384ms step_avg:94.79ms +step:1060/1670 train_time:100478ms step_avg:94.79ms +step:1061/1670 train_time:100571ms step_avg:94.79ms +step:1062/1670 train_time:101016ms step_avg:95.12ms +step:1063/1670 train_time:101091ms step_avg:95.10ms +step:1064/1670 train_time:101183ms step_avg:95.10ms +step:1065/1670 train_time:101276ms step_avg:95.09ms +step:1066/1670 train_time:101368ms step_avg:95.09ms +step:1067/1670 train_time:101461ms step_avg:95.09ms +step:1068/1670 train_time:101553ms step_avg:95.09ms +step:1069/1670 train_time:101645ms step_avg:95.08ms +step:1070/1670 train_time:101738ms step_avg:95.08ms +step:1071/1670 train_time:101831ms step_avg:95.08ms +step:1072/1670 train_time:101925ms step_avg:95.08ms +step:1073/1670 train_time:102024ms step_avg:95.08ms +step:1074/1670 train_time:102121ms step_avg:95.08ms +step:1075/1670 train_time:102215ms step_avg:95.08ms +step:1076/1670 train_time:102310ms step_avg:95.08ms +step:1077/1670 train_time:102403ms step_avg:95.08ms +step:1078/1670 train_time:102496ms step_avg:95.08ms +step:1079/1670 train_time:102589ms step_avg:95.08ms +step:1080/1670 train_time:102682ms step_avg:95.08ms +step:1081/1670 train_time:102775ms step_avg:95.07ms +step:1082/1670 train_time:102868ms step_avg:95.07ms +step:1083/1670 train_time:102963ms step_avg:95.07ms +step:1084/1670 train_time:103059ms step_avg:95.07ms +step:1085/1670 train_time:103154ms step_avg:95.07ms +step:1086/1670 train_time:103249ms step_avg:95.07ms +step:1087/1670 train_time:103343ms step_avg:95.07ms +step:1088/1670 train_time:103436ms step_avg:95.07ms +step:1089/1670 train_time:103530ms step_avg:95.07ms +step:1090/1670 train_time:103623ms step_avg:95.07ms +step:1091/1670 train_time:103716ms step_avg:95.06ms +step:1092/1670 train_time:103809ms step_avg:95.06ms +step:1093/1670 train_time:103903ms step_avg:95.06ms +step:1094/1670 train_time:103997ms step_avg:95.06ms +step:1095/1670 train_time:104091ms step_avg:95.06ms +step:1096/1670 train_time:104186ms step_avg:95.06ms +step:1097/1670 train_time:104280ms step_avg:95.06ms +step:1098/1670 train_time:104373ms step_avg:95.06ms +step:1099/1670 train_time:104468ms step_avg:95.06ms +step:1100/1670 train_time:104562ms step_avg:95.06ms +step:1101/1670 train_time:104655ms step_avg:95.05ms +step:1102/1670 train_time:104749ms step_avg:95.05ms +step:1103/1670 train_time:104842ms step_avg:95.05ms +step:1104/1670 train_time:104935ms step_avg:95.05ms +step:1105/1670 train_time:105030ms step_avg:95.05ms +step:1106/1670 train_time:105124ms step_avg:95.05ms +step:1107/1670 train_time:105218ms step_avg:95.05ms +step:1108/1670 train_time:105311ms step_avg:95.05ms +step:1109/1670 train_time:105405ms step_avg:95.05ms +step:1110/1670 train_time:105499ms step_avg:95.04ms 
+step:1111/1670 train_time:105593ms step_avg:95.04ms +step:1112/1670 train_time:105686ms step_avg:95.04ms +step:1113/1670 train_time:105779ms step_avg:95.04ms +step:1114/1670 train_time:105872ms step_avg:95.04ms +step:1115/1670 train_time:106070ms step_avg:95.13ms +step:1116/1670 train_time:106146ms step_avg:95.11ms +step:1117/1670 train_time:106239ms step_avg:95.11ms +step:1118/1670 train_time:106333ms step_avg:95.11ms +step:1119/1670 train_time:106426ms step_avg:95.11ms +step:1120/1670 train_time:106520ms step_avg:95.11ms +step:1121/1670 train_time:106613ms step_avg:95.11ms +step:1122/1670 train_time:106708ms step_avg:95.10ms +step:1123/1670 train_time:106801ms step_avg:95.10ms +step:1124/1670 train_time:106895ms step_avg:95.10ms +step:1125/1670 train_time:106996ms step_avg:95.11ms +step:1125/1670 val_loss:3.4159 train_time:107091ms step_avg:95.19ms +step:1126/1670 train_time:107119ms step_avg:95.13ms +step:1127/1670 train_time:107195ms step_avg:95.11ms +step:1128/1670 train_time:107294ms step_avg:95.12ms +step:1129/1670 train_time:107387ms step_avg:95.12ms +step:1130/1670 train_time:107480ms step_avg:95.12ms +step:1131/1670 train_time:107574ms step_avg:95.11ms +step:1132/1670 train_time:107667ms step_avg:95.11ms +step:1133/1670 train_time:107761ms step_avg:95.11ms +step:1134/1670 train_time:107855ms step_avg:95.11ms +step:1135/1670 train_time:107948ms step_avg:95.11ms +step:1136/1670 train_time:108045ms step_avg:95.11ms +step:1137/1670 train_time:108144ms step_avg:95.11ms +step:1138/1670 train_time:108241ms step_avg:95.11ms +step:1139/1670 train_time:108337ms step_avg:95.12ms +step:1140/1670 train_time:108430ms step_avg:95.11ms +step:1141/1670 train_time:108524ms step_avg:95.11ms +step:1142/1670 train_time:108618ms step_avg:95.11ms +step:1143/1670 train_time:108711ms step_avg:95.11ms +step:1144/1670 train_time:108805ms step_avg:95.11ms +step:1145/1670 train_time:108899ms step_avg:95.11ms +step:1146/1670 train_time:108993ms step_avg:95.11ms +step:1147/1670 train_time:109090ms step_avg:95.11ms +step:1148/1670 train_time:109186ms step_avg:95.11ms +step:1149/1670 train_time:109281ms step_avg:95.11ms +step:1150/1670 train_time:109376ms step_avg:95.11ms +step:1151/1670 train_time:109470ms step_avg:95.11ms +step:1152/1670 train_time:109564ms step_avg:95.11ms +step:1153/1670 train_time:109659ms step_avg:95.11ms +step:1154/1670 train_time:109752ms step_avg:95.11ms +step:1155/1670 train_time:109846ms step_avg:95.10ms +step:1156/1670 train_time:109941ms step_avg:95.10ms +step:1157/1670 train_time:110034ms step_avg:95.10ms +step:1158/1670 train_time:110129ms step_avg:95.10ms +step:1159/1670 train_time:110225ms step_avg:95.10ms +step:1160/1670 train_time:110321ms step_avg:95.10ms +step:1161/1670 train_time:110415ms step_avg:95.10ms +step:1162/1670 train_time:110508ms step_avg:95.10ms +step:1163/1670 train_time:110603ms step_avg:95.10ms +step:1164/1670 train_time:110699ms step_avg:95.10ms +step:1165/1670 train_time:110793ms step_avg:95.10ms +step:1166/1670 train_time:110887ms step_avg:95.10ms +step:1167/1670 train_time:110981ms step_avg:95.10ms +step:1168/1670 train_time:111077ms step_avg:95.10ms +step:1169/1670 train_time:111171ms step_avg:95.10ms +step:1170/1670 train_time:111267ms step_avg:95.10ms +step:1171/1670 train_time:111362ms step_avg:95.10ms +step:1172/1670 train_time:111457ms step_avg:95.10ms +step:1173/1670 train_time:111552ms step_avg:95.10ms +step:1174/1670 train_time:111647ms step_avg:95.10ms +step:1175/1670 train_time:111741ms step_avg:95.10ms +step:1176/1670 train_time:111835ms 
step_avg:95.10ms +step:1177/1670 train_time:111929ms step_avg:95.10ms +step:1178/1670 train_time:112023ms step_avg:95.10ms +step:1179/1670 train_time:112119ms step_avg:95.10ms +step:1180/1670 train_time:112214ms step_avg:95.10ms +step:1181/1670 train_time:112309ms step_avg:95.10ms +step:1182/1670 train_time:112403ms step_avg:95.10ms +step:1183/1670 train_time:112498ms step_avg:95.10ms +step:1184/1670 train_time:112592ms step_avg:95.09ms +step:1185/1670 train_time:112688ms step_avg:95.10ms +step:1186/1670 train_time:112782ms step_avg:95.09ms +step:1187/1670 train_time:112875ms step_avg:95.09ms +step:1188/1670 train_time:112970ms step_avg:95.09ms +step:1189/1670 train_time:113065ms step_avg:95.09ms +step:1190/1670 train_time:113160ms step_avg:95.09ms +step:1191/1670 train_time:113255ms step_avg:95.09ms +step:1192/1670 train_time:113350ms step_avg:95.09ms +step:1193/1670 train_time:113446ms step_avg:95.09ms +step:1194/1670 train_time:113540ms step_avg:95.09ms +step:1195/1670 train_time:113634ms step_avg:95.09ms +step:1196/1670 train_time:113729ms step_avg:95.09ms +step:1197/1670 train_time:113824ms step_avg:95.09ms +step:1198/1670 train_time:113918ms step_avg:95.09ms +step:1199/1670 train_time:114012ms step_avg:95.09ms +step:1200/1670 train_time:114106ms step_avg:95.09ms +step:1201/1670 train_time:114201ms step_avg:95.09ms +step:1202/1670 train_time:114296ms step_avg:95.09ms +step:1203/1670 train_time:114391ms step_avg:95.09ms +step:1204/1670 train_time:114486ms step_avg:95.09ms +step:1205/1670 train_time:114581ms step_avg:95.09ms +step:1206/1670 train_time:114675ms step_avg:95.09ms +step:1207/1670 train_time:114770ms step_avg:95.09ms +step:1208/1670 train_time:114864ms step_avg:95.09ms +step:1209/1670 train_time:114959ms step_avg:95.09ms +step:1210/1670 train_time:115053ms step_avg:95.09ms +step:1211/1670 train_time:115148ms step_avg:95.09ms +step:1212/1670 train_time:115243ms step_avg:95.08ms +step:1213/1670 train_time:115338ms step_avg:95.09ms +step:1214/1670 train_time:115433ms step_avg:95.08ms +step:1215/1670 train_time:115528ms step_avg:95.08ms +step:1216/1670 train_time:115622ms step_avg:95.08ms +step:1217/1670 train_time:115718ms step_avg:95.08ms +step:1218/1670 train_time:115812ms step_avg:95.08ms +step:1219/1670 train_time:115906ms step_avg:95.08ms +step:1220/1670 train_time:116001ms step_avg:95.08ms +step:1221/1670 train_time:116095ms step_avg:95.08ms +step:1222/1670 train_time:116189ms step_avg:95.08ms +step:1223/1670 train_time:116285ms step_avg:95.08ms +step:1224/1670 train_time:116380ms step_avg:95.08ms +step:1225/1670 train_time:116474ms step_avg:95.08ms +step:1226/1670 train_time:116569ms step_avg:95.08ms +step:1227/1670 train_time:116664ms step_avg:95.08ms +step:1228/1670 train_time:116759ms step_avg:95.08ms +step:1229/1670 train_time:116853ms step_avg:95.08ms +step:1230/1670 train_time:116947ms step_avg:95.08ms +step:1231/1670 train_time:117042ms step_avg:95.08ms +step:1232/1670 train_time:117136ms step_avg:95.08ms +step:1233/1670 train_time:117230ms step_avg:95.08ms +step:1234/1670 train_time:117325ms step_avg:95.08ms +step:1235/1670 train_time:117420ms step_avg:95.08ms +step:1236/1670 train_time:117514ms step_avg:95.08ms +step:1237/1670 train_time:117609ms step_avg:95.08ms +step:1238/1670 train_time:117704ms step_avg:95.08ms +step:1239/1670 train_time:117798ms step_avg:95.08ms +step:1240/1670 train_time:117892ms step_avg:95.07ms +step:1241/1670 train_time:117987ms step_avg:95.07ms +step:1242/1670 train_time:118081ms step_avg:95.07ms +step:1243/1670 train_time:118175ms 
step_avg:95.07ms +step:1244/1670 train_time:118269ms step_avg:95.07ms +step:1245/1670 train_time:118364ms step_avg:95.07ms +step:1246/1670 train_time:118460ms step_avg:95.07ms +step:1247/1670 train_time:118554ms step_avg:95.07ms +step:1248/1670 train_time:118648ms step_avg:95.07ms +step:1249/1670 train_time:118744ms step_avg:95.07ms +step:1250/1670 train_time:118838ms step_avg:95.07ms +step:1250/1670 val_loss:3.3775 train_time:118930ms step_avg:95.14ms +step:1251/1670 train_time:118957ms step_avg:95.09ms +step:1252/1670 train_time:119032ms step_avg:95.07ms +step:1253/1670 train_time:119135ms step_avg:95.08ms +step:1254/1670 train_time:119231ms step_avg:95.08ms +step:1255/1670 train_time:119325ms step_avg:95.08ms +step:1256/1670 train_time:119418ms step_avg:95.08ms +step:1257/1670 train_time:119511ms step_avg:95.08ms +step:1258/1670 train_time:119604ms step_avg:95.07ms +step:1259/1670 train_time:119698ms step_avg:95.07ms +step:1260/1670 train_time:119791ms step_avg:95.07ms +step:1261/1670 train_time:119885ms step_avg:95.07ms +step:1262/1670 train_time:119980ms step_avg:95.07ms +step:1263/1670 train_time:120079ms step_avg:95.07ms +step:1264/1670 train_time:120176ms step_avg:95.08ms +step:1265/1670 train_time:120273ms step_avg:95.08ms +step:1266/1670 train_time:120368ms step_avg:95.08ms +step:1267/1670 train_time:120462ms step_avg:95.08ms +step:1268/1670 train_time:120557ms step_avg:95.08ms +step:1269/1670 train_time:120650ms step_avg:95.08ms +step:1270/1670 train_time:120744ms step_avg:95.07ms +step:1271/1670 train_time:120837ms step_avg:95.07ms +step:1272/1670 train_time:120931ms step_avg:95.07ms +step:1273/1670 train_time:121028ms step_avg:95.07ms +step:1274/1670 train_time:121477ms step_avg:95.35ms +step:1275/1670 train_time:121548ms step_avg:95.33ms +step:1276/1670 train_time:121641ms step_avg:95.33ms +step:1277/1670 train_time:121734ms step_avg:95.33ms +step:1278/1670 train_time:121827ms step_avg:95.33ms +step:1279/1670 train_time:121920ms step_avg:95.32ms +step:1280/1670 train_time:122013ms step_avg:95.32ms +step:1281/1670 train_time:122107ms step_avg:95.32ms +step:1282/1670 train_time:122200ms step_avg:95.32ms +step:1283/1670 train_time:122294ms step_avg:95.32ms +step:1284/1670 train_time:122394ms step_avg:95.32ms +step:1285/1670 train_time:122492ms step_avg:95.32ms +step:1286/1670 train_time:122588ms step_avg:95.33ms +step:1287/1670 train_time:122682ms step_avg:95.32ms +step:1288/1670 train_time:122776ms step_avg:95.32ms +step:1289/1670 train_time:122870ms step_avg:95.32ms +step:1290/1670 train_time:122964ms step_avg:95.32ms +step:1291/1670 train_time:123057ms step_avg:95.32ms +step:1292/1670 train_time:123151ms step_avg:95.32ms +step:1293/1670 train_time:123245ms step_avg:95.32ms +step:1294/1670 train_time:123340ms step_avg:95.32ms +step:1295/1670 train_time:123436ms step_avg:95.32ms +step:1296/1670 train_time:123533ms step_avg:95.32ms +step:1297/1670 train_time:123629ms step_avg:95.32ms +step:1298/1670 train_time:123723ms step_avg:95.32ms +step:1299/1670 train_time:123817ms step_avg:95.32ms +step:1300/1670 train_time:123911ms step_avg:95.32ms +step:1301/1670 train_time:124005ms step_avg:95.32ms +step:1302/1670 train_time:124099ms step_avg:95.31ms +step:1303/1670 train_time:124192ms step_avg:95.31ms +step:1304/1670 train_time:124287ms step_avg:95.31ms +step:1305/1670 train_time:124382ms step_avg:95.31ms +step:1306/1670 train_time:124477ms step_avg:95.31ms +step:1307/1670 train_time:124572ms step_avg:95.31ms +step:1308/1670 train_time:124667ms step_avg:95.31ms +step:1309/1670 
train_time:124761ms step_avg:95.31ms +step:1310/1670 train_time:124856ms step_avg:95.31ms +step:1311/1670 train_time:124951ms step_avg:95.31ms +step:1312/1670 train_time:125045ms step_avg:95.31ms +step:1313/1670 train_time:125139ms step_avg:95.31ms +step:1314/1670 train_time:125233ms step_avg:95.31ms +step:1315/1670 train_time:125328ms step_avg:95.31ms +step:1316/1670 train_time:125423ms step_avg:95.31ms +step:1317/1670 train_time:125517ms step_avg:95.31ms +step:1318/1670 train_time:125613ms step_avg:95.31ms +step:1319/1670 train_time:125708ms step_avg:95.31ms +step:1320/1670 train_time:125803ms step_avg:95.31ms +step:1321/1670 train_time:125899ms step_avg:95.31ms +step:1322/1670 train_time:125992ms step_avg:95.30ms +step:1323/1670 train_time:126087ms step_avg:95.30ms +step:1324/1670 train_time:126180ms step_avg:95.30ms +step:1325/1670 train_time:126275ms step_avg:95.30ms +step:1326/1670 train_time:126371ms step_avg:95.30ms +step:1327/1670 train_time:126465ms step_avg:95.30ms +step:1328/1670 train_time:126561ms step_avg:95.30ms +step:1329/1670 train_time:126656ms step_avg:95.30ms +step:1330/1670 train_time:126751ms step_avg:95.30ms +step:1331/1670 train_time:126846ms step_avg:95.30ms +step:1332/1670 train_time:126940ms step_avg:95.30ms +step:1333/1670 train_time:127035ms step_avg:95.30ms +step:1334/1670 train_time:127129ms step_avg:95.30ms +step:1335/1670 train_time:127223ms step_avg:95.30ms +step:1336/1670 train_time:127317ms step_avg:95.30ms +step:1337/1670 train_time:127412ms step_avg:95.30ms +step:1338/1670 train_time:127508ms step_avg:95.30ms +step:1339/1670 train_time:127603ms step_avg:95.30ms +step:1340/1670 train_time:127696ms step_avg:95.30ms +step:1341/1670 train_time:127794ms step_avg:95.30ms +step:1342/1670 train_time:127889ms step_avg:95.30ms +step:1343/1670 train_time:127983ms step_avg:95.30ms +step:1344/1670 train_time:128077ms step_avg:95.30ms +step:1345/1670 train_time:128171ms step_avg:95.29ms +step:1346/1670 train_time:128266ms step_avg:95.29ms +step:1347/1670 train_time:128360ms step_avg:95.29ms +step:1348/1670 train_time:128454ms step_avg:95.29ms +step:1349/1670 train_time:128548ms step_avg:95.29ms +step:1350/1670 train_time:128643ms step_avg:95.29ms +step:1351/1670 train_time:128739ms step_avg:95.29ms +step:1352/1670 train_time:128833ms step_avg:95.29ms +step:1353/1670 train_time:128928ms step_avg:95.29ms +step:1354/1670 train_time:129023ms step_avg:95.29ms +step:1355/1670 train_time:129117ms step_avg:95.29ms +step:1356/1670 train_time:129211ms step_avg:95.29ms +step:1357/1670 train_time:129306ms step_avg:95.29ms +step:1358/1670 train_time:129399ms step_avg:95.29ms +step:1359/1670 train_time:129495ms step_avg:95.29ms +step:1360/1670 train_time:129589ms step_avg:95.29ms +step:1361/1670 train_time:129684ms step_avg:95.29ms +step:1362/1670 train_time:129778ms step_avg:95.29ms +step:1363/1670 train_time:129874ms step_avg:95.29ms +step:1364/1670 train_time:129969ms step_avg:95.29ms +step:1365/1670 train_time:130064ms step_avg:95.28ms +step:1366/1670 train_time:130157ms step_avg:95.28ms +step:1367/1670 train_time:130252ms step_avg:95.28ms +step:1368/1670 train_time:130348ms step_avg:95.28ms +step:1369/1670 train_time:130441ms step_avg:95.28ms +step:1370/1670 train_time:130536ms step_avg:95.28ms +step:1371/1670 train_time:130630ms step_avg:95.28ms +step:1372/1670 train_time:130725ms step_avg:95.28ms +step:1373/1670 train_time:130820ms step_avg:95.28ms +step:1374/1670 train_time:130916ms step_avg:95.28ms +step:1375/1670 train_time:131011ms step_avg:95.28ms +step:1375/1670 
val_loss:3.3430 train_time:131102ms step_avg:95.35ms +step:1376/1670 train_time:131129ms step_avg:95.30ms +step:1377/1670 train_time:131209ms step_avg:95.29ms +step:1378/1670 train_time:131313ms step_avg:95.29ms +step:1379/1670 train_time:131407ms step_avg:95.29ms +step:1380/1670 train_time:131502ms step_avg:95.29ms +step:1381/1670 train_time:131595ms step_avg:95.29ms +step:1382/1670 train_time:131688ms step_avg:95.29ms +step:1383/1670 train_time:131782ms step_avg:95.29ms +step:1384/1670 train_time:131876ms step_avg:95.29ms +step:1385/1670 train_time:131969ms step_avg:95.28ms +step:1386/1670 train_time:132063ms step_avg:95.28ms +step:1387/1670 train_time:132160ms step_avg:95.28ms +step:1388/1670 train_time:132260ms step_avg:95.29ms +step:1389/1670 train_time:132356ms step_avg:95.29ms +step:1390/1670 train_time:132451ms step_avg:95.29ms +step:1391/1670 train_time:132546ms step_avg:95.29ms +step:1392/1670 train_time:132639ms step_avg:95.29ms +step:1393/1670 train_time:132733ms step_avg:95.29ms +step:1394/1670 train_time:132827ms step_avg:95.28ms +step:1395/1670 train_time:132921ms step_avg:95.28ms +step:1396/1670 train_time:133015ms step_avg:95.28ms +step:1397/1670 train_time:133109ms step_avg:95.28ms +step:1398/1670 train_time:133206ms step_avg:95.28ms +step:1399/1670 train_time:133302ms step_avg:95.28ms +step:1400/1670 train_time:133397ms step_avg:95.28ms +step:1401/1670 train_time:133493ms step_avg:95.28ms +step:1402/1670 train_time:133587ms step_avg:95.28ms +step:1403/1670 train_time:133682ms step_avg:95.28ms +step:1404/1670 train_time:133776ms step_avg:95.28ms +step:1405/1670 train_time:133870ms step_avg:95.28ms +step:1406/1670 train_time:133964ms step_avg:95.28ms +step:1407/1670 train_time:134058ms step_avg:95.28ms +step:1408/1670 train_time:134154ms step_avg:95.28ms +step:1409/1670 train_time:134249ms step_avg:95.28ms +step:1410/1670 train_time:134346ms step_avg:95.28ms +step:1411/1670 train_time:134441ms step_avg:95.28ms +step:1412/1670 train_time:134535ms step_avg:95.28ms +step:1413/1670 train_time:134629ms step_avg:95.28ms +step:1414/1670 train_time:134724ms step_avg:95.28ms +step:1415/1670 train_time:134818ms step_avg:95.28ms +step:1416/1670 train_time:134912ms step_avg:95.28ms +step:1417/1670 train_time:135006ms step_avg:95.28ms +step:1418/1670 train_time:135101ms step_avg:95.28ms +step:1419/1670 train_time:135197ms step_avg:95.28ms +step:1420/1670 train_time:135293ms step_avg:95.28ms +step:1421/1670 train_time:135388ms step_avg:95.28ms +step:1422/1670 train_time:135483ms step_avg:95.28ms +step:1423/1670 train_time:135578ms step_avg:95.28ms +step:1424/1670 train_time:135673ms step_avg:95.28ms +step:1425/1670 train_time:135766ms step_avg:95.27ms +step:1426/1670 train_time:135861ms step_avg:95.27ms +step:1427/1670 train_time:135956ms step_avg:95.27ms +step:1428/1670 train_time:136050ms step_avg:95.27ms +step:1429/1670 train_time:136145ms step_avg:95.27ms +step:1430/1670 train_time:136240ms step_avg:95.27ms +step:1431/1670 train_time:136335ms step_avg:95.27ms +step:1432/1670 train_time:136430ms step_avg:95.27ms +step:1433/1670 train_time:136525ms step_avg:95.27ms +step:1434/1670 train_time:136619ms step_avg:95.27ms +step:1435/1670 train_time:136713ms step_avg:95.27ms +step:1436/1670 train_time:136807ms step_avg:95.27ms +step:1437/1670 train_time:136902ms step_avg:95.27ms +step:1438/1670 train_time:136996ms step_avg:95.27ms +step:1439/1670 train_time:137091ms step_avg:95.27ms +step:1440/1670 train_time:137186ms step_avg:95.27ms +step:1441/1670 train_time:137282ms step_avg:95.27ms 
+step:1442/1670 train_time:137377ms step_avg:95.27ms +step:1443/1670 train_time:137472ms step_avg:95.27ms +step:1444/1670 train_time:137567ms step_avg:95.27ms +step:1445/1670 train_time:137661ms step_avg:95.27ms +step:1446/1670 train_time:137757ms step_avg:95.27ms +step:1447/1670 train_time:137851ms step_avg:95.27ms +step:1448/1670 train_time:137946ms step_avg:95.27ms +step:1449/1670 train_time:138040ms step_avg:95.27ms +step:1450/1670 train_time:138135ms step_avg:95.27ms +step:1451/1670 train_time:138231ms step_avg:95.27ms +step:1452/1670 train_time:138326ms step_avg:95.27ms +step:1453/1670 train_time:138421ms step_avg:95.27ms +step:1454/1670 train_time:138516ms step_avg:95.27ms +step:1455/1670 train_time:138611ms step_avg:95.27ms +step:1456/1670 train_time:138706ms step_avg:95.27ms +step:1457/1670 train_time:138801ms step_avg:95.26ms +step:1458/1670 train_time:138895ms step_avg:95.26ms +step:1459/1670 train_time:138989ms step_avg:95.26ms +step:1460/1670 train_time:139085ms step_avg:95.26ms +step:1461/1670 train_time:139179ms step_avg:95.26ms +step:1462/1670 train_time:139274ms step_avg:95.26ms +step:1463/1670 train_time:139368ms step_avg:95.26ms +step:1464/1670 train_time:139463ms step_avg:95.26ms +step:1465/1670 train_time:139558ms step_avg:95.26ms +step:1466/1670 train_time:139652ms step_avg:95.26ms +step:1467/1670 train_time:139747ms step_avg:95.26ms +step:1468/1670 train_time:139842ms step_avg:95.26ms +step:1469/1670 train_time:139937ms step_avg:95.26ms +step:1470/1670 train_time:140031ms step_avg:95.26ms +step:1471/1670 train_time:140125ms step_avg:95.26ms +step:1472/1670 train_time:140220ms step_avg:95.26ms +step:1473/1670 train_time:140316ms step_avg:95.26ms +step:1474/1670 train_time:140410ms step_avg:95.26ms +step:1475/1670 train_time:140506ms step_avg:95.26ms +step:1476/1670 train_time:140600ms step_avg:95.26ms +step:1477/1670 train_time:140695ms step_avg:95.26ms +step:1478/1670 train_time:140790ms step_avg:95.26ms +step:1479/1670 train_time:140885ms step_avg:95.26ms +step:1480/1670 train_time:140979ms step_avg:95.26ms +step:1481/1670 train_time:141074ms step_avg:95.26ms +step:1482/1670 train_time:141168ms step_avg:95.25ms +step:1483/1670 train_time:141264ms step_avg:95.26ms +step:1484/1670 train_time:141358ms step_avg:95.25ms +step:1485/1670 train_time:141703ms step_avg:95.42ms +step:1486/1670 train_time:141875ms step_avg:95.47ms +step:1487/1670 train_time:141968ms step_avg:95.47ms +step:1488/1670 train_time:142062ms step_avg:95.47ms +step:1489/1670 train_time:142155ms step_avg:95.47ms +step:1490/1670 train_time:142248ms step_avg:95.47ms +step:1491/1670 train_time:142342ms step_avg:95.47ms +step:1492/1670 train_time:142435ms step_avg:95.47ms +step:1493/1670 train_time:142528ms step_avg:95.46ms +step:1494/1670 train_time:142622ms step_avg:95.46ms +step:1495/1670 train_time:142718ms step_avg:95.46ms +step:1496/1670 train_time:142818ms step_avg:95.47ms +step:1497/1670 train_time:142916ms step_avg:95.47ms +step:1498/1670 train_time:143011ms step_avg:95.47ms +step:1499/1670 train_time:143105ms step_avg:95.47ms +step:1500/1670 train_time:143199ms step_avg:95.47ms +step:1500/1670 val_loss:3.3130 train_time:143290ms step_avg:95.53ms +step:1501/1670 train_time:143318ms step_avg:95.48ms +step:1502/1670 train_time:143397ms step_avg:95.47ms +step:1503/1670 train_time:143497ms step_avg:95.47ms +step:1504/1670 train_time:143591ms step_avg:95.47ms +step:1505/1670 train_time:143684ms step_avg:95.47ms +step:1506/1670 train_time:143778ms step_avg:95.47ms +step:1507/1670 train_time:143870ms 
step_avg:95.47ms +step:1508/1670 train_time:143964ms step_avg:95.47ms +step:1509/1670 train_time:144057ms step_avg:95.47ms +step:1510/1670 train_time:144151ms step_avg:95.46ms +step:1511/1670 train_time:144246ms step_avg:95.46ms +step:1512/1670 train_time:144343ms step_avg:95.46ms +step:1513/1670 train_time:144439ms step_avg:95.47ms +step:1514/1670 train_time:144535ms step_avg:95.47ms +step:1515/1670 train_time:144632ms step_avg:95.47ms +step:1516/1670 train_time:144727ms step_avg:95.47ms +step:1517/1670 train_time:144820ms step_avg:95.46ms +step:1518/1670 train_time:144914ms step_avg:95.46ms +step:1519/1670 train_time:145007ms step_avg:95.46ms +step:1520/1670 train_time:145102ms step_avg:95.46ms +step:1521/1670 train_time:145197ms step_avg:95.46ms +step:1522/1670 train_time:145290ms step_avg:95.46ms +step:1523/1670 train_time:145387ms step_avg:95.46ms +step:1524/1670 train_time:145482ms step_avg:95.46ms +step:1525/1670 train_time:145579ms step_avg:95.46ms +step:1526/1670 train_time:145673ms step_avg:95.46ms +step:1527/1670 train_time:145767ms step_avg:95.46ms +step:1528/1670 train_time:145861ms step_avg:95.46ms +step:1529/1670 train_time:145954ms step_avg:95.46ms +step:1530/1670 train_time:146049ms step_avg:95.46ms +step:1531/1670 train_time:146142ms step_avg:95.46ms +step:1532/1670 train_time:146237ms step_avg:95.46ms +step:1533/1670 train_time:146332ms step_avg:95.45ms +step:1534/1670 train_time:146427ms step_avg:95.45ms +step:1535/1670 train_time:146523ms step_avg:95.45ms +step:1536/1670 train_time:146617ms step_avg:95.45ms +step:1537/1670 train_time:146713ms step_avg:95.45ms +step:1538/1670 train_time:146808ms step_avg:95.45ms +step:1539/1670 train_time:146902ms step_avg:95.45ms +step:1540/1670 train_time:146996ms step_avg:95.45ms +step:1541/1670 train_time:147090ms step_avg:95.45ms +step:1542/1670 train_time:147185ms step_avg:95.45ms +step:1543/1670 train_time:147279ms step_avg:95.45ms +step:1544/1670 train_time:147374ms step_avg:95.45ms +step:1545/1670 train_time:147470ms step_avg:95.45ms +step:1546/1670 train_time:147565ms step_avg:95.45ms +step:1547/1670 train_time:147661ms step_avg:95.45ms +step:1548/1670 train_time:147755ms step_avg:95.45ms +step:1549/1670 train_time:147851ms step_avg:95.45ms +step:1550/1670 train_time:147945ms step_avg:95.45ms +step:1551/1670 train_time:148039ms step_avg:95.45ms +step:1552/1670 train_time:148134ms step_avg:95.45ms +step:1553/1670 train_time:148229ms step_avg:95.45ms +step:1554/1670 train_time:148323ms step_avg:95.45ms +step:1555/1670 train_time:148418ms step_avg:95.45ms +step:1556/1670 train_time:148513ms step_avg:95.45ms +step:1557/1670 train_time:148609ms step_avg:95.45ms +step:1558/1670 train_time:148705ms step_avg:95.45ms +step:1559/1670 train_time:148799ms step_avg:95.45ms +step:1560/1670 train_time:148894ms step_avg:95.44ms +step:1561/1670 train_time:148988ms step_avg:95.44ms +step:1562/1670 train_time:149083ms step_avg:95.44ms +step:1563/1670 train_time:149177ms step_avg:95.44ms +step:1564/1670 train_time:149272ms step_avg:95.44ms +step:1565/1670 train_time:149367ms step_avg:95.44ms +step:1566/1670 train_time:149462ms step_avg:95.44ms +step:1567/1670 train_time:149557ms step_avg:95.44ms +step:1568/1670 train_time:149652ms step_avg:95.44ms +step:1569/1670 train_time:149747ms step_avg:95.44ms +step:1570/1670 train_time:149842ms step_avg:95.44ms +step:1571/1670 train_time:149936ms step_avg:95.44ms +step:1572/1670 train_time:150031ms step_avg:95.44ms +step:1573/1670 train_time:150126ms step_avg:95.44ms +step:1574/1670 train_time:150220ms 
step_avg:95.44ms +step:1575/1670 train_time:150314ms step_avg:95.44ms +step:1576/1670 train_time:150409ms step_avg:95.44ms +step:1577/1670 train_time:150504ms step_avg:95.44ms +step:1578/1670 train_time:150599ms step_avg:95.44ms +step:1579/1670 train_time:150694ms step_avg:95.44ms +step:1580/1670 train_time:150788ms step_avg:95.44ms +step:1581/1670 train_time:150884ms step_avg:95.44ms +step:1582/1670 train_time:150978ms step_avg:95.44ms +step:1583/1670 train_time:151073ms step_avg:95.43ms +step:1584/1670 train_time:151168ms step_avg:95.43ms +step:1585/1670 train_time:151262ms step_avg:95.43ms +step:1586/1670 train_time:151356ms step_avg:95.43ms +step:1587/1670 train_time:151452ms step_avg:95.43ms +step:1588/1670 train_time:151546ms step_avg:95.43ms +step:1589/1670 train_time:151640ms step_avg:95.43ms +step:1590/1670 train_time:151735ms step_avg:95.43ms +step:1591/1670 train_time:151830ms step_avg:95.43ms +step:1592/1670 train_time:151926ms step_avg:95.43ms +step:1593/1670 train_time:152022ms step_avg:95.43ms +step:1594/1670 train_time:152117ms step_avg:95.43ms +step:1595/1670 train_time:152212ms step_avg:95.43ms +step:1596/1670 train_time:152306ms step_avg:95.43ms +step:1597/1670 train_time:152401ms step_avg:95.43ms +step:1598/1670 train_time:152495ms step_avg:95.43ms +step:1599/1670 train_time:152589ms step_avg:95.43ms +step:1600/1670 train_time:152684ms step_avg:95.43ms +step:1601/1670 train_time:152779ms step_avg:95.43ms +step:1602/1670 train_time:152874ms step_avg:95.43ms +step:1603/1670 train_time:152970ms step_avg:95.43ms +step:1604/1670 train_time:153065ms step_avg:95.43ms +step:1605/1670 train_time:153160ms step_avg:95.43ms +step:1606/1670 train_time:153254ms step_avg:95.43ms +step:1607/1670 train_time:153348ms step_avg:95.43ms +step:1608/1670 train_time:153443ms step_avg:95.42ms +step:1609/1670 train_time:153538ms step_avg:95.42ms +step:1610/1670 train_time:153632ms step_avg:95.42ms +step:1611/1670 train_time:153727ms step_avg:95.42ms +step:1612/1670 train_time:153822ms step_avg:95.42ms +step:1613/1670 train_time:153917ms step_avg:95.42ms +step:1614/1670 train_time:154012ms step_avg:95.42ms +step:1615/1670 train_time:154107ms step_avg:95.42ms +step:1616/1670 train_time:154201ms step_avg:95.42ms +step:1617/1670 train_time:154296ms step_avg:95.42ms +step:1618/1670 train_time:154391ms step_avg:95.42ms +step:1619/1670 train_time:154486ms step_avg:95.42ms +step:1620/1670 train_time:154582ms step_avg:95.42ms +step:1621/1670 train_time:154676ms step_avg:95.42ms +step:1622/1670 train_time:154771ms step_avg:95.42ms +step:1623/1670 train_time:154866ms step_avg:95.42ms +step:1624/1670 train_time:154961ms step_avg:95.42ms +step:1625/1670 train_time:155056ms step_avg:95.42ms +step:1625/1670 val_loss:3.2879 train_time:155148ms step_avg:95.48ms +step:1626/1670 train_time:155176ms step_avg:95.43ms +step:1627/1670 train_time:155251ms step_avg:95.42ms +step:1628/1670 train_time:155351ms step_avg:95.42ms +step:1629/1670 train_time:155448ms step_avg:95.43ms +step:1630/1670 train_time:155542ms step_avg:95.42ms +step:1631/1670 train_time:155636ms step_avg:95.42ms +step:1632/1670 train_time:155729ms step_avg:95.42ms +step:1633/1670 train_time:155822ms step_avg:95.42ms +step:1634/1670 train_time:155916ms step_avg:95.42ms +step:1635/1670 train_time:156010ms step_avg:95.42ms +step:1636/1670 train_time:156105ms step_avg:95.42ms +step:1637/1670 train_time:156203ms step_avg:95.42ms +step:1638/1670 train_time:156302ms step_avg:95.42ms +step:1639/1670 train_time:156398ms step_avg:95.42ms +step:1640/1670 
train_time:156494ms step_avg:95.42ms +step:1641/1670 train_time:156589ms step_avg:95.42ms +step:1642/1670 train_time:156682ms step_avg:95.42ms +step:1643/1670 train_time:156776ms step_avg:95.42ms +step:1644/1670 train_time:156870ms step_avg:95.42ms +step:1645/1670 train_time:156964ms step_avg:95.42ms +step:1646/1670 train_time:157058ms step_avg:95.42ms +step:1647/1670 train_time:157153ms step_avg:95.42ms +step:1648/1670 train_time:157250ms step_avg:95.42ms +step:1649/1670 train_time:157345ms step_avg:95.42ms +step:1650/1670 train_time:157441ms step_avg:95.42ms +step:1651/1670 train_time:157536ms step_avg:95.42ms +step:1652/1670 train_time:157630ms step_avg:95.42ms +step:1653/1670 train_time:157725ms step_avg:95.42ms +step:1654/1670 train_time:157819ms step_avg:95.42ms +step:1655/1670 train_time:157913ms step_avg:95.42ms +step:1656/1670 train_time:158008ms step_avg:95.42ms +step:1657/1670 train_time:158102ms step_avg:95.41ms +step:1658/1670 train_time:158197ms step_avg:95.41ms +step:1659/1670 train_time:158293ms step_avg:95.41ms +step:1660/1670 train_time:158388ms step_avg:95.41ms +step:1661/1670 train_time:158482ms step_avg:95.41ms +step:1662/1670 train_time:158577ms step_avg:95.41ms +step:1663/1670 train_time:158671ms step_avg:95.41ms +step:1664/1670 train_time:158766ms step_avg:95.41ms +step:1665/1670 train_time:158861ms step_avg:95.41ms +step:1666/1670 train_time:158954ms step_avg:95.41ms +step:1667/1670 train_time:159048ms step_avg:95.41ms +step:1668/1670 train_time:159144ms step_avg:95.41ms +step:1669/1670 train_time:159239ms step_avg:95.41ms +step:1670/1670 train_time:159334ms step_avg:95.41ms +step:1670/1670 val_loss:3.2789 train_time:159503ms step_avg:95.51ms +peak memory allocated: 32460 MiB reserved: 47576 MiB diff --git a/records/091025_Yarn/783d22ec-c441-4d93-9fd7-cd00d2c473e8.txt b/records/091025_Yarn/783d22ec-c441-4d93-9fd7-cd00d2c473e8.txt new file mode 100644 index 000000000..44ea15333 --- /dev/null +++ b/records/091025_Yarn/783d22ec-c441-4d93-9fd7-cd00d2c473e8.txt @@ -0,0 +1,2863 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math + +from dataclasses import dataclass +from functools import lru_cache +from itertools import accumulate +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, 
dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + 
c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels 
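+    # Like ns_line_1_kernel, this kernel exploits symmetry of the output: the
+    # input A is itself symmetric here (it is X @ X.T from NS line 1), so
+    # C = alpha * A @ A.T + beta * A is symmetric as well. Each program computes
+    # one block on the kept side of the diagonal, skips blocks strictly on the
+    # other side, and stores its result both in place and mirrored across the
+    # diagonal, roughly halving the dot-product work. The K-loop below iterates
+    # over M because A must be square.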
+ pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
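+        # Shape-grouped round-robin sharding: __init__ grouped parameters by
+        # shape, so consecutive parameters within a group have identical sizes.
+        # Gradients are averaged across ranks with async reduce_scatter; each
+        # rank then updates only the parameters it owns (momentum, then
+        # Newton-Schulz orthogonalization via newton_schulz_triton, with the LR
+        # scaled by max(1, rows/cols)**0.5), and the fresh parameters are
+        # redistributed to all ranks with async all_gather.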
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor): + assert cos.size(0) >= x_BTHD.size(-3) + cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +@dataclass +class AttnArgs: + ve: torch.Tensor + sa_lambdas: torch.Tensor + seqlens: torch.Tensor + bm_size: int + rotary_cos: torch.Tensor + rotary_sin: torch.Tensor + attn_scale: float + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate = CastedLinear(12, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, attn_args: AttnArgs): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + # unpack attention args + rotary_cos, rotary_sin = attn_args.rotary_cos, 
attn_args.rotary_sin + ve, sa_lambdas = attn_args.ve, attn_args.sa_lambdas + seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin) + if ve is not None: + v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @ KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = sa_lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None + # skip MLP blocks for first MLP layer by @EmelyanenkoK + self.mlp = MLP(dim) if layer_idx != 0 else None + + def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args: AttnArgs): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), attn_args) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr 
https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + self.max_seq_len = max_seq_len + self.setup_yarn(head_dim) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def setup_yarn(self, head_dim: int): + # store single copy of rotary tensors + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32) + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)]) + t = torch.arange(self.max_seq_len, dtype=torch.float32) + theta = torch.outer(t, angular_freq) + self.rotary_cos = nn.Buffer(theta.cos(), persistent=False) + self.rotary_sin = nn.Buffer(theta.sin(), persistent=False) + self.angular_freq = angular_freq + + # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd + windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate])) + scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])] + # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor)) + self.attn_scales = dict(zip(windows, attn_scales)) + + def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32): + rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi) + scaling_factor = old_window / new_window + interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1) + self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor) + t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device) + theta = torch.outer(t, self.angular_freq) + self.rotary_cos.copy_(theta.cos()) + self.rotary_sin.copy_(theta.sin()) + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            attn_args = AttnArgs(
+                ve=ve[i],
+                sa_lambdas=sa_lambdas[i],
+                seqlens=seqlens,
+                bm_size=bm_sizes[i],
+                rotary_cos=self.rotary_cos,
+                rotary_sin=self.rotary_sin,
+                attn_scale=self.attn_scales[ws]
+            )
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, x0, lambdas[i], attn_args)
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens after index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+        assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
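+# Illustration only (never called): a minimal sketch of the BOSFinder packing
+# contract on a hypothetical toy shard. Each sequence starts at a BOS token and
+# is cut at the next BOS, at max_seq_len, or at the batch boundary; the per-rank
+# lengths sum to num_tokens_local + 1 so inputs/targets can be offset by one.
+def _bos_finder_sketch():
+    toy = torch.tensor([BOS_ID, 1, 2, BOS_ID, 3, 4, 5, BOS_ID, 6], dtype=torch.uint16)
+    finder = BOSFinder(toy, world_size=1)
+    starts, ends = finder.next_batch(num_tokens_local=4, max_seq_len=8)
+    # documents begin at BOS positions 0 and 3; packed lengths 3 + 2 == 4 + 1
+    assert starts == [[0, 3]] and ends == [[3, 5]]
+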
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1670 # number of iterations to run + cooldown_frac: int = 0.5 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = f"yarn/{uuid.uuid4()}" + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + ws_validate: int = 13 # increase final validation ws @classiclarryd + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + head_dim=128, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + if step == args.num_iterations: + return args.ws_validate + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +ws = get_ws(0) +for step in range(train_steps + 1): + last_step = (step == train_steps) + new_ws = get_ws(step) + if new_ws != ws: + model.apply_yarn(ws, new_ws) + ws=new_ws + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 
1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Thu Sep 11 05:44:12 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | Off | +| N/A 36C P0 120W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | Off | +| N/A 41C P0 125W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | Off | +| N/A 42C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | Off | +| N/A 35C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | Off | +| N/A 35C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | Off | +| N/A 42C P0 127W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | Off | +| N/A 40C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | Off | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 98682 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 98683 C /usr/bin/python3 614MiB | +| 0 N/A N/A 98684 C /usr/bin/python3 614MiB | +| 0 N/A N/A 98685 C /usr/bin/python3 614MiB | +| 0 N/A N/A 98686 C /usr/bin/python3 614MiB | +| 0 N/A N/A 98687 C /usr/bin/python3 614MiB | +| 0 N/A N/A 98688 C /usr/bin/python3 614MiB | +| 0 N/A N/A 98689 C /usr/bin/python3 614MiB | +| 1 N/A N/A 98683 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 98684 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 98685 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 98686 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 98687 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 98688 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 98689 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1670 train_time:458ms step_avg:458.31ms +step:2/1670 train_time:484ms step_avg:241.85ms +step:3/1670 train_time:551ms step_avg:183.55ms +step:4/1670 train_time:641ms step_avg:160.36ms +step:5/1670 train_time:733ms step_avg:146.66ms +step:6/1670 train_time:825ms step_avg:137.57ms +step:7/1670 train_time:917ms step_avg:131.01ms 
+step:8/1670 train_time:1009ms step_avg:126.17ms +step:9/1670 train_time:1101ms step_avg:122.37ms +step:10/1670 train_time:1193ms step_avg:119.28ms +step:11/1670 train_time:1285ms step_avg:116.78ms +step:12/1670 train_time:1376ms step_avg:114.69ms +step:13/1670 train_time:1471ms step_avg:113.18ms +step:14/1670 train_time:1566ms step_avg:111.86ms +step:15/1670 train_time:1659ms step_avg:110.61ms +step:16/1670 train_time:1752ms step_avg:109.52ms +step:17/1670 train_time:1844ms step_avg:108.49ms +step:18/1670 train_time:1937ms step_avg:107.60ms +step:19/1670 train_time:2029ms step_avg:106.80ms +step:20/1670 train_time:2121ms step_avg:106.05ms +step:21/1670 train_time:2213ms step_avg:105.40ms +step:22/1670 train_time:2306ms step_avg:104.83ms +step:23/1670 train_time:2400ms step_avg:104.33ms +step:24/1670 train_time:2493ms step_avg:103.88ms +step:25/1670 train_time:2587ms step_avg:103.46ms +step:26/1670 train_time:2679ms step_avg:103.05ms +step:27/1670 train_time:2773ms step_avg:102.69ms +step:28/1670 train_time:2866ms step_avg:102.34ms +step:29/1670 train_time:2958ms step_avg:102.00ms +step:30/1670 train_time:3050ms step_avg:101.68ms +step:31/1670 train_time:3143ms step_avg:101.38ms +step:32/1670 train_time:3235ms step_avg:101.09ms +step:33/1670 train_time:3328ms step_avg:100.86ms +step:34/1670 train_time:3421ms step_avg:100.63ms +step:35/1670 train_time:3514ms step_avg:100.41ms +step:36/1670 train_time:3607ms step_avg:100.20ms +step:37/1670 train_time:3700ms step_avg:100.00ms +step:38/1670 train_time:3792ms step_avg:99.80ms +step:39/1670 train_time:3886ms step_avg:99.63ms +step:40/1670 train_time:3977ms step_avg:99.43ms +step:41/1670 train_time:4070ms step_avg:99.27ms +step:42/1670 train_time:4163ms step_avg:99.12ms +step:43/1670 train_time:4255ms step_avg:98.96ms +step:44/1670 train_time:4349ms step_avg:98.84ms +step:45/1670 train_time:4442ms step_avg:98.71ms +step:46/1670 train_time:4536ms step_avg:98.60ms +step:47/1670 train_time:4630ms step_avg:98.50ms +step:48/1670 train_time:4722ms step_avg:98.38ms +step:49/1670 train_time:4815ms step_avg:98.26ms +step:50/1670 train_time:4907ms step_avg:98.15ms +step:51/1670 train_time:5000ms step_avg:98.04ms +step:52/1670 train_time:5093ms step_avg:97.93ms +step:53/1670 train_time:5185ms step_avg:97.82ms +step:54/1670 train_time:5277ms step_avg:97.72ms +step:55/1670 train_time:5370ms step_avg:97.63ms +step:56/1670 train_time:5462ms step_avg:97.54ms +step:57/1670 train_time:5555ms step_avg:97.46ms +step:58/1670 train_time:5649ms step_avg:97.40ms +step:59/1670 train_time:5742ms step_avg:97.32ms +step:60/1670 train_time:5835ms step_avg:97.24ms +step:61/1670 train_time:5927ms step_avg:97.16ms +step:62/1670 train_time:6019ms step_avg:97.08ms +step:63/1670 train_time:6111ms step_avg:97.01ms +step:64/1670 train_time:6204ms step_avg:96.94ms +step:65/1670 train_time:6297ms step_avg:96.87ms +step:66/1670 train_time:6390ms step_avg:96.81ms +step:67/1670 train_time:6482ms step_avg:96.74ms +step:68/1670 train_time:6575ms step_avg:96.69ms +step:69/1670 train_time:6668ms step_avg:96.63ms +step:70/1670 train_time:6761ms step_avg:96.58ms +step:71/1670 train_time:6853ms step_avg:96.52ms +step:72/1670 train_time:6946ms step_avg:96.47ms +step:73/1670 train_time:7038ms step_avg:96.41ms +step:74/1670 train_time:7131ms step_avg:96.37ms +step:75/1670 train_time:7224ms step_avg:96.32ms +step:76/1670 train_time:7316ms step_avg:96.27ms +step:77/1670 train_time:7409ms step_avg:96.23ms +step:78/1670 train_time:7502ms step_avg:96.17ms +step:79/1670 train_time:7595ms 
step_avg:96.14ms +step:80/1670 train_time:7688ms step_avg:96.10ms +step:81/1670 train_time:7780ms step_avg:96.05ms +step:82/1670 train_time:7873ms step_avg:96.01ms +step:83/1670 train_time:7966ms step_avg:95.98ms +step:84/1670 train_time:8058ms step_avg:95.93ms +step:85/1670 train_time:8151ms step_avg:95.90ms +step:86/1670 train_time:8244ms step_avg:95.86ms +step:87/1670 train_time:8337ms step_avg:95.82ms +step:88/1670 train_time:8430ms step_avg:95.79ms +step:89/1670 train_time:8522ms step_avg:95.76ms +step:90/1670 train_time:8614ms step_avg:95.71ms +step:91/1670 train_time:8707ms step_avg:95.68ms +step:92/1670 train_time:8800ms step_avg:95.65ms +step:93/1670 train_time:8892ms step_avg:95.62ms +step:94/1670 train_time:8985ms step_avg:95.58ms +step:95/1670 train_time:9077ms step_avg:95.54ms +step:96/1670 train_time:9169ms step_avg:95.51ms +step:97/1670 train_time:9262ms step_avg:95.49ms +step:98/1670 train_time:9354ms step_avg:95.45ms +step:99/1670 train_time:9447ms step_avg:95.42ms +step:100/1670 train_time:9540ms step_avg:95.40ms +step:101/1670 train_time:9633ms step_avg:95.38ms +step:102/1670 train_time:9727ms step_avg:95.36ms +step:103/1670 train_time:9818ms step_avg:95.32ms +step:104/1670 train_time:9911ms step_avg:95.30ms +step:105/1670 train_time:10003ms step_avg:95.27ms +step:106/1670 train_time:10096ms step_avg:95.24ms +step:107/1670 train_time:10188ms step_avg:95.21ms +step:108/1670 train_time:10280ms step_avg:95.18ms +step:109/1670 train_time:10373ms step_avg:95.16ms +step:110/1670 train_time:10465ms step_avg:95.14ms +step:111/1670 train_time:10557ms step_avg:95.11ms +step:112/1670 train_time:10650ms step_avg:95.09ms +step:113/1670 train_time:10742ms step_avg:95.07ms +step:114/1670 train_time:10835ms step_avg:95.04ms +step:115/1670 train_time:10928ms step_avg:95.03ms +step:116/1670 train_time:11020ms step_avg:95.00ms +step:117/1670 train_time:11112ms step_avg:94.98ms +step:118/1670 train_time:11205ms step_avg:94.96ms +step:119/1670 train_time:11298ms step_avg:94.94ms +step:120/1670 train_time:11391ms step_avg:94.92ms +step:121/1670 train_time:11482ms step_avg:94.89ms +step:122/1670 train_time:11574ms step_avg:94.87ms +step:123/1670 train_time:11667ms step_avg:94.86ms +step:124/1670 train_time:11760ms step_avg:94.84ms +step:125/1670 train_time:11853ms step_avg:94.82ms +step:125/1670 val_loss:4.2986 train_time:11943ms step_avg:95.54ms +step:126/1670 train_time:11970ms step_avg:95.00ms +step:127/1670 train_time:12041ms step_avg:94.81ms +step:128/1670 train_time:12144ms step_avg:94.88ms +step:129/1670 train_time:12242ms step_avg:94.90ms +step:130/1670 train_time:12336ms step_avg:94.89ms +step:131/1670 train_time:12427ms step_avg:94.86ms +step:132/1670 train_time:12518ms step_avg:94.84ms +step:133/1670 train_time:12609ms step_avg:94.81ms +step:134/1670 train_time:12701ms step_avg:94.78ms +step:135/1670 train_time:12793ms step_avg:94.76ms +step:136/1670 train_time:12884ms step_avg:94.74ms +step:137/1670 train_time:12976ms step_avg:94.71ms +step:138/1670 train_time:13070ms step_avg:94.71ms +step:139/1670 train_time:13164ms step_avg:94.70ms +step:140/1670 train_time:13258ms step_avg:94.70ms +step:141/1670 train_time:13351ms step_avg:94.69ms +step:142/1670 train_time:13443ms step_avg:94.67ms +step:143/1670 train_time:13534ms step_avg:94.65ms +step:144/1670 train_time:13626ms step_avg:94.63ms +step:145/1670 train_time:13718ms step_avg:94.60ms +step:146/1670 train_time:13810ms step_avg:94.59ms +step:147/1670 train_time:13902ms step_avg:94.57ms +step:148/1670 train_time:13993ms 
+step:148/1670 train_time:13993ms step_avg:94.55ms
+step:149/1670 train_time:14087ms step_avg:94.54ms
+step:150/1670 train_time:14180ms step_avg:94.54ms
+step:151/1670 train_time:14273ms step_avg:94.52ms
+step:152/1670 train_time:14367ms step_avg:94.52ms
+step:153/1670 train_time:14459ms step_avg:94.50ms
+step:154/1670 train_time:14551ms step_avg:94.49ms
+step:155/1670 train_time:14643ms step_avg:94.47ms
+step:156/1670 train_time:14735ms step_avg:94.46ms
+step:157/1670 train_time:14828ms step_avg:94.44ms
+step:158/1670 train_time:14919ms step_avg:94.43ms
+step:159/1670 train_time:15011ms step_avg:94.41ms
+step:160/1670 train_time:15104ms step_avg:94.40ms
+step:161/1670 train_time:15196ms step_avg:94.38ms
+step:162/1670 train_time:15289ms step_avg:94.38ms
+step:163/1670 train_time:15382ms step_avg:94.37ms
+step:164/1670 train_time:15476ms step_avg:94.36ms
+step:165/1670 train_time:15569ms step_avg:94.36ms
+step:166/1670 train_time:15661ms step_avg:94.34ms
+step:167/1670 train_time:15753ms step_avg:94.33ms
+step:168/1670 train_time:15845ms step_avg:94.32ms
+step:169/1670 train_time:15937ms step_avg:94.30ms
+step:170/1670 train_time:16029ms step_avg:94.29ms
+step:171/1670 train_time:16121ms step_avg:94.27ms
+step:172/1670 train_time:16213ms step_avg:94.26ms
+step:173/1670 train_time:16306ms step_avg:94.26ms
+step:174/1670 train_time:16399ms step_avg:94.25ms
+step:175/1670 train_time:16491ms step_avg:94.23ms
+step:176/1670 train_time:16584ms step_avg:94.23ms
+step:177/1670 train_time:16677ms step_avg:94.22ms
+step:178/1670 train_time:16769ms step_avg:94.21ms
+step:179/1670 train_time:16862ms step_avg:94.20ms
+step:180/1670 train_time:16953ms step_avg:94.19ms
+step:181/1670 train_time:17046ms step_avg:94.18ms
+step:182/1670 train_time:17139ms step_avg:94.17ms
+step:183/1670 train_time:17230ms step_avg:94.15ms
+step:184/1670 train_time:17324ms step_avg:94.15ms
+step:185/1670 train_time:17416ms step_avg:94.14ms
+step:186/1670 train_time:17509ms step_avg:94.13ms
+step:187/1670 train_time:17602ms step_avg:94.13ms
+step:188/1670 train_time:17694ms step_avg:94.12ms
+step:189/1670 train_time:17786ms step_avg:94.11ms
+step:190/1670 train_time:17879ms step_avg:94.10ms
+step:191/1670 train_time:17972ms step_avg:94.09ms
+step:192/1670 train_time:18064ms step_avg:94.08ms
+step:193/1670 train_time:18156ms step_avg:94.07ms
+step:194/1670 train_time:18248ms step_avg:94.06ms
+step:195/1670 train_time:18341ms step_avg:94.05ms
+step:196/1670 train_time:18433ms step_avg:94.04ms
+step:197/1670 train_time:18526ms step_avg:94.04ms
+step:198/1670 train_time:18618ms step_avg:94.03ms
+step:199/1670 train_time:18710ms step_avg:94.02ms
+step:200/1670 train_time:18804ms step_avg:94.02ms
+step:201/1670 train_time:18898ms step_avg:94.02ms
+step:202/1670 train_time:18989ms step_avg:94.01ms
+step:203/1670 train_time:19082ms step_avg:94.00ms
+step:204/1670 train_time:19175ms step_avg:94.00ms
+step:205/1670 train_time:19268ms step_avg:93.99ms
+step:206/1670 train_time:19360ms step_avg:93.98ms
+step:207/1670 train_time:19453ms step_avg:93.97ms
+step:208/1670 train_time:19545ms step_avg:93.97ms
+step:209/1670 train_time:19637ms step_avg:93.95ms
+step:210/1670 train_time:19730ms step_avg:93.95ms
+step:211/1670 train_time:19823ms step_avg:93.95ms
+step:212/1670 train_time:19915ms step_avg:93.94ms
+step:213/1670 train_time:20329ms step_avg:95.44ms
+step:214/1670 train_time:20395ms step_avg:95.31ms
+step:215/1670 train_time:20486ms step_avg:95.28ms
+step:216/1670 train_time:20577ms step_avg:95.26ms
+step:217/1670 train_time:20668ms step_avg:95.25ms
+step:218/1670 train_time:20759ms step_avg:95.23ms
+step:219/1670 train_time:20851ms step_avg:95.21ms
+step:220/1670 train_time:20942ms step_avg:95.19ms
+step:221/1670 train_time:21034ms step_avg:95.17ms
+step:222/1670 train_time:21125ms step_avg:95.16ms
+step:223/1670 train_time:21218ms step_avg:95.15ms
+step:224/1670 train_time:21314ms step_avg:95.15ms
+step:225/1670 train_time:21411ms step_avg:95.16ms
+step:226/1670 train_time:21504ms step_avg:95.15ms
+step:227/1670 train_time:21596ms step_avg:95.14ms
+step:228/1670 train_time:21688ms step_avg:95.12ms
+step:229/1670 train_time:21780ms step_avg:95.11ms
+step:230/1670 train_time:21871ms step_avg:95.09ms
+step:231/1670 train_time:21963ms step_avg:95.08ms
+step:232/1670 train_time:22055ms step_avg:95.07ms
+step:233/1670 train_time:22147ms step_avg:95.05ms
+step:234/1670 train_time:22240ms step_avg:95.04ms
+step:235/1670 train_time:22333ms step_avg:95.03ms
+step:236/1670 train_time:22427ms step_avg:95.03ms
+step:237/1670 train_time:22519ms step_avg:95.02ms
+step:238/1670 train_time:22611ms step_avg:95.01ms
+step:239/1670 train_time:22704ms step_avg:95.00ms
+step:240/1670 train_time:22796ms step_avg:94.98ms
+step:241/1670 train_time:22889ms step_avg:94.97ms
+step:242/1670 train_time:22980ms step_avg:94.96ms
+step:243/1670 train_time:23073ms step_avg:94.95ms
+step:244/1670 train_time:23165ms step_avg:94.94ms
+step:245/1670 train_time:23257ms step_avg:94.92ms
+step:246/1670 train_time:23349ms step_avg:94.91ms
+step:247/1670 train_time:23443ms step_avg:94.91ms
+step:248/1670 train_time:23536ms step_avg:94.90ms
+step:249/1670 train_time:23628ms step_avg:94.89ms
+step:250/1670 train_time:23721ms step_avg:94.88ms
+step:250/1670 val_loss:3.9703 train_time:23810ms step_avg:95.24ms
+step:251/1670 train_time:23838ms step_avg:94.97ms
+step:252/1670 train_time:23909ms step_avg:94.88ms
+step:253/1670 train_time:24007ms step_avg:94.89ms
+step:254/1670 train_time:24104ms step_avg:94.90ms
+step:255/1670 train_time:24195ms step_avg:94.88ms
+step:256/1670 train_time:24287ms step_avg:94.87ms
+step:257/1670 train_time:24378ms step_avg:94.86ms
+step:258/1670 train_time:24470ms step_avg:94.84ms
+step:259/1670 train_time:24561ms step_avg:94.83ms
+step:260/1670 train_time:24653ms step_avg:94.82ms
+step:261/1670 train_time:24745ms step_avg:94.81ms
+step:262/1670 train_time:24838ms step_avg:94.80ms
+step:263/1670 train_time:24932ms step_avg:94.80ms
+step:264/1670 train_time:25027ms step_avg:94.80ms
+step:265/1670 train_time:25119ms step_avg:94.79ms
+step:266/1670 train_time:25212ms step_avg:94.78ms
+step:267/1670 train_time:25304ms step_avg:94.77ms
+step:268/1670 train_time:25396ms step_avg:94.76ms
+step:269/1670 train_time:25488ms step_avg:94.75ms
+step:270/1670 train_time:25580ms step_avg:94.74ms
+step:271/1670 train_time:25671ms step_avg:94.73ms
+step:272/1670 train_time:25763ms step_avg:94.72ms
+step:273/1670 train_time:25855ms step_avg:94.71ms
+step:274/1670 train_time:25948ms step_avg:94.70ms
+step:275/1670 train_time:26041ms step_avg:94.70ms
+step:276/1670 train_time:26134ms step_avg:94.69ms
+step:277/1670 train_time:26228ms step_avg:94.69ms
+step:278/1670 train_time:26320ms step_avg:94.68ms
+step:279/1670 train_time:26412ms step_avg:94.67ms
+step:280/1670 train_time:26504ms step_avg:94.66ms
+step:281/1670 train_time:26595ms step_avg:94.65ms
+step:282/1670 train_time:26687ms step_avg:94.63ms
+step:283/1670 train_time:26779ms step_avg:94.62ms
+step:284/1670 train_time:26871ms step_avg:94.62ms
+step:285/1670 train_time:26963ms step_avg:94.61ms
+step:286/1670 train_time:27056ms step_avg:94.60ms
+step:287/1670 train_time:27149ms step_avg:94.60ms
+step:288/1670 train_time:27243ms step_avg:94.59ms
+step:289/1670 train_time:27335ms step_avg:94.58ms
+step:290/1670 train_time:27427ms step_avg:94.58ms
+step:291/1670 train_time:27519ms step_avg:94.57ms
+step:292/1670 train_time:27611ms step_avg:94.56ms
+step:293/1670 train_time:27703ms step_avg:94.55ms
+step:294/1670 train_time:27796ms step_avg:94.54ms
+step:295/1670 train_time:27889ms step_avg:94.54ms
+step:296/1670 train_time:27980ms step_avg:94.53ms
+step:297/1670 train_time:28073ms step_avg:94.52ms
+step:298/1670 train_time:28165ms step_avg:94.51ms
+step:299/1670 train_time:28259ms step_avg:94.51ms
+step:300/1670 train_time:28352ms step_avg:94.51ms
+step:301/1670 train_time:28444ms step_avg:94.50ms
+step:302/1670 train_time:28536ms step_avg:94.49ms
+step:303/1670 train_time:28628ms step_avg:94.48ms
+step:304/1670 train_time:28720ms step_avg:94.47ms
+step:305/1670 train_time:28813ms step_avg:94.47ms
+step:306/1670 train_time:28906ms step_avg:94.46ms
+step:307/1670 train_time:28998ms step_avg:94.46ms
+step:308/1670 train_time:29090ms step_avg:94.45ms
+step:309/1670 train_time:29183ms step_avg:94.44ms
+step:310/1670 train_time:29276ms step_avg:94.44ms
+step:311/1670 train_time:29368ms step_avg:94.43ms
+step:312/1670 train_time:29461ms step_avg:94.42ms
+step:313/1670 train_time:29553ms step_avg:94.42ms
+step:314/1670 train_time:29645ms step_avg:94.41ms
+step:315/1670 train_time:29738ms step_avg:94.41ms
+step:316/1670 train_time:29831ms step_avg:94.40ms
+step:317/1670 train_time:29923ms step_avg:94.40ms
+step:318/1670 train_time:30016ms step_avg:94.39ms
+step:319/1670 train_time:30108ms step_avg:94.38ms
+step:320/1670 train_time:30200ms step_avg:94.38ms
+step:321/1670 train_time:30293ms step_avg:94.37ms
+step:322/1670 train_time:30385ms step_avg:94.36ms
+step:323/1670 train_time:30478ms step_avg:94.36ms
+step:324/1670 train_time:30570ms step_avg:94.35ms
+step:325/1670 train_time:30662ms step_avg:94.34ms
+step:326/1670 train_time:30755ms step_avg:94.34ms
+step:327/1670 train_time:30848ms step_avg:94.34ms
+step:328/1670 train_time:30940ms step_avg:94.33ms
+step:329/1670 train_time:31032ms step_avg:94.32ms
+step:330/1670 train_time:31124ms step_avg:94.32ms
+step:331/1670 train_time:31217ms step_avg:94.31ms
+step:332/1670 train_time:31309ms step_avg:94.30ms
+step:333/1670 train_time:31401ms step_avg:94.30ms
+step:334/1670 train_time:31495ms step_avg:94.29ms
+step:335/1670 train_time:31586ms step_avg:94.29ms
+step:336/1670 train_time:31678ms step_avg:94.28ms
+step:337/1670 train_time:31771ms step_avg:94.27ms
+step:338/1670 train_time:31863ms step_avg:94.27ms
+step:339/1670 train_time:31956ms step_avg:94.27ms
+step:340/1670 train_time:32049ms step_avg:94.26ms
+step:341/1670 train_time:32141ms step_avg:94.25ms
+step:342/1670 train_time:32234ms step_avg:94.25ms
+step:343/1670 train_time:32326ms step_avg:94.24ms
+step:344/1670 train_time:32418ms step_avg:94.24ms
+step:345/1670 train_time:32511ms step_avg:94.23ms
+step:346/1670 train_time:32603ms step_avg:94.23ms
+step:347/1670 train_time:32695ms step_avg:94.22ms
+step:348/1670 train_time:32788ms step_avg:94.22ms
+step:349/1670 train_time:32880ms step_avg:94.21ms
+step:350/1670 train_time:32973ms step_avg:94.21ms
+step:351/1670 train_time:33065ms step_avg:94.20ms
+step:352/1670 train_time:33159ms step_avg:94.20ms
+step:353/1670 train_time:33252ms step_avg:94.20ms
+step:354/1670 train_time:33343ms step_avg:94.19ms
+step:355/1670 train_time:33436ms step_avg:94.19ms
+step:356/1670 train_time:33528ms step_avg:94.18ms
+step:357/1670 train_time:33621ms step_avg:94.18ms
+step:358/1670 train_time:33714ms step_avg:94.17ms
+step:359/1670 train_time:33806ms step_avg:94.17ms
+step:360/1670 train_time:33898ms step_avg:94.16ms
+step:361/1670 train_time:33991ms step_avg:94.16ms
+step:362/1670 train_time:34083ms step_avg:94.15ms
+step:363/1670 train_time:34176ms step_avg:94.15ms
+step:364/1670 train_time:34269ms step_avg:94.14ms
+step:365/1670 train_time:34361ms step_avg:94.14ms
+step:366/1670 train_time:34454ms step_avg:94.14ms
+step:367/1670 train_time:34545ms step_avg:94.13ms
+step:368/1670 train_time:34638ms step_avg:94.13ms
+step:369/1670 train_time:34732ms step_avg:94.12ms
+step:370/1670 train_time:34824ms step_avg:94.12ms
+step:371/1670 train_time:34917ms step_avg:94.12ms
+step:372/1670 train_time:35009ms step_avg:94.11ms
+step:373/1670 train_time:35102ms step_avg:94.11ms
+step:374/1670 train_time:35195ms step_avg:94.10ms
+step:375/1670 train_time:35288ms step_avg:94.10ms
+step:375/1670 val_loss:3.8130 train_time:35377ms step_avg:94.34ms
+step:376/1670 train_time:35404ms step_avg:94.16ms
+step:377/1670 train_time:35478ms step_avg:94.11ms
+step:378/1670 train_time:35578ms step_avg:94.12ms
+step:379/1670 train_time:35672ms step_avg:94.12ms
+step:380/1670 train_time:35764ms step_avg:94.12ms
+step:381/1670 train_time:35856ms step_avg:94.11ms
+step:382/1670 train_time:35947ms step_avg:94.10ms
+step:383/1670 train_time:36039ms step_avg:94.10ms
+step:384/1670 train_time:36130ms step_avg:94.09ms
+step:385/1670 train_time:36222ms step_avg:94.08ms
+step:386/1670 train_time:36313ms step_avg:94.08ms
+step:387/1670 train_time:36408ms step_avg:94.08ms
+step:388/1670 train_time:36502ms step_avg:94.08ms
+step:389/1670 train_time:36596ms step_avg:94.08ms
+step:390/1670 train_time:36688ms step_avg:94.07ms
+step:391/1670 train_time:36782ms step_avg:94.07ms
+step:392/1670 train_time:36874ms step_avg:94.07ms
+step:393/1670 train_time:36966ms step_avg:94.06ms
+step:394/1670 train_time:37057ms step_avg:94.05ms
+step:395/1670 train_time:37149ms step_avg:94.05ms
+step:396/1670 train_time:37240ms step_avg:94.04ms
+step:397/1670 train_time:37332ms step_avg:94.04ms
+step:398/1670 train_time:37425ms step_avg:94.03ms
+step:399/1670 train_time:37518ms step_avg:94.03ms
+step:400/1670 train_time:37611ms step_avg:94.03ms
+step:401/1670 train_time:37704ms step_avg:94.03ms
+step:402/1670 train_time:37797ms step_avg:94.02ms
+step:403/1670 train_time:37889ms step_avg:94.02ms
+step:404/1670 train_time:37981ms step_avg:94.01ms
+step:405/1670 train_time:38073ms step_avg:94.01ms
+step:406/1670 train_time:38165ms step_avg:94.00ms
+step:407/1670 train_time:38257ms step_avg:94.00ms
+step:408/1670 train_time:38350ms step_avg:93.99ms
+step:409/1670 train_time:38443ms step_avg:93.99ms
+step:410/1670 train_time:38536ms step_avg:93.99ms
+step:411/1670 train_time:38629ms step_avg:93.99ms
+step:412/1670 train_time:38721ms step_avg:93.98ms
+step:413/1670 train_time:38813ms step_avg:93.98ms
+step:414/1670 train_time:38906ms step_avg:93.98ms
+step:415/1670 train_time:38999ms step_avg:93.97ms
+step:416/1670 train_time:39090ms step_avg:93.97ms
+step:417/1670 train_time:39183ms step_avg:93.96ms
+step:418/1670 train_time:39275ms step_avg:93.96ms
+step:419/1670 train_time:39367ms step_avg:93.96ms
+step:420/1670 train_time:39460ms step_avg:93.95ms
+step:421/1670 train_time:39552ms step_avg:93.95ms
+step:422/1670 train_time:39645ms step_avg:93.94ms
+step:423/1670 train_time:39737ms step_avg:93.94ms
+step:424/1670 train_time:39829ms step_avg:93.94ms
+step:425/1670 train_time:40145ms step_avg:94.46ms
+step:426/1670 train_time:40235ms step_avg:94.45ms
+step:427/1670 train_time:40325ms step_avg:94.44ms
+step:428/1670 train_time:40416ms step_avg:94.43ms
+step:429/1670 train_time:40507ms step_avg:94.42ms
+step:430/1670 train_time:40599ms step_avg:94.42ms
+step:431/1670 train_time:40690ms step_avg:94.41ms
+step:432/1670 train_time:40782ms step_avg:94.40ms
+step:433/1670 train_time:40873ms step_avg:94.40ms
+step:434/1670 train_time:40964ms step_avg:94.39ms
+step:435/1670 train_time:41058ms step_avg:94.39ms
+step:436/1670 train_time:41155ms step_avg:94.39ms
+step:437/1670 train_time:41249ms step_avg:94.39ms
+step:438/1670 train_time:41343ms step_avg:94.39ms
+step:439/1670 train_time:41435ms step_avg:94.39ms
+step:440/1670 train_time:41527ms step_avg:94.38ms
+step:441/1670 train_time:41619ms step_avg:94.37ms
+step:442/1670 train_time:41710ms step_avg:94.37ms
+step:443/1670 train_time:41801ms step_avg:94.36ms
+step:444/1670 train_time:41892ms step_avg:94.35ms
+step:445/1670 train_time:41985ms step_avg:94.35ms
+step:446/1670 train_time:42078ms step_avg:94.35ms
+step:447/1670 train_time:42172ms step_avg:94.34ms
+step:448/1670 train_time:42265ms step_avg:94.34ms
+step:449/1670 train_time:42358ms step_avg:94.34ms
+step:450/1670 train_time:42451ms step_avg:94.34ms
+step:451/1670 train_time:42543ms step_avg:94.33ms
+step:452/1670 train_time:42635ms step_avg:94.32ms
+step:453/1670 train_time:42727ms step_avg:94.32ms
+step:454/1670 train_time:42818ms step_avg:94.31ms
+step:455/1670 train_time:42909ms step_avg:94.31ms
+step:456/1670 train_time:43002ms step_avg:94.30ms
+step:457/1670 train_time:43095ms step_avg:94.30ms
+step:458/1670 train_time:43187ms step_avg:94.30ms
+step:459/1670 train_time:43281ms step_avg:94.29ms
+step:460/1670 train_time:43374ms step_avg:94.29ms
+step:461/1670 train_time:43468ms step_avg:94.29ms
+step:462/1670 train_time:43560ms step_avg:94.29ms
+step:463/1670 train_time:43652ms step_avg:94.28ms
+step:464/1670 train_time:43744ms step_avg:94.28ms
+step:465/1670 train_time:43836ms step_avg:94.27ms
+step:466/1670 train_time:43927ms step_avg:94.26ms
+step:467/1670 train_time:44020ms step_avg:94.26ms
+step:468/1670 train_time:44111ms step_avg:94.25ms
+step:469/1670 train_time:44204ms step_avg:94.25ms
+step:470/1670 train_time:44297ms step_avg:94.25ms
+step:471/1670 train_time:44389ms step_avg:94.24ms
+step:472/1670 train_time:44484ms step_avg:94.25ms
+step:473/1670 train_time:44578ms step_avg:94.24ms
+step:474/1670 train_time:44670ms step_avg:94.24ms
+step:475/1670 train_time:44762ms step_avg:94.24ms
+step:476/1670 train_time:44855ms step_avg:94.23ms
+step:477/1670 train_time:44947ms step_avg:94.23ms
+step:478/1670 train_time:45038ms step_avg:94.22ms
+step:479/1670 train_time:45130ms step_avg:94.22ms
+step:480/1670 train_time:45224ms step_avg:94.22ms
+step:481/1670 train_time:45316ms step_avg:94.21ms
+step:482/1670 train_time:45409ms step_avg:94.21ms
+step:483/1670 train_time:45503ms step_avg:94.21ms
+step:484/1670 train_time:45596ms step_avg:94.21ms
+step:485/1670 train_time:45688ms step_avg:94.20ms
+step:486/1670 train_time:45780ms step_avg:94.20ms
+step:487/1670 train_time:45873ms step_avg:94.19ms
+step:488/1670 train_time:45965ms step_avg:94.19ms
+step:489/1670 train_time:46057ms step_avg:94.19ms
+step:490/1670 train_time:46149ms step_avg:94.18ms
+step:491/1670 train_time:46242ms step_avg:94.18ms
+step:492/1670 train_time:46334ms step_avg:94.18ms
+step:493/1670 train_time:46427ms step_avg:94.17ms
+step:494/1670 train_time:46520ms step_avg:94.17ms
+step:495/1670 train_time:46612ms step_avg:94.17ms
+step:496/1670 train_time:46704ms step_avg:94.16ms
+step:497/1670 train_time:46796ms step_avg:94.16ms
+step:498/1670 train_time:46888ms step_avg:94.15ms
+step:499/1670 train_time:46981ms step_avg:94.15ms
+step:500/1670 train_time:47073ms step_avg:94.15ms
+step:500/1670 val_loss:3.7124 train_time:47163ms step_avg:94.33ms
+step:501/1670 train_time:47190ms step_avg:94.19ms
+step:502/1670 train_time:47261ms step_avg:94.15ms
+step:503/1670 train_time:47361ms step_avg:94.16ms
+step:504/1670 train_time:47456ms step_avg:94.16ms
+step:505/1670 train_time:47549ms step_avg:94.16ms
+step:506/1670 train_time:47640ms step_avg:94.15ms
+step:507/1670 train_time:47732ms step_avg:94.15ms
+step:508/1670 train_time:47823ms step_avg:94.14ms
+step:509/1670 train_time:47915ms step_avg:94.13ms
+step:510/1670 train_time:48006ms step_avg:94.13ms
+step:511/1670 train_time:48098ms step_avg:94.12ms
+step:512/1670 train_time:48190ms step_avg:94.12ms
+step:513/1670 train_time:48283ms step_avg:94.12ms
+step:514/1670 train_time:48379ms step_avg:94.12ms
+step:515/1670 train_time:48473ms step_avg:94.12ms
+step:516/1670 train_time:48565ms step_avg:94.12ms
+step:517/1670 train_time:48658ms step_avg:94.12ms
+step:518/1670 train_time:48750ms step_avg:94.11ms
+step:519/1670 train_time:48841ms step_avg:94.11ms
+step:520/1670 train_time:48933ms step_avg:94.10ms
+step:521/1670 train_time:49025ms step_avg:94.10ms
+step:522/1670 train_time:49117ms step_avg:94.09ms
+step:523/1670 train_time:49209ms step_avg:94.09ms
+step:524/1670 train_time:49302ms step_avg:94.09ms
+step:525/1670 train_time:49396ms step_avg:94.09ms
+step:526/1670 train_time:49489ms step_avg:94.09ms
+step:527/1670 train_time:49582ms step_avg:94.08ms
+step:528/1670 train_time:49674ms step_avg:94.08ms
+step:529/1670 train_time:49766ms step_avg:94.07ms
+step:530/1670 train_time:49858ms step_avg:94.07ms
+step:531/1670 train_time:49950ms step_avg:94.07ms
+step:532/1670 train_time:50042ms step_avg:94.06ms
+step:533/1670 train_time:50135ms step_avg:94.06ms
+step:534/1670 train_time:50227ms step_avg:94.06ms
+step:535/1670 train_time:50320ms step_avg:94.06ms
+step:536/1670 train_time:50413ms step_avg:94.05ms
+step:537/1670 train_time:50506ms step_avg:94.05ms
+step:538/1670 train_time:50598ms step_avg:94.05ms
+step:539/1670 train_time:50691ms step_avg:94.05ms
+step:540/1670 train_time:50784ms step_avg:94.04ms
+step:541/1670 train_time:50876ms step_avg:94.04ms
+step:542/1670 train_time:50968ms step_avg:94.04ms
+step:543/1670 train_time:51059ms step_avg:94.03ms
+step:544/1670 train_time:51152ms step_avg:94.03ms
+step:545/1670 train_time:51244ms step_avg:94.03ms
+step:546/1670 train_time:51338ms step_avg:94.03ms
+step:547/1670 train_time:51432ms step_avg:94.03ms
+step:548/1670 train_time:51524ms step_avg:94.02ms
+step:549/1670 train_time:51617ms step_avg:94.02ms
+step:550/1670 train_time:51709ms step_avg:94.02ms
+step:551/1670 train_time:51801ms step_avg:94.01ms
+step:552/1670 train_time:51893ms step_avg:94.01ms
+step:553/1670 train_time:51985ms step_avg:94.01ms
+step:554/1670 train_time:52077ms step_avg:94.00ms
+step:555/1670 train_time:52169ms step_avg:94.00ms
+step:556/1670 train_time:52261ms step_avg:93.99ms
+step:557/1670 train_time:52354ms step_avg:93.99ms
+step:558/1670 train_time:52545ms step_avg:94.17ms
+step:559/1670 train_time:52624ms step_avg:94.14ms
+step:560/1670 train_time:52716ms step_avg:94.14ms
+step:561/1670 train_time:52809ms step_avg:94.13ms
+step:562/1670 train_time:52901ms step_avg:94.13ms
+step:563/1670 train_time:52994ms step_avg:94.13ms
+step:564/1670 train_time:53087ms step_avg:94.13ms
+step:565/1670 train_time:53179ms step_avg:94.12ms
+step:566/1670 train_time:53272ms step_avg:94.12ms
+step:567/1670 train_time:53365ms step_avg:94.12ms
+step:568/1670 train_time:53464ms step_avg:94.13ms
+step:569/1670 train_time:53561ms step_avg:94.13ms
+step:570/1670 train_time:53656ms step_avg:94.13ms
+step:571/1670 train_time:53749ms step_avg:94.13ms
+step:572/1670 train_time:53841ms step_avg:94.13ms
+step:573/1670 train_time:53934ms step_avg:94.13ms
+step:574/1670 train_time:54028ms step_avg:94.13ms
+step:575/1670 train_time:54120ms step_avg:94.12ms
+step:576/1670 train_time:54213ms step_avg:94.12ms
+step:577/1670 train_time:54306ms step_avg:94.12ms
+step:578/1670 train_time:54400ms step_avg:94.12ms
+step:579/1670 train_time:54496ms step_avg:94.12ms
+step:580/1670 train_time:54590ms step_avg:94.12ms
+step:581/1670 train_time:54683ms step_avg:94.12ms
+step:582/1670 train_time:54778ms step_avg:94.12ms
+step:583/1670 train_time:54871ms step_avg:94.12ms
+step:584/1670 train_time:54963ms step_avg:94.11ms
+step:585/1670 train_time:55057ms step_avg:94.11ms
+step:586/1670 train_time:55150ms step_avg:94.11ms
+step:587/1670 train_time:55243ms step_avg:94.11ms
+step:588/1670 train_time:55337ms step_avg:94.11ms
+step:589/1670 train_time:55432ms step_avg:94.11ms
+step:590/1670 train_time:55527ms step_avg:94.11ms
+step:591/1670 train_time:55621ms step_avg:94.11ms
+step:592/1670 train_time:55715ms step_avg:94.11ms
+step:593/1670 train_time:55809ms step_avg:94.11ms
+step:594/1670 train_time:55902ms step_avg:94.11ms
+step:595/1670 train_time:55996ms step_avg:94.11ms
+step:596/1670 train_time:56090ms step_avg:94.11ms
+step:597/1670 train_time:56182ms step_avg:94.11ms
+step:598/1670 train_time:56276ms step_avg:94.11ms
+step:599/1670 train_time:56370ms step_avg:94.11ms
+step:600/1670 train_time:56464ms step_avg:94.11ms
+step:601/1670 train_time:56560ms step_avg:94.11ms
+step:602/1670 train_time:56655ms step_avg:94.11ms
+step:603/1670 train_time:56748ms step_avg:94.11ms
+step:604/1670 train_time:56842ms step_avg:94.11ms
+step:605/1670 train_time:56936ms step_avg:94.11ms
+step:606/1670 train_time:57029ms step_avg:94.11ms
+step:607/1670 train_time:57122ms step_avg:94.10ms
+step:608/1670 train_time:57215ms step_avg:94.10ms
+step:609/1670 train_time:57308ms step_avg:94.10ms
+step:610/1670 train_time:57402ms step_avg:94.10ms
+step:611/1670 train_time:57497ms step_avg:94.10ms
+step:612/1670 train_time:57592ms step_avg:94.10ms
+step:613/1670 train_time:57685ms step_avg:94.10ms
+step:614/1670 train_time:57778ms step_avg:94.10ms
+step:615/1670 train_time:57872ms step_avg:94.10ms
+step:616/1670 train_time:57966ms step_avg:94.10ms
+step:617/1670 train_time:58059ms step_avg:94.10ms
+step:618/1670 train_time:58152ms step_avg:94.10ms
+step:619/1670 train_time:58245ms step_avg:94.09ms
+step:620/1670 train_time:58338ms step_avg:94.09ms
+step:621/1670 train_time:58432ms step_avg:94.09ms
+step:622/1670 train_time:58526ms step_avg:94.09ms
+step:623/1670 train_time:58620ms step_avg:94.09ms
+step:624/1670 train_time:58715ms step_avg:94.09ms
+step:625/1670 train_time:58808ms step_avg:94.09ms
+step:625/1670 val_loss:3.6122 train_time:58899ms step_avg:94.24ms
+step:626/1670 train_time:58928ms step_avg:94.13ms
+step:627/1670 train_time:59003ms step_avg:94.10ms
+step:628/1670 train_time:59101ms step_avg:94.11ms
+step:629/1670 train_time:59194ms step_avg:94.11ms
+step:630/1670 train_time:59286ms step_avg:94.10ms
+step:631/1670 train_time:59378ms step_avg:94.10ms
+step:632/1670 train_time:59471ms step_avg:94.10ms
+step:633/1670 train_time:59564ms step_avg:94.10ms
+step:634/1670 train_time:59656ms step_avg:94.09ms
+step:635/1670 train_time:59749ms step_avg:94.09ms
+step:636/1670 train_time:59846ms step_avg:94.10ms
+step:637/1670 train_time:59944ms step_avg:94.10ms
+step:638/1670 train_time:60039ms step_avg:94.11ms
+step:639/1670 train_time:60365ms step_avg:94.47ms
+step:640/1670 train_time:60558ms step_avg:94.62ms
+step:641/1670 train_time:60650ms step_avg:94.62ms
+step:642/1670 train_time:60743ms step_avg:94.62ms
+step:643/1670 train_time:60836ms step_avg:94.61ms
+step:644/1670 train_time:60928ms step_avg:94.61ms
+step:645/1670 train_time:61020ms step_avg:94.61ms
+step:646/1670 train_time:61113ms step_avg:94.60ms
+step:647/1670 train_time:61205ms step_avg:94.60ms
+step:648/1670 train_time:61298ms step_avg:94.60ms
+step:649/1670 train_time:61394ms step_avg:94.60ms
+step:650/1670 train_time:61492ms step_avg:94.60ms
+step:651/1670 train_time:61588ms step_avg:94.61ms
+step:652/1670 train_time:61682ms step_avg:94.60ms
+step:653/1670 train_time:61775ms step_avg:94.60ms
+step:654/1670 train_time:61868ms step_avg:94.60ms
+step:655/1670 train_time:61960ms step_avg:94.60ms
+step:656/1670 train_time:62053ms step_avg:94.59ms
+step:657/1670 train_time:62146ms step_avg:94.59ms
+step:658/1670 train_time:62239ms step_avg:94.59ms
+step:659/1670 train_time:62332ms step_avg:94.59ms
+step:660/1670 train_time:62427ms step_avg:94.59ms
+step:661/1670 train_time:62522ms step_avg:94.59ms
+step:662/1670 train_time:62617ms step_avg:94.59ms
+step:663/1670 train_time:62710ms step_avg:94.59ms
+step:664/1670 train_time:62805ms step_avg:94.59ms
+step:665/1670 train_time:62899ms step_avg:94.58ms
+step:666/1670 train_time:62991ms step_avg:94.58ms
+step:667/1670 train_time:63084ms step_avg:94.58ms
+step:668/1670 train_time:63177ms step_avg:94.58ms
+step:669/1670 train_time:63270ms step_avg:94.57ms
+step:670/1670 train_time:63364ms step_avg:94.57ms
+step:671/1670 train_time:63457ms step_avg:94.57ms
+step:672/1670 train_time:63552ms step_avg:94.57ms
+step:673/1670 train_time:63646ms step_avg:94.57ms
+step:674/1670 train_time:63740ms step_avg:94.57ms
+step:675/1670 train_time:63833ms step_avg:94.57ms
+step:676/1670 train_time:63927ms step_avg:94.57ms
+step:677/1670 train_time:64021ms step_avg:94.57ms
+step:678/1670 train_time:64114ms step_avg:94.56ms
+step:679/1670 train_time:64208ms step_avg:94.56ms
+step:680/1670 train_time:64301ms step_avg:94.56ms
+step:681/1670 train_time:64395ms step_avg:94.56ms
+step:682/1670 train_time:64488ms step_avg:94.56ms
+step:683/1670 train_time:64583ms step_avg:94.56ms
+step:684/1670 train_time:64677ms step_avg:94.56ms
+step:685/1670 train_time:64771ms step_avg:94.56ms
+step:686/1670 train_time:64864ms step_avg:94.55ms
+step:687/1670 train_time:64957ms step_avg:94.55ms
+step:688/1670 train_time:65050ms step_avg:94.55ms
+step:689/1670 train_time:65144ms step_avg:94.55ms
+step:690/1670 train_time:65237ms step_avg:94.55ms
+step:691/1670 train_time:65331ms step_avg:94.55ms
+step:692/1670 train_time:65424ms step_avg:94.54ms
+step:693/1670 train_time:65517ms step_avg:94.54ms
+step:694/1670 train_time:65611ms step_avg:94.54ms
+step:695/1670 train_time:65706ms step_avg:94.54ms
+step:696/1670 train_time:65800ms step_avg:94.54ms
+step:697/1670 train_time:65892ms step_avg:94.54ms
+step:698/1670 train_time:65986ms step_avg:94.54ms
+step:699/1670 train_time:66080ms step_avg:94.54ms
+step:700/1670 train_time:66173ms step_avg:94.53ms
+step:701/1670 train_time:66267ms step_avg:94.53ms
+step:702/1670 train_time:66361ms step_avg:94.53ms
+step:703/1670 train_time:66454ms step_avg:94.53ms
+step:704/1670 train_time:66547ms step_avg:94.53ms
+step:705/1670 train_time:66642ms step_avg:94.53ms
+step:706/1670 train_time:66735ms step_avg:94.53ms
+step:707/1670 train_time:66829ms step_avg:94.52ms
+step:708/1670 train_time:66922ms step_avg:94.52ms
+step:709/1670 train_time:67015ms step_avg:94.52ms
+step:710/1670 train_time:67109ms step_avg:94.52ms
+step:711/1670 train_time:67202ms step_avg:94.52ms
+step:712/1670 train_time:67296ms step_avg:94.52ms
+step:713/1670 train_time:67389ms step_avg:94.51ms
+step:714/1670 train_time:67483ms step_avg:94.51ms
+step:715/1670 train_time:67576ms step_avg:94.51ms
+step:716/1670 train_time:67670ms step_avg:94.51ms
+step:717/1670 train_time:67765ms step_avg:94.51ms
+step:718/1670 train_time:67858ms step_avg:94.51ms
+step:719/1670 train_time:67951ms step_avg:94.51ms
+step:720/1670 train_time:68045ms step_avg:94.51ms
+step:721/1670 train_time:68139ms step_avg:94.51ms
+step:722/1670 train_time:68232ms step_avg:94.50ms
+step:723/1670 train_time:68326ms step_avg:94.50ms
+step:724/1670 train_time:68419ms step_avg:94.50ms
+step:725/1670 train_time:68513ms step_avg:94.50ms
+step:726/1670 train_time:68608ms step_avg:94.50ms
+step:727/1670 train_time:68701ms step_avg:94.50ms
+step:728/1670 train_time:68795ms step_avg:94.50ms
+step:729/1670 train_time:68888ms step_avg:94.50ms
+step:730/1670 train_time:68983ms step_avg:94.50ms
+step:731/1670 train_time:69076ms step_avg:94.49ms
+step:732/1670 train_time:69169ms step_avg:94.49ms
+step:733/1670 train_time:69263ms step_avg:94.49ms
+step:734/1670 train_time:69356ms step_avg:94.49ms
+step:735/1670 train_time:69450ms step_avg:94.49ms
+step:736/1670 train_time:69544ms step_avg:94.49ms
+step:737/1670 train_time:69637ms step_avg:94.49ms
+step:738/1670 train_time:69732ms step_avg:94.49ms
+step:739/1670 train_time:69825ms step_avg:94.49ms
+step:740/1670 train_time:69919ms step_avg:94.48ms
+step:741/1670 train_time:70012ms step_avg:94.48ms
+step:742/1670 train_time:70106ms step_avg:94.48ms
+step:743/1670 train_time:70200ms step_avg:94.48ms
+step:744/1670 train_time:70293ms step_avg:94.48ms
+step:745/1670 train_time:70386ms step_avg:94.48ms
+step:746/1670 train_time:70480ms step_avg:94.48ms
+step:747/1670 train_time:70574ms step_avg:94.48ms
+step:748/1670 train_time:70667ms step_avg:94.47ms
+step:749/1670 train_time:70761ms step_avg:94.47ms
+step:750/1670 train_time:70854ms step_avg:94.47ms
+step:750/1670 val_loss:3.5615 train_time:70945ms step_avg:94.59ms
+step:751/1670 train_time:70972ms step_avg:94.50ms
+step:752/1670 train_time:71049ms step_avg:94.48ms
+step:753/1670 train_time:71149ms step_avg:94.49ms
+step:754/1670 train_time:71244ms step_avg:94.49ms
+step:755/1670 train_time:71338ms step_avg:94.49ms
+step:756/1670 train_time:71430ms step_avg:94.48ms
+step:757/1670 train_time:71523ms step_avg:94.48ms
+step:758/1670 train_time:71616ms step_avg:94.48ms
+step:759/1670 train_time:71709ms step_avg:94.48ms
+step:760/1670 train_time:71802ms step_avg:94.48ms
+step:761/1670 train_time:71894ms step_avg:94.47ms
+step:762/1670 train_time:71988ms step_avg:94.47ms
+step:763/1670 train_time:72084ms step_avg:94.47ms
+step:764/1670 train_time:72180ms step_avg:94.48ms
+step:765/1670 train_time:72274ms step_avg:94.48ms
+step:766/1670 train_time:72367ms step_avg:94.47ms
+step:767/1670 train_time:72460ms step_avg:94.47ms
+step:768/1670 train_time:72554ms step_avg:94.47ms
+step:769/1670 train_time:72646ms step_avg:94.47ms
+step:770/1670 train_time:72740ms step_avg:94.47ms
+step:771/1670 train_time:72832ms step_avg:94.46ms
+step:772/1670 train_time:72925ms step_avg:94.46ms
+step:773/1670 train_time:73021ms step_avg:94.46ms
+step:774/1670 train_time:73118ms step_avg:94.47ms
+step:775/1670 train_time:73213ms step_avg:94.47ms
+step:776/1670 train_time:73306ms step_avg:94.47ms
+step:777/1670 train_time:73399ms step_avg:94.46ms
+step:778/1670 train_time:73492ms step_avg:94.46ms
+step:779/1670 train_time:73585ms step_avg:94.46ms
+step:780/1670 train_time:73679ms step_avg:94.46ms
+step:781/1670 train_time:73772ms step_avg:94.46ms
+step:782/1670 train_time:73865ms step_avg:94.46ms
+step:783/1670 train_time:73958ms step_avg:94.45ms
+step:784/1670 train_time:74053ms step_avg:94.45ms
+step:785/1670 train_time:74147ms step_avg:94.45ms
+step:786/1670 train_time:74241ms step_avg:94.45ms
+step:787/1670 train_time:74335ms step_avg:94.45ms
+step:788/1670 train_time:74428ms step_avg:94.45ms
+step:789/1670 train_time:74522ms step_avg:94.45ms
+step:790/1670 train_time:74616ms step_avg:94.45ms
+step:791/1670 train_time:74709ms step_avg:94.45ms
+step:792/1670 train_time:74802ms step_avg:94.45ms
+step:793/1670 train_time:74895ms step_avg:94.45ms
+step:794/1670 train_time:74988ms step_avg:94.44ms
+step:795/1670 train_time:75082ms step_avg:94.44ms
+step:796/1670 train_time:75178ms step_avg:94.44ms
+step:797/1670 train_time:75272ms step_avg:94.44ms
+step:798/1670 train_time:75365ms step_avg:94.44ms
+step:799/1670 train_time:75459ms step_avg:94.44ms
+step:800/1670 train_time:75552ms step_avg:94.44ms
+step:801/1670 train_time:75646ms step_avg:94.44ms
+step:802/1670 train_time:75740ms step_avg:94.44ms
+step:803/1670 train_time:75834ms step_avg:94.44ms
+step:804/1670 train_time:75926ms step_avg:94.44ms
+step:805/1670 train_time:76019ms step_avg:94.43ms
+step:806/1670 train_time:76114ms step_avg:94.43ms
+step:807/1670 train_time:76208ms step_avg:94.43ms
+step:808/1670 train_time:76302ms step_avg:94.43ms
+step:809/1670 train_time:76396ms step_avg:94.43ms
+step:810/1670 train_time:76489ms step_avg:94.43ms
+step:811/1670 train_time:76583ms step_avg:94.43ms
+step:812/1670 train_time:76676ms step_avg:94.43ms
+step:813/1670 train_time:76769ms step_avg:94.43ms
+step:814/1670 train_time:76863ms step_avg:94.43ms
+step:815/1670 train_time:76957ms step_avg:94.43ms
+step:816/1670 train_time:77052ms step_avg:94.43ms
+step:817/1670 train_time:77146ms step_avg:94.43ms
+step:818/1670 train_time:77240ms step_avg:94.43ms
+step:819/1670 train_time:77334ms step_avg:94.42ms
+step:820/1670 train_time:77426ms step_avg:94.42ms
+step:821/1670 train_time:77521ms step_avg:94.42ms
+step:822/1670 train_time:77614ms step_avg:94.42ms
+step:823/1670 train_time:77707ms step_avg:94.42ms
+step:824/1670 train_time:77801ms step_avg:94.42ms
+step:825/1670 train_time:77895ms step_avg:94.42ms
+step:826/1670 train_time:77988ms step_avg:94.42ms
+step:827/1670 train_time:78082ms step_avg:94.42ms
+step:828/1670 train_time:78176ms step_avg:94.42ms
+step:829/1670 train_time:78270ms step_avg:94.41ms
+step:830/1670 train_time:78364ms step_avg:94.41ms
+step:831/1670 train_time:78458ms step_avg:94.41ms
+step:832/1670 train_time:78551ms step_avg:94.41ms
+step:833/1670 train_time:78645ms step_avg:94.41ms
+step:834/1670 train_time:78739ms step_avg:94.41ms
+step:835/1670 train_time:78832ms step_avg:94.41ms
+step:836/1670 train_time:78925ms step_avg:94.41ms
+step:837/1670 train_time:79020ms step_avg:94.41ms
+step:838/1670 train_time:79114ms step_avg:94.41ms
+step:839/1670 train_time:79207ms step_avg:94.41ms
+step:840/1670 train_time:79301ms step_avg:94.41ms
+step:841/1670 train_time:79395ms step_avg:94.41ms
+step:842/1670 train_time:79488ms step_avg:94.40ms
+step:843/1670 train_time:79582ms step_avg:94.40ms
+step:844/1670 train_time:79676ms step_avg:94.40ms
+step:845/1670 train_time:79769ms step_avg:94.40ms
+step:846/1670 train_time:79862ms step_avg:94.40ms
+step:847/1670 train_time:79956ms step_avg:94.40ms
+step:848/1670 train_time:80049ms step_avg:94.40ms
+step:849/1670 train_time:80143ms step_avg:94.40ms
+step:850/1670 train_time:80237ms step_avg:94.40ms
+step:851/1670 train_time:80571ms step_avg:94.68ms
+step:852/1670 train_time:80758ms step_avg:94.79ms
+step:853/1670 train_time:80849ms step_avg:94.78ms
+step:854/1670 train_time:80942ms step_avg:94.78ms
+step:855/1670 train_time:81035ms step_avg:94.78ms
+step:856/1670 train_time:81127ms step_avg:94.78ms
+step:857/1670 train_time:81221ms step_avg:94.77ms
+step:858/1670 train_time:81313ms step_avg:94.77ms
+step:859/1670 train_time:81406ms step_avg:94.77ms
+step:860/1670 train_time:81498ms step_avg:94.77ms
+step:861/1670 train_time:81594ms step_avg:94.77ms
+step:862/1670 train_time:81691ms step_avg:94.77ms
+step:863/1670 train_time:81786ms step_avg:94.77ms
+step:864/1670 train_time:81880ms step_avg:94.77ms
+step:865/1670 train_time:81973ms step_avg:94.77ms
+step:866/1670 train_time:82067ms step_avg:94.77ms
+step:867/1670 train_time:82160ms step_avg:94.76ms
+step:868/1670 train_time:82252ms step_avg:94.76ms
+step:869/1670 train_time:82345ms step_avg:94.76ms
+step:870/1670 train_time:82438ms step_avg:94.76ms
+step:871/1670 train_time:82532ms step_avg:94.76ms
+step:872/1670 train_time:82627ms step_avg:94.76ms
+step:873/1670 train_time:82722ms step_avg:94.76ms
+step:874/1670 train_time:82817ms step_avg:94.76ms
+step:875/1670 train_time:82910ms step_avg:94.75ms
+step:875/1670 val_loss:3.5171 train_time:83001ms step_avg:94.86ms
+step:876/1670 train_time:83031ms step_avg:94.78ms
+step:877/1670 train_time:83103ms step_avg:94.76ms
+step:878/1670 train_time:83204ms step_avg:94.77ms
+step:879/1670 train_time:83300ms step_avg:94.77ms
+step:880/1670 train_time:83393ms step_avg:94.76ms
+step:881/1670 train_time:83486ms step_avg:94.76ms
+step:882/1670 train_time:83578ms step_avg:94.76ms
+step:883/1670 train_time:83671ms step_avg:94.76ms
+step:884/1670 train_time:83764ms step_avg:94.76ms
+step:885/1670 train_time:83856ms step_avg:94.75ms
+step:886/1670 train_time:83950ms step_avg:94.75ms
+step:887/1670 train_time:84044ms step_avg:94.75ms
+step:888/1670 train_time:84141ms step_avg:94.75ms
+step:889/1670 train_time:84238ms step_avg:94.76ms
+step:890/1670 train_time:84332ms step_avg:94.76ms
+step:891/1670 train_time:84426ms step_avg:94.75ms
+step:892/1670 train_time:84519ms step_avg:94.75ms
+step:893/1670 train_time:84612ms step_avg:94.75ms
+step:894/1670 train_time:84704ms step_avg:94.75ms
+step:895/1670 train_time:84797ms step_avg:94.75ms
+step:896/1670 train_time:84890ms step_avg:94.74ms
+step:897/1670 train_time:84984ms step_avg:94.74ms
+step:898/1670 train_time:85078ms step_avg:94.74ms
+step:899/1670 train_time:85173ms step_avg:94.74ms
+step:900/1670 train_time:85267ms step_avg:94.74ms
+step:901/1670 train_time:85361ms step_avg:94.74ms
+step:902/1670 train_time:85455ms step_avg:94.74ms
+step:903/1670 train_time:85549ms step_avg:94.74ms
+step:904/1670 train_time:85642ms step_avg:94.74ms
+step:905/1670 train_time:85735ms step_avg:94.73ms
+step:906/1670 train_time:85827ms step_avg:94.73ms
+step:907/1670 train_time:85921ms step_avg:94.73ms
+step:908/1670 train_time:86014ms step_avg:94.73ms
+step:909/1670 train_time:86108ms step_avg:94.73ms
+step:910/1670 train_time:86203ms step_avg:94.73ms
+step:911/1670 train_time:86298ms step_avg:94.73ms
+step:912/1670 train_time:86392ms step_avg:94.73ms
+step:913/1670 train_time:86485ms step_avg:94.73ms
+step:914/1670 train_time:86579ms step_avg:94.73ms
+step:915/1670 train_time:86673ms step_avg:94.72ms
+step:916/1670 train_time:86765ms step_avg:94.72ms
+step:917/1670 train_time:86858ms step_avg:94.72ms
+step:918/1670 train_time:86951ms step_avg:94.72ms
+step:919/1670 train_time:87045ms step_avg:94.72ms
+step:920/1670 train_time:87140ms step_avg:94.72ms
+step:921/1670 train_time:87234ms step_avg:94.72ms
+step:922/1670 train_time:87328ms step_avg:94.72ms
+step:923/1670 train_time:87421ms step_avg:94.71ms
+step:924/1670 train_time:87516ms step_avg:94.71ms
+step:925/1670 train_time:87609ms step_avg:94.71ms
+step:926/1670 train_time:87702ms step_avg:94.71ms
+step:927/1670 train_time:87795ms step_avg:94.71ms
+step:928/1670 train_time:87889ms step_avg:94.71ms
+step:929/1670 train_time:87982ms step_avg:94.71ms
+step:930/1670 train_time:88076ms step_avg:94.71ms
+step:931/1670 train_time:88170ms step_avg:94.70ms
+step:932/1670 train_time:88263ms step_avg:94.70ms
+step:933/1670 train_time:88358ms step_avg:94.70ms
+step:934/1670 train_time:88452ms step_avg:94.70ms
+step:935/1670 train_time:88546ms step_avg:94.70ms
+step:936/1670 train_time:88639ms step_avg:94.70ms
+step:937/1670 train_time:88733ms step_avg:94.70ms
+step:938/1670 train_time:88826ms step_avg:94.70ms
+step:939/1670 train_time:88919ms step_avg:94.70ms
+step:940/1670 train_time:89013ms step_avg:94.70ms
+step:941/1670 train_time:89107ms step_avg:94.69ms
+step:942/1670 train_time:89201ms step_avg:94.69ms
+step:943/1670 train_time:89294ms step_avg:94.69ms
+step:944/1670 train_time:89387ms step_avg:94.69ms
+step:945/1670 train_time:89482ms step_avg:94.69ms
+step:946/1670 train_time:89576ms step_avg:94.69ms
+step:947/1670 train_time:89671ms step_avg:94.69ms
+step:948/1670 train_time:89763ms step_avg:94.69ms
+step:949/1670 train_time:89857ms step_avg:94.69ms
+step:950/1670 train_time:89950ms step_avg:94.68ms
+step:951/1670 train_time:90043ms step_avg:94.68ms
+step:952/1670 train_time:90137ms step_avg:94.68ms
+step:953/1670 train_time:90232ms step_avg:94.68ms
+step:954/1670 train_time:90325ms step_avg:94.68ms
+step:955/1670 train_time:90419ms step_avg:94.68ms
+step:956/1670 train_time:90514ms step_avg:94.68ms
+step:957/1670 train_time:90608ms step_avg:94.68ms
+step:958/1670 train_time:90701ms step_avg:94.68ms
+step:959/1670 train_time:90794ms step_avg:94.68ms
+step:960/1670 train_time:90888ms step_avg:94.68ms
+step:961/1670 train_time:90982ms step_avg:94.67ms
+step:962/1670 train_time:91075ms step_avg:94.67ms
+step:963/1670 train_time:91168ms step_avg:94.67ms
+step:964/1670 train_time:91262ms step_avg:94.67ms
+step:965/1670 train_time:91356ms step_avg:94.67ms
+step:966/1670 train_time:91450ms step_avg:94.67ms
+step:967/1670 train_time:91544ms step_avg:94.67ms
+step:968/1670 train_time:91638ms step_avg:94.67ms
+step:969/1670 train_time:91731ms step_avg:94.67ms
+step:970/1670 train_time:91824ms step_avg:94.66ms
+step:971/1670 train_time:91918ms step_avg:94.66ms
+step:972/1670 train_time:92011ms step_avg:94.66ms
+step:973/1670 train_time:92104ms step_avg:94.66ms
+step:974/1670 train_time:92197ms step_avg:94.66ms
+step:975/1670 train_time:92291ms step_avg:94.66ms
+step:976/1670 train_time:92385ms step_avg:94.66ms
+step:977/1670 train_time:92479ms step_avg:94.66ms
+step:978/1670 train_time:92572ms step_avg:94.65ms
+step:979/1670 train_time:92666ms step_avg:94.65ms
+step:980/1670 train_time:92759ms step_avg:94.65ms
+step:981/1670 train_time:92853ms step_avg:94.65ms
+step:982/1670 train_time:92946ms step_avg:94.65ms
+step:983/1670 train_time:93041ms step_avg:94.65ms
+step:984/1670 train_time:93134ms step_avg:94.65ms
+step:985/1670 train_time:93228ms step_avg:94.65ms
+step:986/1670 train_time:93322ms step_avg:94.65ms
+step:987/1670 train_time:93416ms step_avg:94.65ms
+step:988/1670 train_time:93510ms step_avg:94.65ms
+step:989/1670 train_time:93603ms step_avg:94.64ms
+step:990/1670 train_time:93697ms step_avg:94.64ms
+step:991/1670 train_time:93791ms step_avg:94.64ms
+step:992/1670 train_time:93885ms step_avg:94.64ms
+step:993/1670 train_time:93979ms step_avg:94.64ms
+step:994/1670 train_time:94073ms step_avg:94.64ms
+step:995/1670 train_time:94167ms step_avg:94.64ms
+step:996/1670 train_time:94261ms step_avg:94.64ms
+step:997/1670 train_time:94354ms step_avg:94.64ms
+step:998/1670 train_time:94448ms step_avg:94.64ms
+step:999/1670 train_time:94542ms step_avg:94.64ms
+step:1000/1670 train_time:94635ms step_avg:94.63ms
+step:1000/1670 val_loss:3.4675 train_time:94726ms step_avg:94.73ms
+step:1001/1670 train_time:94754ms step_avg:94.66ms
+step:1002/1670 train_time:94828ms step_avg:94.64ms
+step:1003/1670 train_time:94928ms step_avg:94.64ms
+step:1004/1670 train_time:95023ms step_avg:94.64ms
+step:1005/1670 train_time:95117ms step_avg:94.64ms
+step:1006/1670 train_time:95209ms step_avg:94.64ms
+step:1007/1670 train_time:95302ms step_avg:94.64ms
+step:1008/1670 train_time:95394ms step_avg:94.64ms
+step:1009/1670 train_time:95487ms step_avg:94.64ms
+step:1010/1670 train_time:95580ms step_avg:94.63ms
+step:1011/1670 train_time:95674ms step_avg:94.63ms
+step:1012/1670 train_time:95767ms step_avg:94.63ms
+step:1013/1670 train_time:95864ms step_avg:94.63ms
+step:1014/1670 train_time:95958ms step_avg:94.63ms
+step:1015/1670 train_time:96053ms step_avg:94.63ms
+step:1016/1670 train_time:96147ms step_avg:94.63ms
+step:1017/1670 train_time:96240ms step_avg:94.63ms
+step:1018/1670 train_time:96334ms step_avg:94.63ms
+step:1019/1670 train_time:96427ms step_avg:94.63ms
+step:1020/1670 train_time:96521ms step_avg:94.63ms
+step:1021/1670 train_time:96614ms step_avg:94.63ms
+step:1022/1670 train_time:96707ms step_avg:94.63ms
+step:1023/1670 train_time:96801ms step_avg:94.62ms
+step:1024/1670 train_time:96896ms step_avg:94.62ms
+step:1025/1670 train_time:96990ms step_avg:94.62ms
+step:1026/1670 train_time:97085ms step_avg:94.62ms
+step:1027/1670 train_time:97178ms step_avg:94.62ms
+step:1028/1670 train_time:97272ms step_avg:94.62ms
+step:1029/1670 train_time:97365ms step_avg:94.62ms
+step:1030/1670 train_time:97458ms step_avg:94.62ms
+step:1031/1670 train_time:97552ms step_avg:94.62ms
+step:1032/1670 train_time:97645ms step_avg:94.62ms
+step:1033/1670 train_time:97739ms step_avg:94.62ms
+step:1034/1670 train_time:97833ms step_avg:94.62ms
+step:1035/1670 train_time:97926ms step_avg:94.61ms
+step:1036/1670 train_time:98022ms step_avg:94.62ms
+step:1037/1670 train_time:98116ms step_avg:94.62ms
+step:1038/1670 train_time:98209ms step_avg:94.61ms
+step:1039/1670 train_time:98304ms step_avg:94.61ms
+step:1040/1670 train_time:98397ms step_avg:94.61ms
+step:1041/1670 train_time:98490ms step_avg:94.61ms
+step:1042/1670 train_time:98583ms step_avg:94.61ms
+step:1043/1670 train_time:98677ms step_avg:94.61ms
+step:1044/1670 train_time:98771ms step_avg:94.61ms
+step:1045/1670 train_time:98865ms step_avg:94.61ms
+step:1046/1670 train_time:98960ms step_avg:94.61ms
+step:1047/1670 train_time:99054ms step_avg:94.61ms
+step:1048/1670 train_time:99147ms step_avg:94.61ms
+step:1049/1670 train_time:99241ms step_avg:94.61ms
+step:1050/1670 train_time:99335ms step_avg:94.60ms
+step:1051/1670 train_time:99429ms step_avg:94.60ms
+step:1052/1670 train_time:99522ms step_avg:94.60ms
+step:1053/1670 train_time:99615ms step_avg:94.60ms
+step:1054/1670 train_time:99708ms step_avg:94.60ms
+step:1055/1670 train_time:99802ms step_avg:94.60ms
+step:1056/1670 train_time:99896ms step_avg:94.60ms
+step:1057/1670 train_time:99990ms step_avg:94.60ms
+step:1058/1670 train_time:100084ms step_avg:94.60ms
+step:1059/1670 train_time:100179ms step_avg:94.60ms
+step:1060/1670 train_time:100273ms step_avg:94.60ms
+step:1061/1670 train_time:100367ms step_avg:94.60ms
+step:1062/1670 train_time:100712ms step_avg:94.83ms
+step:1063/1670 train_time:100883ms step_avg:94.90ms
+step:1064/1670 train_time:100976ms step_avg:94.90ms
+step:1065/1670 train_time:101068ms step_avg:94.90ms
+step:1066/1670 train_time:101160ms step_avg:94.90ms
+step:1067/1670 train_time:101253ms step_avg:94.89ms
+step:1068/1670 train_time:101345ms step_avg:94.89ms
+step:1069/1670 train_time:101438ms step_avg:94.89ms
+step:1070/1670 train_time:101530ms step_avg:94.89ms
+step:1071/1670 train_time:101622ms step_avg:94.88ms
+step:1072/1670 train_time:101718ms step_avg:94.89ms
+step:1073/1670 train_time:101814ms step_avg:94.89ms
+step:1074/1670 train_time:101909ms step_avg:94.89ms
+step:1075/1670 train_time:102004ms step_avg:94.89ms
+step:1076/1670 train_time:102097ms step_avg:94.89ms
+step:1077/1670 train_time:102191ms step_avg:94.89ms
+step:1078/1670 train_time:102284ms step_avg:94.88ms
+step:1079/1670 train_time:102378ms step_avg:94.88ms
+step:1080/1670 train_time:102471ms step_avg:94.88ms
+step:1081/1670 train_time:102563ms step_avg:94.88ms
+step:1082/1670 train_time:102658ms step_avg:94.88ms
+step:1083/1670 train_time:102752ms step_avg:94.88ms
+step:1084/1670 train_time:102846ms step_avg:94.88ms
+step:1085/1670 train_time:102940ms step_avg:94.88ms
+step:1086/1670 train_time:103034ms step_avg:94.87ms
+step:1087/1670 train_time:103128ms step_avg:94.87ms
+step:1088/1670 train_time:103221ms step_avg:94.87ms
+step:1089/1670 train_time:103314ms step_avg:94.87ms
+step:1090/1670 train_time:103407ms step_avg:94.87ms
+step:1091/1670 train_time:103500ms step_avg:94.87ms
+step:1092/1670 train_time:103594ms step_avg:94.87ms
+step:1093/1670 train_time:103689ms step_avg:94.87ms
+step:1094/1670 train_time:103783ms step_avg:94.87ms
+step:1095/1670 train_time:103877ms step_avg:94.87ms
+step:1096/1670 train_time:103971ms step_avg:94.86ms
+step:1097/1670 train_time:104065ms step_avg:94.86ms
+step:1098/1670 train_time:104158ms step_avg:94.86ms
+step:1099/1670 train_time:104252ms step_avg:94.86ms
+step:1100/1670 train_time:104345ms step_avg:94.86ms
+step:1101/1670 train_time:104439ms step_avg:94.86ms
+step:1102/1670 train_time:104533ms step_avg:94.86ms
+step:1103/1670 train_time:104626ms step_avg:94.86ms
+step:1104/1670 train_time:104720ms step_avg:94.86ms
+step:1105/1670 train_time:104815ms step_avg:94.86ms
+step:1106/1670 train_time:104908ms step_avg:94.85ms
+step:1107/1670 train_time:105002ms step_avg:94.85ms
+step:1108/1670 train_time:105096ms step_avg:94.85ms
+step:1109/1670 train_time:105189ms step_avg:94.85ms
+step:1110/1670 train_time:105283ms step_avg:94.85ms
+step:1111/1670 train_time:105376ms step_avg:94.85ms
+step:1112/1670 train_time:105469ms step_avg:94.85ms
+step:1113/1670 train_time:105562ms step_avg:94.84ms
+step:1114/1670 train_time:105657ms step_avg:94.84ms
+step:1115/1670 train_time:105858ms step_avg:94.94ms
+step:1116/1670 train_time:105929ms step_avg:94.92ms
+step:1117/1670 train_time:106022ms step_avg:94.92ms
+step:1118/1670 train_time:106115ms step_avg:94.92ms
+step:1119/1670 train_time:106208ms step_avg:94.91ms
+step:1120/1670 train_time:106301ms step_avg:94.91ms
+step:1121/1670 train_time:106395ms step_avg:94.91ms
+step:1122/1670 train_time:106488ms step_avg:94.91ms
+step:1123/1670 train_time:106581ms step_avg:94.91ms
+step:1124/1670 train_time:106674ms step_avg:94.91ms
+step:1125/1670 train_time:106774ms step_avg:94.91ms
+step:1125/1670 val_loss:3.4153 train_time:106871ms step_avg:95.00ms
+step:1126/1670 train_time:106898ms step_avg:94.94ms
+step:1127/1670 train_time:106978ms step_avg:94.92ms
+step:1128/1670 train_time:107080ms step_avg:94.93ms
+step:1129/1670 train_time:107175ms step_avg:94.93ms
+step:1130/1670 train_time:107268ms step_avg:94.93ms
+step:1131/1670 train_time:107361ms step_avg:94.93ms
+step:1132/1670 train_time:107455ms step_avg:94.92ms
+step:1133/1670 train_time:107548ms step_avg:94.92ms
+step:1134/1670 train_time:107641ms step_avg:94.92ms
+step:1135/1670 train_time:107735ms step_avg:94.92ms
+step:1136/1670 train_time:107832ms step_avg:94.92ms
+step:1137/1670 train_time:107929ms step_avg:94.92ms
+step:1138/1670 train_time:108025ms step_avg:94.93ms
+step:1139/1670 train_time:108120ms step_avg:94.93ms
+step:1140/1670 train_time:108215ms step_avg:94.93ms
+step:1141/1670 train_time:108309ms step_avg:94.92ms
+step:1142/1670 train_time:108402ms step_avg:94.92ms
+step:1143/1670 train_time:108496ms step_avg:94.92ms
+step:1144/1670 train_time:108590ms step_avg:94.92ms
+step:1145/1670 train_time:108683ms step_avg:94.92ms
+step:1146/1670 train_time:108778ms step_avg:94.92ms
+step:1147/1670 train_time:108875ms step_avg:94.92ms
+step:1148/1670 train_time:108971ms step_avg:94.92ms
+step:1149/1670 train_time:109066ms step_avg:94.92ms
+step:1150/1670 train_time:109160ms step_avg:94.92ms
+step:1151/1670 train_time:109256ms step_avg:94.92ms
+step:1152/1670 train_time:109350ms step_avg:94.92ms
+step:1153/1670 train_time:109443ms step_avg:94.92ms
+step:1154/1670 train_time:109536ms step_avg:94.92ms
+step:1155/1670 train_time:109631ms step_avg:94.92ms
+step:1156/1670 train_time:109725ms step_avg:94.92ms
+step:1157/1670 train_time:109820ms step_avg:94.92ms
+step:1158/1670 train_time:109915ms step_avg:94.92ms
+step:1159/1670 train_time:110011ms step_avg:94.92ms
+step:1160/1670 train_time:110106ms step_avg:94.92ms
+step:1161/1670 train_time:110200ms step_avg:94.92ms
+step:1162/1670 train_time:110295ms step_avg:94.92ms
+step:1163/1670 train_time:110390ms step_avg:94.92ms
+step:1164/1670 train_time:110485ms step_avg:94.92ms
+step:1165/1670 train_time:110578ms step_avg:94.92ms
+step:1166/1670 train_time:110672ms step_avg:94.92ms
+step:1167/1670 train_time:110766ms step_avg:94.92ms
+step:1168/1670 train_time:110860ms step_avg:94.91ms
+step:1169/1670 train_time:110955ms step_avg:94.91ms
+step:1170/1670 train_time:111050ms step_avg:94.91ms
+step:1171/1670 train_time:111145ms step_avg:94.91ms
+step:1172/1670 train_time:111240ms step_avg:94.91ms
+step:1173/1670 train_time:111334ms step_avg:94.91ms
+step:1174/1670 train_time:111429ms step_avg:94.91ms
+step:1175/1670 train_time:111524ms step_avg:94.91ms
+step:1176/1670 train_time:111618ms step_avg:94.91ms
+step:1177/1670 train_time:111712ms step_avg:94.91ms
+step:1178/1670 train_time:111806ms step_avg:94.91ms
+step:1179/1670 train_time:111900ms step_avg:94.91ms
+step:1180/1670 train_time:111994ms step_avg:94.91ms
+step:1181/1670 train_time:112090ms step_avg:94.91ms
+step:1182/1670 train_time:112184ms step_avg:94.91ms
+step:1183/1670 train_time:112278ms step_avg:94.91ms
+step:1184/1670 train_time:112373ms step_avg:94.91ms
+step:1185/1670 train_time:112468ms step_avg:94.91ms
+step:1186/1670 train_time:112561ms step_avg:94.91ms
+step:1187/1670 train_time:112656ms step_avg:94.91ms
+step:1188/1670 train_time:112750ms step_avg:94.91ms
+step:1189/1670 train_time:112845ms step_avg:94.91ms
+step:1190/1670 train_time:112938ms step_avg:94.91ms
+step:1191/1670 train_time:113033ms step_avg:94.91ms
+step:1192/1670 train_time:113129ms step_avg:94.91ms
+step:1193/1670 train_time:113223ms step_avg:94.91ms
+step:1194/1670 train_time:113317ms step_avg:94.91ms
+step:1195/1670 train_time:113412ms step_avg:94.91ms
+step:1196/1670 train_time:113507ms step_avg:94.91ms
+step:1197/1670 train_time:113602ms step_avg:94.91ms
+step:1198/1670 train_time:113696ms step_avg:94.91ms
+step:1199/1670 train_time:113791ms step_avg:94.91ms
+step:1200/1670 train_time:113886ms step_avg:94.90ms
+step:1201/1670 train_time:113979ms step_avg:94.90ms
+step:1202/1670 train_time:114073ms step_avg:94.90ms
+step:1203/1670 train_time:114168ms step_avg:94.90ms
+step:1204/1670 train_time:114262ms step_avg:94.90ms
+step:1205/1670 train_time:114358ms step_avg:94.90ms
+step:1206/1670 train_time:114452ms step_avg:94.90ms
+step:1207/1670 train_time:114546ms step_avg:94.90ms
+step:1208/1670 train_time:114641ms step_avg:94.90ms
+step:1209/1670 train_time:114735ms step_avg:94.90ms
+step:1210/1670 train_time:114830ms step_avg:94.90ms
+step:1211/1670 train_time:114925ms step_avg:94.90ms
+step:1212/1670 train_time:115018ms step_avg:94.90ms
+step:1213/1670 train_time:115113ms step_avg:94.90ms
+step:1214/1670 train_time:115208ms step_avg:94.90ms
+step:1215/1670 train_time:115302ms step_avg:94.90ms
+step:1216/1670 train_time:115396ms step_avg:94.90ms
+step:1217/1670 train_time:115493ms step_avg:94.90ms
+step:1218/1670 train_time:115587ms step_avg:94.90ms
+step:1219/1670 train_time:115681ms step_avg:94.90ms
+step:1220/1670 train_time:115776ms step_avg:94.90ms
+step:1221/1670 train_time:115870ms step_avg:94.90ms
+step:1222/1670 train_time:115964ms step_avg:94.90ms
+step:1223/1670 train_time:116058ms step_avg:94.90ms
+step:1224/1670 train_time:116153ms step_avg:94.90ms
+step:1225/1670 train_time:116248ms step_avg:94.90ms
+step:1226/1670 train_time:116342ms step_avg:94.90ms
+step:1227/1670 train_time:116436ms step_avg:94.89ms
+step:1228/1670 train_time:116530ms step_avg:94.89ms
+step:1229/1670 train_time:116625ms step_avg:94.89ms
+step:1230/1670 train_time:116719ms step_avg:94.89ms
+step:1231/1670 train_time:116813ms step_avg:94.89ms
+step:1232/1670 train_time:116908ms step_avg:94.89ms
+step:1233/1670 train_time:117002ms step_avg:94.89ms
+step:1234/1670 train_time:117097ms step_avg:94.89ms
+step:1235/1670 train_time:117192ms step_avg:94.89ms
+step:1236/1670 train_time:117286ms step_avg:94.89ms
+step:1237/1670 train_time:117380ms step_avg:94.89ms
+step:1238/1670 train_time:117475ms step_avg:94.89ms
+step:1239/1670 train_time:117569ms step_avg:94.89ms
+step:1240/1670 train_time:117664ms step_avg:94.89ms
+step:1241/1670 train_time:117759ms step_avg:94.89ms
+step:1242/1670 train_time:117854ms step_avg:94.89ms
+step:1243/1670 train_time:117949ms step_avg:94.89ms
+step:1244/1670 train_time:118042ms step_avg:94.89ms
+step:1245/1670 train_time:118137ms step_avg:94.89ms
+step:1246/1670 train_time:118232ms step_avg:94.89ms
+step:1247/1670 train_time:118326ms step_avg:94.89ms
+step:1248/1670 train_time:118420ms step_avg:94.89ms
+step:1249/1670 train_time:118514ms step_avg:94.89ms
+step:1250/1670 train_time:118609ms step_avg:94.89ms
+step:1250/1670 val_loss:3.3756 train_time:118702ms step_avg:94.96ms
+step:1251/1670 train_time:118729ms step_avg:94.91ms
+step:1252/1670 train_time:118803ms step_avg:94.89ms
+step:1253/1670 train_time:118902ms step_avg:94.89ms
+step:1254/1670 train_time:118996ms step_avg:94.89ms
+step:1255/1670 train_time:119089ms step_avg:94.89ms
+step:1256/1670 train_time:119183ms step_avg:94.89ms
+step:1257/1670 train_time:119276ms step_avg:94.89ms
+step:1258/1670 train_time:119370ms step_avg:94.89ms
+step:1259/1670 train_time:119463ms step_avg:94.89ms
+step:1260/1670 train_time:119556ms step_avg:94.89ms
+step:1261/1670 train_time:119651ms step_avg:94.89ms
+step:1262/1670 train_time:119747ms step_avg:94.89ms
+step:1263/1670 train_time:119844ms step_avg:94.89ms
+step:1264/1670 train_time:119941ms step_avg:94.89ms
+step:1265/1670 train_time:120035ms step_avg:94.89ms
+step:1266/1670 train_time:120129ms step_avg:94.89ms
+step:1267/1670 train_time:120223ms step_avg:94.89ms
+step:1268/1670 train_time:120316ms step_avg:94.89ms
+step:1269/1670 train_time:120411ms step_avg:94.89ms
+step:1270/1670 train_time:120504ms step_avg:94.88ms
+step:1271/1670 train_time:120597ms step_avg:94.88ms
+step:1272/1670 train_time:120693ms step_avg:94.88ms
+step:1273/1670 train_time:120790ms step_avg:94.89ms
+step:1274/1670 train_time:121237ms step_avg:95.16ms
+step:1275/1670 train_time:121307ms step_avg:95.14ms
+step:1276/1670 train_time:121399ms step_avg:95.14ms
+step:1277/1670 train_time:121493ms step_avg:95.14ms
+step:1278/1670 train_time:121586ms step_avg:95.14ms
+step:1279/1670 train_time:121679ms step_avg:95.14ms
+step:1280/1670 train_time:121773ms step_avg:95.14ms
+step:1281/1670 train_time:121866ms step_avg:95.13ms
+step:1282/1670 train_time:121959ms step_avg:95.13ms
+step:1283/1670 train_time:122053ms step_avg:95.13ms
+step:1284/1670 train_time:122151ms step_avg:95.13ms
+step:1285/1670 train_time:122250ms step_avg:95.14ms
+step:1286/1670 train_time:122347ms step_avg:95.14ms
+step:1287/1670 train_time:122441ms step_avg:95.14ms
+step:1288/1670 train_time:122535ms step_avg:95.14ms
+step:1289/1670 train_time:122629ms step_avg:95.14ms
+step:1290/1670 train_time:122723ms step_avg:95.13ms
+step:1291/1670 train_time:122816ms step_avg:95.13ms
+step:1292/1670 train_time:122911ms step_avg:95.13ms
+step:1293/1670 train_time:123005ms step_avg:95.13ms
+step:1294/1670 train_time:123101ms step_avg:95.13ms
+step:1295/1670 train_time:123197ms step_avg:95.13ms
+step:1296/1670 train_time:123292ms step_avg:95.13ms
+step:1297/1670 train_time:123388ms step_avg:95.13ms
+step:1298/1670 train_time:123482ms step_avg:95.13ms
+step:1299/1670 train_time:123577ms step_avg:95.13ms
+step:1300/1670 train_time:123670ms step_avg:95.13ms
+step:1301/1670 train_time:123766ms step_avg:95.13ms
+step:1302/1670 train_time:123859ms step_avg:95.13ms
+step:1303/1670 train_time:123953ms step_avg:95.13ms
+step:1304/1670 train_time:124047ms step_avg:95.13ms
+step:1305/1670 train_time:124142ms step_avg:95.13ms
+step:1306/1670 train_time:124237ms step_avg:95.13ms
+step:1307/1670 train_time:124333ms step_avg:95.13ms
+step:1308/1670 train_time:124428ms step_avg:95.13ms
step_avg:95.13ms +step:1310/1670 train_time:124616ms step_avg:95.13ms +step:1311/1670 train_time:124711ms step_avg:95.13ms +step:1312/1670 train_time:124805ms step_avg:95.13ms +step:1313/1670 train_time:124899ms step_avg:95.12ms +step:1314/1670 train_time:124994ms step_avg:95.12ms +step:1315/1670 train_time:125088ms step_avg:95.12ms +step:1316/1670 train_time:125183ms step_avg:95.12ms +step:1317/1670 train_time:125277ms step_avg:95.12ms +step:1318/1670 train_time:125371ms step_avg:95.12ms +step:1319/1670 train_time:125467ms step_avg:95.12ms +step:1320/1670 train_time:125562ms step_avg:95.12ms +step:1321/1670 train_time:125656ms step_avg:95.12ms +step:1322/1670 train_time:125750ms step_avg:95.12ms +step:1323/1670 train_time:125845ms step_avg:95.12ms +step:1324/1670 train_time:125937ms step_avg:95.12ms +step:1325/1670 train_time:126031ms step_avg:95.12ms +step:1326/1670 train_time:126127ms step_avg:95.12ms +step:1327/1670 train_time:126221ms step_avg:95.12ms +step:1328/1670 train_time:126316ms step_avg:95.12ms +step:1329/1670 train_time:126411ms step_avg:95.12ms +step:1330/1670 train_time:126506ms step_avg:95.12ms +step:1331/1670 train_time:126601ms step_avg:95.12ms +step:1332/1670 train_time:126694ms step_avg:95.12ms +step:1333/1670 train_time:126789ms step_avg:95.12ms +step:1334/1670 train_time:126882ms step_avg:95.11ms +step:1335/1670 train_time:126976ms step_avg:95.11ms +step:1336/1670 train_time:127070ms step_avg:95.11ms +step:1337/1670 train_time:127166ms step_avg:95.11ms +step:1338/1670 train_time:127261ms step_avg:95.11ms +step:1339/1670 train_time:127354ms step_avg:95.11ms +step:1340/1670 train_time:127450ms step_avg:95.11ms +step:1341/1670 train_time:127545ms step_avg:95.11ms +step:1342/1670 train_time:127640ms step_avg:95.11ms +step:1343/1670 train_time:127734ms step_avg:95.11ms +step:1344/1670 train_time:127829ms step_avg:95.11ms +step:1345/1670 train_time:127923ms step_avg:95.11ms +step:1346/1670 train_time:128017ms step_avg:95.11ms +step:1347/1670 train_time:128111ms step_avg:95.11ms +step:1348/1670 train_time:128205ms step_avg:95.11ms +step:1349/1670 train_time:128299ms step_avg:95.11ms +step:1350/1670 train_time:128394ms step_avg:95.11ms +step:1351/1670 train_time:128489ms step_avg:95.11ms +step:1352/1670 train_time:128584ms step_avg:95.11ms +step:1353/1670 train_time:128678ms step_avg:95.11ms +step:1354/1670 train_time:128772ms step_avg:95.11ms +step:1355/1670 train_time:128866ms step_avg:95.10ms +step:1356/1670 train_time:128961ms step_avg:95.10ms +step:1357/1670 train_time:129056ms step_avg:95.10ms +step:1358/1670 train_time:129150ms step_avg:95.10ms +step:1359/1670 train_time:129246ms step_avg:95.10ms +step:1360/1670 train_time:129341ms step_avg:95.10ms +step:1361/1670 train_time:129434ms step_avg:95.10ms +step:1362/1670 train_time:129530ms step_avg:95.10ms +step:1363/1670 train_time:129624ms step_avg:95.10ms +step:1364/1670 train_time:129717ms step_avg:95.10ms +step:1365/1670 train_time:129812ms step_avg:95.10ms +step:1366/1670 train_time:129906ms step_avg:95.10ms +step:1367/1670 train_time:130001ms step_avg:95.10ms +step:1368/1670 train_time:130096ms step_avg:95.10ms +step:1369/1670 train_time:130190ms step_avg:95.10ms +step:1370/1670 train_time:130286ms step_avg:95.10ms +step:1371/1670 train_time:130381ms step_avg:95.10ms +step:1372/1670 train_time:130474ms step_avg:95.10ms +step:1373/1670 train_time:130570ms step_avg:95.10ms +step:1374/1670 train_time:130664ms step_avg:95.10ms +step:1375/1670 train_time:130759ms step_avg:95.10ms +step:1375/1670 val_loss:3.3417 
train_time:130851ms step_avg:95.16ms +step:1376/1670 train_time:130878ms step_avg:95.11ms +step:1377/1670 train_time:130953ms step_avg:95.10ms +step:1378/1670 train_time:131054ms step_avg:95.10ms +step:1379/1670 train_time:131149ms step_avg:95.10ms +step:1380/1670 train_time:131243ms step_avg:95.10ms +step:1381/1670 train_time:131337ms step_avg:95.10ms +step:1382/1670 train_time:131430ms step_avg:95.10ms +step:1383/1670 train_time:131523ms step_avg:95.10ms +step:1384/1670 train_time:131617ms step_avg:95.10ms +step:1385/1670 train_time:131710ms step_avg:95.10ms +step:1386/1670 train_time:131804ms step_avg:95.10ms +step:1387/1670 train_time:131901ms step_avg:95.10ms +step:1388/1670 train_time:131999ms step_avg:95.10ms +step:1389/1670 train_time:132097ms step_avg:95.10ms +step:1390/1670 train_time:132192ms step_avg:95.10ms +step:1391/1670 train_time:132286ms step_avg:95.10ms +step:1392/1670 train_time:132380ms step_avg:95.10ms +step:1393/1670 train_time:132473ms step_avg:95.10ms +step:1394/1670 train_time:132566ms step_avg:95.10ms +step:1395/1670 train_time:132660ms step_avg:95.10ms +step:1396/1670 train_time:132753ms step_avg:95.10ms +step:1397/1670 train_time:132847ms step_avg:95.09ms +step:1398/1670 train_time:132944ms step_avg:95.10ms +step:1399/1670 train_time:133040ms step_avg:95.10ms +step:1400/1670 train_time:133137ms step_avg:95.10ms +step:1401/1670 train_time:133231ms step_avg:95.10ms +step:1402/1670 train_time:133325ms step_avg:95.10ms +step:1403/1670 train_time:133420ms step_avg:95.10ms +step:1404/1670 train_time:133514ms step_avg:95.10ms +step:1405/1670 train_time:133607ms step_avg:95.09ms +step:1406/1670 train_time:133701ms step_avg:95.09ms +step:1407/1670 train_time:133796ms step_avg:95.09ms +step:1408/1670 train_time:133890ms step_avg:95.09ms +step:1409/1670 train_time:133985ms step_avg:95.09ms +step:1410/1670 train_time:134080ms step_avg:95.09ms +step:1411/1670 train_time:134176ms step_avg:95.09ms +step:1412/1670 train_time:134271ms step_avg:95.09ms +step:1413/1670 train_time:134365ms step_avg:95.09ms +step:1414/1670 train_time:134460ms step_avg:95.09ms +step:1415/1670 train_time:134554ms step_avg:95.09ms +step:1416/1670 train_time:134648ms step_avg:95.09ms +step:1417/1670 train_time:134741ms step_avg:95.09ms +step:1418/1670 train_time:134836ms step_avg:95.09ms +step:1419/1670 train_time:134930ms step_avg:95.09ms +step:1420/1670 train_time:135024ms step_avg:95.09ms +step:1421/1670 train_time:135120ms step_avg:95.09ms +step:1422/1670 train_time:135215ms step_avg:95.09ms +step:1423/1670 train_time:135309ms step_avg:95.09ms +step:1424/1670 train_time:135403ms step_avg:95.09ms +step:1425/1670 train_time:135499ms step_avg:95.09ms +step:1426/1670 train_time:135594ms step_avg:95.09ms +step:1427/1670 train_time:135688ms step_avg:95.09ms +step:1428/1670 train_time:135781ms step_avg:95.08ms +step:1429/1670 train_time:135876ms step_avg:95.08ms +step:1430/1670 train_time:135970ms step_avg:95.08ms +step:1431/1670 train_time:136064ms step_avg:95.08ms +step:1432/1670 train_time:136159ms step_avg:95.08ms +step:1433/1670 train_time:136254ms step_avg:95.08ms +step:1434/1670 train_time:136348ms step_avg:95.08ms +step:1435/1670 train_time:136443ms step_avg:95.08ms +step:1436/1670 train_time:136538ms step_avg:95.08ms +step:1437/1670 train_time:136632ms step_avg:95.08ms +step:1438/1670 train_time:136726ms step_avg:95.08ms +step:1439/1670 train_time:136821ms step_avg:95.08ms +step:1440/1670 train_time:136916ms step_avg:95.08ms +step:1441/1670 train_time:137011ms step_avg:95.08ms +step:1442/1670 
train_time:137105ms step_avg:95.08ms +step:1443/1670 train_time:137200ms step_avg:95.08ms +step:1444/1670 train_time:137295ms step_avg:95.08ms +step:1445/1670 train_time:137390ms step_avg:95.08ms +step:1446/1670 train_time:137484ms step_avg:95.08ms +step:1447/1670 train_time:137579ms step_avg:95.08ms +step:1448/1670 train_time:137673ms step_avg:95.08ms +step:1449/1670 train_time:137767ms step_avg:95.08ms +step:1450/1670 train_time:137862ms step_avg:95.08ms +step:1451/1670 train_time:137957ms step_avg:95.08ms +step:1452/1670 train_time:138052ms step_avg:95.08ms +step:1453/1670 train_time:138146ms step_avg:95.08ms +step:1454/1670 train_time:138241ms step_avg:95.08ms +step:1455/1670 train_time:138336ms step_avg:95.08ms +step:1456/1670 train_time:138431ms step_avg:95.08ms +step:1457/1670 train_time:138525ms step_avg:95.08ms +step:1458/1670 train_time:138620ms step_avg:95.08ms +step:1459/1670 train_time:138715ms step_avg:95.08ms +step:1460/1670 train_time:138810ms step_avg:95.08ms +step:1461/1670 train_time:138904ms step_avg:95.07ms +step:1462/1670 train_time:138998ms step_avg:95.07ms +step:1463/1670 train_time:139093ms step_avg:95.07ms +step:1464/1670 train_time:139188ms step_avg:95.07ms +step:1465/1670 train_time:139282ms step_avg:95.07ms +step:1466/1670 train_time:139378ms step_avg:95.07ms +step:1467/1670 train_time:139473ms step_avg:95.07ms +step:1468/1670 train_time:139567ms step_avg:95.07ms +step:1469/1670 train_time:139661ms step_avg:95.07ms +step:1470/1670 train_time:139755ms step_avg:95.07ms +step:1471/1670 train_time:139849ms step_avg:95.07ms +step:1472/1670 train_time:139943ms step_avg:95.07ms +step:1473/1670 train_time:140038ms step_avg:95.07ms +step:1474/1670 train_time:140133ms step_avg:95.07ms +step:1475/1670 train_time:140227ms step_avg:95.07ms +step:1476/1670 train_time:140322ms step_avg:95.07ms +step:1477/1670 train_time:140418ms step_avg:95.07ms +step:1478/1670 train_time:140513ms step_avg:95.07ms +step:1479/1670 train_time:140606ms step_avg:95.07ms +step:1480/1670 train_time:140701ms step_avg:95.07ms +step:1481/1670 train_time:140796ms step_avg:95.07ms +step:1482/1670 train_time:140890ms step_avg:95.07ms +step:1483/1670 train_time:140985ms step_avg:95.07ms +step:1484/1670 train_time:141079ms step_avg:95.07ms +step:1485/1670 train_time:141431ms step_avg:95.24ms +step:1486/1670 train_time:141593ms step_avg:95.28ms +step:1487/1670 train_time:141685ms step_avg:95.28ms +step:1488/1670 train_time:141778ms step_avg:95.28ms +step:1489/1670 train_time:141871ms step_avg:95.28ms +step:1490/1670 train_time:141964ms step_avg:95.28ms +step:1491/1670 train_time:142058ms step_avg:95.28ms +step:1492/1670 train_time:142151ms step_avg:95.28ms +step:1493/1670 train_time:142245ms step_avg:95.27ms +step:1494/1670 train_time:142338ms step_avg:95.27ms +step:1495/1670 train_time:142433ms step_avg:95.27ms +step:1496/1670 train_time:142532ms step_avg:95.28ms +step:1497/1670 train_time:142629ms step_avg:95.28ms +step:1498/1670 train_time:142725ms step_avg:95.28ms +step:1499/1670 train_time:142819ms step_avg:95.28ms +step:1500/1670 train_time:142912ms step_avg:95.27ms +step:1500/1670 val_loss:3.3122 train_time:143004ms step_avg:95.34ms +step:1501/1670 train_time:143032ms step_avg:95.29ms +step:1502/1670 train_time:143109ms step_avg:95.28ms +step:1503/1670 train_time:143209ms step_avg:95.28ms +step:1504/1670 train_time:143306ms step_avg:95.28ms +step:1505/1670 train_time:143400ms step_avg:95.28ms +step:1506/1670 train_time:143493ms step_avg:95.28ms +step:1507/1670 train_time:143586ms step_avg:95.28ms 
+step:1508/1670 train_time:143680ms step_avg:95.28ms +step:1509/1670 train_time:143773ms step_avg:95.28ms +step:1510/1670 train_time:143866ms step_avg:95.28ms +step:1511/1670 train_time:143961ms step_avg:95.28ms +step:1512/1670 train_time:144057ms step_avg:95.28ms +step:1513/1670 train_time:144153ms step_avg:95.28ms +step:1514/1670 train_time:144249ms step_avg:95.28ms +step:1515/1670 train_time:144345ms step_avg:95.28ms +step:1516/1670 train_time:144439ms step_avg:95.28ms +step:1517/1670 train_time:144533ms step_avg:95.28ms +step:1518/1670 train_time:144627ms step_avg:95.27ms +step:1519/1670 train_time:144720ms step_avg:95.27ms +step:1520/1670 train_time:144813ms step_avg:95.27ms +step:1521/1670 train_time:144908ms step_avg:95.27ms +step:1522/1670 train_time:145004ms step_avg:95.27ms +step:1523/1670 train_time:145100ms step_avg:95.27ms +step:1524/1670 train_time:145197ms step_avg:95.27ms +step:1525/1670 train_time:145292ms step_avg:95.27ms +step:1526/1670 train_time:145386ms step_avg:95.27ms +step:1527/1670 train_time:145480ms step_avg:95.27ms +step:1528/1670 train_time:145574ms step_avg:95.27ms +step:1529/1670 train_time:145669ms step_avg:95.27ms +step:1530/1670 train_time:145763ms step_avg:95.27ms +step:1531/1670 train_time:145856ms step_avg:95.27ms +step:1532/1670 train_time:145950ms step_avg:95.27ms +step:1533/1670 train_time:146045ms step_avg:95.27ms +step:1534/1670 train_time:146140ms step_avg:95.27ms +step:1535/1670 train_time:146235ms step_avg:95.27ms +step:1536/1670 train_time:146329ms step_avg:95.27ms +step:1537/1670 train_time:146425ms step_avg:95.27ms +step:1538/1670 train_time:146519ms step_avg:95.27ms +step:1539/1670 train_time:146613ms step_avg:95.26ms +step:1540/1670 train_time:146707ms step_avg:95.26ms +step:1541/1670 train_time:146801ms step_avg:95.26ms +step:1542/1670 train_time:146895ms step_avg:95.26ms +step:1543/1670 train_time:146990ms step_avg:95.26ms +step:1544/1670 train_time:147085ms step_avg:95.26ms +step:1545/1670 train_time:147181ms step_avg:95.26ms +step:1546/1670 train_time:147275ms step_avg:95.26ms +step:1547/1670 train_time:147370ms step_avg:95.26ms +step:1548/1670 train_time:147465ms step_avg:95.26ms +step:1549/1670 train_time:147559ms step_avg:95.26ms +step:1550/1670 train_time:147652ms step_avg:95.26ms +step:1551/1670 train_time:147747ms step_avg:95.26ms +step:1552/1670 train_time:147842ms step_avg:95.26ms +step:1553/1670 train_time:147936ms step_avg:95.26ms +step:1554/1670 train_time:148030ms step_avg:95.26ms +step:1555/1670 train_time:148125ms step_avg:95.26ms +step:1556/1670 train_time:148220ms step_avg:95.26ms +step:1557/1670 train_time:148316ms step_avg:95.26ms +step:1558/1670 train_time:148411ms step_avg:95.26ms +step:1559/1670 train_time:148506ms step_avg:95.26ms +step:1560/1670 train_time:148601ms step_avg:95.26ms +step:1561/1670 train_time:148695ms step_avg:95.26ms +step:1562/1670 train_time:148789ms step_avg:95.26ms +step:1563/1670 train_time:148883ms step_avg:95.25ms +step:1564/1670 train_time:148977ms step_avg:95.25ms +step:1565/1670 train_time:149071ms step_avg:95.25ms +step:1566/1670 train_time:149167ms step_avg:95.25ms +step:1567/1670 train_time:149262ms step_avg:95.25ms +step:1568/1670 train_time:149357ms step_avg:95.25ms +step:1569/1670 train_time:149452ms step_avg:95.25ms +step:1570/1670 train_time:149547ms step_avg:95.25ms +step:1571/1670 train_time:149641ms step_avg:95.25ms +step:1572/1670 train_time:149735ms step_avg:95.25ms +step:1573/1670 train_time:149829ms step_avg:95.25ms +step:1574/1670 train_time:149924ms step_avg:95.25ms 
+step:1575/1670 train_time:150019ms step_avg:95.25ms +step:1576/1670 train_time:150112ms step_avg:95.25ms +step:1577/1670 train_time:150207ms step_avg:95.25ms +step:1578/1670 train_time:150303ms step_avg:95.25ms +step:1579/1670 train_time:150397ms step_avg:95.25ms +step:1580/1670 train_time:150491ms step_avg:95.25ms +step:1581/1670 train_time:150586ms step_avg:95.25ms +step:1582/1670 train_time:150681ms step_avg:95.25ms +step:1583/1670 train_time:150775ms step_avg:95.25ms +step:1584/1670 train_time:150869ms step_avg:95.25ms +step:1585/1670 train_time:150964ms step_avg:95.25ms +step:1586/1670 train_time:151058ms step_avg:95.24ms +step:1587/1670 train_time:151152ms step_avg:95.24ms +step:1588/1670 train_time:151247ms step_avg:95.24ms +step:1589/1670 train_time:151343ms step_avg:95.24ms +step:1590/1670 train_time:151438ms step_avg:95.24ms +step:1591/1670 train_time:151532ms step_avg:95.24ms +step:1592/1670 train_time:151626ms step_avg:95.24ms +step:1593/1670 train_time:151721ms step_avg:95.24ms +step:1594/1670 train_time:151816ms step_avg:95.24ms +step:1595/1670 train_time:151909ms step_avg:95.24ms +step:1596/1670 train_time:152004ms step_avg:95.24ms +step:1597/1670 train_time:152098ms step_avg:95.24ms +step:1598/1670 train_time:152192ms step_avg:95.24ms +step:1599/1670 train_time:152287ms step_avg:95.24ms +step:1600/1670 train_time:152382ms step_avg:95.24ms +step:1601/1670 train_time:152477ms step_avg:95.24ms +step:1602/1670 train_time:152571ms step_avg:95.24ms +step:1603/1670 train_time:152666ms step_avg:95.24ms +step:1604/1670 train_time:152761ms step_avg:95.24ms +step:1605/1670 train_time:152855ms step_avg:95.24ms +step:1606/1670 train_time:152949ms step_avg:95.24ms +step:1607/1670 train_time:153044ms step_avg:95.24ms +step:1608/1670 train_time:153138ms step_avg:95.24ms +step:1609/1670 train_time:153233ms step_avg:95.23ms +step:1610/1670 train_time:153327ms step_avg:95.23ms +step:1611/1670 train_time:153422ms step_avg:95.23ms +step:1612/1670 train_time:153517ms step_avg:95.23ms +step:1613/1670 train_time:153612ms step_avg:95.23ms +step:1614/1670 train_time:153707ms step_avg:95.23ms +step:1615/1670 train_time:153801ms step_avg:95.23ms +step:1616/1670 train_time:153896ms step_avg:95.23ms +step:1617/1670 train_time:153990ms step_avg:95.23ms +step:1618/1670 train_time:154085ms step_avg:95.23ms +step:1619/1670 train_time:154179ms step_avg:95.23ms +step:1620/1670 train_time:154273ms step_avg:95.23ms +step:1621/1670 train_time:154370ms step_avg:95.23ms +step:1622/1670 train_time:154465ms step_avg:95.23ms +step:1623/1670 train_time:154560ms step_avg:95.23ms +step:1624/1670 train_time:154654ms step_avg:95.23ms +step:1625/1670 train_time:154750ms step_avg:95.23ms +step:1625/1670 val_loss:3.2867 train_time:154842ms step_avg:95.29ms +step:1626/1670 train_time:154869ms step_avg:95.25ms +step:1627/1670 train_time:154946ms step_avg:95.23ms +step:1628/1670 train_time:155047ms step_avg:95.24ms +step:1629/1670 train_time:155143ms step_avg:95.24ms +step:1630/1670 train_time:155237ms step_avg:95.24ms +step:1631/1670 train_time:155330ms step_avg:95.24ms +step:1632/1670 train_time:155423ms step_avg:95.23ms +step:1633/1670 train_time:155517ms step_avg:95.23ms +step:1634/1670 train_time:155610ms step_avg:95.23ms +step:1635/1670 train_time:155704ms step_avg:95.23ms +step:1636/1670 train_time:155798ms step_avg:95.23ms +step:1637/1670 train_time:155893ms step_avg:95.23ms +step:1638/1670 train_time:155990ms step_avg:95.23ms +step:1639/1670 train_time:156087ms step_avg:95.23ms +step:1640/1670 train_time:156184ms 
step_avg:95.23ms +step:1641/1670 train_time:156277ms step_avg:95.23ms +step:1642/1670 train_time:156371ms step_avg:95.23ms +step:1643/1670 train_time:156466ms step_avg:95.23ms +step:1644/1670 train_time:156559ms step_avg:95.23ms +step:1645/1670 train_time:156653ms step_avg:95.23ms +step:1646/1670 train_time:156747ms step_avg:95.23ms +step:1647/1670 train_time:156841ms step_avg:95.23ms +step:1648/1670 train_time:156938ms step_avg:95.23ms +step:1649/1670 train_time:157036ms step_avg:95.23ms +step:1650/1670 train_time:157132ms step_avg:95.23ms +step:1651/1670 train_time:157227ms step_avg:95.23ms +step:1652/1670 train_time:157321ms step_avg:95.23ms +step:1653/1670 train_time:157415ms step_avg:95.23ms +step:1654/1670 train_time:157509ms step_avg:95.23ms +step:1655/1670 train_time:157603ms step_avg:95.23ms +step:1656/1670 train_time:157697ms step_avg:95.23ms +step:1657/1670 train_time:157790ms step_avg:95.23ms +step:1658/1670 train_time:157884ms step_avg:95.23ms +step:1659/1670 train_time:157981ms step_avg:95.23ms +step:1660/1670 train_time:158077ms step_avg:95.23ms +step:1661/1670 train_time:158173ms step_avg:95.23ms +step:1662/1670 train_time:158266ms step_avg:95.23ms +step:1663/1670 train_time:158361ms step_avg:95.23ms +step:1664/1670 train_time:158456ms step_avg:95.23ms +step:1665/1670 train_time:158550ms step_avg:95.23ms +step:1666/1670 train_time:158644ms step_avg:95.22ms +step:1667/1670 train_time:158737ms step_avg:95.22ms +step:1668/1670 train_time:158832ms step_avg:95.22ms +step:1669/1670 train_time:158926ms step_avg:95.22ms +step:1670/1670 train_time:159021ms step_avg:95.22ms +step:1670/1670 val_loss:3.2778 train_time:159191ms step_avg:95.32ms +peak memory allocated: 32470 MiB reserved: 47756 MiB diff --git a/records/091025_Yarn/9121a353-d3ce-4f54-98de-0b466773fe0b.txt b/records/091025_Yarn/9121a353-d3ce-4f54-98de-0b466773fe0b.txt new file mode 100644 index 000000000..6639de4c0 --- /dev/null +++ b/records/091025_Yarn/9121a353-d3ce-4f54-98de-0b466773fe0b.txt @@ -0,0 +1,2863 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math + +from dataclasses import dataclass +from functools import lru_cache +from itertools import accumulate +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + 
scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, 
c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = 
tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / 
(X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
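+        # Sharding scheme used below: within each shape-bucketed param group, rank r owns
+        # params[i] with i % world_size == r. Gradients are reduce-scattered (averaged) so each
+        # rank holds only the grads it owns, Newton-Schulz runs once per owned param, and the
+        # updated weights are all-gathered back so every rank ends the step with identical params.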
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
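+                    # the moment buffers are sharded like the params: each rank keeps Adam state
+                    # only for its own row-slice, so optimizer memory shrinks by world_size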
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor): + assert cos.size(0) >= x_BTHD.size(-3) + cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +@dataclass +class AttnArgs: + ve: torch.Tensor + sa_lambdas: torch.Tensor + seqlens: torch.Tensor + bm_size: int + rotary_cos: torch.Tensor + rotary_sin: torch.Tensor + attn_scale: float + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate = CastedLinear(12, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, attn_args): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + # unpack attention args + rotary_cos, rotary_sin = attn_args.rotary_cos, attn_args.rotary_sin + ve, 
sa_lambdas = attn_args.ve, attn_args.sa_lambdas + seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin) + if ve is not None: + v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @ KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = sa_lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None + # skip MLP blocks for first MLP layer by @EmelyanenkoK + self.mlp = MLP(dim) if layer_idx != 0 else None + + def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), attn_args) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + 
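+        # three shared tables: forward() assigns them to blocks 0-2 and again to blocks 9-11,
+        # with ve=None for the six middle blocks (see the 012 ... 012 comment in forward)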
self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + self.max_seq_len = max_seq_len + self.setup_yarn(head_dim) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def setup_yarn(self, head_dim: int): + # store single copy of rotary tensors + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32) + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)]) + t = torch.arange(self.max_seq_len, dtype=torch.float32) + theta = torch.outer(t, angular_freq) + self.rotary_cos = nn.Buffer(theta.cos(), persistent=False) + self.rotary_sin = nn.Buffer(theta.sin(), persistent=False) + self.angular_freq = angular_freq + + # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd + windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate])) + scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])] + # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor)) + self.attn_scales = dict(zip(windows, attn_scales)) + + def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32): + rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi) + scaling_factor = old_window / new_window + interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1) + self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor) + t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device) + theta = torch.outer(t, self.angular_freq) + self.rotary_cos.copy_(theta.cos()) + self.rotary_sin.copy_(theta.sin()) + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + attn_args = AttnArgs( + ve=ve[i], + sa_lambdas=sa_lambdas[i], + seqlens=seqlens, + bm_size=bm_sizes[i], + rotary_cos=self.rotary_cos, + rotary_sin=self.rotary_sin, + attn_scale=self.attn_scales[ws] + ) + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, x0, lambdas[i], attn_args) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +BOS_ID = 50256 + +class BOSFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1): + # Precompute BOS positions once per shard + self.size = tokens.numel() + self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 + self.world_size = world_size + + def next_batch(self, num_tokens_local: int, max_seq_len: int): + n = len(self.bos_idx) + starts = [[] for _ in range(self.world_size)] + ends = [[] for _ in range(self.world_size)] + + idx = self.i + for r in range(self.world_size): + cur_len = 0 + while cur_len <= num_tokens_local: + if idx >= n: + raise StopIteration(f"Insufficient BOS ahead of position {cur}; hit tail of shard.") + cur = self.bos_idx[idx] + starts[r].append(cur) + end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size, + cur + max_seq_len, + cur + num_tokens_local - cur_len + 1) + ends[r].append(end) + cur_len += end - cur + idx += 1 + + assert cur_len == num_tokens_local + 1 + self.i = idx + + return starts, ends + 
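+# A minimal, self-contained sketch (illustration only; never called in this run) of the
+# BOS-alignment rule next_batch implements for one rank: each sequence starts at a BOS token
+# and is cut at the next BOS, at max_seq_len, or when the local token budget runs out.
+def _bos_alignment_sketch():
+    tokens = torch.tensor([BOS_ID, 5, 6, 7, BOS_ID, 8, BOS_ID, 9, 10, 11, 12, 13])
+    finder = BOSFinder(tokens, world_size=1)
+    # budget of 7 tokens, plus 1 extra for the input/target shift done by the data generator
+    starts, ends = finder.next_batch(num_tokens_local=7, max_seq_len=4)
+    # rank 0 receives spans [0,4), [4,6), [6,8): the first two end at the next BOS, the
+    # third is cut when the 8-token budget (7 + 1 for the target shift) is exhausted
+    return starts[0], ends[0]
+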
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token, sequences truncated to max_seq_len + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size" + num_tokens = num_tokens // grad_accum_steps + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None + pos = 0 # for unaligned case + + while True: + num_tokens_local = num_tokens // world_size + max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400 + + if align_to_bos: + try: + seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len) + start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank]) + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens = _load_data_shard(next(file_iter)) + finder = BOSFinder(tokens, world_size=world_size) + continue + + buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)]) + _inputs = buf[:-1] + _targets = buf[1:] + end_idxs[-1] -= 1 # last document was too long to account for _targets offset + cum_lengths = (end_idxs - start_idxs).cumsum(0) + + else: + if pos + num_tokens + 1 >= len(tokens): # should not occur for val data + tokens, pos = _load_data_shard(next(file_iter)), 0 + + pos_local = pos + rank * num_tokens_local + buf = tokens[pos_local: pos_local + num_tokens_local + 1] + _inputs = buf[:-1].view(num_tokens_local, ) + _targets = buf[1:].view(num_tokens_local, ) + + cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0] + pos += num_tokens + + + _cum_lengths = torch.full((max_num_docs,), num_tokens_local) + _cum_lengths[0] = 0 + _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True), + _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True) + ) + + if new_params is not None: + # makes it possible for generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send() + new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params + assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size" + num_tokens = new_num_tokens + max_seq_len = new_max_seq_len + grad_accum_steps = new_grad_accum_steps + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? 
it's important to keep this fixed for consistent comparisons + train_batch_size: int = 2048 * 24 * 8 + train_max_seq_len: int = 128 * 16 + val_batch_size: int = 4 * 64 * 1024 * 8 + # optimization + num_iterations: int = 1670 # number of iterations to run + cooldown_frac: int = 0.5 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = f"yarn/{uuid.uuid4()}" + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + block_size: int = 128 + ws_schedule: tuple = (3, 7, 11) + ws_validate: int = 13 # increase final validation ws @classiclarryd + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + head_dim=128, + model_dim=768, + max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
+
+# learning rate schedule: stable then decay
+def get_lr(step: int):
+    x = step / args.num_iterations
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    return lr
+
+def get_ws(step: int):
+    if step == args.num_iterations:
+        return args.ws_validate
+    x = step / (1 + args.num_iterations)
+    assert 0 <= x < 1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return args.ws_schedule[ws_idx]
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 30
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+for step in range(warmup_steps):
+    inputs, targets, cum_seqlens = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, cum_seqlens, ws).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
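+
+# Worked example, illustrative only and never called: what the two schedules above
+# evaluate to across this 1670-step run. get_lr holds the multiplier at 1.0 for the
+# first half, then decays linearly toward 0.1 (cooldown_frac = 0.5); get_ws walks the
+# (3, 7, 11) window-size schedule in roughly equal thirds and returns ws_validate = 13
+# only at the final validation step. The helper name is hypothetical.
+def _print_schedule_milestones():
+    for s in (0, 417, 835, 1252, 1669, 1670):
+        if s < args.num_iterations:
+            # e.g. step 0 -> lr_mult=1.000 ws=3; step 835 -> lr_mult=1.000 ws=7;
+            # step 1252 -> lr_mult~0.55 ws=11; step 1669 -> lr_mult~0.10 ws=11
+            print(f"step {s}: lr_mult={get_lr(s):.3f} ws={get_ws(s)}")
+        else:
+            print(f"step {s}: ws={get_ws(s)} (final validation)")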
+
+########################################
+#        Training and validation       #
+########################################
+
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps)
+training_time_ms = 0
+# start the clock
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+# begin training
+train_steps = args.num_iterations
+ws = get_ws(0)
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+    new_ws = get_ws(step)
+    if new_ws != ws:
+        model.apply_yarn(ws, new_ws)
+        ws = new_ws
+
+    # --------------- VALIDATION SECTION -----------------
+    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        # stop the clock
+        torch.cuda.synchronize()
+        training_time_ms += 1000 * (time.perf_counter() - t0)
+        model.eval()
+        assert args.val_tokens % args.val_batch_size == 0
+        val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size
+        val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False)
+        val_loss = 0
+        with torch.no_grad():
+            for _ in range(val_steps):
+                inputs, targets, cum_seqlens = next(val_loader)
+                val_loss += model(inputs, targets, cum_seqlens, ws)
+        val_loss /= val_steps
+        del val_loader
+        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
+        print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True)
+        model.train()
+        # start the clock again
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    if last_step:
+        if master_process and args.save_checkpoint:
+            log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
+            os.makedirs(f"logs/{run_id}", exist_ok=True)
+            torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt")
+        # the last step only has the validation loop, so break to avoid training
+        break
+
+    # --------------- TRAINING SECTION -----------------
+    for _ in range(grad_accum_steps):
+        inputs, targets, cum_seqlens = next(train_loader)
+        model(inputs, targets, cum_seqlens, ws).backward()
+    # set optimization hyperparameters
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * get_lr(step)
+    for group in optimizer2.param_groups:
+        frac = min(step / 300, 1) # momentum warmup for muon
+        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+    # step the optimizers
+    for opt in optimizers:
+        opt.step()
+    # null the gradients
+    model.zero_grad(set_to_none=True)
+    # logging
+    approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0)
+    print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True)
+
+print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+dist.destroy_process_group()
+
+====================================================================================================
+Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]
+Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6
+Running Triton version 3.4.0
+Thu Sep 11 04:09:29 2025
++-----------------------------------------------------------------------------------------+
+| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 |
+|-----------------------------------------+------------------------+----------------------+
+| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
+| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
+| | | MIG M.
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | Off | +| N/A 37C P0 119W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | Off | +| N/A 41C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | Off | +| N/A 43C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | Off | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | Off | +| N/A 35C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | Off | +| N/A 43C P0 127W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | Off | +| N/A 41C P0 126W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | Off | +| N/A 37C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 66339 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 66340 C /usr/bin/python3 614MiB | +| 0 N/A N/A 66341 C /usr/bin/python3 614MiB | +| 0 N/A N/A 66342 C /usr/bin/python3 614MiB | +| 0 N/A N/A 66343 C /usr/bin/python3 614MiB | +| 0 N/A N/A 66344 C /usr/bin/python3 614MiB | +| 0 N/A N/A 66345 C /usr/bin/python3 614MiB | +| 0 N/A N/A 66346 C /usr/bin/python3 614MiB | +| 1 N/A N/A 66340 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 66341 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 66342 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 66343 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 66344 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 66345 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 66346 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1670 train_time:456ms step_avg:455.70ms +step:2/1670 train_time:481ms step_avg:240.37ms +step:3/1670 train_time:548ms step_avg:182.61ms +step:4/1670 train_time:638ms step_avg:159.58ms +step:5/1670 train_time:730ms step_avg:145.93ms +step:6/1670 train_time:821ms step_avg:136.92ms +step:7/1670 train_time:914ms step_avg:130.50ms 
+step:8/1670 train_time:1005ms step_avg:125.68ms +step:9/1670 train_time:1098ms step_avg:121.98ms +step:10/1670 train_time:1189ms step_avg:118.92ms +step:11/1670 train_time:1281ms step_avg:116.46ms +step:12/1670 train_time:1373ms step_avg:114.45ms +step:13/1670 train_time:1470ms step_avg:113.05ms +step:14/1670 train_time:1565ms step_avg:111.79ms +step:15/1670 train_time:1658ms step_avg:110.53ms +step:16/1670 train_time:1750ms step_avg:109.37ms +step:17/1670 train_time:1842ms step_avg:108.37ms +step:18/1670 train_time:1935ms step_avg:107.50ms +step:19/1670 train_time:2028ms step_avg:106.72ms +step:20/1670 train_time:2119ms step_avg:105.97ms +step:21/1670 train_time:2211ms step_avg:105.30ms +step:22/1670 train_time:2303ms step_avg:104.68ms +step:23/1670 train_time:2396ms step_avg:104.18ms +step:24/1670 train_time:2489ms step_avg:103.70ms +step:25/1670 train_time:2583ms step_avg:103.30ms +step:26/1670 train_time:2676ms step_avg:102.91ms +step:27/1670 train_time:2768ms step_avg:102.53ms +step:28/1670 train_time:2862ms step_avg:102.20ms +step:29/1670 train_time:2955ms step_avg:101.89ms +step:30/1670 train_time:3047ms step_avg:101.57ms +step:31/1670 train_time:3141ms step_avg:101.31ms +step:32/1670 train_time:3233ms step_avg:101.03ms +step:33/1670 train_time:3326ms step_avg:100.78ms +step:34/1670 train_time:3419ms step_avg:100.56ms +step:35/1670 train_time:3512ms step_avg:100.35ms +step:36/1670 train_time:3605ms step_avg:100.14ms +step:37/1670 train_time:3698ms step_avg:99.94ms +step:38/1670 train_time:3790ms step_avg:99.74ms +step:39/1670 train_time:3883ms step_avg:99.57ms +step:40/1670 train_time:3976ms step_avg:99.39ms +step:41/1670 train_time:4068ms step_avg:99.22ms +step:42/1670 train_time:4161ms step_avg:99.06ms +step:43/1670 train_time:4253ms step_avg:98.90ms +step:44/1670 train_time:4345ms step_avg:98.74ms +step:45/1670 train_time:4438ms step_avg:98.62ms +step:46/1670 train_time:4530ms step_avg:98.49ms +step:47/1670 train_time:4623ms step_avg:98.36ms +step:48/1670 train_time:4716ms step_avg:98.25ms +step:49/1670 train_time:4808ms step_avg:98.13ms +step:50/1670 train_time:4902ms step_avg:98.05ms +step:51/1670 train_time:4995ms step_avg:97.94ms +step:52/1670 train_time:5087ms step_avg:97.84ms +step:53/1670 train_time:5181ms step_avg:97.75ms +step:54/1670 train_time:5273ms step_avg:97.65ms +step:55/1670 train_time:5366ms step_avg:97.56ms +step:56/1670 train_time:5459ms step_avg:97.48ms +step:57/1670 train_time:5552ms step_avg:97.40ms +step:58/1670 train_time:5644ms step_avg:97.31ms +step:59/1670 train_time:5737ms step_avg:97.24ms +step:60/1670 train_time:5830ms step_avg:97.17ms +step:61/1670 train_time:5922ms step_avg:97.09ms +step:62/1670 train_time:6015ms step_avg:97.02ms +step:63/1670 train_time:6108ms step_avg:96.95ms +step:64/1670 train_time:6200ms step_avg:96.87ms +step:65/1670 train_time:6292ms step_avg:96.80ms +step:66/1670 train_time:6384ms step_avg:96.73ms +step:67/1670 train_time:6477ms step_avg:96.67ms +step:68/1670 train_time:6570ms step_avg:96.61ms +step:69/1670 train_time:6662ms step_avg:96.55ms +step:70/1670 train_time:6755ms step_avg:96.50ms +step:71/1670 train_time:6847ms step_avg:96.44ms +step:72/1670 train_time:6940ms step_avg:96.39ms +step:73/1670 train_time:7034ms step_avg:96.36ms +step:74/1670 train_time:7127ms step_avg:96.32ms +step:75/1670 train_time:7219ms step_avg:96.26ms +step:76/1670 train_time:7312ms step_avg:96.21ms +step:77/1670 train_time:7404ms step_avg:96.16ms +step:78/1670 train_time:7497ms step_avg:96.11ms +step:79/1670 train_time:7589ms step_avg:96.06ms 
+step:80/1670 train_time:7682ms step_avg:96.02ms +step:81/1670 train_time:7774ms step_avg:95.98ms +step:82/1670 train_time:7867ms step_avg:95.94ms +step:83/1670 train_time:7960ms step_avg:95.90ms +step:84/1670 train_time:8052ms step_avg:95.86ms +step:85/1670 train_time:8144ms step_avg:95.82ms +step:86/1670 train_time:8237ms step_avg:95.78ms +step:87/1670 train_time:8330ms step_avg:95.74ms +step:88/1670 train_time:8422ms step_avg:95.70ms +step:89/1670 train_time:8515ms step_avg:95.67ms +step:90/1670 train_time:8608ms step_avg:95.65ms +step:91/1670 train_time:8700ms step_avg:95.60ms +step:92/1670 train_time:8793ms step_avg:95.57ms +step:93/1670 train_time:8885ms step_avg:95.53ms +step:94/1670 train_time:8977ms step_avg:95.50ms +step:95/1670 train_time:9070ms step_avg:95.47ms +step:96/1670 train_time:9163ms step_avg:95.45ms +step:97/1670 train_time:9256ms step_avg:95.42ms +step:98/1670 train_time:9348ms step_avg:95.38ms +step:99/1670 train_time:9440ms step_avg:95.36ms +step:100/1670 train_time:9533ms step_avg:95.33ms +step:101/1670 train_time:9625ms step_avg:95.30ms +step:102/1670 train_time:9717ms step_avg:95.26ms +step:103/1670 train_time:9811ms step_avg:95.25ms +step:104/1670 train_time:9903ms step_avg:95.22ms +step:105/1670 train_time:9995ms step_avg:95.19ms +step:106/1670 train_time:10088ms step_avg:95.17ms +step:107/1670 train_time:10180ms step_avg:95.14ms +step:108/1670 train_time:10272ms step_avg:95.11ms +step:109/1670 train_time:10366ms step_avg:95.10ms +step:110/1670 train_time:10458ms step_avg:95.07ms +step:111/1670 train_time:10550ms step_avg:95.05ms +step:112/1670 train_time:10643ms step_avg:95.02ms +step:113/1670 train_time:10735ms step_avg:95.00ms +step:114/1670 train_time:10828ms step_avg:94.98ms +step:115/1670 train_time:10919ms step_avg:94.95ms +step:116/1670 train_time:11013ms step_avg:94.94ms +step:117/1670 train_time:11104ms step_avg:94.91ms +step:118/1670 train_time:11196ms step_avg:94.88ms +step:119/1670 train_time:11289ms step_avg:94.87ms +step:120/1670 train_time:11382ms step_avg:94.85ms +step:121/1670 train_time:11474ms step_avg:94.82ms +step:122/1670 train_time:11567ms step_avg:94.81ms +step:123/1670 train_time:11659ms step_avg:94.79ms +step:124/1670 train_time:11751ms step_avg:94.77ms +step:125/1670 train_time:11844ms step_avg:94.75ms +step:125/1670 val_loss:4.3045 train_time:11935ms step_avg:95.48ms +step:126/1670 train_time:11963ms step_avg:94.94ms +step:127/1670 train_time:12034ms step_avg:94.76ms +step:128/1670 train_time:12139ms step_avg:94.84ms +step:129/1670 train_time:12235ms step_avg:94.85ms +step:130/1670 train_time:12328ms step_avg:94.83ms +step:131/1670 train_time:12420ms step_avg:94.81ms +step:132/1670 train_time:12512ms step_avg:94.79ms +step:133/1670 train_time:12603ms step_avg:94.76ms +step:134/1670 train_time:12694ms step_avg:94.73ms +step:135/1670 train_time:12786ms step_avg:94.71ms +step:136/1670 train_time:12877ms step_avg:94.69ms +step:137/1670 train_time:12969ms step_avg:94.66ms +step:138/1670 train_time:13063ms step_avg:94.66ms +step:139/1670 train_time:13158ms step_avg:94.66ms +step:140/1670 train_time:13252ms step_avg:94.65ms +step:141/1670 train_time:13345ms step_avg:94.65ms +step:142/1670 train_time:13438ms step_avg:94.63ms +step:143/1670 train_time:13530ms step_avg:94.62ms +step:144/1670 train_time:13621ms step_avg:94.59ms +step:145/1670 train_time:13713ms step_avg:94.57ms +step:146/1670 train_time:13804ms step_avg:94.55ms +step:147/1670 train_time:13896ms step_avg:94.53ms +step:148/1670 train_time:13987ms step_avg:94.51ms +step:149/1670 
train_time:14080ms step_avg:94.50ms +step:150/1670 train_time:14173ms step_avg:94.49ms +step:151/1670 train_time:14266ms step_avg:94.48ms +step:152/1670 train_time:14360ms step_avg:94.47ms +step:153/1670 train_time:14452ms step_avg:94.46ms +step:154/1670 train_time:14545ms step_avg:94.45ms +step:155/1670 train_time:14637ms step_avg:94.43ms +step:156/1670 train_time:14728ms step_avg:94.41ms +step:157/1670 train_time:14821ms step_avg:94.40ms +step:158/1670 train_time:14913ms step_avg:94.39ms +step:159/1670 train_time:15007ms step_avg:94.38ms +step:160/1670 train_time:15099ms step_avg:94.37ms +step:161/1670 train_time:15192ms step_avg:94.36ms +step:162/1670 train_time:15285ms step_avg:94.35ms +step:163/1670 train_time:15379ms step_avg:94.35ms +step:164/1670 train_time:15472ms step_avg:94.34ms +step:165/1670 train_time:15565ms step_avg:94.33ms +step:166/1670 train_time:15657ms step_avg:94.32ms +step:167/1670 train_time:15750ms step_avg:94.31ms +step:168/1670 train_time:15841ms step_avg:94.29ms +step:169/1670 train_time:15933ms step_avg:94.28ms +step:170/1670 train_time:16026ms step_avg:94.27ms +step:171/1670 train_time:16118ms step_avg:94.26ms +step:172/1670 train_time:16211ms step_avg:94.25ms +step:173/1670 train_time:16304ms step_avg:94.24ms +step:174/1670 train_time:16397ms step_avg:94.24ms +step:175/1670 train_time:16489ms step_avg:94.22ms +step:176/1670 train_time:16581ms step_avg:94.21ms +step:177/1670 train_time:16674ms step_avg:94.20ms +step:178/1670 train_time:16766ms step_avg:94.19ms +step:179/1670 train_time:16859ms step_avg:94.19ms +step:180/1670 train_time:16951ms step_avg:94.17ms +step:181/1670 train_time:17043ms step_avg:94.16ms +step:182/1670 train_time:17136ms step_avg:94.15ms +step:183/1670 train_time:17228ms step_avg:94.14ms +step:184/1670 train_time:17321ms step_avg:94.13ms +step:185/1670 train_time:17413ms step_avg:94.12ms +step:186/1670 train_time:17506ms step_avg:94.12ms +step:187/1670 train_time:17598ms step_avg:94.10ms +step:188/1670 train_time:17690ms step_avg:94.09ms +step:189/1670 train_time:17782ms step_avg:94.09ms +step:190/1670 train_time:17875ms step_avg:94.08ms +step:191/1670 train_time:17967ms step_avg:94.07ms +step:192/1670 train_time:18060ms step_avg:94.06ms +step:193/1670 train_time:18153ms step_avg:94.06ms +step:194/1670 train_time:18245ms step_avg:94.05ms +step:195/1670 train_time:18337ms step_avg:94.04ms +step:196/1670 train_time:18430ms step_avg:94.03ms +step:197/1670 train_time:18523ms step_avg:94.02ms +step:198/1670 train_time:18615ms step_avg:94.02ms +step:199/1670 train_time:18708ms step_avg:94.01ms +step:200/1670 train_time:18800ms step_avg:94.00ms +step:201/1670 train_time:18892ms step_avg:93.99ms +step:202/1670 train_time:18985ms step_avg:93.98ms +step:203/1670 train_time:19077ms step_avg:93.98ms +step:204/1670 train_time:19169ms step_avg:93.97ms +step:205/1670 train_time:19262ms step_avg:93.96ms +step:206/1670 train_time:19354ms step_avg:93.95ms +step:207/1670 train_time:19447ms step_avg:93.95ms +step:208/1670 train_time:19539ms step_avg:93.94ms +step:209/1670 train_time:19632ms step_avg:93.93ms +step:210/1670 train_time:19725ms step_avg:93.93ms +step:211/1670 train_time:19817ms step_avg:93.92ms +step:212/1670 train_time:19909ms step_avg:93.91ms +step:213/1670 train_time:20253ms step_avg:95.09ms +step:214/1670 train_time:20388ms step_avg:95.27ms +step:215/1670 train_time:20479ms step_avg:95.25ms +step:216/1670 train_time:20570ms step_avg:95.23ms +step:217/1670 train_time:20661ms step_avg:95.21ms +step:218/1670 train_time:20753ms step_avg:95.20ms 
+step:219/1670 train_time:20844ms step_avg:95.18ms +step:220/1670 train_time:20936ms step_avg:95.16ms +step:221/1670 train_time:21027ms step_avg:95.15ms +step:222/1670 train_time:21119ms step_avg:95.13ms +step:223/1670 train_time:21212ms step_avg:95.12ms +step:224/1670 train_time:21307ms step_avg:95.12ms +step:225/1670 train_time:21403ms step_avg:95.12ms +step:226/1670 train_time:21496ms step_avg:95.12ms +step:227/1670 train_time:21587ms step_avg:95.10ms +step:228/1670 train_time:21679ms step_avg:95.08ms +step:229/1670 train_time:21772ms step_avg:95.07ms +step:230/1670 train_time:21863ms step_avg:95.06ms +step:231/1670 train_time:21955ms step_avg:95.04ms +step:232/1670 train_time:22047ms step_avg:95.03ms +step:233/1670 train_time:22139ms step_avg:95.02ms +step:234/1670 train_time:22232ms step_avg:95.01ms +step:235/1670 train_time:22326ms step_avg:95.00ms +step:236/1670 train_time:22419ms step_avg:94.99ms +step:237/1670 train_time:22512ms step_avg:94.99ms +step:238/1670 train_time:22605ms step_avg:94.98ms +step:239/1670 train_time:22698ms step_avg:94.97ms +step:240/1670 train_time:22794ms step_avg:94.97ms +step:241/1670 train_time:22882ms step_avg:94.95ms +step:242/1670 train_time:22975ms step_avg:94.94ms +step:243/1670 train_time:23067ms step_avg:94.93ms +step:244/1670 train_time:23159ms step_avg:94.92ms +step:245/1670 train_time:23252ms step_avg:94.91ms +step:246/1670 train_time:23345ms step_avg:94.90ms +step:247/1670 train_time:23438ms step_avg:94.89ms +step:248/1670 train_time:23531ms step_avg:94.88ms +step:249/1670 train_time:23624ms step_avg:94.87ms +step:250/1670 train_time:23716ms step_avg:94.86ms +step:250/1670 val_loss:3.9649 train_time:23806ms step_avg:95.22ms +step:251/1670 train_time:23833ms step_avg:94.95ms +step:252/1670 train_time:23908ms step_avg:94.87ms +step:253/1670 train_time:24008ms step_avg:94.89ms +step:254/1670 train_time:24102ms step_avg:94.89ms +step:255/1670 train_time:24195ms step_avg:94.88ms +step:256/1670 train_time:24287ms step_avg:94.87ms +step:257/1670 train_time:24378ms step_avg:94.86ms +step:258/1670 train_time:24470ms step_avg:94.84ms +step:259/1670 train_time:24561ms step_avg:94.83ms +step:260/1670 train_time:24652ms step_avg:94.82ms +step:261/1670 train_time:24744ms step_avg:94.80ms +step:262/1670 train_time:24838ms step_avg:94.80ms +step:263/1670 train_time:24932ms step_avg:94.80ms +step:264/1670 train_time:25029ms step_avg:94.81ms +step:265/1670 train_time:25122ms step_avg:94.80ms +step:266/1670 train_time:25214ms step_avg:94.79ms +step:267/1670 train_time:25307ms step_avg:94.78ms +step:268/1670 train_time:25399ms step_avg:94.77ms +step:269/1670 train_time:25491ms step_avg:94.76ms +step:270/1670 train_time:25583ms step_avg:94.75ms +step:271/1670 train_time:25675ms step_avg:94.74ms +step:272/1670 train_time:25766ms step_avg:94.73ms +step:273/1670 train_time:25859ms step_avg:94.72ms +step:274/1670 train_time:25954ms step_avg:94.72ms +step:275/1670 train_time:26047ms step_avg:94.72ms +step:276/1670 train_time:26140ms step_avg:94.71ms +step:277/1670 train_time:26234ms step_avg:94.71ms +step:278/1670 train_time:26325ms step_avg:94.69ms +step:279/1670 train_time:26417ms step_avg:94.69ms +step:280/1670 train_time:26509ms step_avg:94.68ms +step:281/1670 train_time:26601ms step_avg:94.67ms +step:282/1670 train_time:26693ms step_avg:94.66ms +step:283/1670 train_time:26786ms step_avg:94.65ms +step:284/1670 train_time:26878ms step_avg:94.64ms +step:285/1670 train_time:26971ms step_avg:94.63ms +step:286/1670 train_time:27064ms step_avg:94.63ms +step:287/1670 
train_time:27157ms step_avg:94.62ms +step:288/1670 train_time:27250ms step_avg:94.62ms +step:289/1670 train_time:27343ms step_avg:94.61ms +step:290/1670 train_time:27436ms step_avg:94.61ms +step:291/1670 train_time:27528ms step_avg:94.60ms +step:292/1670 train_time:27620ms step_avg:94.59ms +step:293/1670 train_time:27712ms step_avg:94.58ms +step:294/1670 train_time:27805ms step_avg:94.57ms +step:295/1670 train_time:27897ms step_avg:94.57ms +step:296/1670 train_time:27990ms step_avg:94.56ms +step:297/1670 train_time:28082ms step_avg:94.55ms +step:298/1670 train_time:28175ms step_avg:94.55ms +step:299/1670 train_time:28267ms step_avg:94.54ms +step:300/1670 train_time:28360ms step_avg:94.53ms +step:301/1670 train_time:28452ms step_avg:94.53ms +step:302/1670 train_time:28544ms step_avg:94.52ms +step:303/1670 train_time:28637ms step_avg:94.51ms +step:304/1670 train_time:28728ms step_avg:94.50ms +step:305/1670 train_time:28820ms step_avg:94.49ms +step:306/1670 train_time:28912ms step_avg:94.48ms +step:307/1670 train_time:29004ms step_avg:94.48ms +step:308/1670 train_time:29098ms step_avg:94.47ms +step:309/1670 train_time:29190ms step_avg:94.46ms +step:310/1670 train_time:29282ms step_avg:94.46ms +step:311/1670 train_time:29375ms step_avg:94.45ms +step:312/1670 train_time:29467ms step_avg:94.45ms +step:313/1670 train_time:29560ms step_avg:94.44ms +step:314/1670 train_time:29652ms step_avg:94.43ms +step:315/1670 train_time:29744ms step_avg:94.43ms +step:316/1670 train_time:29837ms step_avg:94.42ms +step:317/1670 train_time:29929ms step_avg:94.41ms +step:318/1670 train_time:30021ms step_avg:94.41ms +step:319/1670 train_time:30114ms step_avg:94.40ms +step:320/1670 train_time:30207ms step_avg:94.40ms +step:321/1670 train_time:30300ms step_avg:94.39ms +step:322/1670 train_time:30392ms step_avg:94.39ms +step:323/1670 train_time:30484ms step_avg:94.38ms +step:324/1670 train_time:30577ms step_avg:94.37ms +step:325/1670 train_time:30670ms step_avg:94.37ms +step:326/1670 train_time:30762ms step_avg:94.36ms +step:327/1670 train_time:30855ms step_avg:94.36ms +step:328/1670 train_time:30947ms step_avg:94.35ms +step:329/1670 train_time:31040ms step_avg:94.35ms +step:330/1670 train_time:31131ms step_avg:94.34ms +step:331/1670 train_time:31225ms step_avg:94.33ms +step:332/1670 train_time:31318ms step_avg:94.33ms +step:333/1670 train_time:31410ms step_avg:94.32ms +step:334/1670 train_time:31503ms step_avg:94.32ms +step:335/1670 train_time:31595ms step_avg:94.31ms +step:336/1670 train_time:31687ms step_avg:94.31ms +step:337/1670 train_time:31779ms step_avg:94.30ms +step:338/1670 train_time:31872ms step_avg:94.30ms +step:339/1670 train_time:31964ms step_avg:94.29ms +step:340/1670 train_time:32057ms step_avg:94.29ms +step:341/1670 train_time:32149ms step_avg:94.28ms +step:342/1670 train_time:32242ms step_avg:94.28ms +step:343/1670 train_time:32334ms step_avg:94.27ms +step:344/1670 train_time:32427ms step_avg:94.26ms +step:345/1670 train_time:32519ms step_avg:94.26ms +step:346/1670 train_time:32611ms step_avg:94.25ms +step:347/1670 train_time:32705ms step_avg:94.25ms +step:348/1670 train_time:32799ms step_avg:94.25ms +step:349/1670 train_time:32890ms step_avg:94.24ms +step:350/1670 train_time:32983ms step_avg:94.24ms +step:351/1670 train_time:33075ms step_avg:94.23ms +step:352/1670 train_time:33167ms step_avg:94.22ms +step:353/1670 train_time:33260ms step_avg:94.22ms +step:354/1670 train_time:33352ms step_avg:94.22ms +step:355/1670 train_time:33445ms step_avg:94.21ms +step:356/1670 train_time:33538ms step_avg:94.21ms 
+step:357/1670 train_time:33631ms step_avg:94.20ms +step:358/1670 train_time:33724ms step_avg:94.20ms +step:359/1670 train_time:33816ms step_avg:94.19ms +step:360/1670 train_time:33908ms step_avg:94.19ms +step:361/1670 train_time:34002ms step_avg:94.19ms +step:362/1670 train_time:34094ms step_avg:94.18ms +step:363/1670 train_time:34187ms step_avg:94.18ms +step:364/1670 train_time:34279ms step_avg:94.17ms +step:365/1670 train_time:34372ms step_avg:94.17ms +step:366/1670 train_time:34464ms step_avg:94.16ms +step:367/1670 train_time:34558ms step_avg:94.16ms +step:368/1670 train_time:34651ms step_avg:94.16ms +step:369/1670 train_time:34743ms step_avg:94.15ms +step:370/1670 train_time:34835ms step_avg:94.15ms +step:371/1670 train_time:34928ms step_avg:94.14ms +step:372/1670 train_time:35021ms step_avg:94.14ms +step:373/1670 train_time:35113ms step_avg:94.14ms +step:374/1670 train_time:35206ms step_avg:94.13ms +step:375/1670 train_time:35298ms step_avg:94.13ms +step:375/1670 val_loss:3.8193 train_time:35389ms step_avg:94.37ms +step:376/1670 train_time:35415ms step_avg:94.19ms +step:377/1670 train_time:35489ms step_avg:94.13ms +step:378/1670 train_time:35586ms step_avg:94.14ms +step:379/1670 train_time:35682ms step_avg:94.15ms +step:380/1670 train_time:35774ms step_avg:94.14ms +step:381/1670 train_time:35865ms step_avg:94.13ms +step:382/1670 train_time:35957ms step_avg:94.13ms +step:383/1670 train_time:36048ms step_avg:94.12ms +step:384/1670 train_time:36140ms step_avg:94.11ms +step:385/1670 train_time:36231ms step_avg:94.11ms +step:386/1670 train_time:36323ms step_avg:94.10ms +step:387/1670 train_time:36416ms step_avg:94.10ms +step:388/1670 train_time:36511ms step_avg:94.10ms +step:389/1670 train_time:36606ms step_avg:94.10ms +step:390/1670 train_time:36699ms step_avg:94.10ms +step:391/1670 train_time:36792ms step_avg:94.10ms +step:392/1670 train_time:36885ms step_avg:94.09ms +step:393/1670 train_time:36977ms step_avg:94.09ms +step:394/1670 train_time:37069ms step_avg:94.08ms +step:395/1670 train_time:37160ms step_avg:94.08ms +step:396/1670 train_time:37252ms step_avg:94.07ms +step:397/1670 train_time:37345ms step_avg:94.07ms +step:398/1670 train_time:37437ms step_avg:94.06ms +step:399/1670 train_time:37531ms step_avg:94.06ms +step:400/1670 train_time:37623ms step_avg:94.06ms +step:401/1670 train_time:37716ms step_avg:94.06ms +step:402/1670 train_time:37810ms step_avg:94.05ms +step:403/1670 train_time:37902ms step_avg:94.05ms +step:404/1670 train_time:37994ms step_avg:94.04ms +step:405/1670 train_time:38087ms step_avg:94.04ms +step:406/1670 train_time:38179ms step_avg:94.04ms +step:407/1670 train_time:38271ms step_avg:94.03ms +step:408/1670 train_time:38364ms step_avg:94.03ms +step:409/1670 train_time:38456ms step_avg:94.03ms +step:410/1670 train_time:38549ms step_avg:94.02ms +step:411/1670 train_time:38642ms step_avg:94.02ms +step:412/1670 train_time:38735ms step_avg:94.02ms +step:413/1670 train_time:38828ms step_avg:94.01ms +step:414/1670 train_time:38920ms step_avg:94.01ms +step:415/1670 train_time:39012ms step_avg:94.01ms +step:416/1670 train_time:39105ms step_avg:94.00ms +step:417/1670 train_time:39197ms step_avg:94.00ms +step:418/1670 train_time:39289ms step_avg:93.99ms +step:419/1670 train_time:39383ms step_avg:93.99ms +step:420/1670 train_time:39475ms step_avg:93.99ms +step:421/1670 train_time:39568ms step_avg:93.99ms +step:422/1670 train_time:39660ms step_avg:93.98ms +step:423/1670 train_time:39754ms step_avg:93.98ms +step:424/1670 train_time:39847ms step_avg:93.98ms +step:425/1670 
train_time:40160ms step_avg:94.49ms +step:426/1670 train_time:40349ms step_avg:94.72ms +step:427/1670 train_time:40439ms step_avg:94.71ms +step:428/1670 train_time:40531ms step_avg:94.70ms +step:429/1670 train_time:40622ms step_avg:94.69ms +step:430/1670 train_time:40713ms step_avg:94.68ms +step:431/1670 train_time:40805ms step_avg:94.68ms +step:432/1670 train_time:40896ms step_avg:94.67ms +step:433/1670 train_time:40988ms step_avg:94.66ms +step:434/1670 train_time:41079ms step_avg:94.65ms +step:435/1670 train_time:41171ms step_avg:94.65ms +step:436/1670 train_time:41266ms step_avg:94.65ms +step:437/1670 train_time:41363ms step_avg:94.65ms +step:438/1670 train_time:41456ms step_avg:94.65ms +step:439/1670 train_time:41550ms step_avg:94.65ms +step:440/1670 train_time:41642ms step_avg:94.64ms +step:441/1670 train_time:41733ms step_avg:94.63ms +step:442/1670 train_time:41825ms step_avg:94.63ms +step:443/1670 train_time:41918ms step_avg:94.62ms +step:444/1670 train_time:42010ms step_avg:94.62ms +step:445/1670 train_time:42101ms step_avg:94.61ms +step:446/1670 train_time:42194ms step_avg:94.61ms +step:447/1670 train_time:42288ms step_avg:94.61ms +step:448/1670 train_time:42383ms step_avg:94.60ms +step:449/1670 train_time:42476ms step_avg:94.60ms +step:450/1670 train_time:42569ms step_avg:94.60ms +step:451/1670 train_time:42661ms step_avg:94.59ms +step:452/1670 train_time:42753ms step_avg:94.59ms +step:453/1670 train_time:42845ms step_avg:94.58ms +step:454/1670 train_time:42937ms step_avg:94.57ms +step:455/1670 train_time:43029ms step_avg:94.57ms +step:456/1670 train_time:43121ms step_avg:94.56ms +step:457/1670 train_time:43214ms step_avg:94.56ms +step:458/1670 train_time:43308ms step_avg:94.56ms +step:459/1670 train_time:43401ms step_avg:94.55ms +step:460/1670 train_time:43494ms step_avg:94.55ms +step:461/1670 train_time:43586ms step_avg:94.55ms +step:462/1670 train_time:43679ms step_avg:94.54ms +step:463/1670 train_time:43771ms step_avg:94.54ms +step:464/1670 train_time:43863ms step_avg:94.53ms +step:465/1670 train_time:43956ms step_avg:94.53ms +step:466/1670 train_time:44047ms step_avg:94.52ms +step:467/1670 train_time:44140ms step_avg:94.52ms +step:468/1670 train_time:44233ms step_avg:94.51ms +step:469/1670 train_time:44325ms step_avg:94.51ms +step:470/1670 train_time:44418ms step_avg:94.51ms +step:471/1670 train_time:44511ms step_avg:94.50ms +step:472/1670 train_time:44604ms step_avg:94.50ms +step:473/1670 train_time:44697ms step_avg:94.50ms +step:474/1670 train_time:44789ms step_avg:94.49ms +step:475/1670 train_time:44881ms step_avg:94.49ms +step:476/1670 train_time:44973ms step_avg:94.48ms +step:477/1670 train_time:45066ms step_avg:94.48ms +step:478/1670 train_time:45158ms step_avg:94.47ms +step:479/1670 train_time:45251ms step_avg:94.47ms +step:480/1670 train_time:45343ms step_avg:94.46ms +step:481/1670 train_time:45436ms step_avg:94.46ms +step:482/1670 train_time:45529ms step_avg:94.46ms +step:483/1670 train_time:45623ms step_avg:94.46ms +step:484/1670 train_time:45716ms step_avg:94.45ms +step:485/1670 train_time:45809ms step_avg:94.45ms +step:486/1670 train_time:45900ms step_avg:94.45ms +step:487/1670 train_time:45994ms step_avg:94.44ms +step:488/1670 train_time:46087ms step_avg:94.44ms +step:489/1670 train_time:46180ms step_avg:94.44ms +step:490/1670 train_time:46273ms step_avg:94.43ms +step:491/1670 train_time:46365ms step_avg:94.43ms +step:492/1670 train_time:46457ms step_avg:94.43ms +step:493/1670 train_time:46550ms step_avg:94.42ms +step:494/1670 train_time:46643ms step_avg:94.42ms 
+step:495/1670 train_time:46736ms step_avg:94.42ms +step:496/1670 train_time:46829ms step_avg:94.41ms +step:497/1670 train_time:46921ms step_avg:94.41ms +step:498/1670 train_time:47013ms step_avg:94.40ms +step:499/1670 train_time:47106ms step_avg:94.40ms +step:500/1670 train_time:47198ms step_avg:94.40ms +step:500/1670 val_loss:3.7146 train_time:47288ms step_avg:94.58ms +step:501/1670 train_time:47314ms step_avg:94.44ms +step:502/1670 train_time:47387ms step_avg:94.40ms +step:503/1670 train_time:47491ms step_avg:94.42ms +step:504/1670 train_time:47585ms step_avg:94.41ms +step:505/1670 train_time:47677ms step_avg:94.41ms +step:506/1670 train_time:47769ms step_avg:94.40ms +step:507/1670 train_time:47860ms step_avg:94.40ms +step:508/1670 train_time:47951ms step_avg:94.39ms +step:509/1670 train_time:48043ms step_avg:94.39ms +step:510/1670 train_time:48135ms step_avg:94.38ms +step:511/1670 train_time:48226ms step_avg:94.38ms +step:512/1670 train_time:48319ms step_avg:94.37ms +step:513/1670 train_time:48412ms step_avg:94.37ms +step:514/1670 train_time:48506ms step_avg:94.37ms +step:515/1670 train_time:48600ms step_avg:94.37ms +step:516/1670 train_time:48693ms step_avg:94.37ms +step:517/1670 train_time:48785ms step_avg:94.36ms +step:518/1670 train_time:48877ms step_avg:94.36ms +step:519/1670 train_time:48969ms step_avg:94.35ms +step:520/1670 train_time:49061ms step_avg:94.35ms +step:521/1670 train_time:49153ms step_avg:94.34ms +step:522/1670 train_time:49245ms step_avg:94.34ms +step:523/1670 train_time:49338ms step_avg:94.34ms +step:524/1670 train_time:49431ms step_avg:94.33ms +step:525/1670 train_time:49525ms step_avg:94.33ms +step:526/1670 train_time:49618ms step_avg:94.33ms +step:527/1670 train_time:49711ms step_avg:94.33ms +step:528/1670 train_time:49803ms step_avg:94.32ms +step:529/1670 train_time:49896ms step_avg:94.32ms +step:530/1670 train_time:49988ms step_avg:94.32ms +step:531/1670 train_time:50081ms step_avg:94.31ms +step:532/1670 train_time:50173ms step_avg:94.31ms +step:533/1670 train_time:50266ms step_avg:94.31ms +step:534/1670 train_time:50359ms step_avg:94.31ms +step:535/1670 train_time:50452ms step_avg:94.30ms +step:536/1670 train_time:50546ms step_avg:94.30ms +step:537/1670 train_time:50639ms step_avg:94.30ms +step:538/1670 train_time:50732ms step_avg:94.30ms +step:539/1670 train_time:50825ms step_avg:94.29ms +step:540/1670 train_time:50917ms step_avg:94.29ms +step:541/1670 train_time:51010ms step_avg:94.29ms +step:542/1670 train_time:51101ms step_avg:94.28ms +step:543/1670 train_time:51194ms step_avg:94.28ms +step:544/1670 train_time:51287ms step_avg:94.28ms +step:545/1670 train_time:51380ms step_avg:94.28ms +step:546/1670 train_time:51472ms step_avg:94.27ms +step:547/1670 train_time:51565ms step_avg:94.27ms +step:548/1670 train_time:51659ms step_avg:94.27ms +step:549/1670 train_time:51752ms step_avg:94.27ms +step:550/1670 train_time:51844ms step_avg:94.26ms +step:551/1670 train_time:51936ms step_avg:94.26ms +step:552/1670 train_time:52029ms step_avg:94.25ms +step:553/1670 train_time:52121ms step_avg:94.25ms +step:554/1670 train_time:52214ms step_avg:94.25ms +step:555/1670 train_time:52306ms step_avg:94.25ms +step:556/1670 train_time:52399ms step_avg:94.24ms +step:557/1670 train_time:52493ms step_avg:94.24ms +step:558/1670 train_time:52698ms step_avg:94.44ms +step:559/1670 train_time:52765ms step_avg:94.39ms +step:560/1670 train_time:52856ms step_avg:94.39ms +step:561/1670 train_time:52949ms step_avg:94.38ms +step:562/1670 train_time:53042ms step_avg:94.38ms +step:563/1670 
train_time:53135ms step_avg:94.38ms +step:564/1670 train_time:53227ms step_avg:94.37ms +step:565/1670 train_time:53320ms step_avg:94.37ms +step:566/1670 train_time:53412ms step_avg:94.37ms +step:567/1670 train_time:53505ms step_avg:94.36ms +step:568/1670 train_time:53602ms step_avg:94.37ms +step:569/1670 train_time:53700ms step_avg:94.38ms +step:570/1670 train_time:53795ms step_avg:94.38ms +step:571/1670 train_time:53889ms step_avg:94.38ms +step:572/1670 train_time:53981ms step_avg:94.37ms +step:573/1670 train_time:54074ms step_avg:94.37ms +step:574/1670 train_time:54167ms step_avg:94.37ms +step:575/1670 train_time:54260ms step_avg:94.36ms +step:576/1670 train_time:54352ms step_avg:94.36ms +step:577/1670 train_time:54445ms step_avg:94.36ms +step:578/1670 train_time:54539ms step_avg:94.36ms +step:579/1670 train_time:54635ms step_avg:94.36ms +step:580/1670 train_time:54730ms step_avg:94.36ms +step:581/1670 train_time:54825ms step_avg:94.36ms +step:582/1670 train_time:54919ms step_avg:94.36ms +step:583/1670 train_time:55013ms step_avg:94.36ms +step:584/1670 train_time:55106ms step_avg:94.36ms +step:585/1670 train_time:55199ms step_avg:94.36ms +step:586/1670 train_time:55292ms step_avg:94.35ms +step:587/1670 train_time:55384ms step_avg:94.35ms +step:588/1670 train_time:55478ms step_avg:94.35ms +step:589/1670 train_time:55572ms step_avg:94.35ms +step:590/1670 train_time:55667ms step_avg:94.35ms +step:591/1670 train_time:55762ms step_avg:94.35ms +step:592/1670 train_time:55857ms step_avg:94.35ms +step:593/1670 train_time:55950ms step_avg:94.35ms +step:594/1670 train_time:56044ms step_avg:94.35ms +step:595/1670 train_time:56137ms step_avg:94.35ms +step:596/1670 train_time:56231ms step_avg:94.35ms +step:597/1670 train_time:56324ms step_avg:94.34ms +step:598/1670 train_time:56417ms step_avg:94.34ms +step:599/1670 train_time:56511ms step_avg:94.34ms +step:600/1670 train_time:56605ms step_avg:94.34ms +step:601/1670 train_time:56700ms step_avg:94.34ms +step:602/1670 train_time:56794ms step_avg:94.34ms +step:603/1670 train_time:56889ms step_avg:94.34ms +step:604/1670 train_time:56982ms step_avg:94.34ms +step:605/1670 train_time:57076ms step_avg:94.34ms +step:606/1670 train_time:57170ms step_avg:94.34ms +step:607/1670 train_time:57263ms step_avg:94.34ms +step:608/1670 train_time:57356ms step_avg:94.34ms +step:609/1670 train_time:57449ms step_avg:94.33ms +step:610/1670 train_time:57543ms step_avg:94.33ms +step:611/1670 train_time:57638ms step_avg:94.33ms +step:612/1670 train_time:57732ms step_avg:94.33ms +step:613/1670 train_time:57826ms step_avg:94.33ms +step:614/1670 train_time:57920ms step_avg:94.33ms +step:615/1670 train_time:58014ms step_avg:94.33ms +step:616/1670 train_time:58108ms step_avg:94.33ms +step:617/1670 train_time:58202ms step_avg:94.33ms +step:618/1670 train_time:58296ms step_avg:94.33ms +step:619/1670 train_time:58389ms step_avg:94.33ms +step:620/1670 train_time:58483ms step_avg:94.33ms +step:621/1670 train_time:58576ms step_avg:94.32ms +step:622/1670 train_time:58670ms step_avg:94.32ms +step:623/1670 train_time:58763ms step_avg:94.32ms +step:624/1670 train_time:58859ms step_avg:94.33ms +step:625/1670 train_time:58952ms step_avg:94.32ms +step:625/1670 val_loss:3.6140 train_time:59044ms step_avg:94.47ms +step:626/1670 train_time:59070ms step_avg:94.36ms +step:627/1670 train_time:59150ms step_avg:94.34ms +step:628/1670 train_time:59247ms step_avg:94.34ms +step:629/1670 train_time:59341ms step_avg:94.34ms +step:630/1670 train_time:59433ms step_avg:94.34ms +step:631/1670 train_time:59526ms 
step_avg:94.34ms +step:632/1670 train_time:59619ms step_avg:94.33ms +step:633/1670 train_time:59712ms step_avg:94.33ms +step:634/1670 train_time:59804ms step_avg:94.33ms +step:635/1670 train_time:59897ms step_avg:94.33ms +step:636/1670 train_time:59991ms step_avg:94.33ms +step:637/1670 train_time:60088ms step_avg:94.33ms +step:638/1670 train_time:60184ms step_avg:94.33ms +step:639/1670 train_time:60625ms step_avg:94.88ms +step:640/1670 train_time:60703ms step_avg:94.85ms +step:641/1670 train_time:60796ms step_avg:94.85ms +step:642/1670 train_time:60888ms step_avg:94.84ms +step:643/1670 train_time:60981ms step_avg:94.84ms +step:644/1670 train_time:61074ms step_avg:94.83ms +step:645/1670 train_time:61166ms step_avg:94.83ms +step:646/1670 train_time:61258ms step_avg:94.83ms +step:647/1670 train_time:61351ms step_avg:94.82ms +step:648/1670 train_time:61444ms step_avg:94.82ms +step:649/1670 train_time:61539ms step_avg:94.82ms +step:650/1670 train_time:61636ms step_avg:94.82ms +step:651/1670 train_time:61731ms step_avg:94.82ms +step:652/1670 train_time:61824ms step_avg:94.82ms +step:653/1670 train_time:61917ms step_avg:94.82ms +step:654/1670 train_time:62011ms step_avg:94.82ms +step:655/1670 train_time:62103ms step_avg:94.81ms +step:656/1670 train_time:62196ms step_avg:94.81ms +step:657/1670 train_time:62289ms step_avg:94.81ms +step:658/1670 train_time:62382ms step_avg:94.81ms +step:659/1670 train_time:62476ms step_avg:94.80ms +step:660/1670 train_time:62571ms step_avg:94.80ms +step:661/1670 train_time:62667ms step_avg:94.81ms +step:662/1670 train_time:62761ms step_avg:94.81ms +step:663/1670 train_time:62855ms step_avg:94.80ms +step:664/1670 train_time:62949ms step_avg:94.80ms +step:665/1670 train_time:63043ms step_avg:94.80ms +step:666/1670 train_time:63136ms step_avg:94.80ms +step:667/1670 train_time:63229ms step_avg:94.80ms +step:668/1670 train_time:63322ms step_avg:94.79ms +step:669/1670 train_time:63416ms step_avg:94.79ms +step:670/1670 train_time:63510ms step_avg:94.79ms +step:671/1670 train_time:63604ms step_avg:94.79ms +step:672/1670 train_time:63699ms step_avg:94.79ms +step:673/1670 train_time:63793ms step_avg:94.79ms +step:674/1670 train_time:63887ms step_avg:94.79ms +step:675/1670 train_time:63980ms step_avg:94.79ms +step:676/1670 train_time:64074ms step_avg:94.78ms +step:677/1670 train_time:64167ms step_avg:94.78ms +step:678/1670 train_time:64260ms step_avg:94.78ms +step:679/1670 train_time:64353ms step_avg:94.78ms +step:680/1670 train_time:64447ms step_avg:94.78ms +step:681/1670 train_time:64541ms step_avg:94.77ms +step:682/1670 train_time:64635ms step_avg:94.77ms +step:683/1670 train_time:64729ms step_avg:94.77ms +step:684/1670 train_time:64823ms step_avg:94.77ms +step:685/1670 train_time:64917ms step_avg:94.77ms +step:686/1670 train_time:65011ms step_avg:94.77ms +step:687/1670 train_time:65104ms step_avg:94.77ms +step:688/1670 train_time:65198ms step_avg:94.76ms +step:689/1670 train_time:65292ms step_avg:94.76ms +step:690/1670 train_time:65385ms step_avg:94.76ms +step:691/1670 train_time:65480ms step_avg:94.76ms +step:692/1670 train_time:65573ms step_avg:94.76ms +step:693/1670 train_time:65667ms step_avg:94.76ms +step:694/1670 train_time:65761ms step_avg:94.76ms +step:695/1670 train_time:65855ms step_avg:94.76ms +step:696/1670 train_time:65950ms step_avg:94.76ms +step:697/1670 train_time:66044ms step_avg:94.75ms +step:698/1670 train_time:66138ms step_avg:94.75ms +step:699/1670 train_time:66231ms step_avg:94.75ms +step:700/1670 train_time:66325ms step_avg:94.75ms +step:701/1670 
train_time:66419ms step_avg:94.75ms +step:702/1670 train_time:66512ms step_avg:94.75ms +step:703/1670 train_time:66606ms step_avg:94.75ms +step:704/1670 train_time:66700ms step_avg:94.74ms +step:705/1670 train_time:66794ms step_avg:94.74ms +step:706/1670 train_time:66887ms step_avg:94.74ms +step:707/1670 train_time:66982ms step_avg:94.74ms +step:708/1670 train_time:67076ms step_avg:94.74ms +step:709/1670 train_time:67169ms step_avg:94.74ms +step:710/1670 train_time:67262ms step_avg:94.74ms +step:711/1670 train_time:67356ms step_avg:94.73ms +step:712/1670 train_time:67450ms step_avg:94.73ms +step:713/1670 train_time:67543ms step_avg:94.73ms +step:714/1670 train_time:67638ms step_avg:94.73ms +step:715/1670 train_time:67731ms step_avg:94.73ms +step:716/1670 train_time:67825ms step_avg:94.73ms +step:717/1670 train_time:67919ms step_avg:94.73ms +step:718/1670 train_time:68013ms step_avg:94.73ms +step:719/1670 train_time:68107ms step_avg:94.72ms +step:720/1670 train_time:68201ms step_avg:94.72ms +step:721/1670 train_time:68295ms step_avg:94.72ms +step:722/1670 train_time:68387ms step_avg:94.72ms +step:723/1670 train_time:68481ms step_avg:94.72ms +step:724/1670 train_time:68574ms step_avg:94.72ms +step:725/1670 train_time:68668ms step_avg:94.71ms +step:726/1670 train_time:68761ms step_avg:94.71ms +step:727/1670 train_time:68855ms step_avg:94.71ms +step:728/1670 train_time:68950ms step_avg:94.71ms +step:729/1670 train_time:69043ms step_avg:94.71ms +step:730/1670 train_time:69138ms step_avg:94.71ms +step:731/1670 train_time:69231ms step_avg:94.71ms +step:732/1670 train_time:69324ms step_avg:94.71ms +step:733/1670 train_time:69419ms step_avg:94.70ms +step:734/1670 train_time:69512ms step_avg:94.70ms +step:735/1670 train_time:69606ms step_avg:94.70ms +step:736/1670 train_time:69700ms step_avg:94.70ms +step:737/1670 train_time:69794ms step_avg:94.70ms +step:738/1670 train_time:69888ms step_avg:94.70ms +step:739/1670 train_time:69981ms step_avg:94.70ms +step:740/1670 train_time:70076ms step_avg:94.70ms +step:741/1670 train_time:70169ms step_avg:94.69ms +step:742/1670 train_time:70263ms step_avg:94.69ms +step:743/1670 train_time:70357ms step_avg:94.69ms +step:744/1670 train_time:70451ms step_avg:94.69ms +step:745/1670 train_time:70545ms step_avg:94.69ms +step:746/1670 train_time:70639ms step_avg:94.69ms +step:747/1670 train_time:70733ms step_avg:94.69ms +step:748/1670 train_time:70826ms step_avg:94.69ms +step:749/1670 train_time:70920ms step_avg:94.69ms +step:750/1670 train_time:71014ms step_avg:94.69ms +step:750/1670 val_loss:3.5629 train_time:71106ms step_avg:94.81ms +step:751/1670 train_time:71133ms step_avg:94.72ms +step:752/1670 train_time:71210ms step_avg:94.69ms +step:753/1670 train_time:71311ms step_avg:94.70ms +step:754/1670 train_time:71408ms step_avg:94.71ms +step:755/1670 train_time:71501ms step_avg:94.70ms +step:756/1670 train_time:71594ms step_avg:94.70ms +step:757/1670 train_time:71686ms step_avg:94.70ms +step:758/1670 train_time:71779ms step_avg:94.69ms +step:759/1670 train_time:71871ms step_avg:94.69ms +step:760/1670 train_time:71964ms step_avg:94.69ms +step:761/1670 train_time:72057ms step_avg:94.69ms +step:762/1670 train_time:72151ms step_avg:94.69ms +step:763/1670 train_time:72247ms step_avg:94.69ms +step:764/1670 train_time:72342ms step_avg:94.69ms +step:765/1670 train_time:72438ms step_avg:94.69ms +step:766/1670 train_time:72532ms step_avg:94.69ms +step:767/1670 train_time:72626ms step_avg:94.69ms +step:768/1670 train_time:72719ms step_avg:94.69ms +step:769/1670 train_time:72812ms 
step_avg:94.68ms +step:770/1670 train_time:72905ms step_avg:94.68ms +step:771/1670 train_time:72997ms step_avg:94.68ms +step:772/1670 train_time:73091ms step_avg:94.68ms +step:773/1670 train_time:73186ms step_avg:94.68ms +step:774/1670 train_time:73281ms step_avg:94.68ms +step:775/1670 train_time:73376ms step_avg:94.68ms +step:776/1670 train_time:73469ms step_avg:94.68ms +step:777/1670 train_time:73563ms step_avg:94.68ms +step:778/1670 train_time:73657ms step_avg:94.67ms +step:779/1670 train_time:73750ms step_avg:94.67ms +step:780/1670 train_time:73843ms step_avg:94.67ms +step:781/1670 train_time:73936ms step_avg:94.67ms +step:782/1670 train_time:74029ms step_avg:94.67ms +step:783/1670 train_time:74123ms step_avg:94.67ms +step:784/1670 train_time:74217ms step_avg:94.66ms +step:785/1670 train_time:74311ms step_avg:94.66ms +step:786/1670 train_time:74406ms step_avg:94.66ms +step:787/1670 train_time:74500ms step_avg:94.66ms +step:788/1670 train_time:74594ms step_avg:94.66ms +step:789/1670 train_time:74687ms step_avg:94.66ms +step:790/1670 train_time:74781ms step_avg:94.66ms +step:791/1670 train_time:74874ms step_avg:94.66ms +step:792/1670 train_time:74967ms step_avg:94.66ms +step:793/1670 train_time:75061ms step_avg:94.65ms +step:794/1670 train_time:75154ms step_avg:94.65ms +step:795/1670 train_time:75248ms step_avg:94.65ms +step:796/1670 train_time:75343ms step_avg:94.65ms +step:797/1670 train_time:75437ms step_avg:94.65ms +step:798/1670 train_time:75530ms step_avg:94.65ms +step:799/1670 train_time:75624ms step_avg:94.65ms +step:800/1670 train_time:75718ms step_avg:94.65ms +step:801/1670 train_time:75812ms step_avg:94.65ms +step:802/1670 train_time:75905ms step_avg:94.64ms +step:803/1670 train_time:75998ms step_avg:94.64ms +step:804/1670 train_time:76091ms step_avg:94.64ms +step:805/1670 train_time:76185ms step_avg:94.64ms +step:806/1670 train_time:76279ms step_avg:94.64ms +step:807/1670 train_time:76373ms step_avg:94.64ms +step:808/1670 train_time:76467ms step_avg:94.64ms +step:809/1670 train_time:76560ms step_avg:94.64ms +step:810/1670 train_time:76654ms step_avg:94.64ms +step:811/1670 train_time:76748ms step_avg:94.63ms +step:812/1670 train_time:76842ms step_avg:94.63ms +step:813/1670 train_time:76935ms step_avg:94.63ms +step:814/1670 train_time:77028ms step_avg:94.63ms +step:815/1670 train_time:77121ms step_avg:94.63ms +step:816/1670 train_time:77216ms step_avg:94.63ms +step:817/1670 train_time:77309ms step_avg:94.63ms +step:818/1670 train_time:77403ms step_avg:94.62ms +step:819/1670 train_time:77497ms step_avg:94.62ms +step:820/1670 train_time:77591ms step_avg:94.62ms +step:821/1670 train_time:77684ms step_avg:94.62ms +step:822/1670 train_time:77778ms step_avg:94.62ms +step:823/1670 train_time:77872ms step_avg:94.62ms +step:824/1670 train_time:77967ms step_avg:94.62ms +step:825/1670 train_time:78060ms step_avg:94.62ms +step:826/1670 train_time:78155ms step_avg:94.62ms +step:827/1670 train_time:78249ms step_avg:94.62ms +step:828/1670 train_time:78342ms step_avg:94.62ms +step:829/1670 train_time:78435ms step_avg:94.61ms +step:830/1670 train_time:78528ms step_avg:94.61ms +step:831/1670 train_time:78622ms step_avg:94.61ms +step:832/1670 train_time:78716ms step_avg:94.61ms +step:833/1670 train_time:78810ms step_avg:94.61ms +step:834/1670 train_time:78905ms step_avg:94.61ms +step:835/1670 train_time:78998ms step_avg:94.61ms +step:836/1670 train_time:79093ms step_avg:94.61ms +step:837/1670 train_time:79187ms step_avg:94.61ms +step:838/1670 train_time:79281ms step_avg:94.61ms +step:839/1670 
train_time:79375ms step_avg:94.61ms +step:840/1670 train_time:79467ms step_avg:94.60ms +step:841/1670 train_time:79561ms step_avg:94.60ms +step:842/1670 train_time:79655ms step_avg:94.60ms +step:843/1670 train_time:79749ms step_avg:94.60ms +step:844/1670 train_time:79843ms step_avg:94.60ms +step:845/1670 train_time:79937ms step_avg:94.60ms +step:846/1670 train_time:80031ms step_avg:94.60ms +step:847/1670 train_time:80126ms step_avg:94.60ms +step:848/1670 train_time:80220ms step_avg:94.60ms +step:849/1670 train_time:80313ms step_avg:94.60ms +step:850/1670 train_time:80407ms step_avg:94.60ms +step:851/1670 train_time:80847ms step_avg:95.00ms +step:852/1670 train_time:80915ms step_avg:94.97ms +step:853/1670 train_time:81007ms step_avg:94.97ms +step:854/1670 train_time:81099ms step_avg:94.96ms +step:855/1670 train_time:81192ms step_avg:94.96ms +step:856/1670 train_time:81285ms step_avg:94.96ms +step:857/1670 train_time:81378ms step_avg:94.96ms +step:858/1670 train_time:81470ms step_avg:94.95ms +step:859/1670 train_time:81563ms step_avg:94.95ms +step:860/1670 train_time:81656ms step_avg:94.95ms +step:861/1670 train_time:81753ms step_avg:94.95ms +step:862/1670 train_time:81851ms step_avg:94.95ms +step:863/1670 train_time:81947ms step_avg:94.96ms +step:864/1670 train_time:82041ms step_avg:94.96ms +step:865/1670 train_time:82134ms step_avg:94.95ms +step:866/1670 train_time:82227ms step_avg:94.95ms +step:867/1670 train_time:82321ms step_avg:94.95ms +step:868/1670 train_time:82413ms step_avg:94.95ms +step:869/1670 train_time:82506ms step_avg:94.94ms +step:870/1670 train_time:82598ms step_avg:94.94ms +step:871/1670 train_time:82692ms step_avg:94.94ms +step:872/1670 train_time:82788ms step_avg:94.94ms +step:873/1670 train_time:82885ms step_avg:94.94ms +step:874/1670 train_time:82979ms step_avg:94.94ms +step:875/1670 train_time:83073ms step_avg:94.94ms +step:875/1670 val_loss:3.5191 train_time:83165ms step_avg:95.05ms +step:876/1670 train_time:83191ms step_avg:94.97ms +step:877/1670 train_time:83268ms step_avg:94.95ms +step:878/1670 train_time:83365ms step_avg:94.95ms +step:879/1670 train_time:83460ms step_avg:94.95ms +step:880/1670 train_time:83553ms step_avg:94.95ms +step:881/1670 train_time:83646ms step_avg:94.94ms +step:882/1670 train_time:83739ms step_avg:94.94ms +step:883/1670 train_time:83832ms step_avg:94.94ms +step:884/1670 train_time:83924ms step_avg:94.94ms +step:885/1670 train_time:84017ms step_avg:94.93ms +step:886/1670 train_time:84110ms step_avg:94.93ms +step:887/1670 train_time:84205ms step_avg:94.93ms +step:888/1670 train_time:84302ms step_avg:94.93ms +step:889/1670 train_time:84397ms step_avg:94.93ms +step:890/1670 train_time:84492ms step_avg:94.94ms +step:891/1670 train_time:84586ms step_avg:94.93ms +step:892/1670 train_time:84679ms step_avg:94.93ms +step:893/1670 train_time:84772ms step_avg:94.93ms +step:894/1670 train_time:84865ms step_avg:94.93ms +step:895/1670 train_time:84957ms step_avg:94.92ms +step:896/1670 train_time:85050ms step_avg:94.92ms +step:897/1670 train_time:85144ms step_avg:94.92ms +step:898/1670 train_time:85239ms step_avg:94.92ms +step:899/1670 train_time:85334ms step_avg:94.92ms +step:900/1670 train_time:85428ms step_avg:94.92ms +step:901/1670 train_time:85523ms step_avg:94.92ms +step:902/1670 train_time:85617ms step_avg:94.92ms +step:903/1670 train_time:85711ms step_avg:94.92ms +step:904/1670 train_time:85803ms step_avg:94.92ms +step:905/1670 train_time:85897ms step_avg:94.91ms +step:906/1670 train_time:85991ms step_avg:94.91ms +step:907/1670 train_time:86084ms 
step_avg:94.91ms +step:908/1670 train_time:86177ms step_avg:94.91ms +step:909/1670 train_time:86271ms step_avg:94.91ms +step:910/1670 train_time:86366ms step_avg:94.91ms +step:911/1670 train_time:86461ms step_avg:94.91ms +step:912/1670 train_time:86555ms step_avg:94.91ms +step:913/1670 train_time:86649ms step_avg:94.91ms +step:914/1670 train_time:86743ms step_avg:94.90ms +step:915/1670 train_time:86837ms step_avg:94.90ms +step:916/1670 train_time:86930ms step_avg:94.90ms +step:917/1670 train_time:87024ms step_avg:94.90ms +step:918/1670 train_time:87117ms step_avg:94.90ms +step:919/1670 train_time:87210ms step_avg:94.90ms +step:920/1670 train_time:87304ms step_avg:94.90ms +step:921/1670 train_time:87398ms step_avg:94.89ms +step:922/1670 train_time:87492ms step_avg:94.89ms +step:923/1670 train_time:87586ms step_avg:94.89ms +step:924/1670 train_time:87680ms step_avg:94.89ms +step:925/1670 train_time:87774ms step_avg:94.89ms +step:926/1670 train_time:87866ms step_avg:94.89ms +step:927/1670 train_time:87960ms step_avg:94.89ms +step:928/1670 train_time:88054ms step_avg:94.89ms +step:929/1670 train_time:88147ms step_avg:94.88ms +step:930/1670 train_time:88241ms step_avg:94.88ms +step:931/1670 train_time:88334ms step_avg:94.88ms +step:932/1670 train_time:88428ms step_avg:94.88ms +step:933/1670 train_time:88524ms step_avg:94.88ms +step:934/1670 train_time:88617ms step_avg:94.88ms +step:935/1670 train_time:88711ms step_avg:94.88ms +step:936/1670 train_time:88805ms step_avg:94.88ms +step:937/1670 train_time:88898ms step_avg:94.88ms +step:938/1670 train_time:88992ms step_avg:94.87ms +step:939/1670 train_time:89086ms step_avg:94.87ms +step:940/1670 train_time:89180ms step_avg:94.87ms +step:941/1670 train_time:89273ms step_avg:94.87ms +step:942/1670 train_time:89367ms step_avg:94.87ms +step:943/1670 train_time:89461ms step_avg:94.87ms +step:944/1670 train_time:89555ms step_avg:94.87ms +step:945/1670 train_time:89649ms step_avg:94.87ms +step:946/1670 train_time:89743ms step_avg:94.87ms +step:947/1670 train_time:89836ms step_avg:94.86ms +step:948/1670 train_time:89930ms step_avg:94.86ms +step:949/1670 train_time:90024ms step_avg:94.86ms +step:950/1670 train_time:90117ms step_avg:94.86ms +step:951/1670 train_time:90211ms step_avg:94.86ms +step:952/1670 train_time:90305ms step_avg:94.86ms +step:953/1670 train_time:90399ms step_avg:94.86ms +step:954/1670 train_time:90492ms step_avg:94.86ms +step:955/1670 train_time:90586ms step_avg:94.85ms +step:956/1670 train_time:90681ms step_avg:94.85ms +step:957/1670 train_time:90775ms step_avg:94.85ms +step:958/1670 train_time:90868ms step_avg:94.85ms +step:959/1670 train_time:90961ms step_avg:94.85ms +step:960/1670 train_time:91055ms step_avg:94.85ms +step:961/1670 train_time:91149ms step_avg:94.85ms +step:962/1670 train_time:91242ms step_avg:94.85ms +step:963/1670 train_time:91335ms step_avg:94.84ms +step:964/1670 train_time:91430ms step_avg:94.84ms +step:965/1670 train_time:91524ms step_avg:94.84ms +step:966/1670 train_time:91618ms step_avg:94.84ms +step:967/1670 train_time:91712ms step_avg:94.84ms +step:968/1670 train_time:91805ms step_avg:94.84ms +step:969/1670 train_time:91899ms step_avg:94.84ms +step:970/1670 train_time:91992ms step_avg:94.84ms +step:971/1670 train_time:92086ms step_avg:94.84ms +step:972/1670 train_time:92180ms step_avg:94.84ms +step:973/1670 train_time:92273ms step_avg:94.83ms +step:974/1670 train_time:92367ms step_avg:94.83ms +step:975/1670 train_time:92461ms step_avg:94.83ms +step:976/1670 train_time:92555ms step_avg:94.83ms +step:977/1670 
train_time:92650ms step_avg:94.83ms +step:978/1670 train_time:92744ms step_avg:94.83ms +step:979/1670 train_time:92837ms step_avg:94.83ms +step:980/1670 train_time:92931ms step_avg:94.83ms +step:981/1670 train_time:93024ms step_avg:94.83ms +step:982/1670 train_time:93118ms step_avg:94.83ms +step:983/1670 train_time:93213ms step_avg:94.82ms +step:984/1670 train_time:93305ms step_avg:94.82ms +step:985/1670 train_time:93399ms step_avg:94.82ms +step:986/1670 train_time:93493ms step_avg:94.82ms +step:987/1670 train_time:93586ms step_avg:94.82ms +step:988/1670 train_time:93681ms step_avg:94.82ms +step:989/1670 train_time:93775ms step_avg:94.82ms +step:990/1670 train_time:93869ms step_avg:94.82ms +step:991/1670 train_time:93963ms step_avg:94.82ms +step:992/1670 train_time:94057ms step_avg:94.82ms +step:993/1670 train_time:94150ms step_avg:94.81ms +step:994/1670 train_time:94244ms step_avg:94.81ms +step:995/1670 train_time:94337ms step_avg:94.81ms +step:996/1670 train_time:94431ms step_avg:94.81ms +step:997/1670 train_time:94525ms step_avg:94.81ms +step:998/1670 train_time:94619ms step_avg:94.81ms +step:999/1670 train_time:94714ms step_avg:94.81ms +step:1000/1670 train_time:94807ms step_avg:94.81ms +step:1000/1670 val_loss:3.4699 train_time:94899ms step_avg:94.90ms +step:1001/1670 train_time:94926ms step_avg:94.83ms +step:1002/1670 train_time:95001ms step_avg:94.81ms +step:1003/1670 train_time:95100ms step_avg:94.82ms +step:1004/1670 train_time:95193ms step_avg:94.81ms +step:1005/1670 train_time:95286ms step_avg:94.81ms +step:1006/1670 train_time:95379ms step_avg:94.81ms +step:1007/1670 train_time:95472ms step_avg:94.81ms +step:1008/1670 train_time:95564ms step_avg:94.81ms +step:1009/1670 train_time:95657ms step_avg:94.80ms +step:1010/1670 train_time:95751ms step_avg:94.80ms +step:1011/1670 train_time:95845ms step_avg:94.80ms +step:1012/1670 train_time:95940ms step_avg:94.80ms +step:1013/1670 train_time:96036ms step_avg:94.80ms +step:1014/1670 train_time:96131ms step_avg:94.80ms +step:1015/1670 train_time:96225ms step_avg:94.80ms +step:1016/1670 train_time:96319ms step_avg:94.80ms +step:1017/1670 train_time:96412ms step_avg:94.80ms +step:1018/1670 train_time:96505ms step_avg:94.80ms +step:1019/1670 train_time:96598ms step_avg:94.80ms +step:1020/1670 train_time:96691ms step_avg:94.79ms +step:1021/1670 train_time:96784ms step_avg:94.79ms +step:1022/1670 train_time:96878ms step_avg:94.79ms +step:1023/1670 train_time:96973ms step_avg:94.79ms +step:1024/1670 train_time:97068ms step_avg:94.79ms +step:1025/1670 train_time:97162ms step_avg:94.79ms +step:1026/1670 train_time:97257ms step_avg:94.79ms +step:1027/1670 train_time:97351ms step_avg:94.79ms +step:1028/1670 train_time:97445ms step_avg:94.79ms +step:1029/1670 train_time:97538ms step_avg:94.79ms +step:1030/1670 train_time:97631ms step_avg:94.79ms +step:1031/1670 train_time:97724ms step_avg:94.79ms +step:1032/1670 train_time:97818ms step_avg:94.79ms +step:1033/1670 train_time:97912ms step_avg:94.78ms +step:1034/1670 train_time:98006ms step_avg:94.78ms +step:1035/1670 train_time:98100ms step_avg:94.78ms +step:1036/1670 train_time:98195ms step_avg:94.78ms +step:1037/1670 train_time:98289ms step_avg:94.78ms +step:1038/1670 train_time:98382ms step_avg:94.78ms +step:1039/1670 train_time:98476ms step_avg:94.78ms +step:1040/1670 train_time:98569ms step_avg:94.78ms +step:1041/1670 train_time:98662ms step_avg:94.78ms +step:1042/1670 train_time:98756ms step_avg:94.78ms +step:1043/1670 train_time:98849ms step_avg:94.77ms +step:1044/1670 train_time:98943ms 
step_avg:94.77ms +step:1045/1670 train_time:99037ms step_avg:94.77ms +step:1046/1670 train_time:99131ms step_avg:94.77ms +step:1047/1670 train_time:99225ms step_avg:94.77ms +step:1048/1670 train_time:99319ms step_avg:94.77ms +step:1049/1670 train_time:99413ms step_avg:94.77ms +step:1050/1670 train_time:99506ms step_avg:94.77ms +step:1051/1670 train_time:99600ms step_avg:94.77ms +step:1052/1670 train_time:99693ms step_avg:94.77ms +step:1053/1670 train_time:99787ms step_avg:94.76ms +step:1054/1670 train_time:99881ms step_avg:94.76ms +step:1055/1670 train_time:99975ms step_avg:94.76ms +step:1056/1670 train_time:100069ms step_avg:94.76ms +step:1057/1670 train_time:100164ms step_avg:94.76ms +step:1058/1670 train_time:100259ms step_avg:94.76ms +step:1059/1670 train_time:100353ms step_avg:94.76ms +step:1060/1670 train_time:100447ms step_avg:94.76ms +step:1061/1670 train_time:100540ms step_avg:94.76ms +step:1062/1670 train_time:100986ms step_avg:95.09ms +step:1063/1670 train_time:101056ms step_avg:95.07ms +step:1064/1670 train_time:101148ms step_avg:95.06ms +step:1065/1670 train_time:101240ms step_avg:95.06ms +step:1066/1670 train_time:101333ms step_avg:95.06ms +step:1067/1670 train_time:101426ms step_avg:95.06ms +step:1068/1670 train_time:101518ms step_avg:95.05ms +step:1069/1670 train_time:101611ms step_avg:95.05ms +step:1070/1670 train_time:101703ms step_avg:95.05ms +step:1071/1670 train_time:101796ms step_avg:95.05ms +step:1072/1670 train_time:101892ms step_avg:95.05ms +step:1073/1670 train_time:101988ms step_avg:95.05ms +step:1074/1670 train_time:102085ms step_avg:95.05ms +step:1075/1670 train_time:102179ms step_avg:95.05ms +step:1076/1670 train_time:102272ms step_avg:95.05ms +step:1077/1670 train_time:102365ms step_avg:95.05ms +step:1078/1670 train_time:102459ms step_avg:95.05ms +step:1079/1670 train_time:102551ms step_avg:95.04ms +step:1080/1670 train_time:102644ms step_avg:95.04ms +step:1081/1670 train_time:102736ms step_avg:95.04ms +step:1082/1670 train_time:102830ms step_avg:95.04ms +step:1083/1670 train_time:102925ms step_avg:95.04ms +step:1084/1670 train_time:103021ms step_avg:95.04ms +step:1085/1670 train_time:103117ms step_avg:95.04ms +step:1086/1670 train_time:103210ms step_avg:95.04ms +step:1087/1670 train_time:103304ms step_avg:95.04ms +step:1088/1670 train_time:103397ms step_avg:95.03ms +step:1089/1670 train_time:103490ms step_avg:95.03ms +step:1090/1670 train_time:103583ms step_avg:95.03ms +step:1091/1670 train_time:103677ms step_avg:95.03ms +step:1092/1670 train_time:103771ms step_avg:95.03ms +step:1093/1670 train_time:103866ms step_avg:95.03ms +step:1094/1670 train_time:103960ms step_avg:95.03ms +step:1095/1670 train_time:104055ms step_avg:95.03ms +step:1096/1670 train_time:104150ms step_avg:95.03ms +step:1097/1670 train_time:104244ms step_avg:95.03ms +step:1098/1670 train_time:104338ms step_avg:95.03ms +step:1099/1670 train_time:104431ms step_avg:95.02ms +step:1100/1670 train_time:104524ms step_avg:95.02ms +step:1101/1670 train_time:104618ms step_avg:95.02ms +step:1102/1670 train_time:104711ms step_avg:95.02ms +step:1103/1670 train_time:104804ms step_avg:95.02ms +step:1104/1670 train_time:104898ms step_avg:95.02ms +step:1105/1670 train_time:104993ms step_avg:95.02ms +step:1106/1670 train_time:105087ms step_avg:95.02ms +step:1107/1670 train_time:105182ms step_avg:95.02ms +step:1108/1670 train_time:105276ms step_avg:95.01ms +step:1109/1670 train_time:105370ms step_avg:95.01ms +step:1110/1670 train_time:105464ms step_avg:95.01ms +step:1111/1670 train_time:105557ms 
step_avg:95.01ms +step:1112/1670 train_time:105650ms step_avg:95.01ms +step:1113/1670 train_time:105744ms step_avg:95.01ms +step:1114/1670 train_time:105837ms step_avg:95.01ms +step:1115/1670 train_time:106042ms step_avg:95.10ms +step:1116/1670 train_time:106111ms step_avg:95.08ms +step:1117/1670 train_time:106204ms step_avg:95.08ms +step:1118/1670 train_time:106298ms step_avg:95.08ms +step:1119/1670 train_time:106391ms step_avg:95.08ms +step:1120/1670 train_time:106485ms step_avg:95.08ms +step:1121/1670 train_time:106578ms step_avg:95.07ms +step:1122/1670 train_time:106671ms step_avg:95.07ms +step:1123/1670 train_time:106765ms step_avg:95.07ms +step:1124/1670 train_time:106858ms step_avg:95.07ms +step:1125/1670 train_time:106956ms step_avg:95.07ms +step:1125/1670 val_loss:3.4166 train_time:107052ms step_avg:95.16ms +step:1126/1670 train_time:107078ms step_avg:95.10ms +step:1127/1670 train_time:107160ms step_avg:95.08ms +step:1128/1670 train_time:107261ms step_avg:95.09ms +step:1129/1670 train_time:107356ms step_avg:95.09ms +step:1130/1670 train_time:107449ms step_avg:95.09ms +step:1131/1670 train_time:107544ms step_avg:95.09ms +step:1132/1670 train_time:107637ms step_avg:95.09ms +step:1133/1670 train_time:107731ms step_avg:95.08ms +step:1134/1670 train_time:107824ms step_avg:95.08ms +step:1135/1670 train_time:107918ms step_avg:95.08ms +step:1136/1670 train_time:108012ms step_avg:95.08ms +step:1137/1670 train_time:108109ms step_avg:95.08ms +step:1138/1670 train_time:108208ms step_avg:95.09ms +step:1139/1670 train_time:108304ms step_avg:95.09ms +step:1140/1670 train_time:108399ms step_avg:95.09ms +step:1141/1670 train_time:108493ms step_avg:95.09ms +step:1142/1670 train_time:108587ms step_avg:95.08ms +step:1143/1670 train_time:108681ms step_avg:95.08ms +step:1144/1670 train_time:108774ms step_avg:95.08ms +step:1145/1670 train_time:108868ms step_avg:95.08ms +step:1146/1670 train_time:108962ms step_avg:95.08ms +step:1147/1670 train_time:109055ms step_avg:95.08ms +step:1148/1670 train_time:109151ms step_avg:95.08ms +step:1149/1670 train_time:109246ms step_avg:95.08ms +step:1150/1670 train_time:109341ms step_avg:95.08ms +step:1151/1670 train_time:109436ms step_avg:95.08ms +step:1152/1670 train_time:109530ms step_avg:95.08ms +step:1153/1670 train_time:109624ms step_avg:95.08ms +step:1154/1670 train_time:109718ms step_avg:95.08ms +step:1155/1670 train_time:109812ms step_avg:95.08ms +step:1156/1670 train_time:109905ms step_avg:95.07ms +step:1157/1670 train_time:110000ms step_avg:95.07ms +step:1158/1670 train_time:110096ms step_avg:95.07ms +step:1159/1670 train_time:110191ms step_avg:95.07ms +step:1160/1670 train_time:110286ms step_avg:95.07ms +step:1161/1670 train_time:110381ms step_avg:95.07ms +step:1162/1670 train_time:110475ms step_avg:95.07ms +step:1163/1670 train_time:110571ms step_avg:95.07ms +step:1164/1670 train_time:110665ms step_avg:95.07ms +step:1165/1670 train_time:110759ms step_avg:95.07ms +step:1166/1670 train_time:110852ms step_avg:95.07ms +step:1167/1670 train_time:110947ms step_avg:95.07ms +step:1168/1670 train_time:111042ms step_avg:95.07ms +step:1169/1670 train_time:111137ms step_avg:95.07ms +step:1170/1670 train_time:111233ms step_avg:95.07ms +step:1171/1670 train_time:111329ms step_avg:95.07ms +step:1172/1670 train_time:111424ms step_avg:95.07ms +step:1173/1670 train_time:111519ms step_avg:95.07ms +step:1174/1670 train_time:111613ms step_avg:95.07ms +step:1175/1670 train_time:111708ms step_avg:95.07ms +step:1176/1670 train_time:111803ms step_avg:95.07ms +step:1177/1670 
train_time:111897ms step_avg:95.07ms +step:1178/1670 train_time:111991ms step_avg:95.07ms +step:1179/1670 train_time:112085ms step_avg:95.07ms +step:1180/1670 train_time:112180ms step_avg:95.07ms +step:1181/1670 train_time:112276ms step_avg:95.07ms +step:1182/1670 train_time:112371ms step_avg:95.07ms +step:1183/1670 train_time:112467ms step_avg:95.07ms +step:1184/1670 train_time:112561ms step_avg:95.07ms +step:1185/1670 train_time:112655ms step_avg:95.07ms +step:1186/1670 train_time:112750ms step_avg:95.07ms +step:1187/1670 train_time:112844ms step_avg:95.07ms +step:1188/1670 train_time:112939ms step_avg:95.07ms +step:1189/1670 train_time:113033ms step_avg:95.07ms +step:1190/1670 train_time:113128ms step_avg:95.07ms +step:1191/1670 train_time:113223ms step_avg:95.07ms +step:1192/1670 train_time:113318ms step_avg:95.07ms +step:1193/1670 train_time:113413ms step_avg:95.07ms +step:1194/1670 train_time:113509ms step_avg:95.07ms +step:1195/1670 train_time:113604ms step_avg:95.07ms +step:1196/1670 train_time:113697ms step_avg:95.06ms +step:1197/1670 train_time:113792ms step_avg:95.06ms +step:1198/1670 train_time:113887ms step_avg:95.06ms +step:1199/1670 train_time:113981ms step_avg:95.06ms +step:1200/1670 train_time:114075ms step_avg:95.06ms +step:1201/1670 train_time:114170ms step_avg:95.06ms +step:1202/1670 train_time:114266ms step_avg:95.06ms +step:1203/1670 train_time:114360ms step_avg:95.06ms +step:1204/1670 train_time:114456ms step_avg:95.06ms +step:1205/1670 train_time:114551ms step_avg:95.06ms +step:1206/1670 train_time:114645ms step_avg:95.06ms +step:1207/1670 train_time:114740ms step_avg:95.06ms +step:1208/1670 train_time:114835ms step_avg:95.06ms +step:1209/1670 train_time:114930ms step_avg:95.06ms +step:1210/1670 train_time:115024ms step_avg:95.06ms +step:1211/1670 train_time:115119ms step_avg:95.06ms +step:1212/1670 train_time:115214ms step_avg:95.06ms +step:1213/1670 train_time:115308ms step_avg:95.06ms +step:1214/1670 train_time:115404ms step_avg:95.06ms +step:1215/1670 train_time:115499ms step_avg:95.06ms +step:1216/1670 train_time:115593ms step_avg:95.06ms +step:1217/1670 train_time:115688ms step_avg:95.06ms +step:1218/1670 train_time:115783ms step_avg:95.06ms +step:1219/1670 train_time:115878ms step_avg:95.06ms +step:1220/1670 train_time:115973ms step_avg:95.06ms +step:1221/1670 train_time:116067ms step_avg:95.06ms +step:1222/1670 train_time:116161ms step_avg:95.06ms +step:1223/1670 train_time:116254ms step_avg:95.06ms +step:1224/1670 train_time:116349ms step_avg:95.06ms +step:1225/1670 train_time:116445ms step_avg:95.06ms +step:1226/1670 train_time:116539ms step_avg:95.06ms +step:1227/1670 train_time:116634ms step_avg:95.06ms +step:1228/1670 train_time:116729ms step_avg:95.06ms +step:1229/1670 train_time:116823ms step_avg:95.06ms +step:1230/1670 train_time:116917ms step_avg:95.05ms +step:1231/1670 train_time:117012ms step_avg:95.05ms +step:1232/1670 train_time:117107ms step_avg:95.05ms +step:1233/1670 train_time:117201ms step_avg:95.05ms +step:1234/1670 train_time:117295ms step_avg:95.05ms +step:1235/1670 train_time:117390ms step_avg:95.05ms +step:1236/1670 train_time:117485ms step_avg:95.05ms +step:1237/1670 train_time:117580ms step_avg:95.05ms +step:1238/1670 train_time:117676ms step_avg:95.05ms +step:1239/1670 train_time:117771ms step_avg:95.05ms +step:1240/1670 train_time:117866ms step_avg:95.05ms +step:1241/1670 train_time:117960ms step_avg:95.05ms +step:1242/1670 train_time:118055ms step_avg:95.05ms +step:1243/1670 train_time:118149ms step_avg:95.05ms +step:1244/1670 
train_time:118244ms step_avg:95.05ms +step:1245/1670 train_time:118339ms step_avg:95.05ms +step:1246/1670 train_time:118434ms step_avg:95.05ms +step:1247/1670 train_time:118528ms step_avg:95.05ms +step:1248/1670 train_time:118623ms step_avg:95.05ms +step:1249/1670 train_time:118717ms step_avg:95.05ms +step:1250/1670 train_time:118811ms step_avg:95.05ms +step:1250/1670 val_loss:3.3777 train_time:118904ms step_avg:95.12ms +step:1251/1670 train_time:118930ms step_avg:95.07ms +step:1252/1670 train_time:119007ms step_avg:95.05ms +step:1253/1670 train_time:119109ms step_avg:95.06ms +step:1254/1670 train_time:119205ms step_avg:95.06ms +step:1255/1670 train_time:119299ms step_avg:95.06ms +step:1256/1670 train_time:119392ms step_avg:95.06ms +step:1257/1670 train_time:119486ms step_avg:95.06ms +step:1258/1670 train_time:119579ms step_avg:95.05ms +step:1259/1670 train_time:119673ms step_avg:95.05ms +step:1260/1670 train_time:119766ms step_avg:95.05ms +step:1261/1670 train_time:119860ms step_avg:95.05ms +step:1262/1670 train_time:119957ms step_avg:95.05ms +step:1263/1670 train_time:120056ms step_avg:95.06ms +step:1264/1670 train_time:120153ms step_avg:95.06ms +step:1265/1670 train_time:120249ms step_avg:95.06ms +step:1266/1670 train_time:120346ms step_avg:95.06ms +step:1267/1670 train_time:120440ms step_avg:95.06ms +step:1268/1670 train_time:120533ms step_avg:95.06ms +step:1269/1670 train_time:120627ms step_avg:95.06ms +step:1270/1670 train_time:120720ms step_avg:95.06ms +step:1271/1670 train_time:120814ms step_avg:95.05ms +step:1272/1670 train_time:120909ms step_avg:95.05ms +step:1273/1670 train_time:121006ms step_avg:95.06ms +step:1274/1670 train_time:121451ms step_avg:95.33ms +step:1275/1670 train_time:121519ms step_avg:95.31ms +step:1276/1670 train_time:121612ms step_avg:95.31ms +step:1277/1670 train_time:121705ms step_avg:95.31ms +step:1278/1670 train_time:121798ms step_avg:95.30ms +step:1279/1670 train_time:121892ms step_avg:95.30ms +step:1280/1670 train_time:121986ms step_avg:95.30ms +step:1281/1670 train_time:122079ms step_avg:95.30ms +step:1282/1670 train_time:122172ms step_avg:95.30ms +step:1283/1670 train_time:122265ms step_avg:95.30ms +step:1284/1670 train_time:122361ms step_avg:95.30ms +step:1285/1670 train_time:122460ms step_avg:95.30ms +step:1286/1670 train_time:122555ms step_avg:95.30ms +step:1287/1670 train_time:122650ms step_avg:95.30ms +step:1288/1670 train_time:122744ms step_avg:95.30ms +step:1289/1670 train_time:122838ms step_avg:95.30ms +step:1290/1670 train_time:122933ms step_avg:95.30ms +step:1291/1670 train_time:123026ms step_avg:95.30ms +step:1292/1670 train_time:123120ms step_avg:95.29ms +step:1293/1670 train_time:123215ms step_avg:95.29ms +step:1294/1670 train_time:123308ms step_avg:95.29ms +step:1295/1670 train_time:123406ms step_avg:95.29ms +step:1296/1670 train_time:123503ms step_avg:95.30ms +step:1297/1670 train_time:123598ms step_avg:95.30ms +step:1298/1670 train_time:123692ms step_avg:95.29ms +step:1299/1670 train_time:123787ms step_avg:95.29ms +step:1300/1670 train_time:123881ms step_avg:95.29ms +step:1301/1670 train_time:123976ms step_avg:95.29ms +step:1302/1670 train_time:124069ms step_avg:95.29ms +step:1303/1670 train_time:124163ms step_avg:95.29ms +step:1304/1670 train_time:124259ms step_avg:95.29ms +step:1305/1670 train_time:124354ms step_avg:95.29ms +step:1306/1670 train_time:124449ms step_avg:95.29ms +step:1307/1670 train_time:124544ms step_avg:95.29ms +step:1308/1670 train_time:124640ms step_avg:95.29ms +step:1309/1670 train_time:124734ms step_avg:95.29ms 
+step:1310/1670 train_time:124829ms step_avg:95.29ms +step:1311/1670 train_time:124923ms step_avg:95.29ms +step:1312/1670 train_time:125018ms step_avg:95.29ms +step:1313/1670 train_time:125112ms step_avg:95.29ms +step:1314/1670 train_time:125206ms step_avg:95.29ms +step:1315/1670 train_time:125301ms step_avg:95.29ms +step:1316/1670 train_time:125396ms step_avg:95.29ms +step:1317/1670 train_time:125491ms step_avg:95.29ms +step:1318/1670 train_time:125586ms step_avg:95.29ms +step:1319/1670 train_time:125681ms step_avg:95.28ms +step:1320/1670 train_time:125776ms step_avg:95.28ms +step:1321/1670 train_time:125870ms step_avg:95.28ms +step:1322/1670 train_time:125963ms step_avg:95.28ms +step:1323/1670 train_time:126059ms step_avg:95.28ms +step:1324/1670 train_time:126154ms step_avg:95.28ms +step:1325/1670 train_time:126248ms step_avg:95.28ms +step:1326/1670 train_time:126343ms step_avg:95.28ms +step:1327/1670 train_time:126437ms step_avg:95.28ms +step:1328/1670 train_time:126532ms step_avg:95.28ms +step:1329/1670 train_time:126627ms step_avg:95.28ms +step:1330/1670 train_time:126722ms step_avg:95.28ms +step:1331/1670 train_time:126816ms step_avg:95.28ms +step:1332/1670 train_time:126910ms step_avg:95.28ms +step:1333/1670 train_time:127005ms step_avg:95.28ms +step:1334/1670 train_time:127100ms step_avg:95.28ms +step:1335/1670 train_time:127195ms step_avg:95.28ms +step:1336/1670 train_time:127290ms step_avg:95.28ms +step:1337/1670 train_time:127384ms step_avg:95.28ms +step:1338/1670 train_time:127479ms step_avg:95.28ms +step:1339/1670 train_time:127574ms step_avg:95.28ms +step:1340/1670 train_time:127669ms step_avg:95.28ms +step:1341/1670 train_time:127764ms step_avg:95.28ms +step:1342/1670 train_time:127858ms step_avg:95.27ms +step:1343/1670 train_time:127953ms step_avg:95.27ms +step:1344/1670 train_time:128047ms step_avg:95.27ms +step:1345/1670 train_time:128141ms step_avg:95.27ms +step:1346/1670 train_time:128237ms step_avg:95.27ms +step:1347/1670 train_time:128331ms step_avg:95.27ms +step:1348/1670 train_time:128425ms step_avg:95.27ms +step:1349/1670 train_time:128520ms step_avg:95.27ms +step:1350/1670 train_time:128615ms step_avg:95.27ms +step:1351/1670 train_time:128709ms step_avg:95.27ms +step:1352/1670 train_time:128805ms step_avg:95.27ms +step:1353/1670 train_time:128899ms step_avg:95.27ms +step:1354/1670 train_time:128993ms step_avg:95.27ms +step:1355/1670 train_time:129088ms step_avg:95.27ms +step:1356/1670 train_time:129182ms step_avg:95.27ms +step:1357/1670 train_time:129277ms step_avg:95.27ms +step:1358/1670 train_time:129371ms step_avg:95.27ms +step:1359/1670 train_time:129465ms step_avg:95.27ms +step:1360/1670 train_time:129561ms step_avg:95.27ms +step:1361/1670 train_time:129656ms step_avg:95.26ms +step:1362/1670 train_time:129751ms step_avg:95.26ms +step:1363/1670 train_time:129845ms step_avg:95.26ms +step:1364/1670 train_time:129939ms step_avg:95.26ms +step:1365/1670 train_time:130033ms step_avg:95.26ms +step:1366/1670 train_time:130128ms step_avg:95.26ms +step:1367/1670 train_time:130223ms step_avg:95.26ms +step:1368/1670 train_time:130317ms step_avg:95.26ms +step:1369/1670 train_time:130412ms step_avg:95.26ms +step:1370/1670 train_time:130506ms step_avg:95.26ms +step:1371/1670 train_time:130601ms step_avg:95.26ms +step:1372/1670 train_time:130695ms step_avg:95.26ms +step:1373/1670 train_time:130791ms step_avg:95.26ms +step:1374/1670 train_time:130886ms step_avg:95.26ms +step:1375/1670 train_time:130980ms step_avg:95.26ms +step:1375/1670 val_loss:3.3429 train_time:131074ms 
step_avg:95.33ms +step:1376/1670 train_time:131100ms step_avg:95.28ms +step:1377/1670 train_time:131179ms step_avg:95.26ms +step:1378/1670 train_time:131280ms step_avg:95.27ms +step:1379/1670 train_time:131375ms step_avg:95.27ms +step:1380/1670 train_time:131469ms step_avg:95.27ms +step:1381/1670 train_time:131563ms step_avg:95.27ms +step:1382/1670 train_time:131656ms step_avg:95.27ms +step:1383/1670 train_time:131750ms step_avg:95.26ms +step:1384/1670 train_time:131844ms step_avg:95.26ms +step:1385/1670 train_time:131938ms step_avg:95.26ms +step:1386/1670 train_time:132032ms step_avg:95.26ms +step:1387/1670 train_time:132127ms step_avg:95.26ms +step:1388/1670 train_time:132224ms step_avg:95.26ms +step:1389/1670 train_time:132319ms step_avg:95.26ms +step:1390/1670 train_time:132415ms step_avg:95.26ms +step:1391/1670 train_time:132509ms step_avg:95.26ms +step:1392/1670 train_time:132603ms step_avg:95.26ms +step:1393/1670 train_time:132697ms step_avg:95.26ms +step:1394/1670 train_time:132791ms step_avg:95.26ms +step:1395/1670 train_time:132884ms step_avg:95.26ms +step:1396/1670 train_time:132979ms step_avg:95.26ms +step:1397/1670 train_time:133073ms step_avg:95.26ms +step:1398/1670 train_time:133169ms step_avg:95.26ms +step:1399/1670 train_time:133266ms step_avg:95.26ms +step:1400/1670 train_time:133360ms step_avg:95.26ms +step:1401/1670 train_time:133455ms step_avg:95.26ms +step:1402/1670 train_time:133549ms step_avg:95.26ms +step:1403/1670 train_time:133644ms step_avg:95.26ms +step:1404/1670 train_time:133738ms step_avg:95.25ms +step:1405/1670 train_time:133833ms step_avg:95.26ms +step:1406/1670 train_time:133927ms step_avg:95.25ms +step:1407/1670 train_time:134021ms step_avg:95.25ms +step:1408/1670 train_time:134115ms step_avg:95.25ms +step:1409/1670 train_time:134211ms step_avg:95.25ms +step:1410/1670 train_time:134306ms step_avg:95.25ms +step:1411/1670 train_time:134401ms step_avg:95.25ms +step:1412/1670 train_time:134496ms step_avg:95.25ms +step:1413/1670 train_time:134591ms step_avg:95.25ms +step:1414/1670 train_time:134684ms step_avg:95.25ms +step:1415/1670 train_time:134779ms step_avg:95.25ms +step:1416/1670 train_time:134874ms step_avg:95.25ms +step:1417/1670 train_time:134969ms step_avg:95.25ms +step:1418/1670 train_time:135063ms step_avg:95.25ms +step:1419/1670 train_time:135158ms step_avg:95.25ms +step:1420/1670 train_time:135254ms step_avg:95.25ms +step:1421/1670 train_time:135349ms step_avg:95.25ms +step:1422/1670 train_time:135444ms step_avg:95.25ms +step:1423/1670 train_time:135538ms step_avg:95.25ms +step:1424/1670 train_time:135633ms step_avg:95.25ms +step:1425/1670 train_time:135727ms step_avg:95.25ms +step:1426/1670 train_time:135821ms step_avg:95.25ms +step:1427/1670 train_time:135915ms step_avg:95.25ms +step:1428/1670 train_time:136010ms step_avg:95.24ms +step:1429/1670 train_time:136105ms step_avg:95.25ms +step:1430/1670 train_time:136200ms step_avg:95.24ms +step:1431/1670 train_time:136295ms step_avg:95.24ms +step:1432/1670 train_time:136389ms step_avg:95.24ms +step:1433/1670 train_time:136484ms step_avg:95.24ms +step:1434/1670 train_time:136579ms step_avg:95.24ms +step:1435/1670 train_time:136673ms step_avg:95.24ms +step:1436/1670 train_time:136768ms step_avg:95.24ms +step:1437/1670 train_time:136863ms step_avg:95.24ms +step:1438/1670 train_time:136956ms step_avg:95.24ms +step:1439/1670 train_time:137052ms step_avg:95.24ms +step:1440/1670 train_time:137147ms step_avg:95.24ms +step:1441/1670 train_time:137243ms step_avg:95.24ms +step:1442/1670 train_time:137338ms 
step_avg:95.24ms +step:1443/1670 train_time:137432ms step_avg:95.24ms +step:1444/1670 train_time:137527ms step_avg:95.24ms +step:1445/1670 train_time:137622ms step_avg:95.24ms +step:1446/1670 train_time:137717ms step_avg:95.24ms +step:1447/1670 train_time:137812ms step_avg:95.24ms +step:1448/1670 train_time:137907ms step_avg:95.24ms +step:1449/1670 train_time:138001ms step_avg:95.24ms +step:1450/1670 train_time:138096ms step_avg:95.24ms +step:1451/1670 train_time:138191ms step_avg:95.24ms +step:1452/1670 train_time:138286ms step_avg:95.24ms +step:1453/1670 train_time:138381ms step_avg:95.24ms +step:1454/1670 train_time:138476ms step_avg:95.24ms +step:1455/1670 train_time:138572ms step_avg:95.24ms +step:1456/1670 train_time:138666ms step_avg:95.24ms +step:1457/1670 train_time:138760ms step_avg:95.24ms +step:1458/1670 train_time:138856ms step_avg:95.24ms +step:1459/1670 train_time:138950ms step_avg:95.24ms +step:1460/1670 train_time:139044ms step_avg:95.24ms +step:1461/1670 train_time:139139ms step_avg:95.24ms +step:1462/1670 train_time:139233ms step_avg:95.23ms +step:1463/1670 train_time:139329ms step_avg:95.24ms +step:1464/1670 train_time:139424ms step_avg:95.24ms +step:1465/1670 train_time:139519ms step_avg:95.23ms +step:1466/1670 train_time:139613ms step_avg:95.23ms +step:1467/1670 train_time:139708ms step_avg:95.23ms +step:1468/1670 train_time:139803ms step_avg:95.23ms +step:1469/1670 train_time:139898ms step_avg:95.23ms +step:1470/1670 train_time:139992ms step_avg:95.23ms +step:1471/1670 train_time:140086ms step_avg:95.23ms +step:1472/1670 train_time:140180ms step_avg:95.23ms +step:1473/1670 train_time:140276ms step_avg:95.23ms +step:1474/1670 train_time:140370ms step_avg:95.23ms +step:1475/1670 train_time:140465ms step_avg:95.23ms +step:1476/1670 train_time:140561ms step_avg:95.23ms +step:1477/1670 train_time:140655ms step_avg:95.23ms +step:1478/1670 train_time:140750ms step_avg:95.23ms +step:1479/1670 train_time:140846ms step_avg:95.23ms +step:1480/1670 train_time:140940ms step_avg:95.23ms +step:1481/1670 train_time:141035ms step_avg:95.23ms +step:1482/1670 train_time:141130ms step_avg:95.23ms +step:1483/1670 train_time:141224ms step_avg:95.23ms +step:1484/1670 train_time:141319ms step_avg:95.23ms +step:1485/1670 train_time:141763ms step_avg:95.46ms +step:1486/1670 train_time:141831ms step_avg:95.44ms +step:1487/1670 train_time:141923ms step_avg:95.44ms +step:1488/1670 train_time:142017ms step_avg:95.44ms +step:1489/1670 train_time:142110ms step_avg:95.44ms +step:1490/1670 train_time:142204ms step_avg:95.44ms +step:1491/1670 train_time:142297ms step_avg:95.44ms +step:1492/1670 train_time:142391ms step_avg:95.44ms +step:1493/1670 train_time:142484ms step_avg:95.43ms +step:1494/1670 train_time:142578ms step_avg:95.43ms +step:1495/1670 train_time:142672ms step_avg:95.43ms +step:1496/1670 train_time:142774ms step_avg:95.44ms +step:1497/1670 train_time:142872ms step_avg:95.44ms +step:1498/1670 train_time:142966ms step_avg:95.44ms +step:1499/1670 train_time:143060ms step_avg:95.44ms +step:1500/1670 train_time:143154ms step_avg:95.44ms +step:1500/1670 val_loss:3.3130 train_time:143247ms step_avg:95.50ms +step:1501/1670 train_time:143273ms step_avg:95.45ms +step:1502/1670 train_time:143349ms step_avg:95.44ms +step:1503/1670 train_time:143453ms step_avg:95.44ms +step:1504/1670 train_time:143549ms step_avg:95.44ms +step:1505/1670 train_time:143642ms step_avg:95.44ms +step:1506/1670 train_time:143736ms step_avg:95.44ms +step:1507/1670 train_time:143829ms step_avg:95.44ms +step:1508/1670 
train_time:143923ms step_avg:95.44ms +step:1509/1670 train_time:144016ms step_avg:95.44ms +step:1510/1670 train_time:144109ms step_avg:95.44ms +step:1511/1670 train_time:144203ms step_avg:95.44ms +step:1512/1670 train_time:144300ms step_avg:95.44ms +step:1513/1670 train_time:144398ms step_avg:95.44ms +step:1514/1670 train_time:144496ms step_avg:95.44ms +step:1515/1670 train_time:144592ms step_avg:95.44ms +step:1516/1670 train_time:144686ms step_avg:95.44ms +step:1517/1670 train_time:144779ms step_avg:95.44ms +step:1518/1670 train_time:144873ms step_avg:95.44ms +step:1519/1670 train_time:144966ms step_avg:95.43ms +step:1520/1670 train_time:145059ms step_avg:95.43ms +step:1521/1670 train_time:145153ms step_avg:95.43ms +step:1522/1670 train_time:145248ms step_avg:95.43ms +step:1523/1670 train_time:145345ms step_avg:95.43ms +step:1524/1670 train_time:145441ms step_avg:95.43ms +step:1525/1670 train_time:145536ms step_avg:95.43ms +step:1526/1670 train_time:145631ms step_avg:95.43ms +step:1527/1670 train_time:145726ms step_avg:95.43ms +step:1528/1670 train_time:145820ms step_avg:95.43ms +step:1529/1670 train_time:145914ms step_avg:95.43ms +step:1530/1670 train_time:146008ms step_avg:95.43ms +step:1531/1670 train_time:146101ms step_avg:95.43ms +step:1532/1670 train_time:146195ms step_avg:95.43ms +step:1533/1670 train_time:146292ms step_avg:95.43ms +step:1534/1670 train_time:146387ms step_avg:95.43ms +step:1535/1670 train_time:146483ms step_avg:95.43ms +step:1536/1670 train_time:146577ms step_avg:95.43ms +step:1537/1670 train_time:146673ms step_avg:95.43ms +step:1538/1670 train_time:146767ms step_avg:95.43ms +step:1539/1670 train_time:146861ms step_avg:95.43ms +step:1540/1670 train_time:146955ms step_avg:95.43ms +step:1541/1670 train_time:147050ms step_avg:95.42ms +step:1542/1670 train_time:147144ms step_avg:95.42ms +step:1543/1670 train_time:147238ms step_avg:95.42ms +step:1544/1670 train_time:147334ms step_avg:95.42ms +step:1545/1670 train_time:147428ms step_avg:95.42ms +step:1546/1670 train_time:147523ms step_avg:95.42ms +step:1547/1670 train_time:147618ms step_avg:95.42ms +step:1548/1670 train_time:147714ms step_avg:95.42ms +step:1549/1670 train_time:147808ms step_avg:95.42ms +step:1550/1670 train_time:147902ms step_avg:95.42ms +step:1551/1670 train_time:147996ms step_avg:95.42ms +step:1552/1670 train_time:148090ms step_avg:95.42ms +step:1553/1670 train_time:148184ms step_avg:95.42ms +step:1554/1670 train_time:148279ms step_avg:95.42ms +step:1555/1670 train_time:148375ms step_avg:95.42ms +step:1556/1670 train_time:148470ms step_avg:95.42ms +step:1557/1670 train_time:148564ms step_avg:95.42ms +step:1558/1670 train_time:148659ms step_avg:95.42ms +step:1559/1670 train_time:148754ms step_avg:95.42ms +step:1560/1670 train_time:148849ms step_avg:95.42ms +step:1561/1670 train_time:148944ms step_avg:95.42ms +step:1562/1670 train_time:149038ms step_avg:95.41ms +step:1563/1670 train_time:149132ms step_avg:95.41ms +step:1564/1670 train_time:149228ms step_avg:95.41ms +step:1565/1670 train_time:149322ms step_avg:95.41ms +step:1566/1670 train_time:149418ms step_avg:95.41ms +step:1567/1670 train_time:149513ms step_avg:95.41ms +step:1568/1670 train_time:149608ms step_avg:95.41ms +step:1569/1670 train_time:149703ms step_avg:95.41ms +step:1570/1670 train_time:149798ms step_avg:95.41ms +step:1571/1670 train_time:149892ms step_avg:95.41ms +step:1572/1670 train_time:149986ms step_avg:95.41ms +step:1573/1670 train_time:150081ms step_avg:95.41ms +step:1574/1670 train_time:150176ms step_avg:95.41ms +step:1575/1670 
train_time:150269ms step_avg:95.41ms +step:1576/1670 train_time:150364ms step_avg:95.41ms +step:1577/1670 train_time:150458ms step_avg:95.41ms +step:1578/1670 train_time:150553ms step_avg:95.41ms +step:1579/1670 train_time:150648ms step_avg:95.41ms +step:1580/1670 train_time:150743ms step_avg:95.41ms +step:1581/1670 train_time:150838ms step_avg:95.41ms +step:1582/1670 train_time:150932ms step_avg:95.41ms +step:1583/1670 train_time:151027ms step_avg:95.41ms +step:1584/1670 train_time:151121ms step_avg:95.40ms +step:1585/1670 train_time:151216ms step_avg:95.40ms +step:1586/1670 train_time:151310ms step_avg:95.40ms +step:1587/1670 train_time:151405ms step_avg:95.40ms +step:1588/1670 train_time:151500ms step_avg:95.40ms +step:1589/1670 train_time:151595ms step_avg:95.40ms +step:1590/1670 train_time:151690ms step_avg:95.40ms +step:1591/1670 train_time:151784ms step_avg:95.40ms +step:1592/1670 train_time:151878ms step_avg:95.40ms +step:1593/1670 train_time:151973ms step_avg:95.40ms +step:1594/1670 train_time:152067ms step_avg:95.40ms +step:1595/1670 train_time:152163ms step_avg:95.40ms +step:1596/1670 train_time:152257ms step_avg:95.40ms +step:1597/1670 train_time:152351ms step_avg:95.40ms +step:1598/1670 train_time:152446ms step_avg:95.40ms +step:1599/1670 train_time:152540ms step_avg:95.40ms +step:1600/1670 train_time:152637ms step_avg:95.40ms +step:1601/1670 train_time:152731ms step_avg:95.40ms +step:1602/1670 train_time:152826ms step_avg:95.40ms +step:1603/1670 train_time:152921ms step_avg:95.40ms +step:1604/1670 train_time:153015ms step_avg:95.40ms +step:1605/1670 train_time:153109ms step_avg:95.40ms +step:1606/1670 train_time:153204ms step_avg:95.40ms +step:1607/1670 train_time:153299ms step_avg:95.39ms +step:1608/1670 train_time:153394ms step_avg:95.39ms +step:1609/1670 train_time:153489ms step_avg:95.39ms +step:1610/1670 train_time:153584ms step_avg:95.39ms +step:1611/1670 train_time:153678ms step_avg:95.39ms +step:1612/1670 train_time:153771ms step_avg:95.39ms +step:1613/1670 train_time:153866ms step_avg:95.39ms +step:1614/1670 train_time:153961ms step_avg:95.39ms +step:1615/1670 train_time:154056ms step_avg:95.39ms +step:1616/1670 train_time:154151ms step_avg:95.39ms +step:1617/1670 train_time:154246ms step_avg:95.39ms +step:1618/1670 train_time:154341ms step_avg:95.39ms +step:1619/1670 train_time:154437ms step_avg:95.39ms +step:1620/1670 train_time:154531ms step_avg:95.39ms +step:1621/1670 train_time:154626ms step_avg:95.39ms +step:1622/1670 train_time:154720ms step_avg:95.39ms +step:1623/1670 train_time:154815ms step_avg:95.39ms +step:1624/1670 train_time:154910ms step_avg:95.39ms +step:1625/1670 train_time:155005ms step_avg:95.39ms +step:1625/1670 val_loss:3.2878 train_time:155098ms step_avg:95.44ms +step:1626/1670 train_time:155125ms step_avg:95.40ms +step:1627/1670 train_time:155203ms step_avg:95.39ms +step:1628/1670 train_time:155304ms step_avg:95.40ms +step:1629/1670 train_time:155400ms step_avg:95.40ms +step:1630/1670 train_time:155494ms step_avg:95.40ms +step:1631/1670 train_time:155588ms step_avg:95.39ms +step:1632/1670 train_time:155681ms step_avg:95.39ms +step:1633/1670 train_time:155775ms step_avg:95.39ms +step:1634/1670 train_time:155869ms step_avg:95.39ms +step:1635/1670 train_time:155962ms step_avg:95.39ms +step:1636/1670 train_time:156057ms step_avg:95.39ms +step:1637/1670 train_time:156154ms step_avg:95.39ms +step:1638/1670 train_time:156252ms step_avg:95.39ms +step:1639/1670 train_time:156348ms step_avg:95.39ms +step:1640/1670 train_time:156444ms step_avg:95.39ms 
+step:1641/1670 train_time:156539ms step_avg:95.39ms +step:1642/1670 train_time:156633ms step_avg:95.39ms +step:1643/1670 train_time:156727ms step_avg:95.39ms +step:1644/1670 train_time:156820ms step_avg:95.39ms +step:1645/1670 train_time:156914ms step_avg:95.39ms +step:1646/1670 train_time:157009ms step_avg:95.39ms +step:1647/1670 train_time:157104ms step_avg:95.39ms +step:1648/1670 train_time:157201ms step_avg:95.39ms +step:1649/1670 train_time:157298ms step_avg:95.39ms +step:1650/1670 train_time:157393ms step_avg:95.39ms +step:1651/1670 train_time:157487ms step_avg:95.39ms +step:1652/1670 train_time:157583ms step_avg:95.39ms +step:1653/1670 train_time:157677ms step_avg:95.39ms +step:1654/1670 train_time:157771ms step_avg:95.39ms +step:1655/1670 train_time:157865ms step_avg:95.39ms +step:1656/1670 train_time:157959ms step_avg:95.39ms +step:1657/1670 train_time:158054ms step_avg:95.39ms +step:1658/1670 train_time:158148ms step_avg:95.38ms +step:1659/1670 train_time:158243ms step_avg:95.38ms +step:1660/1670 train_time:158338ms step_avg:95.38ms +step:1661/1670 train_time:158434ms step_avg:95.38ms +step:1662/1670 train_time:158529ms step_avg:95.38ms +step:1663/1670 train_time:158625ms step_avg:95.39ms +step:1664/1670 train_time:158721ms step_avg:95.39ms +step:1665/1670 train_time:158816ms step_avg:95.38ms +step:1666/1670 train_time:158909ms step_avg:95.38ms +step:1667/1670 train_time:159004ms step_avg:95.38ms +step:1668/1670 train_time:159099ms step_avg:95.38ms +step:1669/1670 train_time:159194ms step_avg:95.38ms +step:1670/1670 train_time:159289ms step_avg:95.38ms +step:1670/1670 val_loss:3.2789 train_time:159467ms step_avg:95.49ms +peak memory allocated: 32712 MiB reserved: 46816 MiB
diff --git a/records/091025_Yarn/ReadMe.md b/records/091025_Yarn/ReadMe.md
new file mode 100644
index 000000000..745ee1742
--- /dev/null
+++ b/records/091025_Yarn/ReadMe.md
@@ -0,0 +1,98 @@
+This PR of 159.3s incorporates YaRN into the training window schedule and the final validation. https://arxiv.org/pdf/2309.00071
+This submission includes all recent WR improvements, including dropping the initial MLP layer by @EmelyanenkoK in [PR 120](https://github.com/KellerJordan/modded-nanogpt/pull/120).
+
+Longer attention windows take longer to train, but produce models with lower loss. Two phenomena occur in RoPE when the attention window is increased during or after training:
+1. Dimensions with low-frequency rotations experience unfamiliar rotation angles. For instance, a dimension that rotates 0.1 degrees per position will have experienced at most 0.1*384=38.4 degrees of rotation during training on ws 384. When the sliding window is expanded to 896, it experiences up to 89.6 degrees of rotation. This out-of-distribution input causes a temporary loss spike (see the sketch below).
+2. In particular when the K and Q vectors are normed, the perplexity of the attention distribution increases as the number of keys increases. Applying a scaling factor d inside softmax(d*QK) allows this perplexity to be controlled as the number of keys in the attention window grows.
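+To make phenomenon 1 concrete, here is a minimal standalone sketch (not from the record; head_dim=128 and block_size=128 are assumptions, chosen so that ws 3 blocks corresponds to 384 tokens and ws 7 to 896, matching the numbers above). It computes the largest rotation angle each RoPE frequency can see at the two window sizes:
+```
+import torch
+
+# assumed values: block_size=128 tokens, so ws 3 = 384 tokens and ws 7 = 896 tokens
+head_dim, block_size = 128, 128
+angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim // 4)
+
+for ws_blocks in (3, 7):
+    # largest rotation each dimension can see within the window, in degrees
+    max_rot_deg = torch.rad2deg(angular_freq * block_size * ws_blocks)
+    print(f"ws {ws_blocks}: {max_rot_deg.min().item():.1f} to {max_rot_deg.max().item():.1f} degrees")
+```
+The highest-frequency dimensions wrap around many full turns at either window size, so the spike is driven by the low-frequency dimensions whose cumulative rotation stays well under a full turn during training.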
+A single copy of the rotary embeddings is stored in the model root to reduce update time, reduce memory use, and potentially improve cache performance.
+```
+# store single copy of rotary tensors
+angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32)
+# half-truncate RoPE by @YouJiacheng (w/ base freq tuning)
+angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)])
+t = torch.arange(self.max_seq_len, dtype=torch.float32)
+theta = torch.outer(t, angular_freq)
+self.rotary_cos = nn.Buffer(theta.cos(), persistent=False)
+self.rotary_sin = nn.Buffer(theta.sin(), persistent=False)
+```
+
+Based on empirical testing, the 0.1 constant in the 0.1*log(curr/prev)+1 formula from YaRN is updated to 0.2.
+The constant attn_scale of 0.12 is updated to a starting value of 0.1, such that the distribution over training has a similar mean, ranging between 0.1 and 0.14.
+[figure: attention scale as a function of training step]
+
+```
+# scale attention factor f in attn=softmax(f*qk) logarithmically with window size
+windows = list(dict.fromkeys(args.ws_schedule + [args.ws_validate]))
+scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])]
+# start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
+attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor))
+self.attn_scales = dict(zip(windows, attn_scales))
+```
+For the window sequence 3 -> 7 -> 11 -> 13 this yields scales of roughly 0.100, 0.117, 0.128, and 0.132.
+
+YaRN has a straightforward implementation, shown below. alpha and beta are left at the default constants of 1 and 32 from the original YaRN paper, which was tuned for Llama. The frequency update incurred by YaRN is most notable for the ws 3->7 transition and for dimensions 5 to 10. For example, a dimension completing about 10 rotations at ws 3 gets interpolation_weight (10-1)/(32-1) ≈ 0.29, so its frequency is multiplied by 3/7 + 0.29*(1 - 3/7) ≈ 0.59 rather than the full 3/7 ≈ 0.43.
+[figure: YaRN frequency scaling per dimension at each window transition]
+```
+def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32):
+    rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi)
+    scaling_factor = old_window / new_window
+    interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1)
+    self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor)
+    t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device)
+    theta = torch.outer(t, self.angular_freq)
+    self.rotary_cos.copy_(theta.cos())
+    self.rotary_sin.copy_(theta.sin())
+```
+
+The arg ws_validate enables the model to be validated at a longer attention window than it was trained on. It is set to 13, which differs from the final training window size of 11. The window schedule is computed by get_ws (switch steps are worked through in the sketch below).
+[figure: validation loss as a function of final validation window size]
+
+```
+def get_ws(step: int):
+    if step == args.num_iterations:
+        return args.ws_validate
+    x = step / (1 + args.num_iterations)
+    assert 0 <= x < 1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return args.ws_schedule[ws_idx]
+```
+
+Attention args are batched to improve readability. cooldown_frac is increased from 0.45 to 0.5 to complement the reduction from 1705 to 1670 steps, following the heuristic of a fixed number of cooldown steps. Dropping below 1695 steps has a secondary benefit of eliminating the 9th file read, saving roughly 200ms.
+
+Without YaRN, there is a substantial spike in validation loss when the attention window is abruptly increased from 3 to 7.
+[figure: validation loss with and without YaRN at the ws 3->7 transition]
+
+Extending the final validation window from 11 to 13 improves loss by roughly 0.0015. Interestingly, odd increments perform substantially better. @varunneal has noted that "One thing to note is that floor division (ws_short = ws_long // 2) has different behavior for odd vs short window sizes. I generally found odd window sizes performed surprisingly better." The attention schedule follows (long/short) (3/1) -> (7/3) -> (11/5).
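+As a check on the schedule, here is a small standalone sketch (not from the record) that reuses get_ws with the values quoted in this ReadMe (ws_schedule=[3, 7, 11], ws_validate=13, num_iterations=1670) to locate the switch steps:
+```
+from types import SimpleNamespace
+
+# values quoted in this ReadMe; args stands in for the training script's config
+args = SimpleNamespace(ws_schedule=[3, 7, 11], ws_validate=13, num_iterations=1670)
+
+def get_ws(step: int):  # same logic as the snippet above
+    if step == args.num_iterations:
+        return args.ws_validate
+    x = step / (1 + args.num_iterations)
+    assert 0 <= x < 1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return args.ws_schedule[ws_idx]
+
+switches = [(s, get_ws(s)) for s in range(1, args.num_iterations + 1) if get_ws(s) != get_ws(s - 1)]
+print(switches)  # long window grows 3->7 and 7->11 at roughly steps 557 and 1114, then 13 at step 1670 for validation
+```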
+As for why odd window sizes help, it may be that the short attention window performs better when it is under 50% of the long window; or the model may learn to fit the long/short ratio and perform poorly when that ratio is substantially altered; or there may be a completely different explanation.
+
+Ablations were run to measure the impact of each change:
+* new_record
+* no_attn_scale. Keep the constant attn scale of 0.12.
+* no_freq_scale. Keep the constant rotary freq based on 1024^(0..1).
+* prior_record. Prior record with steps updated from 1705 to 1670 and cooldown frac to 0.5.
+[figure: validation loss curves for each ablation]
+
+Future Considerations:
+* Right now model training is like a racecar with no brakes. There may be a way to effectively dampen the optimizer state momentum terms when the model updates its attention window size and 'changes direction'. Preliminary testing here on only the Muon params gave negative results.
+* There may be a way to distribute the load of finding bos token indices across all 8 files. If each GPU is given 1 file instead of 8 to locate the bos_tokens, this could save up to roughly 200ms*7 = 1.4 seconds, assuming zero overhead.
+* Starting RoPE at a max angular frequency of 1 radian per position, or 57 degrees, seems arbitrary. However, increasing this to 180 degrees did not show an improvement in performance.
+* Plotting validation loss every 125 iterations masks critical issues like loss spikes on attn window updates. In general, more granular monitoring seems useful.
+
+Validation:
+```
+import scipy.stats
+import torch
+accs = [3.2779, 3.2779, 3.2789, 3.2778, 3.2789, 3.2785, 3.2806]
+times = [159.447, 158.998, 159.467, 159.191, 159.503, 159.259, 159.468]
+
+print('p=%.4f' % scipy.stats.ttest_1samp(accs, 3.28, alternative='less').pvalue)
+# p=0.0053
+
+print('acc:', torch.std_mean(torch.tensor(accs)))
+# acc: (tensor(0.0010), tensor(3.2786))
+
+print('time:', torch.std_mean(torch.tensor(times)))
+# time: (tensor(0.1897), tensor(159.3333))
+```
diff --git a/records/091025_Yarn/ef66c943-e262-400f-822f-068d397a1dc9.txt b/records/091025_Yarn/ef66c943-e262-400f-822f-068d397a1dc9.txt
new file mode 100644
index 000000000..ff868602b
--- /dev/null
+++ b/records/091025_Yarn/ef66c943-e262-400f-822f-068d397a1dc9.txt
@@ -0,0 +1,2863 @@
+import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math + +from dataclasses import dataclass +from functools import lru_cache +from itertools import accumulate +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_varlen_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous()
+ x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + 
configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + 
LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else 
it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor): + assert cos.size(0) >= x_BTHD.size(-3) + cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +@dataclass +class AttnArgs: + ve: torch.Tensor + sa_lambdas: torch.Tensor + seqlens: torch.Tensor + bm_size: int + rotary_cos: torch.Tensor + rotary_sin: torch.Tensor + attn_scale: float + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate = CastedLinear(12, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, attn_args): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "varlen sequences requires B == 1" + assert T % 16 == 0 + # unpack attention args + rotary_cos, rotary_sin = attn_args.rotary_cos, attn_args.rotary_sin + ve, 
sa_lambdas = attn_args.ve, attn_args.sa_lambdas + seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin) + if ve is not None: + v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @ KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = sa_lambdas[0] * v + + max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size)) + + # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng + y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len, + causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0)) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None + # skip MLP blocks for first MLP layer by @EmelyanenkoK + self.mlp = MLP(dim) if layer_idx != 0 else None + + def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), attn_args) + if self.mlp is not None: + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + 
self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + self.max_seq_len = max_seq_len + self.setup_yarn(head_dim) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def setup_yarn(self, head_dim: int): + # store single copy of rotary tensors + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32) + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)]) + t = torch.arange(self.max_seq_len, dtype=torch.float32) + theta = torch.outer(t, angular_freq) + self.rotary_cos = nn.Buffer(theta.cos(), persistent=False) + self.rotary_sin = nn.Buffer(theta.sin(), persistent=False) + self.angular_freq = angular_freq + + # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd + windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate])) + scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])] + # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor)) + self.attn_scales = dict(zip(windows, attn_scales)) + + def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32): + rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi) + scaling_factor = old_window / new_window + interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1) + self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor) + t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device) + theta = torch.outer(t, self.angular_freq) + self.rotary_cos.copy_(theta.cos()) + self.rotary_sin.copy_(theta.sin()) + + def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = ws * args.block_size, (ws // 2) * args.block_size
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            attn_args = AttnArgs(
+                ve=ve[i],
+                sa_lambdas=sa_lambdas[i],
+                seqlens=seqlens,
+                bm_size=bm_sizes[i],
+                rotary_cos=self.rotary_cos,
+                rotary_sin=self.rotary_sin,
+                attn_scale=self.attn_scales[ws]
+            )
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, x0, lambdas[i], attn_args)
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+BOS_ID = 50256
+
+class BOSFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1):
+        # Precompute BOS positions once per shard
+        self.size = tokens.numel()
+        self.bos_idx = (tokens == BOS_ID).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0
+        self.world_size = world_size
+
+    def next_batch(self, num_tokens_local: int, max_seq_len: int):
+        n = len(self.bos_idx)
+        starts = [[] for _ in range(self.world_size)]
+        ends = [[] for _ in range(self.world_size)]
+
+        idx = self.i
+        for r in range(self.world_size):
+            cur_len = 0
+            while cur_len <= num_tokens_local:
+                if idx >= n:
+                    raise StopIteration(f"Insufficient BOS tokens ahead of index {idx}; hit tail of shard.")
+                cur = self.bos_idx[idx]
+                starts[r].append(cur)
+                end = min(self.bos_idx[idx + 1] if idx + 1 < n else self.size,
+                          cur + max_seq_len,
+                          cur + num_tokens_local - cur_len + 1)
+                ends[r].append(end)
+                cur_len += end - cur
+                idx += 1
+
+        assert cur_len == num_tokens_local + 1
+        self.i = idx
+
+        return starts, ends
+
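+# A minimal sketch of the BOSFinder contract on a toy token stream. The
+# BOSFINDER_DEMO guard is a hypothetical env var (unset in real runs), so this
+# block is inert during training: next_batch returns, per rank, BOS-aligned
+# [start, end) spans covering exactly num_tokens_local + 1 tokens, where the
+# extra token supplies the shifted target sequence.
+if os.environ.get("BOSFINDER_DEMO"):
+    _toy = torch.tensor([BOS_ID, 1, 2, BOS_ID, 3, BOS_ID, 4, 5, 6, 7])
+    _starts, _ends = BOSFinder(_toy, world_size=1).next_batch(num_tokens_local=5, max_seq_len=4)
+    # Three spans begin at the BOS positions 0, 3, 5; the last span is cut short
+    # so that the spans sum to num_tokens_local + 1 = 6 tokens.
+    assert list(_starts[0]) == [0, 3, 5] and list(_ends[0]) == [3, 5, 6]
+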
+def distributed_data_generator(filename_pattern: str, num_tokens: int, max_seq_len: int, grad_accum_steps: int = 1, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning of Sequence token; sequences are truncated to max_seq_len
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert num_tokens % (world_size * grad_accum_steps) == 0, "Batch size must be divisible by world size times grad accum steps"
+    num_tokens = num_tokens // grad_accum_steps
+
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
+    tokens = _load_data_shard(next(file_iter))
+    finder = BOSFinder(tokens, world_size=world_size) if align_to_bos else None
+    pos = 0 # for unaligned case
+
+    while True:
+        num_tokens_local = num_tokens // world_size
+        max_num_docs = next_multiple_of_n(num_tokens_local // 300, n=128) # median doc length is ~400
+
+        if align_to_bos:
+            try:
+                seq_starts, seq_ends = finder.next_batch(num_tokens_local, max_seq_len)
+                start_idxs, end_idxs = torch.tensor(seq_starts[rank]), torch.tensor(seq_ends[rank])
+            except StopIteration:
+                # This shard is exhausted; load the next one on the next loop iteration.
+                tokens = _load_data_shard(next(file_iter))
+                finder = BOSFinder(tokens, world_size=world_size)
+                continue
+
+            buf = torch.cat([tokens[i:j] for i, j in zip(start_idxs, end_idxs)])
+            _inputs = buf[:-1]
+            _targets = buf[1:]
+            end_idxs[-1] -= 1 # last document span is one token too long, to account for the _targets offset
+            cum_lengths = (end_idxs - start_idxs).cumsum(0)
+
+        else:
+            if pos + num_tokens + 1 >= len(tokens): # should not occur for val data
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+
+            pos_local = pos + rank * num_tokens_local
+            buf = tokens[pos_local: pos_local + num_tokens_local + 1]
+            _inputs = buf[:-1].view(num_tokens_local, )
+            _targets = buf[1:].view(num_tokens_local, )
+
+            cum_lengths = torch.nonzero(_inputs == BOS_ID)[:, 0]
+            pos += num_tokens
+
+        _cum_lengths = torch.full((max_num_docs,), num_tokens_local)
+        _cum_lengths[0] = 0
+        _cum_lengths[1:len(cum_lengths) + 1] = cum_lengths
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True),
+            _cum_lengths.to(device="cuda", dtype=torch.int32, non_blocking=True)
+        )
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (num_tokens, max_seq_len, grad_accum_steps) via .send()
+            new_num_tokens, new_max_seq_len, new_grad_accum_steps = new_params
+            assert new_num_tokens % (world_size * grad_accum_steps) == 0, "Num tokens must be divisible by world size times grad accum steps"
+            num_tokens = new_num_tokens
+            max_seq_len = new_max_seq_len
+            grad_accum_steps = new_grad_accum_steps
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data?
it's important to keep this fixed for consistent comparisons
+    train_batch_size: int = 2048 * 24 * 8
+    train_max_seq_len: int = 128 * 16
+    val_batch_size: int = 4 * 64 * 1024 * 8
+    # optimization
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.5 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = f"yarn/{uuid.uuid4()}"
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    block_size: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+    ws_validate: int = 13 # increase final validation ws @classiclarryd
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    head_dim=128,
+    model_dim=768,
+    max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + return lr + +def get_ws(step: int): + if step == args.num_iterations: + return args.ws_validate + x = step / (1 + args.num_iterations) + assert 0 <= x < 1 + ws_idx = int(len(args.ws_schedule) * x) + return args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 30 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +for step in range(warmup_steps): + inputs, targets, cum_seqlens = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, cum_seqlens, ws).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_max_seq_len, grad_accum_steps=grad_accum_steps) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +ws = get_ws(0) +for step in range(train_steps + 1): + last_step = (step == train_steps) + new_ws = get_ws(step) + if new_ws != ws: + model.apply_yarn(ws, new_ws) + ws=new_ws + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % args.val_batch_size == 0 + val_steps = grad_accum_steps * args.val_tokens // args.val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_batch_size, -1, grad_accum_steps=grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets, cum_seqlens = next(val_loader) + val_loss += model(inputs, targets, cum_seqlens, ws) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 
1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets, cum_seqlens = next(train_loader) + model(inputs, targets, cum_seqlens, ws).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Thu Sep 11 04:05:30 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | Off | +| N/A 37C P0 120W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | Off | +| N/A 41C P0 125W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | Off | +| N/A 43C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | Off | +| N/A 36C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | Off | +| N/A 36C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | Off | +| N/A 43C P0 128W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | Off | +| N/A 41C P0 126W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | Off | +| N/A 37C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 64827 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 64828 C /usr/bin/python3 614MiB | +| 0 N/A N/A 64829 C /usr/bin/python3 614MiB | +| 0 N/A N/A 64830 C /usr/bin/python3 614MiB | +| 0 N/A N/A 64831 C /usr/bin/python3 614MiB | +| 0 N/A N/A 64832 C /usr/bin/python3 614MiB | +| 0 N/A N/A 64833 C /usr/bin/python3 614MiB | +| 0 N/A N/A 64834 C /usr/bin/python3 614MiB | +| 1 N/A N/A 64828 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 64829 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 64830 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 64831 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 64832 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 64833 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 64834 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1670 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1670 train_time:435ms step_avg:434.72ms +step:2/1670 train_time:459ms step_avg:229.31ms +step:3/1670 train_time:527ms step_avg:175.56ms +step:4/1670 train_time:617ms step_avg:154.28ms +step:5/1670 train_time:709ms step_avg:141.72ms +step:6/1670 train_time:800ms step_avg:133.38ms +step:7/1670 train_time:892ms step_avg:127.40ms 
+step:8/1670 train_time:984ms step_avg:123.03ms +step:9/1670 train_time:1076ms step_avg:119.60ms +step:10/1670 train_time:1168ms step_avg:116.80ms +step:11/1670 train_time:1260ms step_avg:114.50ms +step:12/1670 train_time:1353ms step_avg:112.79ms +step:13/1670 train_time:1451ms step_avg:111.61ms +step:14/1670 train_time:1544ms step_avg:110.28ms +step:15/1670 train_time:1637ms step_avg:109.10ms +step:16/1670 train_time:1729ms step_avg:108.08ms +step:17/1670 train_time:1821ms step_avg:107.13ms +step:18/1670 train_time:1914ms step_avg:106.31ms +step:19/1670 train_time:2006ms step_avg:105.58ms +step:20/1670 train_time:2098ms step_avg:104.89ms +step:21/1670 train_time:2190ms step_avg:104.28ms +step:22/1670 train_time:2282ms step_avg:103.74ms +step:23/1670 train_time:2376ms step_avg:103.31ms +step:24/1670 train_time:2471ms step_avg:102.95ms +step:25/1670 train_time:2564ms step_avg:102.57ms +step:26/1670 train_time:2657ms step_avg:102.21ms +step:27/1670 train_time:2750ms step_avg:101.86ms +step:28/1670 train_time:2843ms step_avg:101.54ms +step:29/1670 train_time:2937ms step_avg:101.27ms +step:30/1670 train_time:3029ms step_avg:100.97ms +step:31/1670 train_time:3121ms step_avg:100.69ms +step:32/1670 train_time:3213ms step_avg:100.42ms +step:33/1670 train_time:3306ms step_avg:100.18ms +step:34/1670 train_time:3400ms step_avg:100.01ms +step:35/1670 train_time:3494ms step_avg:99.82ms +step:36/1670 train_time:3586ms step_avg:99.62ms +step:37/1670 train_time:3681ms step_avg:99.48ms +step:38/1670 train_time:3773ms step_avg:99.30ms +step:39/1670 train_time:3865ms step_avg:99.11ms +step:40/1670 train_time:3959ms step_avg:98.97ms +step:41/1670 train_time:4051ms step_avg:98.81ms +step:42/1670 train_time:4144ms step_avg:98.67ms +step:43/1670 train_time:4237ms step_avg:98.53ms +step:44/1670 train_time:4329ms step_avg:98.39ms +step:45/1670 train_time:4422ms step_avg:98.26ms +step:46/1670 train_time:4515ms step_avg:98.15ms +step:47/1670 train_time:4608ms step_avg:98.05ms +step:48/1670 train_time:4702ms step_avg:97.95ms +step:49/1670 train_time:4794ms step_avg:97.84ms +step:50/1670 train_time:4887ms step_avg:97.73ms +step:51/1670 train_time:4980ms step_avg:97.65ms +step:52/1670 train_time:5073ms step_avg:97.56ms +step:53/1670 train_time:5165ms step_avg:97.46ms +step:54/1670 train_time:5258ms step_avg:97.38ms +step:55/1670 train_time:5350ms step_avg:97.27ms +step:56/1670 train_time:5443ms step_avg:97.19ms +step:57/1670 train_time:5535ms step_avg:97.10ms +step:58/1670 train_time:5628ms step_avg:97.03ms +step:59/1670 train_time:5721ms step_avg:96.96ms +step:60/1670 train_time:5814ms step_avg:96.89ms +step:61/1670 train_time:5907ms step_avg:96.84ms +step:62/1670 train_time:6001ms step_avg:96.79ms +step:63/1670 train_time:6094ms step_avg:96.73ms +step:64/1670 train_time:6186ms step_avg:96.65ms +step:65/1670 train_time:6278ms step_avg:96.58ms +step:66/1670 train_time:6371ms step_avg:96.53ms +step:67/1670 train_time:6463ms step_avg:96.47ms +step:68/1670 train_time:6556ms step_avg:96.41ms +step:69/1670 train_time:6649ms step_avg:96.36ms +step:70/1670 train_time:6741ms step_avg:96.30ms +step:71/1670 train_time:6834ms step_avg:96.25ms +step:72/1670 train_time:6928ms step_avg:96.22ms +step:73/1670 train_time:7021ms step_avg:96.18ms +step:74/1670 train_time:7114ms step_avg:96.13ms +step:75/1670 train_time:7207ms step_avg:96.09ms +step:76/1670 train_time:7300ms step_avg:96.05ms +step:77/1670 train_time:7392ms step_avg:96.00ms +step:78/1670 train_time:7485ms step_avg:95.97ms +step:79/1670 train_time:7578ms step_avg:95.93ms 
+step:80/1670 train_time:7671ms step_avg:95.89ms +step:81/1670 train_time:7763ms step_avg:95.84ms +step:82/1670 train_time:7856ms step_avg:95.80ms +step:83/1670 train_time:7948ms step_avg:95.76ms +step:84/1670 train_time:8041ms step_avg:95.72ms +step:85/1670 train_time:8134ms step_avg:95.70ms +step:86/1670 train_time:8226ms step_avg:95.65ms +step:87/1670 train_time:8319ms step_avg:95.62ms +step:88/1670 train_time:8411ms step_avg:95.59ms +step:89/1670 train_time:8503ms step_avg:95.54ms +step:90/1670 train_time:8596ms step_avg:95.51ms +step:91/1670 train_time:8688ms step_avg:95.47ms +step:92/1670 train_time:8781ms step_avg:95.45ms +step:93/1670 train_time:8874ms step_avg:95.42ms +step:94/1670 train_time:8967ms step_avg:95.39ms +step:95/1670 train_time:9059ms step_avg:95.36ms +step:96/1670 train_time:9151ms step_avg:95.33ms +step:97/1670 train_time:9244ms step_avg:95.30ms +step:98/1670 train_time:9336ms step_avg:95.27ms +step:99/1670 train_time:9429ms step_avg:95.24ms +step:100/1670 train_time:9521ms step_avg:95.21ms +step:101/1670 train_time:9613ms step_avg:95.18ms +step:102/1670 train_time:9706ms step_avg:95.15ms +step:103/1670 train_time:9798ms step_avg:95.13ms +step:104/1670 train_time:9891ms step_avg:95.11ms +step:105/1670 train_time:9983ms step_avg:95.07ms +step:106/1670 train_time:10076ms step_avg:95.05ms +step:107/1670 train_time:10169ms step_avg:95.03ms +step:108/1670 train_time:10260ms step_avg:95.00ms +step:109/1670 train_time:10353ms step_avg:94.98ms +step:110/1670 train_time:10446ms step_avg:94.96ms +step:111/1670 train_time:10538ms step_avg:94.93ms +step:112/1670 train_time:10630ms step_avg:94.91ms +step:113/1670 train_time:10722ms step_avg:94.89ms +step:114/1670 train_time:10815ms step_avg:94.86ms +step:115/1670 train_time:10908ms step_avg:94.85ms +step:116/1670 train_time:11000ms step_avg:94.83ms +step:117/1670 train_time:11092ms step_avg:94.80ms +step:118/1670 train_time:11185ms step_avg:94.79ms +step:119/1670 train_time:11277ms step_avg:94.76ms +step:120/1670 train_time:11370ms step_avg:94.75ms +step:121/1670 train_time:11462ms step_avg:94.72ms +step:122/1670 train_time:11554ms step_avg:94.71ms +step:123/1670 train_time:11647ms step_avg:94.69ms +step:124/1670 train_time:11740ms step_avg:94.68ms +step:125/1670 train_time:11833ms step_avg:94.66ms +step:125/1670 val_loss:4.3038 train_time:11924ms step_avg:95.39ms +step:126/1670 train_time:11949ms step_avg:94.83ms +step:127/1670 train_time:12021ms step_avg:94.66ms +step:128/1670 train_time:12124ms step_avg:94.72ms +step:129/1670 train_time:12222ms step_avg:94.74ms +step:130/1670 train_time:12314ms step_avg:94.73ms +step:131/1670 train_time:12407ms step_avg:94.71ms +step:132/1670 train_time:12499ms step_avg:94.69ms +step:133/1670 train_time:12590ms step_avg:94.67ms +step:134/1670 train_time:12682ms step_avg:94.64ms +step:135/1670 train_time:12773ms step_avg:94.62ms +step:136/1670 train_time:12865ms step_avg:94.59ms +step:137/1670 train_time:12957ms step_avg:94.58ms +step:138/1670 train_time:13050ms step_avg:94.57ms +step:139/1670 train_time:13144ms step_avg:94.56ms +step:140/1670 train_time:13237ms step_avg:94.55ms +step:141/1670 train_time:13330ms step_avg:94.54ms +step:142/1670 train_time:13423ms step_avg:94.53ms +step:143/1670 train_time:13515ms step_avg:94.51ms +step:144/1670 train_time:13606ms step_avg:94.49ms +step:145/1670 train_time:13698ms step_avg:94.47ms +step:146/1670 train_time:13790ms step_avg:94.45ms +step:147/1670 train_time:13882ms step_avg:94.43ms +step:148/1670 train_time:13974ms step_avg:94.42ms +step:149/1670 
train_time:14066ms step_avg:94.40ms +step:150/1670 train_time:14160ms step_avg:94.40ms +step:151/1670 train_time:14254ms step_avg:94.39ms +step:152/1670 train_time:14346ms step_avg:94.38ms +step:153/1670 train_time:14438ms step_avg:94.37ms +step:154/1670 train_time:14530ms step_avg:94.35ms +step:155/1670 train_time:14623ms step_avg:94.34ms +step:156/1670 train_time:14715ms step_avg:94.33ms +step:157/1670 train_time:14807ms step_avg:94.31ms +step:158/1670 train_time:14899ms step_avg:94.30ms +step:159/1670 train_time:14991ms step_avg:94.29ms +step:160/1670 train_time:15084ms step_avg:94.28ms +step:161/1670 train_time:15177ms step_avg:94.27ms +step:162/1670 train_time:15270ms step_avg:94.26ms +step:163/1670 train_time:15363ms step_avg:94.25ms +step:164/1670 train_time:15457ms step_avg:94.25ms +step:165/1670 train_time:15549ms step_avg:94.23ms +step:166/1670 train_time:15641ms step_avg:94.22ms +step:167/1670 train_time:15734ms step_avg:94.21ms +step:168/1670 train_time:15826ms step_avg:94.21ms +step:169/1670 train_time:15918ms step_avg:94.19ms +step:170/1670 train_time:16010ms step_avg:94.18ms +step:171/1670 train_time:16103ms step_avg:94.17ms +step:172/1670 train_time:16195ms step_avg:94.16ms +step:173/1670 train_time:16288ms step_avg:94.15ms +step:174/1670 train_time:16381ms step_avg:94.15ms +step:175/1670 train_time:16474ms step_avg:94.14ms +step:176/1670 train_time:16566ms step_avg:94.13ms +step:177/1670 train_time:16659ms step_avg:94.12ms +step:178/1670 train_time:16750ms step_avg:94.10ms +step:179/1670 train_time:16844ms step_avg:94.10ms +step:180/1670 train_time:16936ms step_avg:94.09ms +step:181/1670 train_time:17029ms step_avg:94.08ms +step:182/1670 train_time:17121ms step_avg:94.07ms +step:183/1670 train_time:17214ms step_avg:94.07ms +step:184/1670 train_time:17306ms step_avg:94.06ms +step:185/1670 train_time:17399ms step_avg:94.05ms +step:186/1670 train_time:17492ms step_avg:94.04ms +step:187/1670 train_time:17585ms step_avg:94.04ms +step:188/1670 train_time:17677ms step_avg:94.03ms +step:189/1670 train_time:17769ms step_avg:94.02ms +step:190/1670 train_time:17861ms step_avg:94.01ms +step:191/1670 train_time:17954ms step_avg:94.00ms +step:192/1670 train_time:18046ms step_avg:93.99ms +step:193/1670 train_time:18138ms step_avg:93.98ms +step:194/1670 train_time:18230ms step_avg:93.97ms +step:195/1670 train_time:18323ms step_avg:93.97ms +step:196/1670 train_time:18416ms step_avg:93.96ms +step:197/1670 train_time:18509ms step_avg:93.95ms +step:198/1670 train_time:18602ms step_avg:93.95ms +step:199/1670 train_time:18694ms step_avg:93.94ms +step:200/1670 train_time:18786ms step_avg:93.93ms +step:201/1670 train_time:18878ms step_avg:93.92ms +step:202/1670 train_time:18971ms step_avg:93.92ms +step:203/1670 train_time:19065ms step_avg:93.91ms +step:204/1670 train_time:19157ms step_avg:93.91ms +step:205/1670 train_time:19249ms step_avg:93.90ms +step:206/1670 train_time:19343ms step_avg:93.90ms +step:207/1670 train_time:19436ms step_avg:93.89ms +step:208/1670 train_time:19528ms step_avg:93.88ms +step:209/1670 train_time:19622ms step_avg:93.88ms +step:210/1670 train_time:19714ms step_avg:93.88ms +step:211/1670 train_time:19806ms step_avg:93.87ms +step:212/1670 train_time:19899ms step_avg:93.86ms +step:213/1670 train_time:20255ms step_avg:95.10ms +step:214/1670 train_time:20379ms step_avg:95.23ms +step:215/1670 train_time:20471ms step_avg:95.22ms +step:216/1670 train_time:20563ms step_avg:95.20ms +step:217/1670 train_time:20654ms step_avg:95.18ms +step:218/1670 train_time:20746ms step_avg:95.16ms 
+step:219/1670 train_time:20837ms step_avg:95.15ms +step:220/1670 train_time:20928ms step_avg:95.13ms +step:221/1670 train_time:21019ms step_avg:95.11ms +step:222/1670 train_time:21111ms step_avg:95.09ms +step:223/1670 train_time:21203ms step_avg:95.08ms +step:224/1670 train_time:21298ms step_avg:95.08ms +step:225/1670 train_time:21394ms step_avg:95.09ms +step:226/1670 train_time:21488ms step_avg:95.08ms +step:227/1670 train_time:21580ms step_avg:95.07ms +step:228/1670 train_time:21672ms step_avg:95.05ms +step:229/1670 train_time:21764ms step_avg:95.04ms +step:230/1670 train_time:21856ms step_avg:95.03ms +step:231/1670 train_time:21948ms step_avg:95.01ms +step:232/1670 train_time:22040ms step_avg:95.00ms +step:233/1670 train_time:22131ms step_avg:94.98ms +step:234/1670 train_time:22225ms step_avg:94.98ms +step:235/1670 train_time:22318ms step_avg:94.97ms +step:236/1670 train_time:22411ms step_avg:94.96ms +step:237/1670 train_time:22504ms step_avg:94.95ms +step:238/1670 train_time:22596ms step_avg:94.94ms +step:239/1670 train_time:22688ms step_avg:94.93ms +step:240/1670 train_time:22781ms step_avg:94.92ms +step:241/1670 train_time:22873ms step_avg:94.91ms +step:242/1670 train_time:22965ms step_avg:94.90ms +step:243/1670 train_time:23057ms step_avg:94.88ms +step:244/1670 train_time:23149ms step_avg:94.87ms +step:245/1670 train_time:23243ms step_avg:94.87ms +step:246/1670 train_time:23337ms step_avg:94.86ms +step:247/1670 train_time:23429ms step_avg:94.86ms +step:248/1670 train_time:23523ms step_avg:94.85ms +step:249/1670 train_time:23616ms step_avg:94.84ms +step:250/1670 train_time:23708ms step_avg:94.83ms +step:250/1670 val_loss:3.9639 train_time:23799ms step_avg:95.20ms +step:251/1670 train_time:23825ms step_avg:94.92ms +step:252/1670 train_time:23901ms step_avg:94.84ms +step:253/1670 train_time:24001ms step_avg:94.86ms +step:254/1670 train_time:24096ms step_avg:94.87ms +step:255/1670 train_time:24187ms step_avg:94.85ms +step:256/1670 train_time:24279ms step_avg:94.84ms +step:257/1670 train_time:24370ms step_avg:94.83ms +step:258/1670 train_time:24462ms step_avg:94.81ms +step:259/1670 train_time:24553ms step_avg:94.80ms +step:260/1670 train_time:24645ms step_avg:94.79ms +step:261/1670 train_time:24737ms step_avg:94.78ms +step:262/1670 train_time:24829ms step_avg:94.77ms +step:263/1670 train_time:24924ms step_avg:94.77ms +step:264/1670 train_time:25018ms step_avg:94.77ms +step:265/1670 train_time:25112ms step_avg:94.76ms +step:266/1670 train_time:25205ms step_avg:94.75ms +step:267/1670 train_time:25297ms step_avg:94.75ms +step:268/1670 train_time:25388ms step_avg:94.73ms +step:269/1670 train_time:25480ms step_avg:94.72ms +step:270/1670 train_time:25572ms step_avg:94.71ms +step:271/1670 train_time:25663ms step_avg:94.70ms +step:272/1670 train_time:25756ms step_avg:94.69ms +step:273/1670 train_time:25849ms step_avg:94.68ms +step:274/1670 train_time:25942ms step_avg:94.68ms +step:275/1670 train_time:26035ms step_avg:94.67ms +step:276/1670 train_time:26129ms step_avg:94.67ms +step:277/1670 train_time:26222ms step_avg:94.66ms +step:278/1670 train_time:26314ms step_avg:94.66ms +step:279/1670 train_time:26406ms step_avg:94.65ms +step:280/1670 train_time:26498ms step_avg:94.64ms +step:281/1670 train_time:26589ms step_avg:94.62ms +step:282/1670 train_time:26681ms step_avg:94.61ms +step:283/1670 train_time:26773ms step_avg:94.61ms +step:284/1670 train_time:26867ms step_avg:94.60ms +step:285/1670 train_time:26960ms step_avg:94.60ms +step:286/1670 train_time:27053ms step_avg:94.59ms +step:287/1670 
train_time:27147ms step_avg:94.59ms +step:288/1670 train_time:27239ms step_avg:94.58ms +step:289/1670 train_time:27332ms step_avg:94.57ms +step:290/1670 train_time:27424ms step_avg:94.57ms +step:291/1670 train_time:27516ms step_avg:94.56ms +step:292/1670 train_time:27608ms step_avg:94.55ms +step:293/1670 train_time:27701ms step_avg:94.54ms +step:294/1670 train_time:27793ms step_avg:94.53ms +step:295/1670 train_time:27885ms step_avg:94.53ms +step:296/1670 train_time:27978ms step_avg:94.52ms +step:297/1670 train_time:28071ms step_avg:94.51ms +step:298/1670 train_time:28163ms step_avg:94.51ms +step:299/1670 train_time:28255ms step_avg:94.50ms +step:300/1670 train_time:28348ms step_avg:94.49ms +step:301/1670 train_time:28440ms step_avg:94.48ms +step:302/1670 train_time:28532ms step_avg:94.48ms +step:303/1670 train_time:28625ms step_avg:94.47ms +step:304/1670 train_time:28717ms step_avg:94.46ms +step:305/1670 train_time:28809ms step_avg:94.46ms +step:306/1670 train_time:28903ms step_avg:94.45ms +step:307/1670 train_time:28995ms step_avg:94.45ms +step:308/1670 train_time:29088ms step_avg:94.44ms +step:309/1670 train_time:29181ms step_avg:94.44ms +step:310/1670 train_time:29275ms step_avg:94.44ms +step:311/1670 train_time:29367ms step_avg:94.43ms +step:312/1670 train_time:29460ms step_avg:94.42ms +step:313/1670 train_time:29553ms step_avg:94.42ms +step:314/1670 train_time:29645ms step_avg:94.41ms +step:315/1670 train_time:29737ms step_avg:94.40ms +step:316/1670 train_time:29830ms step_avg:94.40ms +step:317/1670 train_time:29922ms step_avg:94.39ms +step:318/1670 train_time:30015ms step_avg:94.39ms +step:319/1670 train_time:30108ms step_avg:94.38ms +step:320/1670 train_time:30201ms step_avg:94.38ms +step:321/1670 train_time:30294ms step_avg:94.37ms +step:322/1670 train_time:30386ms step_avg:94.37ms +step:323/1670 train_time:30479ms step_avg:94.36ms +step:324/1670 train_time:30571ms step_avg:94.35ms +step:325/1670 train_time:30663ms step_avg:94.35ms +step:326/1670 train_time:30755ms step_avg:94.34ms +step:327/1670 train_time:30848ms step_avg:94.34ms +step:328/1670 train_time:30940ms step_avg:94.33ms +step:329/1670 train_time:31033ms step_avg:94.32ms +step:330/1670 train_time:31125ms step_avg:94.32ms +step:331/1670 train_time:31218ms step_avg:94.31ms +step:332/1670 train_time:31311ms step_avg:94.31ms +step:333/1670 train_time:31403ms step_avg:94.30ms +step:334/1670 train_time:31495ms step_avg:94.30ms +step:335/1670 train_time:31587ms step_avg:94.29ms +step:336/1670 train_time:31679ms step_avg:94.28ms +step:337/1670 train_time:31772ms step_avg:94.28ms +step:338/1670 train_time:31864ms step_avg:94.27ms +step:339/1670 train_time:31957ms step_avg:94.27ms +step:340/1670 train_time:32050ms step_avg:94.26ms +step:341/1670 train_time:32142ms step_avg:94.26ms +step:342/1670 train_time:32234ms step_avg:94.25ms +step:343/1670 train_time:32327ms step_avg:94.25ms +step:344/1670 train_time:32419ms step_avg:94.24ms +step:345/1670 train_time:32512ms step_avg:94.24ms +step:346/1670 train_time:32604ms step_avg:94.23ms +step:347/1670 train_time:32696ms step_avg:94.23ms +step:348/1670 train_time:32789ms step_avg:94.22ms +step:349/1670 train_time:32881ms step_avg:94.22ms +step:350/1670 train_time:32974ms step_avg:94.21ms +step:351/1670 train_time:33067ms step_avg:94.21ms +step:352/1670 train_time:33160ms step_avg:94.20ms +step:353/1670 train_time:33253ms step_avg:94.20ms +step:354/1670 train_time:33345ms step_avg:94.20ms +step:355/1670 train_time:33437ms step_avg:94.19ms +step:356/1670 train_time:33530ms step_avg:94.19ms 
+step:357/1670 train_time:33622ms step_avg:94.18ms +step:358/1670 train_time:33715ms step_avg:94.18ms +step:359/1670 train_time:33807ms step_avg:94.17ms +step:360/1670 train_time:33899ms step_avg:94.16ms +step:361/1670 train_time:33992ms step_avg:94.16ms +step:362/1670 train_time:34085ms step_avg:94.16ms +step:363/1670 train_time:34178ms step_avg:94.15ms +step:364/1670 train_time:34271ms step_avg:94.15ms +step:365/1670 train_time:34362ms step_avg:94.14ms +step:366/1670 train_time:34455ms step_avg:94.14ms +step:367/1670 train_time:34548ms step_avg:94.14ms +step:368/1670 train_time:34640ms step_avg:94.13ms +step:369/1670 train_time:34733ms step_avg:94.13ms +step:370/1670 train_time:34825ms step_avg:94.12ms +step:371/1670 train_time:34918ms step_avg:94.12ms +step:372/1670 train_time:35010ms step_avg:94.11ms +step:373/1670 train_time:35104ms step_avg:94.11ms +step:374/1670 train_time:35195ms step_avg:94.11ms +step:375/1670 train_time:35288ms step_avg:94.10ms +step:375/1670 val_loss:3.8113 train_time:35378ms step_avg:94.34ms +step:376/1670 train_time:35403ms step_avg:94.16ms +step:377/1670 train_time:35478ms step_avg:94.11ms +step:378/1670 train_time:35577ms step_avg:94.12ms +step:379/1670 train_time:35673ms step_avg:94.12ms +step:380/1670 train_time:35765ms step_avg:94.12ms +step:381/1670 train_time:35857ms step_avg:94.11ms +step:382/1670 train_time:35949ms step_avg:94.11ms +step:383/1670 train_time:36040ms step_avg:94.10ms +step:384/1670 train_time:36132ms step_avg:94.09ms +step:385/1670 train_time:36224ms step_avg:94.09ms +step:386/1670 train_time:36315ms step_avg:94.08ms +step:387/1670 train_time:36407ms step_avg:94.08ms +step:388/1670 train_time:36502ms step_avg:94.08ms +step:389/1670 train_time:36597ms step_avg:94.08ms +step:390/1670 train_time:36690ms step_avg:94.08ms +step:391/1670 train_time:36782ms step_avg:94.07ms +step:392/1670 train_time:36874ms step_avg:94.07ms +step:393/1670 train_time:36967ms step_avg:94.06ms +step:394/1670 train_time:37059ms step_avg:94.06ms +step:395/1670 train_time:37151ms step_avg:94.05ms +step:396/1670 train_time:37242ms step_avg:94.05ms +step:397/1670 train_time:37333ms step_avg:94.04ms +step:398/1670 train_time:37426ms step_avg:94.03ms +step:399/1670 train_time:37521ms step_avg:94.04ms +step:400/1670 train_time:37615ms step_avg:94.04ms +step:401/1670 train_time:37708ms step_avg:94.03ms +step:402/1670 train_time:37802ms step_avg:94.03ms +step:403/1670 train_time:37894ms step_avg:94.03ms +step:404/1670 train_time:37986ms step_avg:94.02ms +step:405/1670 train_time:38078ms step_avg:94.02ms +step:406/1670 train_time:38170ms step_avg:94.01ms +step:407/1670 train_time:38261ms step_avg:94.01ms +step:408/1670 train_time:38353ms step_avg:94.00ms +step:409/1670 train_time:38446ms step_avg:94.00ms +step:410/1670 train_time:38539ms step_avg:94.00ms +step:411/1670 train_time:38632ms step_avg:94.00ms +step:412/1670 train_time:38725ms step_avg:93.99ms +step:413/1670 train_time:38819ms step_avg:93.99ms +step:414/1670 train_time:38912ms step_avg:93.99ms +step:415/1670 train_time:39004ms step_avg:93.99ms +step:416/1670 train_time:39095ms step_avg:93.98ms +step:417/1670 train_time:39187ms step_avg:93.97ms +step:418/1670 train_time:39280ms step_avg:93.97ms +step:419/1670 train_time:39373ms step_avg:93.97ms +step:420/1670 train_time:39464ms step_avg:93.96ms +step:421/1670 train_time:39557ms step_avg:93.96ms +step:422/1670 train_time:39650ms step_avg:93.96ms +step:423/1670 train_time:39743ms step_avg:93.95ms +step:424/1670 train_time:39835ms step_avg:93.95ms +step:425/1670 
train_time:40165ms step_avg:94.51ms +step:426/1670 train_time:40358ms step_avg:94.74ms +step:427/1670 train_time:40448ms step_avg:94.73ms +step:428/1670 train_time:40539ms step_avg:94.72ms +step:429/1670 train_time:40630ms step_avg:94.71ms +step:430/1670 train_time:40722ms step_avg:94.70ms +step:431/1670 train_time:40814ms step_avg:94.70ms +step:432/1670 train_time:40905ms step_avg:94.69ms +step:433/1670 train_time:40997ms step_avg:94.68ms +step:434/1670 train_time:41088ms step_avg:94.67ms +step:435/1670 train_time:41182ms step_avg:94.67ms +step:436/1670 train_time:41276ms step_avg:94.67ms +step:437/1670 train_time:41374ms step_avg:94.68ms +step:438/1670 train_time:41467ms step_avg:94.67ms +step:439/1670 train_time:41560ms step_avg:94.67ms +step:440/1670 train_time:41652ms step_avg:94.66ms +step:441/1670 train_time:41744ms step_avg:94.66ms +step:442/1670 train_time:41835ms step_avg:94.65ms +step:443/1670 train_time:41931ms step_avg:94.65ms +step:444/1670 train_time:42024ms step_avg:94.65ms +step:445/1670 train_time:42116ms step_avg:94.64ms +step:446/1670 train_time:42208ms step_avg:94.64ms +step:447/1670 train_time:42297ms step_avg:94.63ms +step:448/1670 train_time:42391ms step_avg:94.62ms +step:449/1670 train_time:42484ms step_avg:94.62ms +step:450/1670 train_time:42577ms step_avg:94.62ms +step:451/1670 train_time:42670ms step_avg:94.61ms +step:452/1670 train_time:42762ms step_avg:94.61ms +step:453/1670 train_time:42854ms step_avg:94.60ms +step:454/1670 train_time:42945ms step_avg:94.59ms +step:455/1670 train_time:43037ms step_avg:94.59ms +step:456/1670 train_time:43129ms step_avg:94.58ms +step:457/1670 train_time:43223ms step_avg:94.58ms +step:458/1670 train_time:43316ms step_avg:94.58ms +step:459/1670 train_time:43408ms step_avg:94.57ms +step:460/1670 train_time:43501ms step_avg:94.57ms +step:461/1670 train_time:43594ms step_avg:94.56ms +step:462/1670 train_time:43686ms step_avg:94.56ms +step:463/1670 train_time:43779ms step_avg:94.56ms +step:464/1670 train_time:43872ms step_avg:94.55ms +step:465/1670 train_time:43964ms step_avg:94.55ms +step:466/1670 train_time:44055ms step_avg:94.54ms +step:467/1670 train_time:44148ms step_avg:94.54ms +step:468/1670 train_time:44241ms step_avg:94.53ms +step:469/1670 train_time:44334ms step_avg:94.53ms +step:470/1670 train_time:44427ms step_avg:94.52ms +step:471/1670 train_time:44520ms step_avg:94.52ms +step:472/1670 train_time:44613ms step_avg:94.52ms +step:473/1670 train_time:44705ms step_avg:94.51ms +step:474/1670 train_time:44798ms step_avg:94.51ms +step:475/1670 train_time:44891ms step_avg:94.51ms +step:476/1670 train_time:44983ms step_avg:94.50ms +step:477/1670 train_time:45075ms step_avg:94.50ms +step:478/1670 train_time:45168ms step_avg:94.49ms +step:479/1670 train_time:45259ms step_avg:94.49ms +step:480/1670 train_time:45352ms step_avg:94.48ms +step:481/1670 train_time:45444ms step_avg:94.48ms +step:482/1670 train_time:45537ms step_avg:94.47ms +step:483/1670 train_time:45629ms step_avg:94.47ms +step:484/1670 train_time:45723ms step_avg:94.47ms +step:485/1670 train_time:45816ms step_avg:94.47ms +step:486/1670 train_time:45908ms step_avg:94.46ms +step:487/1670 train_time:46000ms step_avg:94.46ms +step:488/1670 train_time:46092ms step_avg:94.45ms +step:489/1670 train_time:46185ms step_avg:94.45ms +step:490/1670 train_time:46277ms step_avg:94.44ms +step:491/1670 train_time:46369ms step_avg:94.44ms +step:492/1670 train_time:46462ms step_avg:94.44ms +step:493/1670 train_time:46554ms step_avg:94.43ms +step:494/1670 train_time:46647ms step_avg:94.43ms 
+step:495/1670 train_time:46740ms step_avg:94.43ms +step:496/1670 train_time:46834ms step_avg:94.42ms +step:497/1670 train_time:46926ms step_avg:94.42ms +step:498/1670 train_time:47019ms step_avg:94.42ms +step:499/1670 train_time:47113ms step_avg:94.41ms +step:500/1670 train_time:47204ms step_avg:94.41ms +step:500/1670 val_loss:3.7121 train_time:47295ms step_avg:94.59ms +step:501/1670 train_time:47320ms step_avg:94.45ms +step:502/1670 train_time:47397ms step_avg:94.42ms +step:503/1670 train_time:47494ms step_avg:94.42ms +step:504/1670 train_time:47588ms step_avg:94.42ms +step:505/1670 train_time:47680ms step_avg:94.42ms +step:506/1670 train_time:47772ms step_avg:94.41ms +step:507/1670 train_time:47863ms step_avg:94.40ms +step:508/1670 train_time:47954ms step_avg:94.40ms +step:509/1670 train_time:48045ms step_avg:94.39ms +step:510/1670 train_time:48137ms step_avg:94.39ms +step:511/1670 train_time:48229ms step_avg:94.38ms +step:512/1670 train_time:48321ms step_avg:94.38ms +step:513/1670 train_time:48415ms step_avg:94.38ms +step:514/1670 train_time:48508ms step_avg:94.37ms +step:515/1670 train_time:48602ms step_avg:94.37ms +step:516/1670 train_time:48694ms step_avg:94.37ms +step:517/1670 train_time:48786ms step_avg:94.36ms +step:518/1670 train_time:48879ms step_avg:94.36ms +step:519/1670 train_time:48971ms step_avg:94.36ms +step:520/1670 train_time:49062ms step_avg:94.35ms +step:521/1670 train_time:49154ms step_avg:94.34ms +step:522/1670 train_time:49245ms step_avg:94.34ms +step:523/1670 train_time:49339ms step_avg:94.34ms +step:524/1670 train_time:49432ms step_avg:94.34ms +step:525/1670 train_time:49526ms step_avg:94.34ms +step:526/1670 train_time:49619ms step_avg:94.33ms +step:527/1670 train_time:49711ms step_avg:94.33ms +step:528/1670 train_time:49804ms step_avg:94.33ms +step:529/1670 train_time:49896ms step_avg:94.32ms +step:530/1670 train_time:49989ms step_avg:94.32ms +step:531/1670 train_time:50081ms step_avg:94.31ms +step:532/1670 train_time:50173ms step_avg:94.31ms +step:533/1670 train_time:50265ms step_avg:94.31ms +step:534/1670 train_time:50357ms step_avg:94.30ms +step:535/1670 train_time:50451ms step_avg:94.30ms +step:536/1670 train_time:50544ms step_avg:94.30ms +step:537/1670 train_time:50636ms step_avg:94.29ms +step:538/1670 train_time:50729ms step_avg:94.29ms +step:539/1670 train_time:50822ms step_avg:94.29ms +step:540/1670 train_time:50914ms step_avg:94.29ms +step:541/1670 train_time:51006ms step_avg:94.28ms +step:542/1670 train_time:51098ms step_avg:94.28ms +step:543/1670 train_time:51191ms step_avg:94.27ms +step:544/1670 train_time:51283ms step_avg:94.27ms +step:545/1670 train_time:51375ms step_avg:94.27ms +step:546/1670 train_time:51469ms step_avg:94.27ms +step:547/1670 train_time:51562ms step_avg:94.26ms +step:548/1670 train_time:51655ms step_avg:94.26ms +step:549/1670 train_time:51748ms step_avg:94.26ms +step:550/1670 train_time:51841ms step_avg:94.26ms +step:551/1670 train_time:51934ms step_avg:94.25ms +step:552/1670 train_time:52026ms step_avg:94.25ms +step:553/1670 train_time:52118ms step_avg:94.25ms +step:554/1670 train_time:52210ms step_avg:94.24ms +step:555/1670 train_time:52304ms step_avg:94.24ms +step:556/1670 train_time:52396ms step_avg:94.24ms +step:557/1670 train_time:52488ms step_avg:94.23ms +step:558/1670 train_time:52690ms step_avg:94.43ms +step:559/1670 train_time:52758ms step_avg:94.38ms +step:560/1670 train_time:52851ms step_avg:94.38ms +step:561/1670 train_time:52943ms step_avg:94.37ms +step:562/1670 train_time:53036ms step_avg:94.37ms +step:563/1670 
train_time:53129ms step_avg:94.37ms +step:564/1670 train_time:53221ms step_avg:94.36ms +step:565/1670 train_time:53314ms step_avg:94.36ms +step:566/1670 train_time:53407ms step_avg:94.36ms +step:567/1670 train_time:53500ms step_avg:94.36ms +step:568/1670 train_time:53598ms step_avg:94.36ms +step:569/1670 train_time:53695ms step_avg:94.37ms +step:570/1670 train_time:53789ms step_avg:94.37ms +step:571/1670 train_time:53883ms step_avg:94.37ms +step:572/1670 train_time:53975ms step_avg:94.36ms +step:573/1670 train_time:54068ms step_avg:94.36ms +step:574/1670 train_time:54161ms step_avg:94.36ms +step:575/1670 train_time:54254ms step_avg:94.36ms +step:576/1670 train_time:54347ms step_avg:94.35ms +step:577/1670 train_time:54440ms step_avg:94.35ms +step:578/1670 train_time:54535ms step_avg:94.35ms +step:579/1670 train_time:54631ms step_avg:94.35ms +step:580/1670 train_time:54726ms step_avg:94.36ms +step:581/1670 train_time:54821ms step_avg:94.36ms +step:582/1670 train_time:54914ms step_avg:94.35ms +step:583/1670 train_time:55008ms step_avg:94.35ms +step:584/1670 train_time:55101ms step_avg:94.35ms +step:585/1670 train_time:55195ms step_avg:94.35ms +step:586/1670 train_time:55287ms step_avg:94.35ms +step:587/1670 train_time:55380ms step_avg:94.34ms +step:588/1670 train_time:55474ms step_avg:94.34ms +step:589/1670 train_time:55568ms step_avg:94.34ms +step:590/1670 train_time:55663ms step_avg:94.34ms +step:591/1670 train_time:55757ms step_avg:94.34ms +step:592/1670 train_time:55851ms step_avg:94.34ms +step:593/1670 train_time:55944ms step_avg:94.34ms +step:594/1670 train_time:56038ms step_avg:94.34ms +step:595/1670 train_time:56132ms step_avg:94.34ms +step:596/1670 train_time:56224ms step_avg:94.34ms +step:597/1670 train_time:56318ms step_avg:94.33ms +step:598/1670 train_time:56411ms step_avg:94.33ms +step:599/1670 train_time:56505ms step_avg:94.33ms +step:600/1670 train_time:56599ms step_avg:94.33ms +step:601/1670 train_time:56692ms step_avg:94.33ms +step:602/1670 train_time:56786ms step_avg:94.33ms +step:603/1670 train_time:56880ms step_avg:94.33ms +step:604/1670 train_time:56974ms step_avg:94.33ms +step:605/1670 train_time:57067ms step_avg:94.33ms +step:606/1670 train_time:57161ms step_avg:94.33ms +step:607/1670 train_time:57255ms step_avg:94.32ms +step:608/1670 train_time:57348ms step_avg:94.32ms +step:609/1670 train_time:57442ms step_avg:94.32ms +step:610/1670 train_time:57535ms step_avg:94.32ms +step:611/1670 train_time:57629ms step_avg:94.32ms +step:612/1670 train_time:57723ms step_avg:94.32ms +step:613/1670 train_time:57816ms step_avg:94.32ms +step:614/1670 train_time:57910ms step_avg:94.32ms +step:615/1670 train_time:58004ms step_avg:94.32ms +step:616/1670 train_time:58098ms step_avg:94.31ms +step:617/1670 train_time:58191ms step_avg:94.31ms +step:618/1670 train_time:58284ms step_avg:94.31ms +step:619/1670 train_time:58378ms step_avg:94.31ms +step:620/1670 train_time:58472ms step_avg:94.31ms +step:621/1670 train_time:58566ms step_avg:94.31ms +step:622/1670 train_time:58661ms step_avg:94.31ms +step:623/1670 train_time:58754ms step_avg:94.31ms +step:624/1670 train_time:58847ms step_avg:94.31ms +step:625/1670 train_time:58941ms step_avg:94.31ms +step:625/1670 val_loss:3.6111 train_time:59034ms step_avg:94.45ms +step:626/1670 train_time:59059ms step_avg:94.34ms +step:627/1670 train_time:59141ms step_avg:94.32ms +step:628/1670 train_time:59238ms step_avg:94.33ms +step:629/1670 train_time:59333ms step_avg:94.33ms +step:630/1670 train_time:59426ms step_avg:94.33ms +step:631/1670 train_time:59519ms 
step_avg:94.32ms +step:632/1670 train_time:59612ms step_avg:94.32ms +step:633/1670 train_time:59704ms step_avg:94.32ms +step:634/1670 train_time:59797ms step_avg:94.32ms +step:635/1670 train_time:59889ms step_avg:94.31ms +step:636/1670 train_time:59982ms step_avg:94.31ms +step:637/1670 train_time:60078ms step_avg:94.31ms +step:638/1670 train_time:60175ms step_avg:94.32ms +step:639/1670 train_time:60618ms step_avg:94.86ms +step:640/1670 train_time:60698ms step_avg:94.84ms +step:641/1670 train_time:60790ms step_avg:94.84ms +step:642/1670 train_time:60883ms step_avg:94.83ms +step:643/1670 train_time:60976ms step_avg:94.83ms +step:644/1670 train_time:61069ms step_avg:94.83ms +step:645/1670 train_time:61161ms step_avg:94.82ms +step:646/1670 train_time:61253ms step_avg:94.82ms +step:647/1670 train_time:61346ms step_avg:94.82ms +step:648/1670 train_time:61439ms step_avg:94.81ms +step:649/1670 train_time:61536ms step_avg:94.82ms +step:650/1670 train_time:61635ms step_avg:94.82ms +step:651/1670 train_time:61730ms step_avg:94.82ms +step:652/1670 train_time:61823ms step_avg:94.82ms +step:653/1670 train_time:61916ms step_avg:94.82ms +step:654/1670 train_time:62009ms step_avg:94.81ms +step:655/1670 train_time:62101ms step_avg:94.81ms +step:656/1670 train_time:62194ms step_avg:94.81ms +step:657/1670 train_time:62286ms step_avg:94.80ms +step:658/1670 train_time:62379ms step_avg:94.80ms +step:659/1670 train_time:62474ms step_avg:94.80ms +step:660/1670 train_time:62569ms step_avg:94.80ms +step:661/1670 train_time:62665ms step_avg:94.80ms +step:662/1670 train_time:62758ms step_avg:94.80ms +step:663/1670 train_time:62852ms step_avg:94.80ms +step:664/1670 train_time:62946ms step_avg:94.80ms +step:665/1670 train_time:63039ms step_avg:94.80ms +step:666/1670 train_time:63132ms step_avg:94.79ms +step:667/1670 train_time:63225ms step_avg:94.79ms +step:668/1670 train_time:63318ms step_avg:94.79ms +step:669/1670 train_time:63412ms step_avg:94.79ms +step:670/1670 train_time:63506ms step_avg:94.79ms +step:671/1670 train_time:63601ms step_avg:94.79ms +step:672/1670 train_time:63694ms step_avg:94.78ms +step:673/1670 train_time:63789ms step_avg:94.78ms +step:674/1670 train_time:63882ms step_avg:94.78ms +step:675/1670 train_time:63976ms step_avg:94.78ms +step:676/1670 train_time:64069ms step_avg:94.78ms +step:677/1670 train_time:64163ms step_avg:94.78ms +step:678/1670 train_time:64256ms step_avg:94.77ms +step:679/1670 train_time:64350ms step_avg:94.77ms +step:680/1670 train_time:64443ms step_avg:94.77ms +step:681/1670 train_time:64537ms step_avg:94.77ms +step:682/1670 train_time:64631ms step_avg:94.77ms +step:683/1670 train_time:64725ms step_avg:94.77ms +step:684/1670 train_time:64819ms step_avg:94.76ms +step:685/1670 train_time:64912ms step_avg:94.76ms +step:686/1670 train_time:65006ms step_avg:94.76ms +step:687/1670 train_time:65099ms step_avg:94.76ms +step:688/1670 train_time:65193ms step_avg:94.76ms +step:689/1670 train_time:65285ms step_avg:94.75ms +step:690/1670 train_time:65379ms step_avg:94.75ms +step:691/1670 train_time:65474ms step_avg:94.75ms +step:692/1670 train_time:65567ms step_avg:94.75ms +step:693/1670 train_time:65661ms step_avg:94.75ms +step:694/1670 train_time:65755ms step_avg:94.75ms +step:695/1670 train_time:65849ms step_avg:94.75ms +step:696/1670 train_time:65943ms step_avg:94.75ms +step:697/1670 train_time:66036ms step_avg:94.74ms +step:698/1670 train_time:66130ms step_avg:94.74ms +step:699/1670 train_time:66223ms step_avg:94.74ms +step:700/1670 train_time:66316ms step_avg:94.74ms +step:701/1670 
train_time:66410ms step_avg:94.74ms +step:702/1670 train_time:66504ms step_avg:94.73ms +step:703/1670 train_time:66598ms step_avg:94.73ms +step:704/1670 train_time:66692ms step_avg:94.73ms +step:705/1670 train_time:66785ms step_avg:94.73ms +step:706/1670 train_time:66879ms step_avg:94.73ms +step:707/1670 train_time:66973ms step_avg:94.73ms +step:708/1670 train_time:67067ms step_avg:94.73ms +step:709/1670 train_time:67161ms step_avg:94.73ms +step:710/1670 train_time:67254ms step_avg:94.72ms +step:711/1670 train_time:67348ms step_avg:94.72ms +step:712/1670 train_time:67442ms step_avg:94.72ms +step:713/1670 train_time:67535ms step_avg:94.72ms +step:714/1670 train_time:67629ms step_avg:94.72ms +step:715/1670 train_time:67723ms step_avg:94.72ms +step:716/1670 train_time:67816ms step_avg:94.72ms +step:717/1670 train_time:67910ms step_avg:94.71ms +step:718/1670 train_time:68004ms step_avg:94.71ms +step:719/1670 train_time:68098ms step_avg:94.71ms +step:720/1670 train_time:68192ms step_avg:94.71ms +step:721/1670 train_time:68286ms step_avg:94.71ms +step:722/1670 train_time:68381ms step_avg:94.71ms +step:723/1670 train_time:68474ms step_avg:94.71ms +step:724/1670 train_time:68568ms step_avg:94.71ms +step:725/1670 train_time:68661ms step_avg:94.70ms +step:726/1670 train_time:68754ms step_avg:94.70ms +step:727/1670 train_time:68848ms step_avg:94.70ms +step:728/1670 train_time:68943ms step_avg:94.70ms +step:729/1670 train_time:69038ms step_avg:94.70ms +step:730/1670 train_time:69132ms step_avg:94.70ms +step:731/1670 train_time:69226ms step_avg:94.70ms +step:732/1670 train_time:69320ms step_avg:94.70ms +step:733/1670 train_time:69414ms step_avg:94.70ms +step:734/1670 train_time:69507ms step_avg:94.70ms +step:735/1670 train_time:69602ms step_avg:94.70ms +step:736/1670 train_time:69695ms step_avg:94.69ms +step:737/1670 train_time:69789ms step_avg:94.69ms +step:738/1670 train_time:69884ms step_avg:94.69ms +step:739/1670 train_time:69977ms step_avg:94.69ms +step:740/1670 train_time:70071ms step_avg:94.69ms +step:741/1670 train_time:70166ms step_avg:94.69ms +step:742/1670 train_time:70259ms step_avg:94.69ms +step:743/1670 train_time:70353ms step_avg:94.69ms +step:744/1670 train_time:70446ms step_avg:94.69ms +step:745/1670 train_time:70539ms step_avg:94.68ms +step:746/1670 train_time:70633ms step_avg:94.68ms +step:747/1670 train_time:70726ms step_avg:94.68ms +step:748/1670 train_time:70819ms step_avg:94.68ms +step:749/1670 train_time:70912ms step_avg:94.68ms +step:750/1670 train_time:71007ms step_avg:94.68ms +step:750/1670 val_loss:3.5617 train_time:71099ms step_avg:94.80ms +step:751/1670 train_time:71124ms step_avg:94.71ms +step:752/1670 train_time:71201ms step_avg:94.68ms +step:753/1670 train_time:71302ms step_avg:94.69ms +step:754/1670 train_time:71397ms step_avg:94.69ms +step:755/1670 train_time:71490ms step_avg:94.69ms +step:756/1670 train_time:71583ms step_avg:94.69ms +step:757/1670 train_time:71675ms step_avg:94.68ms +step:758/1670 train_time:71768ms step_avg:94.68ms +step:759/1670 train_time:71861ms step_avg:94.68ms +step:760/1670 train_time:71953ms step_avg:94.68ms +step:761/1670 train_time:72047ms step_avg:94.67ms +step:762/1670 train_time:72142ms step_avg:94.67ms +step:763/1670 train_time:72239ms step_avg:94.68ms +step:764/1670 train_time:72336ms step_avg:94.68ms +step:765/1670 train_time:72429ms step_avg:94.68ms +step:766/1670 train_time:72522ms step_avg:94.68ms +step:767/1670 train_time:72616ms step_avg:94.68ms +step:768/1670 train_time:72709ms step_avg:94.67ms +step:769/1670 train_time:72802ms 
step_avg:94.67ms +step:770/1670 train_time:72894ms step_avg:94.67ms +step:771/1670 train_time:72987ms step_avg:94.67ms +step:772/1670 train_time:73081ms step_avg:94.66ms +step:773/1670 train_time:73176ms step_avg:94.66ms +step:774/1670 train_time:73271ms step_avg:94.67ms +step:775/1670 train_time:73366ms step_avg:94.67ms +step:776/1670 train_time:73461ms step_avg:94.67ms +step:777/1670 train_time:73554ms step_avg:94.66ms +step:778/1670 train_time:73647ms step_avg:94.66ms +step:779/1670 train_time:73742ms step_avg:94.66ms +step:780/1670 train_time:73835ms step_avg:94.66ms +step:781/1670 train_time:73929ms step_avg:94.66ms +step:782/1670 train_time:74021ms step_avg:94.66ms +step:783/1670 train_time:74115ms step_avg:94.65ms +step:784/1670 train_time:74209ms step_avg:94.65ms +step:785/1670 train_time:74302ms step_avg:94.65ms +step:786/1670 train_time:74397ms step_avg:94.65ms +step:787/1670 train_time:74491ms step_avg:94.65ms +step:788/1670 train_time:74584ms step_avg:94.65ms +step:789/1670 train_time:74678ms step_avg:94.65ms +step:790/1670 train_time:74771ms step_avg:94.65ms +step:791/1670 train_time:74865ms step_avg:94.65ms +step:792/1670 train_time:74959ms step_avg:94.64ms +step:793/1670 train_time:75053ms step_avg:94.64ms +step:794/1670 train_time:75146ms step_avg:94.64ms +step:795/1670 train_time:75240ms step_avg:94.64ms +step:796/1670 train_time:75334ms step_avg:94.64ms +step:797/1670 train_time:75428ms step_avg:94.64ms +step:798/1670 train_time:75522ms step_avg:94.64ms +step:799/1670 train_time:75615ms step_avg:94.64ms +step:800/1670 train_time:75709ms step_avg:94.64ms +step:801/1670 train_time:75803ms step_avg:94.64ms +step:802/1670 train_time:75896ms step_avg:94.63ms +step:803/1670 train_time:75990ms step_avg:94.63ms +step:804/1670 train_time:76083ms step_avg:94.63ms +step:805/1670 train_time:76178ms step_avg:94.63ms +step:806/1670 train_time:76271ms step_avg:94.63ms +step:807/1670 train_time:76364ms step_avg:94.63ms +step:808/1670 train_time:76458ms step_avg:94.63ms +step:809/1670 train_time:76551ms step_avg:94.62ms +step:810/1670 train_time:76646ms step_avg:94.62ms +step:811/1670 train_time:76739ms step_avg:94.62ms +step:812/1670 train_time:76832ms step_avg:94.62ms +step:813/1670 train_time:76925ms step_avg:94.62ms +step:814/1670 train_time:77019ms step_avg:94.62ms +step:815/1670 train_time:77112ms step_avg:94.62ms +step:816/1670 train_time:77207ms step_avg:94.62ms +step:817/1670 train_time:77301ms step_avg:94.62ms +step:818/1670 train_time:77394ms step_avg:94.61ms +step:819/1670 train_time:77488ms step_avg:94.61ms +step:820/1670 train_time:77582ms step_avg:94.61ms +step:821/1670 train_time:77676ms step_avg:94.61ms +step:822/1670 train_time:77770ms step_avg:94.61ms +step:823/1670 train_time:77864ms step_avg:94.61ms +step:824/1670 train_time:77958ms step_avg:94.61ms +step:825/1670 train_time:78051ms step_avg:94.61ms +step:826/1670 train_time:78145ms step_avg:94.61ms +step:827/1670 train_time:78240ms step_avg:94.61ms +step:828/1670 train_time:78334ms step_avg:94.61ms +step:829/1670 train_time:78427ms step_avg:94.60ms +step:830/1670 train_time:78521ms step_avg:94.60ms +step:831/1670 train_time:78614ms step_avg:94.60ms +step:832/1670 train_time:78709ms step_avg:94.60ms +step:833/1670 train_time:78802ms step_avg:94.60ms +step:834/1670 train_time:78896ms step_avg:94.60ms +step:835/1670 train_time:78989ms step_avg:94.60ms +step:836/1670 train_time:79083ms step_avg:94.60ms +step:837/1670 train_time:79178ms step_avg:94.60ms +step:838/1670 train_time:79271ms step_avg:94.60ms +step:839/1670 
train_time:79365ms step_avg:94.59ms +step:840/1670 train_time:79459ms step_avg:94.59ms +step:841/1670 train_time:79553ms step_avg:94.59ms +step:842/1670 train_time:79646ms step_avg:94.59ms +step:843/1670 train_time:79740ms step_avg:94.59ms +step:844/1670 train_time:79833ms step_avg:94.59ms +step:845/1670 train_time:79927ms step_avg:94.59ms +step:846/1670 train_time:80021ms step_avg:94.59ms +step:847/1670 train_time:80115ms step_avg:94.59ms +step:848/1670 train_time:80209ms step_avg:94.59ms +step:849/1670 train_time:80303ms step_avg:94.59ms +step:850/1670 train_time:80397ms step_avg:94.58ms +step:851/1670 train_time:80815ms step_avg:94.97ms +step:852/1670 train_time:80917ms step_avg:94.97ms +step:853/1670 train_time:81009ms step_avg:94.97ms +step:854/1670 train_time:81102ms step_avg:94.97ms +step:855/1670 train_time:81195ms step_avg:94.96ms +step:856/1670 train_time:81287ms step_avg:94.96ms +step:857/1670 train_time:81381ms step_avg:94.96ms +step:858/1670 train_time:81473ms step_avg:94.96ms +step:859/1670 train_time:81566ms step_avg:94.95ms +step:860/1670 train_time:81659ms step_avg:94.95ms +step:861/1670 train_time:81755ms step_avg:94.95ms +step:862/1670 train_time:81853ms step_avg:94.96ms +step:863/1670 train_time:81950ms step_avg:94.96ms +step:864/1670 train_time:82046ms step_avg:94.96ms +step:865/1670 train_time:82138ms step_avg:94.96ms +step:866/1670 train_time:82232ms step_avg:94.96ms +step:867/1670 train_time:82325ms step_avg:94.95ms +step:868/1670 train_time:82418ms step_avg:94.95ms +step:869/1670 train_time:82511ms step_avg:94.95ms +step:870/1670 train_time:82604ms step_avg:94.95ms +step:871/1670 train_time:82698ms step_avg:94.95ms +step:872/1670 train_time:82794ms step_avg:94.95ms +step:873/1670 train_time:82888ms step_avg:94.95ms +step:874/1670 train_time:82983ms step_avg:94.95ms +step:875/1670 train_time:83077ms step_avg:94.94ms +step:875/1670 val_loss:3.5158 train_time:83168ms step_avg:95.05ms +step:876/1670 train_time:83193ms step_avg:94.97ms +step:877/1670 train_time:83269ms step_avg:94.95ms +step:878/1670 train_time:83371ms step_avg:94.96ms +step:879/1670 train_time:83468ms step_avg:94.96ms +step:880/1670 train_time:83561ms step_avg:94.96ms +step:881/1670 train_time:83654ms step_avg:94.95ms +step:882/1670 train_time:83746ms step_avg:94.95ms +step:883/1670 train_time:83839ms step_avg:94.95ms +step:884/1670 train_time:83932ms step_avg:94.95ms +step:885/1670 train_time:84024ms step_avg:94.94ms +step:886/1670 train_time:84117ms step_avg:94.94ms +step:887/1670 train_time:84211ms step_avg:94.94ms +step:888/1670 train_time:84308ms step_avg:94.94ms +step:889/1670 train_time:84405ms step_avg:94.94ms +step:890/1670 train_time:84500ms step_avg:94.94ms +step:891/1670 train_time:84593ms step_avg:94.94ms +step:892/1670 train_time:84686ms step_avg:94.94ms +step:893/1670 train_time:84778ms step_avg:94.94ms +step:894/1670 train_time:84872ms step_avg:94.94ms +step:895/1670 train_time:84965ms step_avg:94.93ms +step:896/1670 train_time:85058ms step_avg:94.93ms +step:897/1670 train_time:85152ms step_avg:94.93ms +step:898/1670 train_time:85247ms step_avg:94.93ms +step:899/1670 train_time:85342ms step_avg:94.93ms +step:900/1670 train_time:85437ms step_avg:94.93ms +step:901/1670 train_time:85530ms step_avg:94.93ms +step:902/1670 train_time:85623ms step_avg:94.93ms +step:903/1670 train_time:85717ms step_avg:94.93ms +step:904/1670 train_time:85811ms step_avg:94.92ms +step:905/1670 train_time:85904ms step_avg:94.92ms +step:906/1670 train_time:85998ms step_avg:94.92ms +step:907/1670 train_time:86092ms 
step_avg:94.92ms +step:908/1670 train_time:86184ms step_avg:94.92ms +step:909/1670 train_time:86278ms step_avg:94.92ms +step:910/1670 train_time:86373ms step_avg:94.92ms +step:911/1670 train_time:86467ms step_avg:94.91ms +step:912/1670 train_time:86562ms step_avg:94.91ms +step:913/1670 train_time:86655ms step_avg:94.91ms +step:914/1670 train_time:86748ms step_avg:94.91ms +step:915/1670 train_time:86842ms step_avg:94.91ms +step:916/1670 train_time:86936ms step_avg:94.91ms +step:917/1670 train_time:87029ms step_avg:94.91ms +step:918/1670 train_time:87122ms step_avg:94.90ms +step:919/1670 train_time:87216ms step_avg:94.90ms +step:920/1670 train_time:87311ms step_avg:94.90ms +step:921/1670 train_time:87405ms step_avg:94.90ms +step:922/1670 train_time:87498ms step_avg:94.90ms +step:923/1670 train_time:87593ms step_avg:94.90ms +step:924/1670 train_time:87686ms step_avg:94.90ms +step:925/1670 train_time:87780ms step_avg:94.90ms +step:926/1670 train_time:87873ms step_avg:94.90ms +step:927/1670 train_time:87966ms step_avg:94.89ms +step:928/1670 train_time:88060ms step_avg:94.89ms +step:929/1670 train_time:88153ms step_avg:94.89ms +step:930/1670 train_time:88246ms step_avg:94.89ms +step:931/1670 train_time:88340ms step_avg:94.89ms +step:932/1670 train_time:88435ms step_avg:94.89ms +step:933/1670 train_time:88528ms step_avg:94.89ms +step:934/1670 train_time:88622ms step_avg:94.88ms +step:935/1670 train_time:88716ms step_avg:94.88ms +step:936/1670 train_time:88810ms step_avg:94.88ms +step:937/1670 train_time:88903ms step_avg:94.88ms +step:938/1670 train_time:88998ms step_avg:94.88ms +step:939/1670 train_time:89091ms step_avg:94.88ms +step:940/1670 train_time:89184ms step_avg:94.88ms +step:941/1670 train_time:89278ms step_avg:94.88ms +step:942/1670 train_time:89372ms step_avg:94.87ms +step:943/1670 train_time:89465ms step_avg:94.87ms +step:944/1670 train_time:89560ms step_avg:94.87ms +step:945/1670 train_time:89653ms step_avg:94.87ms +step:946/1670 train_time:89747ms step_avg:94.87ms +step:947/1670 train_time:89842ms step_avg:94.87ms +step:948/1670 train_time:89936ms step_avg:94.87ms +step:949/1670 train_time:90030ms step_avg:94.87ms +step:950/1670 train_time:90123ms step_avg:94.87ms +step:951/1670 train_time:90217ms step_avg:94.87ms +step:952/1670 train_time:90310ms step_avg:94.86ms +step:953/1670 train_time:90405ms step_avg:94.86ms +step:954/1670 train_time:90499ms step_avg:94.86ms +step:955/1670 train_time:90592ms step_avg:94.86ms +step:956/1670 train_time:90685ms step_avg:94.86ms +step:957/1670 train_time:90779ms step_avg:94.86ms +step:958/1670 train_time:90873ms step_avg:94.86ms +step:959/1670 train_time:90966ms step_avg:94.86ms +step:960/1670 train_time:91059ms step_avg:94.85ms +step:961/1670 train_time:91153ms step_avg:94.85ms +step:962/1670 train_time:91247ms step_avg:94.85ms +step:963/1670 train_time:91341ms step_avg:94.85ms +step:964/1670 train_time:91435ms step_avg:94.85ms +step:965/1670 train_time:91529ms step_avg:94.85ms +step:966/1670 train_time:91623ms step_avg:94.85ms +step:967/1670 train_time:91717ms step_avg:94.85ms +step:968/1670 train_time:91811ms step_avg:94.85ms +step:969/1670 train_time:91905ms step_avg:94.84ms +step:970/1670 train_time:91999ms step_avg:94.84ms +step:971/1670 train_time:92091ms step_avg:94.84ms +step:972/1670 train_time:92185ms step_avg:94.84ms +step:973/1670 train_time:92279ms step_avg:94.84ms +step:974/1670 train_time:92373ms step_avg:94.84ms +step:975/1670 train_time:92466ms step_avg:94.84ms +step:976/1670 train_time:92559ms step_avg:94.84ms +step:977/1670 
train_time:92652ms step_avg:94.83ms +step:978/1670 train_time:92747ms step_avg:94.83ms +step:979/1670 train_time:92842ms step_avg:94.83ms +step:980/1670 train_time:92936ms step_avg:94.83ms +step:981/1670 train_time:93029ms step_avg:94.83ms +step:982/1670 train_time:93123ms step_avg:94.83ms +step:983/1670 train_time:93217ms step_avg:94.83ms +step:984/1670 train_time:93310ms step_avg:94.83ms +step:985/1670 train_time:93405ms step_avg:94.83ms +step:986/1670 train_time:93497ms step_avg:94.82ms +step:987/1670 train_time:93591ms step_avg:94.82ms +step:988/1670 train_time:93685ms step_avg:94.82ms +step:989/1670 train_time:93779ms step_avg:94.82ms +step:990/1670 train_time:93873ms step_avg:94.82ms +step:991/1670 train_time:93966ms step_avg:94.82ms +step:992/1670 train_time:94060ms step_avg:94.82ms +step:993/1670 train_time:94154ms step_avg:94.82ms +step:994/1670 train_time:94247ms step_avg:94.82ms +step:995/1670 train_time:94341ms step_avg:94.81ms +step:996/1670 train_time:94435ms step_avg:94.81ms +step:997/1670 train_time:94528ms step_avg:94.81ms +step:998/1670 train_time:94622ms step_avg:94.81ms +step:999/1670 train_time:94716ms step_avg:94.81ms +step:1000/1670 train_time:94810ms step_avg:94.81ms +step:1000/1670 val_loss:3.4681 train_time:94902ms step_avg:94.90ms +step:1001/1670 train_time:94927ms step_avg:94.83ms +step:1002/1670 train_time:95005ms step_avg:94.81ms +step:1003/1670 train_time:95106ms step_avg:94.82ms +step:1004/1670 train_time:95201ms step_avg:94.82ms +step:1005/1670 train_time:95294ms step_avg:94.82ms +step:1006/1670 train_time:95387ms step_avg:94.82ms +step:1007/1670 train_time:95479ms step_avg:94.82ms +step:1008/1670 train_time:95572ms step_avg:94.81ms +step:1009/1670 train_time:95664ms step_avg:94.81ms +step:1010/1670 train_time:95757ms step_avg:94.81ms +step:1011/1670 train_time:95850ms step_avg:94.81ms +step:1012/1670 train_time:95944ms step_avg:94.81ms +step:1013/1670 train_time:96040ms step_avg:94.81ms +step:1014/1670 train_time:96137ms step_avg:94.81ms +step:1015/1670 train_time:96232ms step_avg:94.81ms +step:1016/1670 train_time:96326ms step_avg:94.81ms +step:1017/1670 train_time:96419ms step_avg:94.81ms +step:1018/1670 train_time:96513ms step_avg:94.81ms +step:1019/1670 train_time:96606ms step_avg:94.80ms +step:1020/1670 train_time:96698ms step_avg:94.80ms +step:1021/1670 train_time:96791ms step_avg:94.80ms +step:1022/1670 train_time:96884ms step_avg:94.80ms +step:1023/1670 train_time:96980ms step_avg:94.80ms +step:1024/1670 train_time:97075ms step_avg:94.80ms +step:1025/1670 train_time:97169ms step_avg:94.80ms +step:1026/1670 train_time:97264ms step_avg:94.80ms +step:1027/1670 train_time:97358ms step_avg:94.80ms +step:1028/1670 train_time:97452ms step_avg:94.80ms +step:1029/1670 train_time:97545ms step_avg:94.80ms +step:1030/1670 train_time:97637ms step_avg:94.79ms +step:1031/1670 train_time:97731ms step_avg:94.79ms +step:1032/1670 train_time:97823ms step_avg:94.79ms +step:1033/1670 train_time:97917ms step_avg:94.79ms +step:1034/1670 train_time:98012ms step_avg:94.79ms +step:1035/1670 train_time:98106ms step_avg:94.79ms +step:1036/1670 train_time:98200ms step_avg:94.79ms +step:1037/1670 train_time:98295ms step_avg:94.79ms +step:1038/1670 train_time:98389ms step_avg:94.79ms +step:1039/1670 train_time:98483ms step_avg:94.79ms +step:1040/1670 train_time:98577ms step_avg:94.79ms +step:1041/1670 train_time:98670ms step_avg:94.78ms +step:1042/1670 train_time:98763ms step_avg:94.78ms +step:1043/1670 train_time:98857ms step_avg:94.78ms +step:1044/1670 train_time:98951ms 
step_avg:94.78ms +step:1045/1670 train_time:99045ms step_avg:94.78ms +step:1046/1670 train_time:99139ms step_avg:94.78ms +step:1047/1670 train_time:99233ms step_avg:94.78ms +step:1048/1670 train_time:99327ms step_avg:94.78ms +step:1049/1670 train_time:99420ms step_avg:94.78ms +step:1050/1670 train_time:99515ms step_avg:94.78ms +step:1051/1670 train_time:99609ms step_avg:94.78ms +step:1052/1670 train_time:99702ms step_avg:94.77ms +step:1053/1670 train_time:99795ms step_avg:94.77ms +step:1054/1670 train_time:99889ms step_avg:94.77ms +step:1055/1670 train_time:99983ms step_avg:94.77ms +step:1056/1670 train_time:100077ms step_avg:94.77ms +step:1057/1670 train_time:100171ms step_avg:94.77ms +step:1058/1670 train_time:100265ms step_avg:94.77ms +step:1059/1670 train_time:100360ms step_avg:94.77ms +step:1060/1670 train_time:100453ms step_avg:94.77ms +step:1061/1670 train_time:100547ms step_avg:94.77ms +step:1062/1670 train_time:100874ms step_avg:94.99ms +step:1063/1670 train_time:101070ms step_avg:95.08ms +step:1064/1670 train_time:101162ms step_avg:95.08ms +step:1065/1670 train_time:101255ms step_avg:95.07ms +step:1066/1670 train_time:101347ms step_avg:95.07ms +step:1067/1670 train_time:101440ms step_avg:95.07ms +step:1068/1670 train_time:101532ms step_avg:95.07ms +step:1069/1670 train_time:101624ms step_avg:95.06ms +step:1070/1670 train_time:101717ms step_avg:95.06ms +step:1071/1670 train_time:101809ms step_avg:95.06ms +step:1072/1670 train_time:101905ms step_avg:95.06ms +step:1073/1670 train_time:102001ms step_avg:95.06ms +step:1074/1670 train_time:102097ms step_avg:95.06ms +step:1075/1670 train_time:102191ms step_avg:95.06ms +step:1076/1670 train_time:102285ms step_avg:95.06ms +step:1077/1670 train_time:102378ms step_avg:95.06ms +step:1078/1670 train_time:102471ms step_avg:95.06ms +step:1079/1670 train_time:102564ms step_avg:95.06ms +step:1080/1670 train_time:102657ms step_avg:95.05ms +step:1081/1670 train_time:102750ms step_avg:95.05ms +step:1082/1670 train_time:102843ms step_avg:95.05ms +step:1083/1670 train_time:102938ms step_avg:95.05ms +step:1084/1670 train_time:103033ms step_avg:95.05ms +step:1085/1670 train_time:103127ms step_avg:95.05ms +step:1086/1670 train_time:103221ms step_avg:95.05ms +step:1087/1670 train_time:103315ms step_avg:95.05ms +step:1088/1670 train_time:103409ms step_avg:95.04ms +step:1089/1670 train_time:103502ms step_avg:95.04ms +step:1090/1670 train_time:103595ms step_avg:95.04ms +step:1091/1670 train_time:103688ms step_avg:95.04ms +step:1092/1670 train_time:103783ms step_avg:95.04ms +step:1093/1670 train_time:103877ms step_avg:95.04ms +step:1094/1670 train_time:103971ms step_avg:95.04ms +step:1095/1670 train_time:104067ms step_avg:95.04ms +step:1096/1670 train_time:104160ms step_avg:95.04ms +step:1097/1670 train_time:104253ms step_avg:95.03ms +step:1098/1670 train_time:104347ms step_avg:95.03ms +step:1099/1670 train_time:104441ms step_avg:95.03ms +step:1100/1670 train_time:104534ms step_avg:95.03ms +step:1101/1670 train_time:104627ms step_avg:95.03ms +step:1102/1670 train_time:104721ms step_avg:95.03ms +step:1103/1670 train_time:104814ms step_avg:95.03ms +step:1104/1670 train_time:104908ms step_avg:95.03ms +step:1105/1670 train_time:105001ms step_avg:95.02ms +step:1106/1670 train_time:105095ms step_avg:95.02ms +step:1107/1670 train_time:105189ms step_avg:95.02ms +step:1108/1670 train_time:105284ms step_avg:95.02ms +step:1109/1670 train_time:105378ms step_avg:95.02ms +step:1110/1670 train_time:105472ms step_avg:95.02ms +step:1111/1670 train_time:105565ms 
step_avg:95.02ms +step:1112/1670 train_time:105658ms step_avg:95.02ms +step:1113/1670 train_time:105751ms step_avg:95.01ms +step:1114/1670 train_time:105845ms step_avg:95.01ms +step:1115/1670 train_time:106047ms step_avg:95.11ms +step:1116/1670 train_time:106116ms step_avg:95.09ms +step:1117/1670 train_time:106210ms step_avg:95.09ms +step:1118/1670 train_time:106303ms step_avg:95.08ms +step:1119/1670 train_time:106396ms step_avg:95.08ms +step:1120/1670 train_time:106490ms step_avg:95.08ms +step:1121/1670 train_time:106584ms step_avg:95.08ms +step:1122/1670 train_time:106677ms step_avg:95.08ms +step:1123/1670 train_time:106770ms step_avg:95.08ms +step:1124/1670 train_time:106863ms step_avg:95.07ms +step:1125/1670 train_time:106964ms step_avg:95.08ms +step:1125/1670 val_loss:3.4149 train_time:107061ms step_avg:95.17ms +step:1126/1670 train_time:107086ms step_avg:95.10ms +step:1127/1670 train_time:107167ms step_avg:95.09ms +step:1128/1670 train_time:107268ms step_avg:95.10ms +step:1129/1670 train_time:107363ms step_avg:95.10ms +step:1130/1670 train_time:107457ms step_avg:95.09ms +step:1131/1670 train_time:107551ms step_avg:95.09ms +step:1132/1670 train_time:107644ms step_avg:95.09ms +step:1133/1670 train_time:107737ms step_avg:95.09ms +step:1134/1670 train_time:107831ms step_avg:95.09ms +step:1135/1670 train_time:107924ms step_avg:95.09ms +step:1136/1670 train_time:108018ms step_avg:95.09ms +step:1137/1670 train_time:108116ms step_avg:95.09ms +step:1138/1670 train_time:108213ms step_avg:95.09ms +step:1139/1670 train_time:108309ms step_avg:95.09ms +step:1140/1670 train_time:108404ms step_avg:95.09ms +step:1141/1670 train_time:108498ms step_avg:95.09ms +step:1142/1670 train_time:108593ms step_avg:95.09ms +step:1143/1670 train_time:108686ms step_avg:95.09ms +step:1144/1670 train_time:108780ms step_avg:95.09ms +step:1145/1670 train_time:108875ms step_avg:95.09ms +step:1146/1670 train_time:108967ms step_avg:95.08ms +step:1147/1670 train_time:109062ms step_avg:95.08ms +step:1148/1670 train_time:109158ms step_avg:95.09ms +step:1149/1670 train_time:109255ms step_avg:95.09ms +step:1150/1670 train_time:109350ms step_avg:95.09ms +step:1151/1670 train_time:109445ms step_avg:95.09ms +step:1152/1670 train_time:109539ms step_avg:95.09ms +step:1153/1670 train_time:109633ms step_avg:95.08ms +step:1154/1670 train_time:109727ms step_avg:95.08ms +step:1155/1670 train_time:109821ms step_avg:95.08ms +step:1156/1670 train_time:109915ms step_avg:95.08ms +step:1157/1670 train_time:110009ms step_avg:95.08ms +step:1158/1670 train_time:110104ms step_avg:95.08ms +step:1159/1670 train_time:110200ms step_avg:95.08ms +step:1160/1670 train_time:110294ms step_avg:95.08ms +step:1161/1670 train_time:110389ms step_avg:95.08ms +step:1162/1670 train_time:110483ms step_avg:95.08ms +step:1163/1670 train_time:110579ms step_avg:95.08ms +step:1164/1670 train_time:110673ms step_avg:95.08ms +step:1165/1670 train_time:110766ms step_avg:95.08ms +step:1166/1670 train_time:110861ms step_avg:95.08ms +step:1167/1670 train_time:110955ms step_avg:95.08ms +step:1168/1670 train_time:111050ms step_avg:95.08ms +step:1169/1670 train_time:111145ms step_avg:95.08ms +step:1170/1670 train_time:111239ms step_avg:95.08ms +step:1171/1670 train_time:111334ms step_avg:95.08ms +step:1172/1670 train_time:111429ms step_avg:95.08ms +step:1173/1670 train_time:111523ms step_avg:95.08ms +step:1174/1670 train_time:111618ms step_avg:95.08ms +step:1175/1670 train_time:111712ms step_avg:95.07ms +step:1176/1670 train_time:111807ms step_avg:95.07ms +step:1177/1670 
train_time:111902ms step_avg:95.07ms +step:1178/1670 train_time:111996ms step_avg:95.07ms +step:1179/1670 train_time:112092ms step_avg:95.07ms +step:1180/1670 train_time:112186ms step_avg:95.07ms +step:1181/1670 train_time:112281ms step_avg:95.07ms +step:1182/1670 train_time:112376ms step_avg:95.07ms +step:1183/1670 train_time:112471ms step_avg:95.07ms +step:1184/1670 train_time:112564ms step_avg:95.07ms +step:1185/1670 train_time:112659ms step_avg:95.07ms +step:1186/1670 train_time:112753ms step_avg:95.07ms +step:1187/1670 train_time:112848ms step_avg:95.07ms +step:1188/1670 train_time:112942ms step_avg:95.07ms +step:1189/1670 train_time:113036ms step_avg:95.07ms +step:1190/1670 train_time:113131ms step_avg:95.07ms +step:1191/1670 train_time:113226ms step_avg:95.07ms +step:1192/1670 train_time:113321ms step_avg:95.07ms +step:1193/1670 train_time:113416ms step_avg:95.07ms +step:1194/1670 train_time:113511ms step_avg:95.07ms +step:1195/1670 train_time:113605ms step_avg:95.07ms +step:1196/1670 train_time:113700ms step_avg:95.07ms +step:1197/1670 train_time:113794ms step_avg:95.07ms +step:1198/1670 train_time:113889ms step_avg:95.07ms +step:1199/1670 train_time:113985ms step_avg:95.07ms +step:1200/1670 train_time:114078ms step_avg:95.07ms +step:1201/1670 train_time:114172ms step_avg:95.06ms +step:1202/1670 train_time:114267ms step_avg:95.06ms +step:1203/1670 train_time:114363ms step_avg:95.06ms +step:1204/1670 train_time:114457ms step_avg:95.06ms +step:1205/1670 train_time:114551ms step_avg:95.06ms +step:1206/1670 train_time:114646ms step_avg:95.06ms +step:1207/1670 train_time:114740ms step_avg:95.06ms +step:1208/1670 train_time:114835ms step_avg:95.06ms +step:1209/1670 train_time:114929ms step_avg:95.06ms +step:1210/1670 train_time:115024ms step_avg:95.06ms +step:1211/1670 train_time:115119ms step_avg:95.06ms +step:1212/1670 train_time:115214ms step_avg:95.06ms +step:1213/1670 train_time:115309ms step_avg:95.06ms +step:1214/1670 train_time:115404ms step_avg:95.06ms +step:1215/1670 train_time:115499ms step_avg:95.06ms +step:1216/1670 train_time:115594ms step_avg:95.06ms +step:1217/1670 train_time:115689ms step_avg:95.06ms +step:1218/1670 train_time:115784ms step_avg:95.06ms +step:1219/1670 train_time:115878ms step_avg:95.06ms +step:1220/1670 train_time:115973ms step_avg:95.06ms +step:1221/1670 train_time:116067ms step_avg:95.06ms +step:1222/1670 train_time:116161ms step_avg:95.06ms +step:1223/1670 train_time:116256ms step_avg:95.06ms +step:1224/1670 train_time:116352ms step_avg:95.06ms +step:1225/1670 train_time:116447ms step_avg:95.06ms +step:1226/1670 train_time:116541ms step_avg:95.06ms +step:1227/1670 train_time:116635ms step_avg:95.06ms +step:1228/1670 train_time:116730ms step_avg:95.06ms +step:1229/1670 train_time:116824ms step_avg:95.06ms +step:1230/1670 train_time:116918ms step_avg:95.06ms +step:1231/1670 train_time:117012ms step_avg:95.05ms +step:1232/1670 train_time:117107ms step_avg:95.05ms +step:1233/1670 train_time:117202ms step_avg:95.05ms +step:1234/1670 train_time:117297ms step_avg:95.05ms +step:1235/1670 train_time:117392ms step_avg:95.05ms +step:1236/1670 train_time:117486ms step_avg:95.05ms +step:1237/1670 train_time:117581ms step_avg:95.05ms +step:1238/1670 train_time:117675ms step_avg:95.05ms +step:1239/1670 train_time:117770ms step_avg:95.05ms +step:1240/1670 train_time:117864ms step_avg:95.05ms +step:1241/1670 train_time:117959ms step_avg:95.05ms +step:1242/1670 train_time:118053ms step_avg:95.05ms +step:1243/1670 train_time:118152ms step_avg:95.05ms +step:1244/1670 
train_time:118243ms step_avg:95.05ms +step:1245/1670 train_time:118336ms step_avg:95.05ms +step:1246/1670 train_time:118431ms step_avg:95.05ms +step:1247/1670 train_time:118525ms step_avg:95.05ms +step:1248/1670 train_time:118620ms step_avg:95.05ms +step:1249/1670 train_time:118714ms step_avg:95.05ms +step:1250/1670 train_time:118808ms step_avg:95.05ms +step:1250/1670 val_loss:3.3758 train_time:118903ms step_avg:95.12ms +step:1251/1670 train_time:118928ms step_avg:95.07ms +step:1252/1670 train_time:119009ms step_avg:95.06ms +step:1253/1670 train_time:119111ms step_avg:95.06ms +step:1254/1670 train_time:119205ms step_avg:95.06ms +step:1255/1670 train_time:119299ms step_avg:95.06ms +step:1256/1670 train_time:119392ms step_avg:95.06ms +step:1257/1670 train_time:119486ms step_avg:95.06ms +step:1258/1670 train_time:119579ms step_avg:95.05ms +step:1259/1670 train_time:119672ms step_avg:95.05ms +step:1260/1670 train_time:119766ms step_avg:95.05ms +step:1261/1670 train_time:119859ms step_avg:95.05ms +step:1262/1670 train_time:119956ms step_avg:95.05ms +step:1263/1670 train_time:120053ms step_avg:95.05ms +step:1264/1670 train_time:120149ms step_avg:95.05ms +step:1265/1670 train_time:120244ms step_avg:95.05ms +step:1266/1670 train_time:120338ms step_avg:95.05ms +step:1267/1670 train_time:120432ms step_avg:95.05ms +step:1268/1670 train_time:120525ms step_avg:95.05ms +step:1269/1670 train_time:120619ms step_avg:95.05ms +step:1270/1670 train_time:120713ms step_avg:95.05ms +step:1271/1670 train_time:120807ms step_avg:95.05ms +step:1272/1670 train_time:120902ms step_avg:95.05ms +step:1273/1670 train_time:120999ms step_avg:95.05ms +step:1274/1670 train_time:121447ms step_avg:95.33ms +step:1275/1670 train_time:121521ms step_avg:95.31ms +step:1276/1670 train_time:121614ms step_avg:95.31ms +step:1277/1670 train_time:121707ms step_avg:95.31ms +step:1278/1670 train_time:121800ms step_avg:95.31ms +step:1279/1670 train_time:121894ms step_avg:95.30ms +step:1280/1670 train_time:121987ms step_avg:95.30ms +step:1281/1670 train_time:122080ms step_avg:95.30ms +step:1282/1670 train_time:122174ms step_avg:95.30ms +step:1283/1670 train_time:122268ms step_avg:95.30ms +step:1284/1670 train_time:122366ms step_avg:95.30ms +step:1285/1670 train_time:122464ms step_avg:95.30ms +step:1286/1670 train_time:122559ms step_avg:95.30ms +step:1287/1670 train_time:122653ms step_avg:95.30ms +step:1288/1670 train_time:122747ms step_avg:95.30ms +step:1289/1670 train_time:122841ms step_avg:95.30ms +step:1290/1670 train_time:122935ms step_avg:95.30ms +step:1291/1670 train_time:123028ms step_avg:95.30ms +step:1292/1670 train_time:123122ms step_avg:95.30ms +step:1293/1670 train_time:123216ms step_avg:95.29ms +step:1294/1670 train_time:123311ms step_avg:95.29ms +step:1295/1670 train_time:123407ms step_avg:95.30ms +step:1296/1670 train_time:123503ms step_avg:95.30ms +step:1297/1670 train_time:123598ms step_avg:95.30ms +step:1298/1670 train_time:123693ms step_avg:95.29ms +step:1299/1670 train_time:123788ms step_avg:95.29ms +step:1300/1670 train_time:123882ms step_avg:95.29ms +step:1301/1670 train_time:123977ms step_avg:95.29ms +step:1302/1670 train_time:124071ms step_avg:95.29ms +step:1303/1670 train_time:124165ms step_avg:95.29ms +step:1304/1670 train_time:124259ms step_avg:95.29ms +step:1305/1670 train_time:124354ms step_avg:95.29ms +step:1306/1670 train_time:124449ms step_avg:95.29ms +step:1307/1670 train_time:124545ms step_avg:95.29ms +step:1308/1670 train_time:124640ms step_avg:95.29ms +step:1309/1670 train_time:124734ms step_avg:95.29ms 
+step:1310/1670 train_time:124829ms step_avg:95.29ms +step:1311/1670 train_time:124923ms step_avg:95.29ms +step:1312/1670 train_time:125017ms step_avg:95.29ms +step:1313/1670 train_time:125111ms step_avg:95.29ms +step:1314/1670 train_time:125206ms step_avg:95.29ms +step:1315/1670 train_time:125300ms step_avg:95.29ms +step:1316/1670 train_time:125395ms step_avg:95.28ms +step:1317/1670 train_time:125491ms step_avg:95.29ms +step:1318/1670 train_time:125586ms step_avg:95.29ms +step:1319/1670 train_time:125681ms step_avg:95.29ms +step:1320/1670 train_time:125775ms step_avg:95.28ms +step:1321/1670 train_time:125869ms step_avg:95.28ms +step:1322/1670 train_time:125963ms step_avg:95.28ms +step:1323/1670 train_time:126057ms step_avg:95.28ms +step:1324/1670 train_time:126151ms step_avg:95.28ms +step:1325/1670 train_time:126246ms step_avg:95.28ms +step:1326/1670 train_time:126341ms step_avg:95.28ms +step:1327/1670 train_time:126435ms step_avg:95.28ms +step:1328/1670 train_time:126531ms step_avg:95.28ms +step:1329/1670 train_time:126626ms step_avg:95.28ms +step:1330/1670 train_time:126721ms step_avg:95.28ms +step:1331/1670 train_time:126816ms step_avg:95.28ms +step:1332/1670 train_time:126910ms step_avg:95.28ms +step:1333/1670 train_time:127004ms step_avg:95.28ms +step:1334/1670 train_time:127098ms step_avg:95.28ms +step:1335/1670 train_time:127193ms step_avg:95.28ms +step:1336/1670 train_time:127287ms step_avg:95.28ms +step:1337/1670 train_time:127382ms step_avg:95.27ms +step:1338/1670 train_time:127477ms step_avg:95.27ms +step:1339/1670 train_time:127571ms step_avg:95.27ms +step:1340/1670 train_time:127666ms step_avg:95.27ms +step:1341/1670 train_time:127761ms step_avg:95.27ms +step:1342/1670 train_time:127856ms step_avg:95.27ms +step:1343/1670 train_time:127951ms step_avg:95.27ms +step:1344/1670 train_time:128046ms step_avg:95.27ms +step:1345/1670 train_time:128140ms step_avg:95.27ms +step:1346/1670 train_time:128235ms step_avg:95.27ms +step:1347/1670 train_time:128330ms step_avg:95.27ms +step:1348/1670 train_time:128423ms step_avg:95.27ms +step:1349/1670 train_time:128518ms step_avg:95.27ms +step:1350/1670 train_time:128613ms step_avg:95.27ms +step:1351/1670 train_time:128709ms step_avg:95.27ms +step:1352/1670 train_time:128803ms step_avg:95.27ms +step:1353/1670 train_time:128898ms step_avg:95.27ms +step:1354/1670 train_time:128993ms step_avg:95.27ms +step:1355/1670 train_time:129088ms step_avg:95.27ms +step:1356/1670 train_time:129183ms step_avg:95.27ms +step:1357/1670 train_time:129277ms step_avg:95.27ms +step:1358/1670 train_time:129371ms step_avg:95.27ms +step:1359/1670 train_time:129465ms step_avg:95.26ms +step:1360/1670 train_time:129559ms step_avg:95.26ms +step:1361/1670 train_time:129654ms step_avg:95.26ms +step:1362/1670 train_time:129749ms step_avg:95.26ms +step:1363/1670 train_time:129843ms step_avg:95.26ms +step:1364/1670 train_time:129937ms step_avg:95.26ms +step:1365/1670 train_time:130031ms step_avg:95.26ms +step:1366/1670 train_time:130126ms step_avg:95.26ms +step:1367/1670 train_time:130220ms step_avg:95.26ms +step:1368/1670 train_time:130315ms step_avg:95.26ms +step:1369/1670 train_time:130410ms step_avg:95.26ms +step:1370/1670 train_time:130504ms step_avg:95.26ms +step:1371/1670 train_time:130600ms step_avg:95.26ms +step:1372/1670 train_time:130695ms step_avg:95.26ms +step:1373/1670 train_time:130790ms step_avg:95.26ms +step:1374/1670 train_time:130885ms step_avg:95.26ms +step:1375/1670 train_time:130979ms step_avg:95.26ms +step:1375/1670 val_loss:3.3415 train_time:131073ms 
step_avg:95.33ms +step:1376/1670 train_time:131098ms step_avg:95.27ms +step:1377/1670 train_time:131176ms step_avg:95.26ms +step:1378/1670 train_time:131278ms step_avg:95.27ms +step:1379/1670 train_time:131373ms step_avg:95.27ms +step:1380/1670 train_time:131467ms step_avg:95.27ms +step:1381/1670 train_time:131560ms step_avg:95.26ms +step:1382/1670 train_time:131653ms step_avg:95.26ms +step:1383/1670 train_time:131747ms step_avg:95.26ms +step:1384/1670 train_time:131841ms step_avg:95.26ms +step:1385/1670 train_time:131935ms step_avg:95.26ms +step:1386/1670 train_time:132029ms step_avg:95.26ms +step:1387/1670 train_time:132125ms step_avg:95.26ms +step:1388/1670 train_time:132224ms step_avg:95.26ms +step:1389/1670 train_time:132322ms step_avg:95.26ms +step:1390/1670 train_time:132417ms step_avg:95.26ms +step:1391/1670 train_time:132511ms step_avg:95.26ms +step:1392/1670 train_time:132604ms step_avg:95.26ms +step:1393/1670 train_time:132698ms step_avg:95.26ms +step:1394/1670 train_time:132793ms step_avg:95.26ms +step:1395/1670 train_time:132886ms step_avg:95.26ms +step:1396/1670 train_time:132979ms step_avg:95.26ms +step:1397/1670 train_time:133076ms step_avg:95.26ms +step:1398/1670 train_time:133172ms step_avg:95.26ms +step:1399/1670 train_time:133268ms step_avg:95.26ms +step:1400/1670 train_time:133364ms step_avg:95.26ms +step:1401/1670 train_time:133458ms step_avg:95.26ms +step:1402/1670 train_time:133553ms step_avg:95.26ms +step:1403/1670 train_time:133647ms step_avg:95.26ms +step:1404/1670 train_time:133742ms step_avg:95.26ms +step:1405/1670 train_time:133837ms step_avg:95.26ms +step:1406/1670 train_time:133930ms step_avg:95.26ms +step:1407/1670 train_time:134023ms step_avg:95.25ms +step:1408/1670 train_time:134118ms step_avg:95.25ms +step:1409/1670 train_time:134214ms step_avg:95.26ms +step:1410/1670 train_time:134310ms step_avg:95.26ms +step:1411/1670 train_time:134405ms step_avg:95.25ms +step:1412/1670 train_time:134499ms step_avg:95.25ms +step:1413/1670 train_time:134594ms step_avg:95.25ms +step:1414/1670 train_time:134689ms step_avg:95.25ms +step:1415/1670 train_time:134783ms step_avg:95.25ms +step:1416/1670 train_time:134878ms step_avg:95.25ms +step:1417/1670 train_time:134972ms step_avg:95.25ms +step:1418/1670 train_time:135066ms step_avg:95.25ms +step:1419/1670 train_time:135161ms step_avg:95.25ms +step:1420/1670 train_time:135257ms step_avg:95.25ms +step:1421/1670 train_time:135353ms step_avg:95.25ms +step:1422/1670 train_time:135447ms step_avg:95.25ms +step:1423/1670 train_time:135542ms step_avg:95.25ms +step:1424/1670 train_time:135636ms step_avg:95.25ms +step:1425/1670 train_time:135730ms step_avg:95.25ms +step:1426/1670 train_time:135824ms step_avg:95.25ms +step:1427/1670 train_time:135918ms step_avg:95.25ms +step:1428/1670 train_time:136013ms step_avg:95.25ms +step:1429/1670 train_time:136107ms step_avg:95.25ms +step:1430/1670 train_time:136202ms step_avg:95.25ms +step:1431/1670 train_time:136297ms step_avg:95.25ms +step:1432/1670 train_time:136393ms step_avg:95.25ms +step:1433/1670 train_time:136486ms step_avg:95.25ms +step:1434/1670 train_time:136580ms step_avg:95.24ms +step:1435/1670 train_time:136676ms step_avg:95.24ms +step:1436/1670 train_time:136770ms step_avg:95.24ms +step:1437/1670 train_time:136865ms step_avg:95.24ms +step:1438/1670 train_time:136958ms step_avg:95.24ms +step:1439/1670 train_time:137053ms step_avg:95.24ms +step:1440/1670 train_time:137148ms step_avg:95.24ms +step:1441/1670 train_time:137243ms step_avg:95.24ms +step:1442/1670 train_time:137338ms 
step_avg:95.24ms +step:1443/1670 train_time:137432ms step_avg:95.24ms +step:1444/1670 train_time:137526ms step_avg:95.24ms +step:1445/1670 train_time:137622ms step_avg:95.24ms +step:1446/1670 train_time:137717ms step_avg:95.24ms +step:1447/1670 train_time:137811ms step_avg:95.24ms +step:1448/1670 train_time:137905ms step_avg:95.24ms +step:1449/1670 train_time:138000ms step_avg:95.24ms +step:1450/1670 train_time:138096ms step_avg:95.24ms +step:1451/1670 train_time:138189ms step_avg:95.24ms +step:1452/1670 train_time:138285ms step_avg:95.24ms +step:1453/1670 train_time:138380ms step_avg:95.24ms +step:1454/1670 train_time:138474ms step_avg:95.24ms +step:1455/1670 train_time:138569ms step_avg:95.24ms +step:1456/1670 train_time:138664ms step_avg:95.24ms +step:1457/1670 train_time:138758ms step_avg:95.24ms +step:1458/1670 train_time:138853ms step_avg:95.24ms +step:1459/1670 train_time:138947ms step_avg:95.23ms +step:1460/1670 train_time:139042ms step_avg:95.23ms +step:1461/1670 train_time:139137ms step_avg:95.23ms +step:1462/1670 train_time:139232ms step_avg:95.23ms +step:1463/1670 train_time:139327ms step_avg:95.23ms +step:1464/1670 train_time:139422ms step_avg:95.23ms +step:1465/1670 train_time:139517ms step_avg:95.23ms +step:1466/1670 train_time:139612ms step_avg:95.23ms +step:1467/1670 train_time:139706ms step_avg:95.23ms +step:1468/1670 train_time:139801ms step_avg:95.23ms +step:1469/1670 train_time:139896ms step_avg:95.23ms +step:1470/1670 train_time:139990ms step_avg:95.23ms +step:1471/1670 train_time:140084ms step_avg:95.23ms +step:1472/1670 train_time:140178ms step_avg:95.23ms +step:1473/1670 train_time:140274ms step_avg:95.23ms +step:1474/1670 train_time:140368ms step_avg:95.23ms +step:1475/1670 train_time:140464ms step_avg:95.23ms +step:1476/1670 train_time:140559ms step_avg:95.23ms +step:1477/1670 train_time:140653ms step_avg:95.23ms +step:1478/1670 train_time:140747ms step_avg:95.23ms +step:1479/1670 train_time:140842ms step_avg:95.23ms +step:1480/1670 train_time:140936ms step_avg:95.23ms +step:1481/1670 train_time:141031ms step_avg:95.23ms +step:1482/1670 train_time:141126ms step_avg:95.23ms +step:1483/1670 train_time:141221ms step_avg:95.23ms +step:1484/1670 train_time:141315ms step_avg:95.23ms +step:1485/1670 train_time:141758ms step_avg:95.46ms +step:1486/1670 train_time:141835ms step_avg:95.45ms +step:1487/1670 train_time:141927ms step_avg:95.45ms +step:1488/1670 train_time:142020ms step_avg:95.44ms +step:1489/1670 train_time:142113ms step_avg:95.44ms +step:1490/1670 train_time:142207ms step_avg:95.44ms +step:1491/1670 train_time:142300ms step_avg:95.44ms +step:1492/1670 train_time:142393ms step_avg:95.44ms +step:1493/1670 train_time:142487ms step_avg:95.44ms +step:1494/1670 train_time:142580ms step_avg:95.44ms +step:1495/1670 train_time:142677ms step_avg:95.44ms +step:1496/1670 train_time:142775ms step_avg:95.44ms +step:1497/1670 train_time:142873ms step_avg:95.44ms +step:1498/1670 train_time:142968ms step_avg:95.44ms +step:1499/1670 train_time:143062ms step_avg:95.44ms +step:1500/1670 train_time:143156ms step_avg:95.44ms +step:1500/1670 val_loss:3.3117 train_time:143248ms step_avg:95.50ms +step:1501/1670 train_time:143273ms step_avg:95.45ms +step:1502/1670 train_time:143353ms step_avg:95.44ms +step:1503/1670 train_time:143453ms step_avg:95.44ms +step:1504/1670 train_time:143549ms step_avg:95.44ms +step:1505/1670 train_time:143642ms step_avg:95.44ms +step:1506/1670 train_time:143736ms step_avg:95.44ms +step:1507/1670 train_time:143829ms step_avg:95.44ms +step:1508/1670 
train_time:143922ms step_avg:95.44ms +step:1509/1670 train_time:144016ms step_avg:95.44ms +step:1510/1670 train_time:144109ms step_avg:95.44ms +step:1511/1670 train_time:144203ms step_avg:95.44ms +step:1512/1670 train_time:144302ms step_avg:95.44ms +step:1513/1670 train_time:144399ms step_avg:95.44ms +step:1514/1670 train_time:144494ms step_avg:95.44ms +step:1515/1670 train_time:144589ms step_avg:95.44ms +step:1516/1670 train_time:144682ms step_avg:95.44ms +step:1517/1670 train_time:144776ms step_avg:95.44ms +step:1518/1670 train_time:144869ms step_avg:95.43ms +step:1519/1670 train_time:144963ms step_avg:95.43ms +step:1520/1670 train_time:145056ms step_avg:95.43ms +step:1521/1670 train_time:145150ms step_avg:95.43ms +step:1522/1670 train_time:145245ms step_avg:95.43ms +step:1523/1670 train_time:145342ms step_avg:95.43ms +step:1524/1670 train_time:145437ms step_avg:95.43ms +step:1525/1670 train_time:145533ms step_avg:95.43ms +step:1526/1670 train_time:145628ms step_avg:95.43ms +step:1527/1670 train_time:145721ms step_avg:95.43ms +step:1528/1670 train_time:145815ms step_avg:95.43ms +step:1529/1670 train_time:145909ms step_avg:95.43ms +step:1530/1670 train_time:146003ms step_avg:95.43ms +step:1531/1670 train_time:146096ms step_avg:95.43ms +step:1532/1670 train_time:146191ms step_avg:95.42ms +step:1533/1670 train_time:146286ms step_avg:95.42ms +step:1534/1670 train_time:146382ms step_avg:95.42ms +step:1535/1670 train_time:146478ms step_avg:95.43ms +step:1536/1670 train_time:146573ms step_avg:95.42ms +step:1537/1670 train_time:146667ms step_avg:95.42ms +step:1538/1670 train_time:146762ms step_avg:95.42ms +step:1539/1670 train_time:146855ms step_avg:95.42ms +step:1540/1670 train_time:146950ms step_avg:95.42ms +step:1541/1670 train_time:147045ms step_avg:95.42ms +step:1542/1670 train_time:147139ms step_avg:95.42ms +step:1543/1670 train_time:147234ms step_avg:95.42ms +step:1544/1670 train_time:147329ms step_avg:95.42ms +step:1545/1670 train_time:147424ms step_avg:95.42ms +step:1546/1670 train_time:147519ms step_avg:95.42ms +step:1547/1670 train_time:147615ms step_avg:95.42ms +step:1548/1670 train_time:147709ms step_avg:95.42ms +step:1549/1670 train_time:147803ms step_avg:95.42ms +step:1550/1670 train_time:147899ms step_avg:95.42ms +step:1551/1670 train_time:147994ms step_avg:95.42ms +step:1552/1670 train_time:148088ms step_avg:95.42ms +step:1553/1670 train_time:148181ms step_avg:95.42ms +step:1554/1670 train_time:148276ms step_avg:95.42ms +step:1555/1670 train_time:148370ms step_avg:95.41ms +step:1556/1670 train_time:148465ms step_avg:95.41ms +step:1557/1670 train_time:148560ms step_avg:95.41ms +step:1558/1670 train_time:148655ms step_avg:95.41ms +step:1559/1670 train_time:148749ms step_avg:95.41ms +step:1560/1670 train_time:148845ms step_avg:95.41ms +step:1561/1670 train_time:148940ms step_avg:95.41ms +step:1562/1670 train_time:149034ms step_avg:95.41ms +step:1563/1670 train_time:149128ms step_avg:95.41ms +step:1564/1670 train_time:149222ms step_avg:95.41ms +step:1565/1670 train_time:149318ms step_avg:95.41ms +step:1566/1670 train_time:149413ms step_avg:95.41ms +step:1567/1670 train_time:149507ms step_avg:95.41ms +step:1568/1670 train_time:149601ms step_avg:95.41ms +step:1569/1670 train_time:149698ms step_avg:95.41ms +step:1570/1670 train_time:149793ms step_avg:95.41ms +step:1571/1670 train_time:149887ms step_avg:95.41ms +step:1572/1670 train_time:149982ms step_avg:95.41ms +step:1573/1670 train_time:150076ms step_avg:95.41ms +step:1574/1670 train_time:150170ms step_avg:95.41ms +step:1575/1670 
train_time:150264ms step_avg:95.41ms +step:1576/1670 train_time:150359ms step_avg:95.41ms +step:1577/1670 train_time:150454ms step_avg:95.41ms +step:1578/1670 train_time:150549ms step_avg:95.40ms +step:1579/1670 train_time:150644ms step_avg:95.40ms +step:1580/1670 train_time:150738ms step_avg:95.40ms +step:1581/1670 train_time:150833ms step_avg:95.40ms +step:1582/1670 train_time:150928ms step_avg:95.40ms +step:1583/1670 train_time:151021ms step_avg:95.40ms +step:1584/1670 train_time:151116ms step_avg:95.40ms +step:1585/1670 train_time:151210ms step_avg:95.40ms +step:1586/1670 train_time:151304ms step_avg:95.40ms +step:1587/1670 train_time:151399ms step_avg:95.40ms +step:1588/1670 train_time:151494ms step_avg:95.40ms +step:1589/1670 train_time:151589ms step_avg:95.40ms +step:1590/1670 train_time:151684ms step_avg:95.40ms +step:1591/1670 train_time:151779ms step_avg:95.40ms +step:1592/1670 train_time:151873ms step_avg:95.40ms +step:1593/1670 train_time:151969ms step_avg:95.40ms +step:1594/1670 train_time:152063ms step_avg:95.40ms +step:1595/1670 train_time:152157ms step_avg:95.40ms +step:1596/1670 train_time:152251ms step_avg:95.40ms +step:1597/1670 train_time:152347ms step_avg:95.40ms +step:1598/1670 train_time:152478ms step_avg:95.42ms +step:1599/1670 train_time:152537ms step_avg:95.40ms +step:1600/1670 train_time:152631ms step_avg:95.39ms +step:1601/1670 train_time:152727ms step_avg:95.39ms +step:1602/1670 train_time:152821ms step_avg:95.39ms +step:1603/1670 train_time:152916ms step_avg:95.39ms +step:1604/1670 train_time:153011ms step_avg:95.39ms +step:1605/1670 train_time:153106ms step_avg:95.39ms +step:1606/1670 train_time:153200ms step_avg:95.39ms +step:1607/1670 train_time:153295ms step_avg:95.39ms +step:1608/1670 train_time:153390ms step_avg:95.39ms +step:1609/1670 train_time:153485ms step_avg:95.39ms +step:1610/1670 train_time:153580ms step_avg:95.39ms +step:1611/1670 train_time:153675ms step_avg:95.39ms +step:1612/1670 train_time:153769ms step_avg:95.39ms +step:1613/1670 train_time:153863ms step_avg:95.39ms +step:1614/1670 train_time:153959ms step_avg:95.39ms +step:1615/1670 train_time:154053ms step_avg:95.39ms +step:1616/1670 train_time:154148ms step_avg:95.39ms +step:1617/1670 train_time:154243ms step_avg:95.39ms +step:1618/1670 train_time:154338ms step_avg:95.39ms +step:1619/1670 train_time:154433ms step_avg:95.39ms +step:1620/1670 train_time:154528ms step_avg:95.39ms +step:1621/1670 train_time:154622ms step_avg:95.39ms +step:1622/1670 train_time:154717ms step_avg:95.39ms +step:1623/1670 train_time:154812ms step_avg:95.39ms +step:1624/1670 train_time:154907ms step_avg:95.39ms +step:1625/1670 train_time:155001ms step_avg:95.39ms +step:1625/1670 val_loss:3.2869 train_time:155094ms step_avg:95.44ms +step:1626/1670 train_time:155119ms step_avg:95.40ms +step:1627/1670 train_time:155195ms step_avg:95.39ms +step:1628/1670 train_time:155298ms step_avg:95.39ms +step:1629/1670 train_time:155393ms step_avg:95.39ms +step:1630/1670 train_time:155487ms step_avg:95.39ms +step:1631/1670 train_time:155581ms step_avg:95.39ms +step:1632/1670 train_time:155674ms step_avg:95.39ms +step:1633/1670 train_time:155767ms step_avg:95.39ms +step:1634/1670 train_time:155861ms step_avg:95.39ms +step:1635/1670 train_time:155955ms step_avg:95.39ms +step:1636/1670 train_time:156049ms step_avg:95.38ms +step:1637/1670 train_time:156144ms step_avg:95.38ms +step:1638/1670 train_time:156241ms step_avg:95.38ms +step:1639/1670 train_time:156337ms step_avg:95.39ms +step:1640/1670 train_time:156432ms step_avg:95.39ms 
+step:1641/1670 train_time:156527ms step_avg:95.39ms
+step:1642/1670 train_time:156620ms step_avg:95.38ms
+step:1643/1670 train_time:156714ms step_avg:95.38ms
+step:1644/1670 train_time:156808ms step_avg:95.38ms
+step:1645/1670 train_time:156901ms step_avg:95.38ms
+step:1646/1670 train_time:156995ms step_avg:95.38ms
+step:1647/1670 train_time:157090ms step_avg:95.38ms
+step:1648/1670 train_time:157186ms step_avg:95.38ms
+step:1649/1670 train_time:157282ms step_avg:95.38ms
+step:1650/1670 train_time:157378ms step_avg:95.38ms
+step:1651/1670 train_time:157473ms step_avg:95.38ms
+step:1652/1670 train_time:157568ms step_avg:95.38ms
+step:1653/1670 train_time:157661ms step_avg:95.38ms
+step:1654/1670 train_time:157755ms step_avg:95.38ms
+step:1655/1670 train_time:157850ms step_avg:95.38ms
+step:1656/1670 train_time:157945ms step_avg:95.38ms
+step:1657/1670 train_time:158039ms step_avg:95.38ms
+step:1658/1670 train_time:158133ms step_avg:95.38ms
+step:1659/1670 train_time:158229ms step_avg:95.38ms
+step:1660/1670 train_time:158325ms step_avg:95.38ms
+step:1661/1670 train_time:158421ms step_avg:95.38ms
+step:1662/1670 train_time:158515ms step_avg:95.38ms
+step:1663/1670 train_time:158610ms step_avg:95.38ms
+step:1664/1670 train_time:158704ms step_avg:95.37ms
+step:1665/1670 train_time:158798ms step_avg:95.37ms
+step:1666/1670 train_time:158892ms step_avg:95.37ms
+step:1667/1670 train_time:158987ms step_avg:95.37ms
+step:1668/1670 train_time:159081ms step_avg:95.37ms
+step:1669/1670 train_time:159175ms step_avg:95.37ms
+step:1670/1670 train_time:159269ms step_avg:95.37ms
+step:1670/1670 val_loss:3.2779 train_time:159447ms step_avg:95.48ms
+peak memory allocated: 32712 MiB reserved: 48456 MiB
diff --git a/train_gpt.py b/train_gpt.py
index 257dfe5ef..a7f0b8cac 100644
--- a/train_gpt.py
+++ b/train_gpt.py
@@ -6,8 +6,11 @@
 import time
 import copy
 import glob
+import math
+
 from dataclasses import dataclass
 from functools import lru_cache
+from itertools import accumulate
 from pathlib import Path
 
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
@@ -556,27 +559,26 @@ def forward(self, x: Tensor):
         else:
             return F.linear(x, self.weight.type_as(x))
 
-class Rotary(nn.Module):
-    def __init__(self, dim: int, max_seq_len: int):
-        super().__init__()
-        # half-truncate RoPE by @YouJiacheng (w/ base freq tuning)
-        angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32)
-        angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)])
-        t = torch.arange(max_seq_len, dtype=torch.float32)
-        theta = torch.einsum("i,j -> ij", t, angular_freq)
-        self.cos = nn.Buffer(theta.cos(), persistent=False)
-        self.sin = nn.Buffer(theta.sin(), persistent=False)
-
-    def forward(self, x_BTHD: Tensor):
-        assert self.cos.size(0) >= x_BTHD.size(-3)
-        cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :]
-        x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1)
-        y1 = x1 * cos + x2 * sin
-        y2 = x1 * (-sin) + x2 * cos
-        return torch.cat((y1, y2), 3).type_as(x_BTHD)
+def rotary(x_BTHD: Tensor, cos: Tensor, sin: Tensor):
+    assert cos.size(0) >= x_BTHD.size(-3)
+    cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :]
+    x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1)
+    y1 = x1 * cos + x2 * sin
+    y2 = x1 * (-sin) + x2 * cos
+    return torch.cat((y1, y2), 3).type_as(x_BTHD)
+
+@dataclass
+class AttnArgs:
+    ve: torch.Tensor
+    sa_lambdas: torch.Tensor
+    seqlens: torch.Tensor
+    bm_size: int
+    rotary_cos: torch.Tensor
+    rotary_sin: torch.Tensor
+    attn_scale: float
 
 class CausalSelfAttention(nn.Module):
-    def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128):
+    def __init__(self, dim: int, head_dim: int, num_heads: int):
         super().__init__()
         self.num_heads = num_heads
         self.head_dim = head_dim
@@ -590,36 +592,35 @@ def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128):
         with torch.no_grad():
             self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights
             self.qkvo_w[3].zero_() # init output weights to zero
-        self.rotary = Rotary(head_dim, max_seq_len)
-        # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun
-        # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
-        self.attn_scale = 0.12
         # sparse gated attention to enable context based no-op by @classiclarryd
-        self.attn_gate_dim = 12
-        self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads)
+        self.attn_gate = CastedLinear(12, num_heads)
         self.attn_gate.weight.detach().zero_()
 
-    def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, seqlens: Tensor, bm_size: int):
+    def forward(self, x: Tensor, attn_args: AttnArgs):
         B, T = x.size(0), x.size(1) # batch size, sequence length
         assert B == 1, "varlen sequences requires B == 1"
         assert T % 16 == 0
+        # unpack attention args
+        rotary_cos, rotary_sin = attn_args.rotary_cos, attn_args.rotary_sin
+        ve, sa_lambdas = attn_args.ve, attn_args.sa_lambdas
+        seqlens, attn_scale, bm_size = attn_args.seqlens, attn_args.attn_scale, attn_args.bm_size
         q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2)
         q, k = norm(q), norm(k) # QK norm @Grad62304977
-        q, k = self.rotary(q), self.rotary(k)
+        q, k = rotary(q, rotary_cos, rotary_sin), rotary(k, rotary_cos, rotary_sin)
         if ve is not None:
-            v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977
+            v = sa_lambdas[0] * v + sa_lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977
         else: # skip mid-layers token value embeddings by @YouJiacheng
-            v = lambdas[0] * v
+            v = sa_lambdas[0] * v
         max_len = args.train_max_seq_len if self.training else (args.val_batch_size // (grad_accum_steps * world_size))
         # use flash_attn over flex_attn @varunneal. flash_attn_varlen suggested by @YouJiacheng
         y = flash_attn_varlen_func(q[0], k[0], v[0], cu_seqlens_q=seqlens, cu_seqlens_k=seqlens, max_seqlen_q=max_len, max_seqlen_k=max_len,
-                                   causal=True, softmax_scale=self.attn_scale, window_size=(bm_size, 0))
+                                   causal=True, softmax_scale=attn_scale, window_size=(bm_size, 0))
         y = y.view(B, T, self.num_heads, self.head_dim)
-        y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1)
+        y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate.weight.size(-1)])).view(B, T, self.num_heads, 1)
         y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side
         y = F.linear(y, self.qkvo_w[3].type_as(y))
         return y
@@ -646,18 +647,17 @@
 
 class Block(nn.Module):
-    def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int):
+    def __init__(self, dim: int, head_dim: int, num_heads: int, layer_idx: int):
         super().__init__()
         # skip attention of blocks.7 (the 8th layer) by @YouJiacheng
-        self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None
-        SKIPPED_MLP_BLOCKS = [0] # skip MLP blocks for first MLP layer by @EmelyanenkoK
-        self.mlp = None if layer_idx in SKIPPED_MLP_BLOCKS else MLP(dim)
+        self.attn = CausalSelfAttention(dim, head_dim, num_heads) if layer_idx != 7 else None
+        # skip MLP blocks for first MLP layer by @EmelyanenkoK
+        self.mlp = MLP(dim) if layer_idx != 0 else None
 
-    def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor,
-                seqlens: Tensor, bm_size: int):
+    def forward(self, x: Tensor, x0: Tensor, lambdas: Tensor, attn_args: AttnArgs):
         x = lambdas[0] * x + lambdas[1] * x0
         if self.attn is not None:
-            x = x + self.attn(norm(x), ve, sa_lambdas, seqlens, bm_size)
+            x = x + self.attn(norm(x), attn_args)
         if self.mlp is not None:
             x = x + self.mlp(norm(x))
         return x
@@ -669,14 +669,14 @@ def next_multiple_of_n(v: float | int, *, n: int):
     return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
 
 class GPT(nn.Module):
-    def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int):
+    def __init__(self, vocab_size: int, num_layers: int, num_heads: int, head_dim: int, model_dim: int, max_seq_len: int):
         super().__init__()
         vocab_size = next_multiple_of_n(vocab_size, n=128)
         self.embed = nn.Embedding(vocab_size, model_dim)
         # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897
         # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78
         self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)])
-        self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)])
+        self.blocks = nn.ModuleList([Block(model_dim, head_dim, num_heads, i) for i in range(num_layers)])
         # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency.
         # suggested to me by @Grad62304977. this originates from Karpathy's experiments.
         use_fp8 = not os.environ.get("DISABLE_FP8", False)
@@ -691,6 +691,8 @@ def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim:
             *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas
             torch.ones(pad),
         ]))
+        self.max_seq_len = max_seq_len
+        self.setup_yarn(head_dim)
         # set learning rates
         for param in self.embed.parameters():
             param.lr_mul = 75.
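[The hunks above collapse the per-layer Rotary modules into a single rotary() helper whose cos/sin tables are owned by GPT (built once in setup_yarn, next hunk) and handed to each attention layer through AttnArgs. A minimal standalone sketch of that call pattern follows; max_seq_len and the tensor shapes are illustrative, not taken from the patch.]

# sketch: exercising the rotary() helper with the buffer layout GPT builds once
import torch

head_dim, max_seq_len = 128, 1024
angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim // 4)
angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim // 4)])  # half-truncated RoPE
theta = torch.outer(torch.arange(max_seq_len, dtype=torch.float32), angular_freq)
cos, sin = theta.cos(), theta.sin()  # shared tables, threaded through AttnArgs

def rotary(x_BTHD, cos, sin):  # same body as the patched helper above
    assert cos.size(0) >= x_BTHD.size(-3)
    cos, sin = cos[None, :x_BTHD.size(-3), None, :], sin[None, :x_BTHD.size(-3), None, :]
    x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1)
    return torch.cat((x1 * cos + x2 * sin, x1 * (-sin) + x2 * cos), 3).type_as(x_BTHD)

q = torch.randn(1, 256, 6, head_dim)  # (B, T, num_heads, head_dim)
print(rotary(q, cos, sin).shape)      # torch.Size([1, 256, 6, 128])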
@@ -699,6 +701,33 @@ def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim:
         self.lm_head.weight.lr_mul = 1.0
         self.scalars.lr_mul = 5.0
 
+    def setup_yarn(self, head_dim: int):
+        # store single copy of rotary tensors
+        angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim//4, dtype=torch.float32)
+        # half-truncate RoPE by @YouJiacheng (w/ base freq tuning)
+        angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim//4)])
+        t = torch.arange(self.max_seq_len, dtype=torch.float32)
+        theta = torch.outer(t, angular_freq)
+        self.rotary_cos = nn.Buffer(theta.cos(), persistent=False)
+        self.rotary_sin = nn.Buffer(theta.sin(), persistent=False)
+        self.angular_freq = angular_freq
+
+        # scale attention factor f in attn=softmax(f*qk) logarithmically with window size @classiclarryd
+        windows = list(dict.fromkeys(list(args.ws_schedule) + [args.ws_validate]))
+        scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])]
+        # start with 0.1, inspired by 0.12 from @leloykun and learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
+        attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, factor: acc * factor))
+        self.attn_scales = dict(zip(windows, attn_scales))
+
+    def apply_yarn(self, old_window: int, new_window: int, alpha: int=1, beta: int=32):
+        rotations = args.block_size * old_window * self.angular_freq / (2 * torch.pi)
+        scaling_factor = old_window / new_window
+        interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1)
+        self.angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor)
+        t = torch.arange(self.max_seq_len, dtype=torch.float32, device=self.angular_freq.device)
+        theta = torch.outer(t, self.angular_freq)
+        self.rotary_cos.copy_(theta.cos())
+        self.rotary_sin.copy_(theta.sin())
 
     def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: int):
         assert input_seq.ndim == 1
@@ -723,9 +752,18 @@ def forward(self, input_seq: Tensor, target_seq: Tensor, seqlens: Tensor, ws: in
         n = len(self.blocks) // 2
 
         for i in range(len(self.blocks)):
+            attn_args = AttnArgs(
+                ve=ve[i],
+                sa_lambdas=sa_lambdas[i],
+                seqlens=seqlens,
+                bm_size=bm_sizes[i],
+                rotary_cos=self.rotary_cos,
+                rotary_sin=self.rotary_sin,
+                attn_scale=self.attn_scales[ws]
+            )
             if i >= n:
                 x = x + skip_weights[i - n] * skip_connections.pop()
-            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], seqlens, bm_sizes[i])
+            x = self.blocks[i](x, x0, lambdas[i], attn_args)
             if i < n:
                 skip_connections.append(x)
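[apply_yarn above is a YaRN-style frequency interpolation: channels that complete at least beta rotations over the old attention span keep their frequency, channels below alpha rotations are stretched by the full old/new window ratio, and channels in between are blended linearly. A standalone sketch with illustrative values (block_size=128, head_dim=128, window change 11 -> 13):]

# sketch: the YaRN-style rescaling performed by apply_yarn, shown in isolation
import torch

block_size, head_dim = 128, 128
angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=head_dim // 4)
angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(head_dim // 4)])

old_window, new_window, alpha, beta = 11, 13, 1, 32
# full rotations each channel completes over the old attention span
rotations = block_size * old_window * angular_freq / (2 * torch.pi)
scaling_factor = old_window / new_window  # < 1: lower frequencies for the longer window
interpolation_weight = torch.clamp((rotations - alpha) / (beta - alpha), 0, 1)
# weight 1 (>= beta rotations): multiplier is 1, frequency kept
# weight 0 (<= alpha rotations): multiplier is scaling_factor, fully rescaled
angular_freq *= scaling_factor + interpolation_weight * (1 - scaling_factor)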
@@ -868,15 +906,16 @@ class Hyperparameters:
     train_max_seq_len: int = 128 * 16
     val_batch_size: int = 4 * 64 * 1024 * 8
     # optimization
-    num_iterations: int = 1705 # number of iterations to run
-    cooldown_frac: int = 0.45 # fraction of training spent cooling down the learning rate
+    num_iterations: int = 1670 # number of iterations to run
+    cooldown_frac: float = 0.5 # fraction of training spent cooling down the learning rate
     # evaluation and logging
-    run_id: str = str(uuid.uuid4())
+    run_id: str = f"{uuid.uuid4()}"
     val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
     save_checkpoint: bool = False
     # attention masking
     block_size: int = 128
     ws_schedule: tuple = (3, 7, 11)
+    ws_validate: int = 13 # increase final validation ws @classiclarryd
 
 args = Hyperparameters()
@@ -928,6 +967,7 @@ def nvidia_smi():
     vocab_size=50257,
     num_layers=12,
     num_heads=6,
+    head_dim=128,
     model_dim=768,
     max_seq_len=max(args.train_batch_size, args.val_batch_size) // (grad_accum_steps * world_size)
 ).cuda()
@@ -964,6 +1004,8 @@ def get_lr(step: int):
     return lr
 
 def get_ws(step: int):
+    if step == args.num_iterations:
+        return args.ws_validate
     x = step / (1 + args.num_iterations)
     assert 0 <= x < 1
     ws_idx = int(len(args.ws_schedule) * x)
@@ -1003,9 +1045,13 @@ def get_ws(step: int):
 t0 = time.perf_counter()
 # begin training
 train_steps = args.num_iterations
+ws = get_ws(0)
 for step in range(train_steps + 1):
     last_step = (step == train_steps)
-    ws = get_ws(step)
+    new_ws = get_ws(step)
+    if new_ws != ws:
+        model.apply_yarn(ws, new_ws)
+        ws = new_ws
 
     # --------------- VALIDATION SECTION -----------------
     if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
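[For reference, the schedule the pieces above produce: get_ws steps through ws_schedule=(3, 7, 11) during training and returns ws_validate=13 only at the final step, so the loop calls apply_yarn(11, 13) exactly once before the last validation pass. The attention scales setup_yarn assigns to those windows can be reproduced with the patch's own formula; a sketch, values rounded:]

# sketch: attention-scale schedule for ws_schedule=(3, 7, 11) and ws_validate=13
import math
from itertools import accumulate

windows = [3, 7, 11, 13]
scale_factors = [0.2 * math.log(curr / prev) + 1 for prev, curr in zip(windows[:-1], windows[1:])]
attn_scales = list(accumulate([0.1] + scale_factors, lambda acc, f: acc * f))
print({w: round(s, 4) for w, s in zip(windows, attn_scales)})
# -> {3: 0.1, 7: 0.1169, 11: 0.1275, 13: 0.1318}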