diff --git a/records/071825_TritonMuon/record.txt b/records/071825_TritonMuon/record.txt new file mode 100644 index 000000000..30d539ceb --- /dev/null +++ b/records/071825_TritonMuon/record.txt @@ -0,0 +1,2848 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s 
= inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal 
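+    # C = A @ A.T is symmetric, so block (m, n) of C equals block (n, m).T.
+    # The early return above skips the blocks on one side of the diagonal, and
+    # each computed block is stored twice: once at (m, n) and once transposed
+    # at (n, m). Where a block and its mirror overlap near the diagonal, both
+    # stores write identical values, so the duplication is harmless.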
+ c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + 
offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
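+
+    A minimal usage sketch (mirroring how this file builds its parameter groups
+    further below; step() also requires torch.distributed to be initialized,
+    since gradients are reduce-scattered and parameters all-gathered across ranks):
+
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters()
+                                if p.ndim >= 2 and "embed" not in n]
+        optimizer = Muon(hidden_matrix_params, lr=0.05, momentum=0.95)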
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, 
max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T = x.size(0), x.size(1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=self.attn_scale).transpose(1, 2) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def 
__init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=24/448, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 27.5 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + 
mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / (7.5 * x.size(-1)**0.5)) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos:
bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1750 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +#assert world_size == 8 # this code is designed for 8xH100 +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. 
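+
+# With fewer than 8 GPUs, gradient accumulation keeps the global batch fixed:
+# each optimizer step consumes grad_accum_steps micro-batches of
+# world_size * train_seq_len tokens, so every step sees 8 * train_seq_len
+# tokens no matter the world size (e.g. world_size=4 -> grad_accum_steps=2,
+# world_size=1 -> grad_accum_steps=8).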
+ +# begin logging +logfile = None +if master_process: + run_id = uuid.uuid4() + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, 
get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() + +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Fri Jul 18 15:57:50 2025 
++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 30C P0 132W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 32C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 32C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 29C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 29C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 32C P0 112W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 28C P0 112W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 111824 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 111825 C /usr/bin/python3 614MiB | +| 0 N/A N/A 111826 C /usr/bin/python3 614MiB | +| 0 N/A N/A 111827 C /usr/bin/python3 614MiB | +| 0 N/A N/A 111828 C /usr/bin/python3 614MiB | +| 0 N/A N/A 111829 C /usr/bin/python3 614MiB | +| 0 N/A N/A 111830 C /usr/bin/python3 614MiB | +| 0 N/A N/A 111831 C /usr/bin/python3 614MiB | +| 1 N/A N/A 111825 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 111826 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 111827 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 111828 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 111829 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 111830 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 111831 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== 
+step:0/1750 val_loss:10.8258 train_time:0ms step_avg:0.04ms +step:1/1750 train_time:151ms step_avg:151.05ms +step:2/1750 train_time:176ms step_avg:87.79ms +step:3/1750 train_time:240ms step_avg:79.89ms +step:4/1750 train_time:329ms step_avg:82.25ms +step:5/1750 train_time:420ms step_avg:83.91ms +step:6/1750 train_time:510ms step_avg:84.94ms +step:7/1750 train_time:601ms step_avg:85.79ms +step:8/1750 train_time:691ms step_avg:86.32ms +step:9/1750 train_time:781ms step_avg:86.76ms +step:10/1750 train_time:871ms step_avg:87.08ms +step:11/1750 train_time:961ms step_avg:87.37ms +step:12/1750 train_time:1053ms step_avg:87.78ms +step:13/1750 train_time:1148ms step_avg:88.27ms +step:14/1750 train_time:1242ms step_avg:88.73ms +step:15/1750 train_time:1334ms step_avg:88.93ms +step:16/1750 train_time:1425ms step_avg:89.09ms +step:17/1750 train_time:1517ms step_avg:89.22ms +step:18/1750 train_time:1608ms step_avg:89.31ms +step:19/1750 train_time:1699ms step_avg:89.40ms +step:20/1750 train_time:1790ms step_avg:89.49ms +step:21/1750 train_time:1880ms step_avg:89.52ms +step:22/1750 train_time:1970ms step_avg:89.56ms +step:23/1750 train_time:2062ms step_avg:89.65ms +step:24/1750 train_time:2156ms step_avg:89.82ms +step:25/1750 train_time:2248ms step_avg:89.92ms +step:26/1750 train_time:2341ms step_avg:90.03ms +step:27/1750 train_time:2432ms step_avg:90.07ms +step:28/1750 train_time:2523ms step_avg:90.10ms +step:29/1750 train_time:2613ms step_avg:90.12ms +step:30/1750 train_time:2705ms step_avg:90.17ms +step:31/1750 train_time:2796ms step_avg:90.19ms +step:32/1750 train_time:2887ms step_avg:90.21ms +step:33/1750 train_time:2978ms step_avg:90.24ms +step:34/1750 train_time:3069ms step_avg:90.27ms +step:35/1750 train_time:3162ms step_avg:90.33ms +step:36/1750 train_time:3254ms step_avg:90.38ms +step:37/1750 train_time:3346ms step_avg:90.42ms +step:38/1750 train_time:3438ms step_avg:90.47ms +step:39/1750 train_time:3529ms step_avg:90.49ms +step:40/1750 train_time:3621ms step_avg:90.52ms +step:41/1750 train_time:3712ms step_avg:90.55ms +step:42/1750 train_time:3803ms step_avg:90.55ms +step:43/1750 train_time:3894ms step_avg:90.56ms +step:44/1750 train_time:3985ms step_avg:90.57ms +step:45/1750 train_time:4077ms step_avg:90.60ms +step:46/1750 train_time:4169ms step_avg:90.63ms +step:47/1750 train_time:4261ms step_avg:90.65ms +step:48/1750 train_time:4352ms step_avg:90.66ms +step:49/1750 train_time:4444ms step_avg:90.70ms +step:50/1750 train_time:4535ms step_avg:90.70ms +step:51/1750 train_time:4628ms step_avg:90.74ms +step:52/1750 train_time:4719ms step_avg:90.75ms +step:53/1750 train_time:4810ms step_avg:90.76ms +step:54/1750 train_time:4902ms step_avg:90.77ms +step:55/1750 train_time:4993ms step_avg:90.78ms +step:56/1750 train_time:5085ms step_avg:90.80ms +step:57/1750 train_time:5176ms step_avg:90.80ms +step:58/1750 train_time:5267ms step_avg:90.82ms +step:59/1750 train_time:5359ms step_avg:90.84ms +step:60/1750 train_time:5451ms step_avg:90.85ms +step:61/1750 train_time:5543ms step_avg:90.86ms +step:62/1750 train_time:5634ms step_avg:90.88ms +step:63/1750 train_time:5726ms step_avg:90.89ms +step:64/1750 train_time:5819ms step_avg:90.91ms +step:65/1750 train_time:5910ms step_avg:90.93ms +step:66/1750 train_time:6002ms step_avg:90.94ms +step:67/1750 train_time:6093ms step_avg:90.94ms +step:68/1750 train_time:6185ms step_avg:90.95ms +step:69/1750 train_time:6276ms step_avg:90.96ms +step:70/1750 train_time:6368ms step_avg:90.97ms +step:71/1750 train_time:6459ms step_avg:90.98ms +step:72/1750 train_time:6551ms 
step_avg:90.99ms +step:73/1750 train_time:6643ms step_avg:91.00ms +step:74/1750 train_time:6734ms step_avg:91.00ms +step:75/1750 train_time:6826ms step_avg:91.02ms +step:76/1750 train_time:6918ms step_avg:91.02ms +step:77/1750 train_time:7009ms step_avg:91.03ms +step:78/1750 train_time:7101ms step_avg:91.04ms +step:79/1750 train_time:7192ms step_avg:91.04ms +step:80/1750 train_time:7284ms step_avg:91.05ms +step:81/1750 train_time:7375ms step_avg:91.05ms +step:82/1750 train_time:7466ms step_avg:91.05ms +step:83/1750 train_time:7558ms step_avg:91.06ms +step:84/1750 train_time:7650ms step_avg:91.07ms +step:85/1750 train_time:7742ms step_avg:91.08ms +step:86/1750 train_time:7833ms step_avg:91.08ms +step:87/1750 train_time:7925ms step_avg:91.09ms +step:88/1750 train_time:8016ms step_avg:91.09ms +step:89/1750 train_time:8108ms step_avg:91.10ms +step:90/1750 train_time:8200ms step_avg:91.11ms +step:91/1750 train_time:8290ms step_avg:91.10ms +step:92/1750 train_time:8382ms step_avg:91.11ms +step:93/1750 train_time:8474ms step_avg:91.12ms +step:94/1750 train_time:8566ms step_avg:91.12ms +step:95/1750 train_time:8657ms step_avg:91.13ms +step:96/1750 train_time:8749ms step_avg:91.14ms +step:97/1750 train_time:8841ms step_avg:91.14ms +step:98/1750 train_time:8931ms step_avg:91.14ms +step:99/1750 train_time:9023ms step_avg:91.14ms +step:100/1750 train_time:9114ms step_avg:91.14ms +step:101/1750 train_time:9207ms step_avg:91.15ms +step:102/1750 train_time:9298ms step_avg:91.15ms +step:103/1750 train_time:9389ms step_avg:91.16ms +step:104/1750 train_time:9481ms step_avg:91.16ms +step:105/1750 train_time:9573ms step_avg:91.17ms +step:106/1750 train_time:9665ms step_avg:91.17ms +step:107/1750 train_time:9756ms step_avg:91.18ms +step:108/1750 train_time:9847ms step_avg:91.18ms +step:109/1750 train_time:9939ms step_avg:91.18ms +step:110/1750 train_time:10031ms step_avg:91.19ms +step:111/1750 train_time:10123ms step_avg:91.20ms +step:112/1750 train_time:10215ms step_avg:91.20ms +step:113/1750 train_time:10307ms step_avg:91.21ms +step:114/1750 train_time:10398ms step_avg:91.21ms +step:115/1750 train_time:10490ms step_avg:91.22ms +step:116/1750 train_time:10581ms step_avg:91.22ms +step:117/1750 train_time:10673ms step_avg:91.22ms +step:118/1750 train_time:10765ms step_avg:91.23ms +step:119/1750 train_time:10856ms step_avg:91.23ms +step:120/1750 train_time:10947ms step_avg:91.23ms +step:121/1750 train_time:11039ms step_avg:91.23ms +step:122/1750 train_time:11131ms step_avg:91.24ms +step:123/1750 train_time:11224ms step_avg:91.25ms +step:124/1750 train_time:11315ms step_avg:91.25ms +step:125/1750 train_time:11407ms step_avg:91.26ms +step:125/1750 val_loss:4.6317 train_time:11502ms step_avg:92.02ms +step:126/1750 train_time:11528ms step_avg:91.49ms +step:127/1750 train_time:11599ms step_avg:91.33ms +step:128/1750 train_time:11696ms step_avg:91.38ms +step:129/1750 train_time:11790ms step_avg:91.39ms +step:130/1750 train_time:11881ms step_avg:91.39ms +step:131/1750 train_time:11972ms step_avg:91.39ms +step:132/1750 train_time:12062ms step_avg:91.38ms +step:133/1750 train_time:12153ms step_avg:91.38ms +step:134/1750 train_time:12244ms step_avg:91.38ms +step:135/1750 train_time:12336ms step_avg:91.37ms +step:136/1750 train_time:12427ms step_avg:91.37ms +step:137/1750 train_time:12520ms step_avg:91.39ms +step:138/1750 train_time:12613ms step_avg:91.40ms +step:139/1750 train_time:12708ms step_avg:91.42ms +step:140/1750 train_time:12802ms step_avg:91.44ms +step:141/1750 train_time:12894ms step_avg:91.45ms +step:142/1750 
train_time:12986ms step_avg:91.45ms +step:143/1750 train_time:13078ms step_avg:91.45ms +step:144/1750 train_time:13169ms step_avg:91.45ms +step:145/1750 train_time:13260ms step_avg:91.45ms +step:146/1750 train_time:13351ms step_avg:91.44ms +step:147/1750 train_time:13442ms step_avg:91.44ms +step:148/1750 train_time:13534ms step_avg:91.45ms +step:149/1750 train_time:13627ms step_avg:91.46ms +step:150/1750 train_time:13721ms step_avg:91.48ms +step:151/1750 train_time:13813ms step_avg:91.48ms +step:152/1750 train_time:13906ms step_avg:91.49ms +step:153/1750 train_time:13998ms step_avg:91.49ms +step:154/1750 train_time:14089ms step_avg:91.49ms +step:155/1750 train_time:14180ms step_avg:91.49ms +step:156/1750 train_time:14272ms step_avg:91.48ms +step:157/1750 train_time:14363ms step_avg:91.48ms +step:158/1750 train_time:14455ms step_avg:91.49ms +step:159/1750 train_time:14547ms step_avg:91.49ms +step:160/1750 train_time:14640ms step_avg:91.50ms +step:161/1750 train_time:14732ms step_avg:91.50ms +step:162/1750 train_time:14825ms step_avg:91.51ms +step:163/1750 train_time:14917ms step_avg:91.51ms +step:164/1750 train_time:15008ms step_avg:91.51ms +step:165/1750 train_time:15101ms step_avg:91.52ms +step:166/1750 train_time:15193ms step_avg:91.52ms +step:167/1750 train_time:15284ms step_avg:91.52ms +step:168/1750 train_time:15376ms step_avg:91.52ms +step:169/1750 train_time:15468ms step_avg:91.53ms +step:170/1750 train_time:15561ms step_avg:91.54ms +step:171/1750 train_time:15653ms step_avg:91.54ms +step:172/1750 train_time:15746ms step_avg:91.55ms +step:173/1750 train_time:15838ms step_avg:91.55ms +step:174/1750 train_time:15929ms step_avg:91.55ms +step:175/1750 train_time:16021ms step_avg:91.55ms +step:176/1750 train_time:16112ms step_avg:91.55ms +step:177/1750 train_time:16204ms step_avg:91.55ms +step:178/1750 train_time:16296ms step_avg:91.55ms +step:179/1750 train_time:16387ms step_avg:91.55ms +step:180/1750 train_time:16480ms step_avg:91.56ms +step:181/1750 train_time:16572ms step_avg:91.56ms +step:182/1750 train_time:16665ms step_avg:91.57ms +step:183/1750 train_time:16757ms step_avg:91.57ms +step:184/1750 train_time:16850ms step_avg:91.58ms +step:185/1750 train_time:16943ms step_avg:91.58ms +step:186/1750 train_time:17034ms step_avg:91.58ms +step:187/1750 train_time:17126ms step_avg:91.58ms +step:188/1750 train_time:17218ms step_avg:91.58ms +step:189/1750 train_time:17309ms step_avg:91.58ms +step:190/1750 train_time:17402ms step_avg:91.59ms +step:191/1750 train_time:17494ms step_avg:91.59ms +step:192/1750 train_time:17587ms step_avg:91.60ms +step:193/1750 train_time:17678ms step_avg:91.60ms +step:194/1750 train_time:17770ms step_avg:91.60ms +step:195/1750 train_time:17862ms step_avg:91.60ms +step:196/1750 train_time:17954ms step_avg:91.60ms +step:197/1750 train_time:18046ms step_avg:91.60ms +step:198/1750 train_time:18137ms step_avg:91.60ms +step:199/1750 train_time:18229ms step_avg:91.61ms +step:200/1750 train_time:18321ms step_avg:91.60ms +step:201/1750 train_time:18412ms step_avg:91.60ms +step:202/1750 train_time:18504ms step_avg:91.61ms +step:203/1750 train_time:18597ms step_avg:91.61ms +step:204/1750 train_time:18689ms step_avg:91.61ms +step:205/1750 train_time:18781ms step_avg:91.61ms +step:206/1750 train_time:18874ms step_avg:91.62ms +step:207/1750 train_time:18966ms step_avg:91.62ms +step:208/1750 train_time:19057ms step_avg:91.62ms +step:209/1750 train_time:19149ms step_avg:91.62ms +step:210/1750 train_time:19241ms step_avg:91.62ms +step:211/1750 train_time:19332ms step_avg:91.62ms 
+step:212/1750 train_time:19425ms step_avg:91.63ms +step:213/1750 train_time:19517ms step_avg:91.63ms +step:214/1750 train_time:19609ms step_avg:91.63ms +step:215/1750 train_time:19702ms step_avg:91.63ms +step:216/1750 train_time:19794ms step_avg:91.64ms +step:217/1750 train_time:19886ms step_avg:91.64ms +step:218/1750 train_time:19978ms step_avg:91.64ms +step:219/1750 train_time:20070ms step_avg:91.65ms +step:220/1750 train_time:20162ms step_avg:91.65ms +step:221/1750 train_time:20254ms step_avg:91.65ms +step:222/1750 train_time:20346ms step_avg:91.65ms +step:223/1750 train_time:20438ms step_avg:91.65ms +step:224/1750 train_time:20530ms step_avg:91.65ms +step:225/1750 train_time:20622ms step_avg:91.65ms +step:226/1750 train_time:20714ms step_avg:91.65ms +step:227/1750 train_time:20806ms step_avg:91.66ms +step:228/1750 train_time:20899ms step_avg:91.66ms +step:229/1750 train_time:20990ms step_avg:91.66ms +step:230/1750 train_time:21082ms step_avg:91.66ms +step:231/1750 train_time:21174ms step_avg:91.66ms +step:232/1750 train_time:21267ms step_avg:91.67ms +step:233/1750 train_time:21360ms step_avg:91.67ms +step:234/1750 train_time:21450ms step_avg:91.67ms +step:235/1750 train_time:21542ms step_avg:91.67ms +step:236/1750 train_time:21633ms step_avg:91.67ms +step:237/1750 train_time:21726ms step_avg:91.67ms +step:238/1750 train_time:21819ms step_avg:91.67ms +step:239/1750 train_time:21910ms step_avg:91.68ms +step:240/1750 train_time:22002ms step_avg:91.68ms +step:241/1750 train_time:22094ms step_avg:91.68ms +step:242/1750 train_time:22186ms step_avg:91.68ms +step:243/1750 train_time:22278ms step_avg:91.68ms +step:244/1750 train_time:22370ms step_avg:91.68ms +step:245/1750 train_time:22462ms step_avg:91.68ms +step:246/1750 train_time:22554ms step_avg:91.68ms +step:247/1750 train_time:22646ms step_avg:91.68ms +step:248/1750 train_time:22738ms step_avg:91.69ms +step:249/1750 train_time:22830ms step_avg:91.69ms +step:250/1750 train_time:22923ms step_avg:91.69ms +step:250/1750 val_loss:4.0972 train_time:23018ms step_avg:92.07ms +step:251/1750 train_time:23041ms step_avg:91.80ms +step:252/1750 train_time:23113ms step_avg:91.72ms +step:253/1750 train_time:23211ms step_avg:91.74ms +step:254/1750 train_time:23303ms step_avg:91.75ms +step:255/1750 train_time:23395ms step_avg:91.74ms +step:256/1750 train_time:23486ms step_avg:91.74ms +step:257/1750 train_time:23577ms step_avg:91.74ms +step:258/1750 train_time:23668ms step_avg:91.73ms +step:259/1750 train_time:23759ms step_avg:91.73ms +step:260/1750 train_time:23849ms step_avg:91.73ms +step:261/1750 train_time:23940ms step_avg:91.73ms +step:262/1750 train_time:24034ms step_avg:91.73ms +step:263/1750 train_time:24128ms step_avg:91.74ms +step:264/1750 train_time:24221ms step_avg:91.75ms +step:265/1750 train_time:24314ms step_avg:91.75ms +step:266/1750 train_time:24406ms step_avg:91.75ms +step:267/1750 train_time:24499ms step_avg:91.75ms +step:268/1750 train_time:24591ms step_avg:91.76ms +step:269/1750 train_time:24683ms step_avg:91.76ms +step:270/1750 train_time:24775ms step_avg:91.76ms +step:271/1750 train_time:24867ms step_avg:91.76ms +step:272/1750 train_time:24959ms step_avg:91.76ms +step:273/1750 train_time:25051ms step_avg:91.76ms +step:274/1750 train_time:25144ms step_avg:91.77ms +step:275/1750 train_time:25237ms step_avg:91.77ms +step:276/1750 train_time:25330ms step_avg:91.78ms +step:277/1750 train_time:25423ms step_avg:91.78ms +step:278/1750 train_time:25515ms step_avg:91.78ms +step:279/1750 train_time:25607ms step_avg:91.78ms +step:280/1750 
train_time:25699ms step_avg:91.78ms +step:281/1750 train_time:25792ms step_avg:91.79ms +step:282/1750 train_time:25884ms step_avg:91.79ms +step:283/1750 train_time:25976ms step_avg:91.79ms +step:284/1750 train_time:26068ms step_avg:91.79ms +step:285/1750 train_time:26161ms step_avg:91.79ms +step:286/1750 train_time:26254ms step_avg:91.80ms +step:287/1750 train_time:26346ms step_avg:91.80ms +step:288/1750 train_time:26439ms step_avg:91.80ms +step:289/1750 train_time:26532ms step_avg:91.81ms +step:290/1750 train_time:26624ms step_avg:91.81ms +step:291/1750 train_time:26716ms step_avg:91.81ms +step:292/1750 train_time:26808ms step_avg:91.81ms +step:293/1750 train_time:26900ms step_avg:91.81ms +step:294/1750 train_time:26992ms step_avg:91.81ms +step:295/1750 train_time:27085ms step_avg:91.81ms +step:296/1750 train_time:27177ms step_avg:91.82ms +step:297/1750 train_time:27269ms step_avg:91.82ms +step:298/1750 train_time:27362ms step_avg:91.82ms +step:299/1750 train_time:27455ms step_avg:91.82ms +step:300/1750 train_time:27548ms step_avg:91.83ms +step:301/1750 train_time:27640ms step_avg:91.83ms +step:302/1750 train_time:27732ms step_avg:91.83ms +step:303/1750 train_time:27824ms step_avg:91.83ms +step:304/1750 train_time:27916ms step_avg:91.83ms +step:305/1750 train_time:28008ms step_avg:91.83ms +step:306/1750 train_time:28101ms step_avg:91.83ms +step:307/1750 train_time:28193ms step_avg:91.83ms +step:308/1750 train_time:28285ms step_avg:91.84ms +step:309/1750 train_time:28378ms step_avg:91.84ms +step:310/1750 train_time:28470ms step_avg:91.84ms +step:311/1750 train_time:28563ms step_avg:91.84ms +step:312/1750 train_time:28656ms step_avg:91.85ms +step:313/1750 train_time:28747ms step_avg:91.84ms +step:314/1750 train_time:28839ms step_avg:91.84ms +step:315/1750 train_time:28932ms step_avg:91.85ms +step:316/1750 train_time:29024ms step_avg:91.85ms +step:317/1750 train_time:29117ms step_avg:91.85ms +step:318/1750 train_time:29209ms step_avg:91.85ms +step:319/1750 train_time:29303ms step_avg:91.86ms +step:320/1750 train_time:29395ms step_avg:91.86ms +step:321/1750 train_time:29488ms step_avg:91.86ms +step:322/1750 train_time:29580ms step_avg:91.86ms +step:323/1750 train_time:29672ms step_avg:91.86ms +step:324/1750 train_time:29764ms step_avg:91.86ms +step:325/1750 train_time:29856ms step_avg:91.86ms +step:326/1750 train_time:29948ms step_avg:91.86ms +step:327/1750 train_time:30040ms step_avg:91.87ms +step:328/1750 train_time:30133ms step_avg:91.87ms +step:329/1750 train_time:30226ms step_avg:91.87ms +step:330/1750 train_time:30318ms step_avg:91.87ms +step:331/1750 train_time:30411ms step_avg:91.88ms +step:332/1750 train_time:30504ms step_avg:91.88ms +step:333/1750 train_time:30597ms step_avg:91.88ms +step:334/1750 train_time:30690ms step_avg:91.89ms +step:335/1750 train_time:30782ms step_avg:91.89ms +step:336/1750 train_time:30874ms step_avg:91.89ms +step:337/1750 train_time:30966ms step_avg:91.89ms +step:338/1750 train_time:31059ms step_avg:91.89ms +step:339/1750 train_time:31152ms step_avg:91.89ms +step:340/1750 train_time:31244ms step_avg:91.90ms +step:341/1750 train_time:31338ms step_avg:91.90ms +step:342/1750 train_time:31431ms step_avg:91.90ms +step:343/1750 train_time:31523ms step_avg:91.90ms +step:344/1750 train_time:31615ms step_avg:91.90ms +step:345/1750 train_time:31707ms step_avg:91.90ms +step:346/1750 train_time:31800ms step_avg:91.91ms +step:347/1750 train_time:31892ms step_avg:91.91ms +step:348/1750 train_time:31984ms step_avg:91.91ms +step:349/1750 train_time:32077ms step_avg:91.91ms 
+step:350/1750 train_time:32168ms step_avg:91.91ms +step:351/1750 train_time:32261ms step_avg:91.91ms +step:352/1750 train_time:32353ms step_avg:91.91ms +step:353/1750 train_time:32445ms step_avg:91.91ms +step:354/1750 train_time:32539ms step_avg:91.92ms +step:355/1750 train_time:32631ms step_avg:91.92ms +step:356/1750 train_time:32724ms step_avg:91.92ms +step:357/1750 train_time:32816ms step_avg:91.92ms +step:358/1750 train_time:32908ms step_avg:91.92ms +step:359/1750 train_time:33000ms step_avg:91.92ms +step:360/1750 train_time:33092ms step_avg:91.92ms +step:361/1750 train_time:33185ms step_avg:91.92ms +step:362/1750 train_time:33277ms step_avg:91.93ms +step:363/1750 train_time:33369ms step_avg:91.93ms +step:364/1750 train_time:33461ms step_avg:91.93ms +step:365/1750 train_time:33554ms step_avg:91.93ms +step:366/1750 train_time:33646ms step_avg:91.93ms +step:367/1750 train_time:33738ms step_avg:91.93ms +step:368/1750 train_time:33830ms step_avg:91.93ms +step:369/1750 train_time:33923ms step_avg:91.93ms +step:370/1750 train_time:34015ms step_avg:91.93ms +step:371/1750 train_time:34107ms step_avg:91.93ms +step:372/1750 train_time:34200ms step_avg:91.93ms +step:373/1750 train_time:34292ms step_avg:91.94ms +step:374/1750 train_time:34385ms step_avg:91.94ms +step:375/1750 train_time:34477ms step_avg:91.94ms +step:375/1750 val_loss:3.8911 train_time:34573ms step_avg:92.19ms +step:376/1750 train_time:34597ms step_avg:92.01ms +step:377/1750 train_time:34671ms step_avg:91.97ms +step:378/1750 train_time:34767ms step_avg:91.98ms +step:379/1750 train_time:34861ms step_avg:91.98ms +step:380/1750 train_time:34953ms step_avg:91.98ms +step:381/1750 train_time:35045ms step_avg:91.98ms +step:382/1750 train_time:35136ms step_avg:91.98ms +step:383/1750 train_time:35228ms step_avg:91.98ms +step:384/1750 train_time:35320ms step_avg:91.98ms +step:385/1750 train_time:35411ms step_avg:91.98ms +step:386/1750 train_time:35503ms step_avg:91.98ms +step:387/1750 train_time:35597ms step_avg:91.98ms +step:388/1750 train_time:35691ms step_avg:91.99ms +step:389/1750 train_time:35786ms step_avg:91.99ms +step:390/1750 train_time:35878ms step_avg:92.00ms +step:391/1750 train_time:35973ms step_avg:92.00ms +step:392/1750 train_time:36067ms step_avg:92.01ms +step:393/1750 train_time:36160ms step_avg:92.01ms +step:394/1750 train_time:36253ms step_avg:92.01ms +step:395/1750 train_time:36346ms step_avg:92.01ms +step:396/1750 train_time:36439ms step_avg:92.02ms +step:397/1750 train_time:36533ms step_avg:92.02ms +step:398/1750 train_time:36628ms step_avg:92.03ms +step:399/1750 train_time:36723ms step_avg:92.04ms +step:400/1750 train_time:36819ms step_avg:92.05ms +step:401/1750 train_time:36913ms step_avg:92.05ms +step:402/1750 train_time:37007ms step_avg:92.06ms +step:403/1750 train_time:37101ms step_avg:92.06ms +step:404/1750 train_time:37195ms step_avg:92.07ms +step:405/1750 train_time:37288ms step_avg:92.07ms +step:406/1750 train_time:37381ms step_avg:92.07ms +step:407/1750 train_time:37476ms step_avg:92.08ms +step:408/1750 train_time:37570ms step_avg:92.08ms +step:409/1750 train_time:37665ms step_avg:92.09ms +step:410/1750 train_time:37760ms step_avg:92.10ms +step:411/1750 train_time:37855ms step_avg:92.10ms +step:412/1750 train_time:37949ms step_avg:92.11ms +step:413/1750 train_time:38044ms step_avg:92.12ms +step:414/1750 train_time:38138ms step_avg:92.12ms +step:415/1750 train_time:38232ms step_avg:92.12ms +step:416/1750 train_time:38325ms step_avg:92.13ms +step:417/1750 train_time:38419ms step_avg:92.13ms +step:418/1750 
train_time:38514ms step_avg:92.14ms +step:419/1750 train_time:38608ms step_avg:92.14ms +step:420/1750 train_time:38703ms step_avg:92.15ms +step:421/1750 train_time:38798ms step_avg:92.16ms +step:422/1750 train_time:38893ms step_avg:92.16ms +step:423/1750 train_time:38987ms step_avg:92.17ms +step:424/1750 train_time:39081ms step_avg:92.17ms +step:425/1750 train_time:39176ms step_avg:92.18ms +step:426/1750 train_time:39269ms step_avg:92.18ms +step:427/1750 train_time:39363ms step_avg:92.18ms +step:428/1750 train_time:39458ms step_avg:92.19ms +step:429/1750 train_time:39552ms step_avg:92.20ms +step:430/1750 train_time:39646ms step_avg:92.20ms +step:431/1750 train_time:39741ms step_avg:92.21ms +step:432/1750 train_time:39837ms step_avg:92.21ms +step:433/1750 train_time:39931ms step_avg:92.22ms +step:434/1750 train_time:40026ms step_avg:92.23ms +step:435/1750 train_time:40120ms step_avg:92.23ms +step:436/1750 train_time:40214ms step_avg:92.23ms +step:437/1750 train_time:40308ms step_avg:92.24ms +step:438/1750 train_time:40402ms step_avg:92.24ms +step:439/1750 train_time:40497ms step_avg:92.25ms +step:440/1750 train_time:40591ms step_avg:92.25ms +step:441/1750 train_time:40685ms step_avg:92.26ms +step:442/1750 train_time:40780ms step_avg:92.26ms +step:443/1750 train_time:40876ms step_avg:92.27ms +step:444/1750 train_time:40971ms step_avg:92.28ms +step:445/1750 train_time:41065ms step_avg:92.28ms +step:446/1750 train_time:41159ms step_avg:92.29ms +step:447/1750 train_time:41254ms step_avg:92.29ms +step:448/1750 train_time:41348ms step_avg:92.29ms +step:449/1750 train_time:41442ms step_avg:92.30ms +step:450/1750 train_time:41536ms step_avg:92.30ms +step:451/1750 train_time:41630ms step_avg:92.31ms +step:452/1750 train_time:41724ms step_avg:92.31ms +step:453/1750 train_time:41819ms step_avg:92.32ms +step:454/1750 train_time:41914ms step_avg:92.32ms +step:455/1750 train_time:42008ms step_avg:92.33ms +step:456/1750 train_time:42102ms step_avg:92.33ms +step:457/1750 train_time:42196ms step_avg:92.33ms +step:458/1750 train_time:42291ms step_avg:92.34ms +step:459/1750 train_time:42385ms step_avg:92.34ms +step:460/1750 train_time:42479ms step_avg:92.35ms +step:461/1750 train_time:42573ms step_avg:92.35ms +step:462/1750 train_time:42667ms step_avg:92.35ms +step:463/1750 train_time:42761ms step_avg:92.36ms +step:464/1750 train_time:42855ms step_avg:92.36ms +step:465/1750 train_time:42950ms step_avg:92.36ms +step:466/1750 train_time:43044ms step_avg:92.37ms +step:467/1750 train_time:43138ms step_avg:92.37ms +step:468/1750 train_time:43233ms step_avg:92.38ms +step:469/1750 train_time:43327ms step_avg:92.38ms +step:470/1750 train_time:43421ms step_avg:92.38ms +step:471/1750 train_time:43515ms step_avg:92.39ms +step:472/1750 train_time:43609ms step_avg:92.39ms +step:473/1750 train_time:43704ms step_avg:92.40ms +step:474/1750 train_time:43798ms step_avg:92.40ms +step:475/1750 train_time:43892ms step_avg:92.41ms +step:476/1750 train_time:43987ms step_avg:92.41ms +step:477/1750 train_time:44080ms step_avg:92.41ms +step:478/1750 train_time:44174ms step_avg:92.41ms +step:479/1750 train_time:44269ms step_avg:92.42ms +step:480/1750 train_time:44363ms step_avg:92.42ms +step:481/1750 train_time:44458ms step_avg:92.43ms +step:482/1750 train_time:44552ms step_avg:92.43ms +step:483/1750 train_time:44645ms step_avg:92.43ms +step:484/1750 train_time:44739ms step_avg:92.44ms +step:485/1750 train_time:44833ms step_avg:92.44ms +step:486/1750 train_time:44927ms step_avg:92.44ms +step:487/1750 train_time:45022ms step_avg:92.45ms 
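A note on reading these rows (the two-line check below is illustrative, not part of the training script above): step_avg is the cumulative mean train_time / step, and the val_loss rows fold the validation pass into the same cumulative total, which is why step_avg jumps slightly at each checkpoint.

step, train_ms, val_ms = 375, 34477, 34573  # train and val_loss rows at step 375 above
print(round(train_ms / step, 2))  # 91.94 -> matches the logged step_avg:91.94ms
print(round(val_ms / step, 2))    # 92.19 -> matches the logged step_avg:92.19ms on the val row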
+step:488/1750 train_time:45116ms step_avg:92.45ms +step:489/1750 train_time:45211ms step_avg:92.46ms +step:490/1750 train_time:45305ms step_avg:92.46ms +step:491/1750 train_time:45400ms step_avg:92.46ms +step:492/1750 train_time:45495ms step_avg:92.47ms +step:493/1750 train_time:45589ms step_avg:92.47ms +step:494/1750 train_time:45683ms step_avg:92.48ms +step:495/1750 train_time:45778ms step_avg:92.48ms +step:496/1750 train_time:45873ms step_avg:92.49ms +step:497/1750 train_time:45968ms step_avg:92.49ms +step:498/1750 train_time:46063ms step_avg:92.50ms +step:499/1750 train_time:46157ms step_avg:92.50ms +step:500/1750 train_time:46251ms step_avg:92.50ms +step:500/1750 val_loss:3.7412 train_time:46349ms step_avg:92.70ms +step:501/1750 train_time:46373ms step_avg:92.56ms +step:502/1750 train_time:46448ms step_avg:92.53ms +step:503/1750 train_time:46548ms step_avg:92.54ms +step:504/1750 train_time:46642ms step_avg:92.54ms +step:505/1750 train_time:46738ms step_avg:92.55ms +step:506/1750 train_time:46831ms step_avg:92.55ms +step:507/1750 train_time:46923ms step_avg:92.55ms +step:508/1750 train_time:47016ms step_avg:92.55ms +step:509/1750 train_time:47109ms step_avg:92.55ms +step:510/1750 train_time:47202ms step_avg:92.55ms +step:511/1750 train_time:47296ms step_avg:92.56ms +step:512/1750 train_time:47392ms step_avg:92.56ms +step:513/1750 train_time:47489ms step_avg:92.57ms +step:514/1750 train_time:47584ms step_avg:92.58ms +step:515/1750 train_time:47679ms step_avg:92.58ms +step:516/1750 train_time:47773ms step_avg:92.58ms +step:517/1750 train_time:47867ms step_avg:92.59ms +step:518/1750 train_time:47960ms step_avg:92.59ms +step:519/1750 train_time:48054ms step_avg:92.59ms +step:520/1750 train_time:48147ms step_avg:92.59ms +step:521/1750 train_time:48241ms step_avg:92.59ms +step:522/1750 train_time:48335ms step_avg:92.60ms +step:523/1750 train_time:48431ms step_avg:92.60ms +step:524/1750 train_time:48526ms step_avg:92.61ms +step:525/1750 train_time:48622ms step_avg:92.61ms +step:526/1750 train_time:48718ms step_avg:92.62ms +step:527/1750 train_time:48812ms step_avg:92.62ms +step:528/1750 train_time:48906ms step_avg:92.62ms +step:529/1750 train_time:49000ms step_avg:92.63ms +step:530/1750 train_time:49094ms step_avg:92.63ms +step:531/1750 train_time:49189ms step_avg:92.63ms +step:532/1750 train_time:49283ms step_avg:92.64ms +step:533/1750 train_time:49378ms step_avg:92.64ms +step:534/1750 train_time:49473ms step_avg:92.65ms +step:535/1750 train_time:49569ms step_avg:92.65ms +step:536/1750 train_time:49664ms step_avg:92.66ms +step:537/1750 train_time:49759ms step_avg:92.66ms +step:538/1750 train_time:49853ms step_avg:92.66ms +step:539/1750 train_time:49947ms step_avg:92.67ms +step:540/1750 train_time:50041ms step_avg:92.67ms +step:541/1750 train_time:50136ms step_avg:92.67ms +step:542/1750 train_time:50230ms step_avg:92.68ms +step:543/1750 train_time:50324ms step_avg:92.68ms +step:544/1750 train_time:50419ms step_avg:92.68ms +step:545/1750 train_time:50514ms step_avg:92.69ms +step:546/1750 train_time:50609ms step_avg:92.69ms +step:547/1750 train_time:50703ms step_avg:92.69ms +step:548/1750 train_time:50799ms step_avg:92.70ms +step:549/1750 train_time:50893ms step_avg:92.70ms +step:550/1750 train_time:50988ms step_avg:92.70ms +step:551/1750 train_time:51082ms step_avg:92.71ms +step:552/1750 train_time:51176ms step_avg:92.71ms +step:553/1750 train_time:51270ms step_avg:92.71ms +step:554/1750 train_time:51364ms step_avg:92.72ms +step:555/1750 train_time:51460ms step_avg:92.72ms +step:556/1750 
train_time:51555ms step_avg:92.73ms +step:557/1750 train_time:51650ms step_avg:92.73ms +step:558/1750 train_time:51745ms step_avg:92.73ms +step:559/1750 train_time:51839ms step_avg:92.74ms +step:560/1750 train_time:51934ms step_avg:92.74ms +step:561/1750 train_time:52028ms step_avg:92.74ms +step:562/1750 train_time:52122ms step_avg:92.74ms +step:563/1750 train_time:52216ms step_avg:92.75ms +step:564/1750 train_time:52311ms step_avg:92.75ms +step:565/1750 train_time:52406ms step_avg:92.75ms +step:566/1750 train_time:52501ms step_avg:92.76ms +step:567/1750 train_time:52596ms step_avg:92.76ms +step:568/1750 train_time:52691ms step_avg:92.77ms +step:569/1750 train_time:52785ms step_avg:92.77ms +step:570/1750 train_time:52880ms step_avg:92.77ms +step:571/1750 train_time:52976ms step_avg:92.78ms +step:572/1750 train_time:53069ms step_avg:92.78ms +step:573/1750 train_time:53163ms step_avg:92.78ms +step:574/1750 train_time:53258ms step_avg:92.78ms +step:575/1750 train_time:53353ms step_avg:92.79ms +step:576/1750 train_time:53447ms step_avg:92.79ms +step:577/1750 train_time:53542ms step_avg:92.79ms +step:578/1750 train_time:53636ms step_avg:92.80ms +step:579/1750 train_time:53731ms step_avg:92.80ms +step:580/1750 train_time:53827ms step_avg:92.81ms +step:581/1750 train_time:53922ms step_avg:92.81ms +step:582/1750 train_time:54017ms step_avg:92.81ms +step:583/1750 train_time:54112ms step_avg:92.82ms +step:584/1750 train_time:54205ms step_avg:92.82ms +step:585/1750 train_time:54300ms step_avg:92.82ms +step:586/1750 train_time:54394ms step_avg:92.82ms +step:587/1750 train_time:54490ms step_avg:92.83ms +step:588/1750 train_time:54584ms step_avg:92.83ms +step:589/1750 train_time:54679ms step_avg:92.83ms +step:590/1750 train_time:54773ms step_avg:92.84ms +step:591/1750 train_time:54868ms step_avg:92.84ms +step:592/1750 train_time:54963ms step_avg:92.84ms +step:593/1750 train_time:55058ms step_avg:92.85ms +step:594/1750 train_time:55152ms step_avg:92.85ms +step:595/1750 train_time:55245ms step_avg:92.85ms +step:596/1750 train_time:55340ms step_avg:92.85ms +step:597/1750 train_time:55434ms step_avg:92.85ms +step:598/1750 train_time:55529ms step_avg:92.86ms +step:599/1750 train_time:55624ms step_avg:92.86ms +step:600/1750 train_time:55719ms step_avg:92.86ms +step:601/1750 train_time:55813ms step_avg:92.87ms +step:602/1750 train_time:55910ms step_avg:92.87ms +step:603/1750 train_time:56004ms step_avg:92.88ms +step:604/1750 train_time:56098ms step_avg:92.88ms +step:605/1750 train_time:56193ms step_avg:92.88ms +step:606/1750 train_time:56288ms step_avg:92.88ms +step:607/1750 train_time:56383ms step_avg:92.89ms +step:608/1750 train_time:56478ms step_avg:92.89ms +step:609/1750 train_time:56572ms step_avg:92.89ms +step:610/1750 train_time:56668ms step_avg:92.90ms +step:611/1750 train_time:56762ms step_avg:92.90ms +step:612/1750 train_time:56858ms step_avg:92.91ms +step:613/1750 train_time:56952ms step_avg:92.91ms +step:614/1750 train_time:57047ms step_avg:92.91ms +step:615/1750 train_time:57141ms step_avg:92.91ms +step:616/1750 train_time:57236ms step_avg:92.92ms +step:617/1750 train_time:57330ms step_avg:92.92ms +step:618/1750 train_time:57425ms step_avg:92.92ms +step:619/1750 train_time:57519ms step_avg:92.92ms +step:620/1750 train_time:57613ms step_avg:92.92ms +step:621/1750 train_time:57708ms step_avg:92.93ms +step:622/1750 train_time:57802ms step_avg:92.93ms +step:623/1750 train_time:57896ms step_avg:92.93ms +step:624/1750 train_time:57990ms step_avg:92.93ms +step:625/1750 train_time:58086ms step_avg:92.94ms 
+step:625/1750 val_loss:3.6537 train_time:58184ms step_avg:93.09ms +step:626/1750 train_time:58208ms step_avg:92.98ms +step:627/1750 train_time:58285ms step_avg:92.96ms +step:628/1750 train_time:58383ms step_avg:92.97ms +step:629/1750 train_time:58478ms step_avg:92.97ms +step:630/1750 train_time:58571ms step_avg:92.97ms +step:631/1750 train_time:58665ms step_avg:92.97ms +step:632/1750 train_time:58759ms step_avg:92.97ms +step:633/1750 train_time:58852ms step_avg:92.97ms +step:634/1750 train_time:58946ms step_avg:92.97ms +step:635/1750 train_time:59040ms step_avg:92.98ms +step:636/1750 train_time:59133ms step_avg:92.98ms +step:637/1750 train_time:59229ms step_avg:92.98ms +step:638/1750 train_time:59325ms step_avg:92.99ms +step:639/1750 train_time:59420ms step_avg:92.99ms +step:640/1750 train_time:59516ms step_avg:92.99ms +step:641/1750 train_time:59611ms step_avg:93.00ms +step:642/1750 train_time:59706ms step_avg:93.00ms +step:643/1750 train_time:59800ms step_avg:93.00ms +step:644/1750 train_time:59894ms step_avg:93.00ms +step:645/1750 train_time:59987ms step_avg:93.00ms +step:646/1750 train_time:60081ms step_avg:93.00ms +step:647/1750 train_time:60176ms step_avg:93.01ms +step:648/1750 train_time:60271ms step_avg:93.01ms +step:649/1750 train_time:60367ms step_avg:93.01ms +step:650/1750 train_time:60462ms step_avg:93.02ms +step:651/1750 train_time:60559ms step_avg:93.02ms +step:652/1750 train_time:60655ms step_avg:93.03ms +step:653/1750 train_time:60750ms step_avg:93.03ms +step:654/1750 train_time:60846ms step_avg:93.04ms +step:655/1750 train_time:60941ms step_avg:93.04ms +step:656/1750 train_time:61037ms step_avg:93.04ms +step:657/1750 train_time:61132ms step_avg:93.05ms +step:658/1750 train_time:61228ms step_avg:93.05ms +step:659/1750 train_time:61324ms step_avg:93.06ms +step:660/1750 train_time:61421ms step_avg:93.06ms +step:661/1750 train_time:61517ms step_avg:93.07ms +step:662/1750 train_time:61614ms step_avg:93.07ms +step:663/1750 train_time:61709ms step_avg:93.08ms +step:664/1750 train_time:61805ms step_avg:93.08ms +step:665/1750 train_time:61900ms step_avg:93.08ms +step:666/1750 train_time:61996ms step_avg:93.09ms +step:667/1750 train_time:62092ms step_avg:93.09ms +step:668/1750 train_time:62188ms step_avg:93.10ms +step:669/1750 train_time:62284ms step_avg:93.10ms +step:670/1750 train_time:62381ms step_avg:93.11ms +step:671/1750 train_time:62477ms step_avg:93.11ms +step:672/1750 train_time:62573ms step_avg:93.11ms +step:673/1750 train_time:62669ms step_avg:93.12ms +step:674/1750 train_time:62765ms step_avg:93.12ms +step:675/1750 train_time:62860ms step_avg:93.13ms +step:676/1750 train_time:62956ms step_avg:93.13ms +step:677/1750 train_time:63051ms step_avg:93.13ms +step:678/1750 train_time:63147ms step_avg:93.14ms +step:679/1750 train_time:63243ms step_avg:93.14ms +step:680/1750 train_time:63340ms step_avg:93.15ms +step:681/1750 train_time:63436ms step_avg:93.15ms +step:682/1750 train_time:63532ms step_avg:93.16ms +step:683/1750 train_time:63627ms step_avg:93.16ms +step:684/1750 train_time:63723ms step_avg:93.16ms +step:685/1750 train_time:63820ms step_avg:93.17ms +step:686/1750 train_time:63915ms step_avg:93.17ms +step:687/1750 train_time:64011ms step_avg:93.17ms +step:688/1750 train_time:64106ms step_avg:93.18ms +step:689/1750 train_time:64202ms step_avg:93.18ms +step:690/1750 train_time:64299ms step_avg:93.19ms +step:691/1750 train_time:64394ms step_avg:93.19ms +step:692/1750 train_time:64490ms step_avg:93.19ms +step:693/1750 train_time:64586ms step_avg:93.20ms +step:694/1750 
train_time:64681ms step_avg:93.20ms +step:695/1750 train_time:64778ms step_avg:93.21ms +step:696/1750 train_time:64874ms step_avg:93.21ms +step:697/1750 train_time:64969ms step_avg:93.21ms +step:698/1750 train_time:65065ms step_avg:93.22ms +step:699/1750 train_time:65161ms step_avg:93.22ms +step:700/1750 train_time:65257ms step_avg:93.22ms +step:701/1750 train_time:65353ms step_avg:93.23ms +step:702/1750 train_time:65450ms step_avg:93.23ms +step:703/1750 train_time:65545ms step_avg:93.24ms +step:704/1750 train_time:65642ms step_avg:93.24ms +step:705/1750 train_time:65738ms step_avg:93.25ms +step:706/1750 train_time:65834ms step_avg:93.25ms +step:707/1750 train_time:65930ms step_avg:93.25ms +step:708/1750 train_time:66025ms step_avg:93.26ms +step:709/1750 train_time:66122ms step_avg:93.26ms +step:710/1750 train_time:66218ms step_avg:93.26ms +step:711/1750 train_time:66314ms step_avg:93.27ms +step:712/1750 train_time:66410ms step_avg:93.27ms +step:713/1750 train_time:66505ms step_avg:93.28ms +step:714/1750 train_time:66601ms step_avg:93.28ms +step:715/1750 train_time:66698ms step_avg:93.28ms +step:716/1750 train_time:66793ms step_avg:93.29ms +step:717/1750 train_time:66889ms step_avg:93.29ms +step:718/1750 train_time:66985ms step_avg:93.29ms +step:719/1750 train_time:67081ms step_avg:93.30ms +step:720/1750 train_time:67176ms step_avg:93.30ms +step:721/1750 train_time:67272ms step_avg:93.30ms +step:722/1750 train_time:67368ms step_avg:93.31ms +step:723/1750 train_time:67464ms step_avg:93.31ms +step:724/1750 train_time:67561ms step_avg:93.32ms +step:725/1750 train_time:67657ms step_avg:93.32ms +step:726/1750 train_time:67753ms step_avg:93.32ms +step:727/1750 train_time:67850ms step_avg:93.33ms +step:728/1750 train_time:67945ms step_avg:93.33ms +step:729/1750 train_time:68042ms step_avg:93.34ms +step:730/1750 train_time:68138ms step_avg:93.34ms +step:731/1750 train_time:68234ms step_avg:93.34ms +step:732/1750 train_time:68330ms step_avg:93.35ms +step:733/1750 train_time:68425ms step_avg:93.35ms +step:734/1750 train_time:68521ms step_avg:93.35ms +step:735/1750 train_time:68617ms step_avg:93.36ms +step:736/1750 train_time:68713ms step_avg:93.36ms +step:737/1750 train_time:68809ms step_avg:93.36ms +step:738/1750 train_time:68904ms step_avg:93.37ms +step:739/1750 train_time:69000ms step_avg:93.37ms +step:740/1750 train_time:69097ms step_avg:93.37ms +step:741/1750 train_time:69193ms step_avg:93.38ms +step:742/1750 train_time:69289ms step_avg:93.38ms +step:743/1750 train_time:69385ms step_avg:93.38ms +step:744/1750 train_time:69481ms step_avg:93.39ms +step:745/1750 train_time:69577ms step_avg:93.39ms +step:746/1750 train_time:69673ms step_avg:93.40ms +step:747/1750 train_time:69770ms step_avg:93.40ms +step:748/1750 train_time:69866ms step_avg:93.40ms +step:749/1750 train_time:69961ms step_avg:93.41ms +step:750/1750 train_time:70057ms step_avg:93.41ms +step:750/1750 val_loss:3.5904 train_time:70156ms step_avg:93.54ms +step:751/1750 train_time:70181ms step_avg:93.45ms +step:752/1750 train_time:70258ms step_avg:93.43ms +step:753/1750 train_time:70358ms step_avg:93.44ms +step:754/1750 train_time:70454ms step_avg:93.44ms +step:755/1750 train_time:70550ms step_avg:93.44ms +step:756/1750 train_time:70645ms step_avg:93.45ms +step:757/1750 train_time:70740ms step_avg:93.45ms +step:758/1750 train_time:70835ms step_avg:93.45ms +step:759/1750 train_time:70930ms step_avg:93.45ms +step:760/1750 train_time:71024ms step_avg:93.45ms +step:761/1750 train_time:71120ms step_avg:93.46ms +step:762/1750 train_time:71218ms 
step_avg:93.46ms +step:763/1750 train_time:71316ms step_avg:93.47ms +step:764/1750 train_time:71413ms step_avg:93.47ms +step:765/1750 train_time:71510ms step_avg:93.48ms +step:766/1750 train_time:71606ms step_avg:93.48ms +step:767/1750 train_time:71701ms step_avg:93.48ms +step:768/1750 train_time:71797ms step_avg:93.49ms +step:769/1750 train_time:71891ms step_avg:93.49ms +step:770/1750 train_time:71986ms step_avg:93.49ms +step:771/1750 train_time:72082ms step_avg:93.49ms +step:772/1750 train_time:72178ms step_avg:93.49ms +step:773/1750 train_time:72275ms step_avg:93.50ms +step:774/1750 train_time:72372ms step_avg:93.50ms +step:775/1750 train_time:72469ms step_avg:93.51ms +step:776/1750 train_time:72566ms step_avg:93.51ms +step:777/1750 train_time:72661ms step_avg:93.52ms +step:778/1750 train_time:72756ms step_avg:93.52ms +step:779/1750 train_time:72851ms step_avg:93.52ms +step:780/1750 train_time:72947ms step_avg:93.52ms +step:781/1750 train_time:73042ms step_avg:93.52ms +step:782/1750 train_time:73138ms step_avg:93.53ms +step:783/1750 train_time:73234ms step_avg:93.53ms +step:784/1750 train_time:73330ms step_avg:93.53ms +step:785/1750 train_time:73427ms step_avg:93.54ms +step:786/1750 train_time:73524ms step_avg:93.54ms +step:787/1750 train_time:73620ms step_avg:93.54ms +step:788/1750 train_time:73715ms step_avg:93.55ms +step:789/1750 train_time:73811ms step_avg:93.55ms +step:790/1750 train_time:73907ms step_avg:93.55ms +step:791/1750 train_time:74003ms step_avg:93.56ms +step:792/1750 train_time:74098ms step_avg:93.56ms +step:793/1750 train_time:74195ms step_avg:93.56ms +step:794/1750 train_time:74291ms step_avg:93.57ms +step:795/1750 train_time:74388ms step_avg:93.57ms +step:796/1750 train_time:74485ms step_avg:93.57ms +step:797/1750 train_time:74581ms step_avg:93.58ms +step:798/1750 train_time:74677ms step_avg:93.58ms +step:799/1750 train_time:74773ms step_avg:93.58ms +step:800/1750 train_time:74869ms step_avg:93.59ms +step:801/1750 train_time:74965ms step_avg:93.59ms +step:802/1750 train_time:75061ms step_avg:93.59ms +step:803/1750 train_time:75157ms step_avg:93.59ms +step:804/1750 train_time:75253ms step_avg:93.60ms +step:805/1750 train_time:75349ms step_avg:93.60ms +step:806/1750 train_time:75445ms step_avg:93.60ms +step:807/1750 train_time:75541ms step_avg:93.61ms +step:808/1750 train_time:75638ms step_avg:93.61ms +step:809/1750 train_time:75734ms step_avg:93.61ms +step:810/1750 train_time:75830ms step_avg:93.62ms +step:811/1750 train_time:75926ms step_avg:93.62ms +step:812/1750 train_time:76022ms step_avg:93.62ms +step:813/1750 train_time:76118ms step_avg:93.63ms +step:814/1750 train_time:76214ms step_avg:93.63ms +step:815/1750 train_time:76311ms step_avg:93.63ms +step:816/1750 train_time:76408ms step_avg:93.64ms +step:817/1750 train_time:76505ms step_avg:93.64ms +step:818/1750 train_time:76601ms step_avg:93.64ms +step:819/1750 train_time:76697ms step_avg:93.65ms +step:820/1750 train_time:76793ms step_avg:93.65ms +step:821/1750 train_time:76889ms step_avg:93.65ms +step:822/1750 train_time:76986ms step_avg:93.66ms +step:823/1750 train_time:77082ms step_avg:93.66ms +step:824/1750 train_time:77178ms step_avg:93.66ms +step:825/1750 train_time:77274ms step_avg:93.67ms +step:826/1750 train_time:77370ms step_avg:93.67ms +step:827/1750 train_time:77467ms step_avg:93.67ms +step:828/1750 train_time:77563ms step_avg:93.68ms +step:829/1750 train_time:77659ms step_avg:93.68ms +step:830/1750 train_time:77755ms step_avg:93.68ms +step:831/1750 train_time:77852ms step_avg:93.68ms +step:832/1750 
train_time:77949ms step_avg:93.69ms +step:833/1750 train_time:78045ms step_avg:93.69ms +step:834/1750 train_time:78141ms step_avg:93.69ms +step:835/1750 train_time:78237ms step_avg:93.70ms +step:836/1750 train_time:78334ms step_avg:93.70ms +step:837/1750 train_time:78431ms step_avg:93.70ms +step:838/1750 train_time:78527ms step_avg:93.71ms +step:839/1750 train_time:78623ms step_avg:93.71ms +step:840/1750 train_time:78720ms step_avg:93.71ms +step:841/1750 train_time:78817ms step_avg:93.72ms +step:842/1750 train_time:78914ms step_avg:93.72ms +step:843/1750 train_time:79010ms step_avg:93.72ms +step:844/1750 train_time:79106ms step_avg:93.73ms +step:845/1750 train_time:79203ms step_avg:93.73ms +step:846/1750 train_time:79299ms step_avg:93.73ms +step:847/1750 train_time:79395ms step_avg:93.74ms +step:848/1750 train_time:79492ms step_avg:93.74ms +step:849/1750 train_time:79588ms step_avg:93.74ms +step:850/1750 train_time:79685ms step_avg:93.75ms +step:851/1750 train_time:79781ms step_avg:93.75ms +step:852/1750 train_time:79877ms step_avg:93.75ms +step:853/1750 train_time:79973ms step_avg:93.76ms +step:854/1750 train_time:80070ms step_avg:93.76ms +step:855/1750 train_time:80166ms step_avg:93.76ms +step:856/1750 train_time:80262ms step_avg:93.76ms +step:857/1750 train_time:80358ms step_avg:93.77ms +step:858/1750 train_time:80454ms step_avg:93.77ms +step:859/1750 train_time:80550ms step_avg:93.77ms +step:860/1750 train_time:80646ms step_avg:93.77ms +step:861/1750 train_time:80743ms step_avg:93.78ms +step:862/1750 train_time:80839ms step_avg:93.78ms +step:863/1750 train_time:80935ms step_avg:93.78ms +step:864/1750 train_time:81031ms step_avg:93.79ms +step:865/1750 train_time:81127ms step_avg:93.79ms +step:866/1750 train_time:81224ms step_avg:93.79ms +step:867/1750 train_time:81320ms step_avg:93.79ms +step:868/1750 train_time:81416ms step_avg:93.80ms +step:869/1750 train_time:81512ms step_avg:93.80ms +step:870/1750 train_time:81608ms step_avg:93.80ms +step:871/1750 train_time:81705ms step_avg:93.81ms +step:872/1750 train_time:81801ms step_avg:93.81ms +step:873/1750 train_time:81898ms step_avg:93.81ms +step:874/1750 train_time:81995ms step_avg:93.82ms +step:875/1750 train_time:82091ms step_avg:93.82ms +step:875/1750 val_loss:3.5431 train_time:82190ms step_avg:93.93ms +step:876/1750 train_time:82213ms step_avg:93.85ms +step:877/1750 train_time:82295ms step_avg:93.84ms +step:878/1750 train_time:82396ms step_avg:93.84ms +step:879/1750 train_time:82492ms step_avg:93.85ms +step:880/1750 train_time:82588ms step_avg:93.85ms +step:881/1750 train_time:82683ms step_avg:93.85ms +step:882/1750 train_time:82779ms step_avg:93.85ms +step:883/1750 train_time:82874ms step_avg:93.85ms +step:884/1750 train_time:82969ms step_avg:93.86ms +step:885/1750 train_time:83064ms step_avg:93.86ms +step:886/1750 train_time:83160ms step_avg:93.86ms +step:887/1750 train_time:83259ms step_avg:93.87ms +step:888/1750 train_time:83358ms step_avg:93.87ms +step:889/1750 train_time:83456ms step_avg:93.88ms +step:890/1750 train_time:83552ms step_avg:93.88ms +step:891/1750 train_time:83648ms step_avg:93.88ms +step:892/1750 train_time:83744ms step_avg:93.88ms +step:893/1750 train_time:83839ms step_avg:93.88ms +step:894/1750 train_time:83933ms step_avg:93.89ms +step:895/1750 train_time:84029ms step_avg:93.89ms +step:896/1750 train_time:84124ms step_avg:93.89ms +step:897/1750 train_time:84221ms step_avg:93.89ms +step:898/1750 train_time:84319ms step_avg:93.90ms +step:899/1750 train_time:84416ms step_avg:93.90ms +step:900/1750 train_time:84513ms 
step_avg:93.90ms +step:901/1750 train_time:84610ms step_avg:93.91ms +step:902/1750 train_time:84706ms step_avg:93.91ms +step:903/1750 train_time:84801ms step_avg:93.91ms +step:904/1750 train_time:84897ms step_avg:93.91ms +step:905/1750 train_time:84992ms step_avg:93.91ms +step:906/1750 train_time:85088ms step_avg:93.92ms +step:907/1750 train_time:85184ms step_avg:93.92ms +step:908/1750 train_time:85281ms step_avg:93.92ms +step:909/1750 train_time:85378ms step_avg:93.92ms +step:910/1750 train_time:85476ms step_avg:93.93ms +step:911/1750 train_time:85574ms step_avg:93.93ms +step:912/1750 train_time:85671ms step_avg:93.94ms +step:913/1750 train_time:85769ms step_avg:93.94ms +step:914/1750 train_time:85867ms step_avg:93.95ms +step:915/1750 train_time:85964ms step_avg:93.95ms +step:916/1750 train_time:86061ms step_avg:93.95ms +step:917/1750 train_time:86158ms step_avg:93.96ms +step:918/1750 train_time:86255ms step_avg:93.96ms +step:919/1750 train_time:86352ms step_avg:93.96ms +step:920/1750 train_time:86450ms step_avg:93.97ms +step:921/1750 train_time:86548ms step_avg:93.97ms +step:922/1750 train_time:86646ms step_avg:93.98ms +step:923/1750 train_time:86744ms step_avg:93.98ms +step:924/1750 train_time:86841ms step_avg:93.98ms +step:925/1750 train_time:86937ms step_avg:93.99ms +step:926/1750 train_time:87035ms step_avg:93.99ms +step:927/1750 train_time:87131ms step_avg:93.99ms +step:928/1750 train_time:87229ms step_avg:94.00ms +step:929/1750 train_time:87327ms step_avg:94.00ms +step:930/1750 train_time:87425ms step_avg:94.01ms +step:931/1750 train_time:87523ms step_avg:94.01ms +step:932/1750 train_time:87621ms step_avg:94.01ms +step:933/1750 train_time:87719ms step_avg:94.02ms +step:934/1750 train_time:87816ms step_avg:94.02ms +step:935/1750 train_time:87913ms step_avg:94.02ms +step:936/1750 train_time:88010ms step_avg:94.03ms +step:937/1750 train_time:88108ms step_avg:94.03ms +step:938/1750 train_time:88206ms step_avg:94.04ms +step:939/1750 train_time:88304ms step_avg:94.04ms +step:940/1750 train_time:88402ms step_avg:94.04ms +step:941/1750 train_time:88500ms step_avg:94.05ms +step:942/1750 train_time:88597ms step_avg:94.05ms +step:943/1750 train_time:88695ms step_avg:94.06ms +step:944/1750 train_time:88792ms step_avg:94.06ms +step:945/1750 train_time:88889ms step_avg:94.06ms +step:946/1750 train_time:88986ms step_avg:94.07ms +step:947/1750 train_time:89083ms step_avg:94.07ms +step:948/1750 train_time:89181ms step_avg:94.07ms +step:949/1750 train_time:89278ms step_avg:94.08ms +step:950/1750 train_time:89377ms step_avg:94.08ms +step:951/1750 train_time:89473ms step_avg:94.08ms +step:952/1750 train_time:89571ms step_avg:94.09ms +step:953/1750 train_time:89668ms step_avg:94.09ms +step:954/1750 train_time:89765ms step_avg:94.09ms +step:955/1750 train_time:89863ms step_avg:94.10ms +step:956/1750 train_time:89962ms step_avg:94.10ms +step:957/1750 train_time:90058ms step_avg:94.10ms +step:958/1750 train_time:90155ms step_avg:94.11ms +step:959/1750 train_time:90253ms step_avg:94.11ms +step:960/1750 train_time:90351ms step_avg:94.12ms +step:961/1750 train_time:90449ms step_avg:94.12ms +step:962/1750 train_time:90547ms step_avg:94.12ms +step:963/1750 train_time:90645ms step_avg:94.13ms +step:964/1750 train_time:90743ms step_avg:94.13ms +step:965/1750 train_time:90840ms step_avg:94.14ms +step:966/1750 train_time:90937ms step_avg:94.14ms +step:967/1750 train_time:91035ms step_avg:94.14ms +step:968/1750 train_time:91133ms step_avg:94.15ms +step:969/1750 train_time:91230ms step_avg:94.15ms +step:970/1750 
train_time:91328ms step_avg:94.15ms +step:971/1750 train_time:91427ms step_avg:94.16ms +step:972/1750 train_time:91524ms step_avg:94.16ms +step:973/1750 train_time:91622ms step_avg:94.16ms +step:974/1750 train_time:91719ms step_avg:94.17ms +step:975/1750 train_time:91818ms step_avg:94.17ms +step:976/1750 train_time:91915ms step_avg:94.18ms +step:977/1750 train_time:92013ms step_avg:94.18ms +step:978/1750 train_time:92110ms step_avg:94.18ms +step:979/1750 train_time:92209ms step_avg:94.19ms +step:980/1750 train_time:92306ms step_avg:94.19ms +step:981/1750 train_time:92403ms step_avg:94.19ms +step:982/1750 train_time:92500ms step_avg:94.20ms +step:983/1750 train_time:92597ms step_avg:94.20ms +step:984/1750 train_time:92695ms step_avg:94.20ms +step:985/1750 train_time:92794ms step_avg:94.21ms +step:986/1750 train_time:92891ms step_avg:94.21ms +step:987/1750 train_time:92989ms step_avg:94.21ms +step:988/1750 train_time:93087ms step_avg:94.22ms +step:989/1750 train_time:93185ms step_avg:94.22ms +step:990/1750 train_time:93283ms step_avg:94.23ms +step:991/1750 train_time:93381ms step_avg:94.23ms +step:992/1750 train_time:93479ms step_avg:94.23ms +step:993/1750 train_time:93575ms step_avg:94.24ms +step:994/1750 train_time:93673ms step_avg:94.24ms +step:995/1750 train_time:93770ms step_avg:94.24ms +step:996/1750 train_time:93868ms step_avg:94.25ms +step:997/1750 train_time:93967ms step_avg:94.25ms +step:998/1750 train_time:94063ms step_avg:94.25ms +step:999/1750 train_time:94160ms step_avg:94.25ms +step:1000/1750 train_time:94258ms step_avg:94.26ms +step:1000/1750 val_loss:3.5036 train_time:94359ms step_avg:94.36ms +step:1001/1750 train_time:94383ms step_avg:94.29ms +step:1002/1750 train_time:94465ms step_avg:94.28ms +step:1003/1750 train_time:94566ms step_avg:94.28ms +step:1004/1750 train_time:94664ms step_avg:94.29ms +step:1005/1750 train_time:94761ms step_avg:94.29ms +step:1006/1750 train_time:94857ms step_avg:94.29ms +step:1007/1750 train_time:94953ms step_avg:94.29ms +step:1008/1750 train_time:95049ms step_avg:94.29ms +step:1009/1750 train_time:95145ms step_avg:94.30ms +step:1010/1750 train_time:95242ms step_avg:94.30ms +step:1011/1750 train_time:95340ms step_avg:94.30ms +step:1012/1750 train_time:95441ms step_avg:94.31ms +step:1013/1750 train_time:95540ms step_avg:94.31ms +step:1014/1750 train_time:95639ms step_avg:94.32ms +step:1015/1750 train_time:95736ms step_avg:94.32ms +step:1016/1750 train_time:95834ms step_avg:94.32ms +step:1017/1750 train_time:95931ms step_avg:94.33ms +step:1018/1750 train_time:96027ms step_avg:94.33ms +step:1019/1750 train_time:96124ms step_avg:94.33ms +step:1020/1750 train_time:96220ms step_avg:94.33ms +step:1021/1750 train_time:96318ms step_avg:94.34ms +step:1022/1750 train_time:96417ms step_avg:94.34ms +step:1023/1750 train_time:96516ms step_avg:94.35ms +step:1024/1750 train_time:96615ms step_avg:94.35ms +step:1025/1750 train_time:96712ms step_avg:94.35ms +step:1026/1750 train_time:96810ms step_avg:94.36ms +step:1027/1750 train_time:96906ms step_avg:94.36ms +step:1028/1750 train_time:97004ms step_avg:94.36ms +step:1029/1750 train_time:97102ms step_avg:94.37ms +step:1030/1750 train_time:97199ms step_avg:94.37ms +step:1031/1750 train_time:97296ms step_avg:94.37ms +step:1032/1750 train_time:97394ms step_avg:94.37ms +step:1033/1750 train_time:97492ms step_avg:94.38ms +step:1034/1750 train_time:97589ms step_avg:94.38ms +step:1035/1750 train_time:97688ms step_avg:94.38ms +step:1036/1750 train_time:97786ms step_avg:94.39ms +step:1037/1750 train_time:97884ms 
step_avg:94.39ms +step:1038/1750 train_time:97981ms step_avg:94.39ms +step:1039/1750 train_time:98078ms step_avg:94.40ms +step:1040/1750 train_time:98175ms step_avg:94.40ms +step:1041/1750 train_time:98273ms step_avg:94.40ms +step:1042/1750 train_time:98370ms step_avg:94.40ms +step:1043/1750 train_time:98468ms step_avg:94.41ms +step:1044/1750 train_time:98566ms step_avg:94.41ms +step:1045/1750 train_time:98664ms step_avg:94.42ms +step:1046/1750 train_time:98763ms step_avg:94.42ms +step:1047/1750 train_time:98861ms step_avg:94.42ms +step:1048/1750 train_time:98958ms step_avg:94.43ms +step:1049/1750 train_time:99055ms step_avg:94.43ms +step:1050/1750 train_time:99152ms step_avg:94.43ms +step:1051/1750 train_time:99250ms step_avg:94.43ms +step:1052/1750 train_time:99347ms step_avg:94.44ms +step:1053/1750 train_time:99444ms step_avg:94.44ms +step:1054/1750 train_time:99543ms step_avg:94.44ms +step:1055/1750 train_time:99641ms step_avg:94.45ms +step:1056/1750 train_time:99739ms step_avg:94.45ms +step:1057/1750 train_time:99837ms step_avg:94.45ms +step:1058/1750 train_time:99935ms step_avg:94.46ms +step:1059/1750 train_time:100032ms step_avg:94.46ms +step:1060/1750 train_time:100129ms step_avg:94.46ms +step:1061/1750 train_time:100226ms step_avg:94.46ms +step:1062/1750 train_time:100324ms step_avg:94.47ms +step:1063/1750 train_time:100421ms step_avg:94.47ms +step:1064/1750 train_time:100520ms step_avg:94.47ms +step:1065/1750 train_time:100619ms step_avg:94.48ms +step:1066/1750 train_time:100717ms step_avg:94.48ms +step:1067/1750 train_time:100815ms step_avg:94.48ms +step:1068/1750 train_time:100912ms step_avg:94.49ms +step:1069/1750 train_time:101009ms step_avg:94.49ms +step:1070/1750 train_time:101107ms step_avg:94.49ms +step:1071/1750 train_time:101205ms step_avg:94.50ms +step:1072/1750 train_time:101303ms step_avg:94.50ms +step:1073/1750 train_time:101399ms step_avg:94.50ms +step:1074/1750 train_time:101497ms step_avg:94.50ms +step:1075/1750 train_time:101595ms step_avg:94.51ms +step:1076/1750 train_time:101693ms step_avg:94.51ms +step:1077/1750 train_time:101792ms step_avg:94.51ms +step:1078/1750 train_time:101889ms step_avg:94.52ms +step:1079/1750 train_time:101988ms step_avg:94.52ms +step:1080/1750 train_time:102085ms step_avg:94.52ms +step:1081/1750 train_time:102182ms step_avg:94.53ms +step:1082/1750 train_time:102280ms step_avg:94.53ms +step:1083/1750 train_time:102378ms step_avg:94.53ms +step:1084/1750 train_time:102474ms step_avg:94.53ms +step:1085/1750 train_time:102572ms step_avg:94.54ms +step:1086/1750 train_time:102670ms step_avg:94.54ms +step:1087/1750 train_time:102767ms step_avg:94.54ms +step:1088/1750 train_time:102865ms step_avg:94.55ms +step:1089/1750 train_time:102963ms step_avg:94.55ms +step:1090/1750 train_time:103062ms step_avg:94.55ms +step:1091/1750 train_time:103160ms step_avg:94.56ms +step:1092/1750 train_time:103258ms step_avg:94.56ms +step:1093/1750 train_time:103356ms step_avg:94.56ms +step:1094/1750 train_time:103453ms step_avg:94.56ms +step:1095/1750 train_time:103550ms step_avg:94.57ms +step:1096/1750 train_time:103648ms step_avg:94.57ms +step:1097/1750 train_time:103746ms step_avg:94.57ms +step:1098/1750 train_time:103844ms step_avg:94.58ms +step:1099/1750 train_time:103942ms step_avg:94.58ms +step:1100/1750 train_time:104039ms step_avg:94.58ms +step:1101/1750 train_time:104137ms step_avg:94.58ms +step:1102/1750 train_time:104234ms step_avg:94.59ms +step:1103/1750 train_time:104332ms step_avg:94.59ms +step:1104/1750 train_time:104429ms step_avg:94.59ms 
+step:1105/1750 train_time:104526ms step_avg:94.59ms +step:1106/1750 train_time:104625ms step_avg:94.60ms +step:1107/1750 train_time:104723ms step_avg:94.60ms +step:1108/1750 train_time:104822ms step_avg:94.60ms +step:1109/1750 train_time:104920ms step_avg:94.61ms +step:1110/1750 train_time:105018ms step_avg:94.61ms +step:1111/1750 train_time:105115ms step_avg:94.61ms +step:1112/1750 train_time:105213ms step_avg:94.62ms +step:1113/1750 train_time:105310ms step_avg:94.62ms +step:1114/1750 train_time:105408ms step_avg:94.62ms +step:1115/1750 train_time:105505ms step_avg:94.62ms +step:1116/1750 train_time:105602ms step_avg:94.63ms +step:1117/1750 train_time:105700ms step_avg:94.63ms +step:1118/1750 train_time:105798ms step_avg:94.63ms +step:1119/1750 train_time:105895ms step_avg:94.63ms +step:1120/1750 train_time:105994ms step_avg:94.64ms +step:1121/1750 train_time:106092ms step_avg:94.64ms +step:1122/1750 train_time:106190ms step_avg:94.64ms +step:1123/1750 train_time:106287ms step_avg:94.65ms +step:1124/1750 train_time:106385ms step_avg:94.65ms +step:1125/1750 train_time:106484ms step_avg:94.65ms +step:1125/1750 val_loss:3.4519 train_time:106584ms step_avg:94.74ms +step:1126/1750 train_time:106609ms step_avg:94.68ms +step:1127/1750 train_time:106687ms step_avg:94.66ms +step:1128/1750 train_time:106787ms step_avg:94.67ms +step:1129/1750 train_time:106885ms step_avg:94.67ms +step:1130/1750 train_time:106983ms step_avg:94.67ms +step:1131/1750 train_time:107079ms step_avg:94.68ms +step:1132/1750 train_time:107176ms step_avg:94.68ms +step:1133/1750 train_time:107273ms step_avg:94.68ms +step:1134/1750 train_time:107371ms step_avg:94.68ms +step:1135/1750 train_time:107468ms step_avg:94.69ms +step:1136/1750 train_time:107567ms step_avg:94.69ms +step:1137/1750 train_time:107667ms step_avg:94.69ms +step:1138/1750 train_time:107766ms step_avg:94.70ms +step:1139/1750 train_time:107864ms step_avg:94.70ms +step:1140/1750 train_time:107961ms step_avg:94.70ms +step:1141/1750 train_time:108058ms step_avg:94.70ms +step:1142/1750 train_time:108154ms step_avg:94.71ms +step:1143/1750 train_time:108250ms step_avg:94.71ms +step:1144/1750 train_time:108347ms step_avg:94.71ms +step:1145/1750 train_time:108444ms step_avg:94.71ms +step:1146/1750 train_time:108543ms step_avg:94.71ms +step:1147/1750 train_time:108641ms step_avg:94.72ms +step:1148/1750 train_time:108740ms step_avg:94.72ms +step:1149/1750 train_time:108839ms step_avg:94.72ms +step:1150/1750 train_time:108936ms step_avg:94.73ms +step:1151/1750 train_time:109033ms step_avg:94.73ms +step:1152/1750 train_time:109130ms step_avg:94.73ms +step:1153/1750 train_time:109228ms step_avg:94.73ms +step:1154/1750 train_time:109324ms step_avg:94.73ms +step:1155/1750 train_time:109421ms step_avg:94.74ms +step:1156/1750 train_time:109519ms step_avg:94.74ms +step:1157/1750 train_time:109617ms step_avg:94.74ms +step:1158/1750 train_time:109715ms step_avg:94.75ms +step:1159/1750 train_time:109813ms step_avg:94.75ms +step:1160/1750 train_time:109911ms step_avg:94.75ms +step:1161/1750 train_time:110010ms step_avg:94.75ms +step:1162/1750 train_time:110108ms step_avg:94.76ms +step:1163/1750 train_time:110207ms step_avg:94.76ms +step:1164/1750 train_time:110304ms step_avg:94.76ms +step:1165/1750 train_time:110401ms step_avg:94.77ms +step:1166/1750 train_time:110499ms step_avg:94.77ms +step:1167/1750 train_time:110596ms step_avg:94.77ms +step:1168/1750 train_time:110695ms step_avg:94.77ms +step:1169/1750 train_time:110794ms step_avg:94.78ms +step:1170/1750 train_time:110892ms 
step_avg:94.78ms +step:1171/1750 train_time:110992ms step_avg:94.78ms +step:1172/1750 train_time:111090ms step_avg:94.79ms +step:1173/1750 train_time:111191ms step_avg:94.79ms +step:1174/1750 train_time:111291ms step_avg:94.80ms +step:1175/1750 train_time:111390ms step_avg:94.80ms +step:1176/1750 train_time:111489ms step_avg:94.80ms +step:1177/1750 train_time:111590ms step_avg:94.81ms +step:1178/1750 train_time:111689ms step_avg:94.81ms +step:1179/1750 train_time:111792ms step_avg:94.82ms +step:1180/1750 train_time:111891ms step_avg:94.82ms +step:1181/1750 train_time:111989ms step_avg:94.83ms +step:1182/1750 train_time:112088ms step_avg:94.83ms +step:1183/1750 train_time:112187ms step_avg:94.83ms +step:1184/1750 train_time:112286ms step_avg:94.84ms +step:1185/1750 train_time:112385ms step_avg:94.84ms +step:1186/1750 train_time:112485ms step_avg:94.84ms +step:1187/1750 train_time:112584ms step_avg:94.85ms +step:1188/1750 train_time:112683ms step_avg:94.85ms +step:1189/1750 train_time:112781ms step_avg:94.85ms +step:1190/1750 train_time:112881ms step_avg:94.86ms +step:1191/1750 train_time:112980ms step_avg:94.86ms +step:1192/1750 train_time:113080ms step_avg:94.87ms +step:1193/1750 train_time:113179ms step_avg:94.87ms +step:1194/1750 train_time:113278ms step_avg:94.87ms +step:1195/1750 train_time:113378ms step_avg:94.88ms +step:1196/1750 train_time:113476ms step_avg:94.88ms +step:1197/1750 train_time:113575ms step_avg:94.88ms +step:1198/1750 train_time:113675ms step_avg:94.89ms +step:1199/1750 train_time:113774ms step_avg:94.89ms +step:1200/1750 train_time:113873ms step_avg:94.89ms +step:1201/1750 train_time:113973ms step_avg:94.90ms +step:1202/1750 train_time:114073ms step_avg:94.90ms +step:1203/1750 train_time:114172ms step_avg:94.91ms +step:1204/1750 train_time:114271ms step_avg:94.91ms +step:1205/1750 train_time:114370ms step_avg:94.91ms +step:1206/1750 train_time:114469ms step_avg:94.92ms +step:1207/1750 train_time:114568ms step_avg:94.92ms +step:1208/1750 train_time:114668ms step_avg:94.92ms +step:1209/1750 train_time:114767ms step_avg:94.93ms +step:1210/1750 train_time:114866ms step_avg:94.93ms +step:1211/1750 train_time:114966ms step_avg:94.93ms +step:1212/1750 train_time:115064ms step_avg:94.94ms +step:1213/1750 train_time:115163ms step_avg:94.94ms +step:1214/1750 train_time:115261ms step_avg:94.94ms +step:1215/1750 train_time:115361ms step_avg:94.95ms +step:1216/1750 train_time:115460ms step_avg:94.95ms +step:1217/1750 train_time:115560ms step_avg:94.95ms +step:1218/1750 train_time:115660ms step_avg:94.96ms +step:1219/1750 train_time:115759ms step_avg:94.96ms +step:1220/1750 train_time:115859ms step_avg:94.97ms +step:1221/1750 train_time:115958ms step_avg:94.97ms +step:1222/1750 train_time:116057ms step_avg:94.97ms +step:1223/1750 train_time:116157ms step_avg:94.98ms +step:1224/1750 train_time:116256ms step_avg:94.98ms +step:1225/1750 train_time:116355ms step_avg:94.98ms +step:1226/1750 train_time:116455ms step_avg:94.99ms +step:1227/1750 train_time:116553ms step_avg:94.99ms +step:1228/1750 train_time:116652ms step_avg:94.99ms +step:1229/1750 train_time:116752ms step_avg:95.00ms +step:1230/1750 train_time:116850ms step_avg:95.00ms +step:1231/1750 train_time:116950ms step_avg:95.00ms +step:1232/1750 train_time:117049ms step_avg:95.01ms +step:1233/1750 train_time:117148ms step_avg:95.01ms +step:1234/1750 train_time:117249ms step_avg:95.02ms +step:1235/1750 train_time:117348ms step_avg:95.02ms +step:1236/1750 train_time:117447ms step_avg:95.02ms +step:1237/1750 train_time:117548ms 
step_avg:95.03ms +step:1238/1750 train_time:117647ms step_avg:95.03ms +step:1239/1750 train_time:117746ms step_avg:95.03ms +step:1240/1750 train_time:117845ms step_avg:95.04ms +step:1241/1750 train_time:117944ms step_avg:95.04ms +step:1242/1750 train_time:118042ms step_avg:95.04ms +step:1243/1750 train_time:118141ms step_avg:95.04ms +step:1244/1750 train_time:118241ms step_avg:95.05ms +step:1245/1750 train_time:118341ms step_avg:95.05ms +step:1246/1750 train_time:118441ms step_avg:95.06ms +step:1247/1750 train_time:118539ms step_avg:95.06ms +step:1248/1750 train_time:118638ms step_avg:95.06ms +step:1249/1750 train_time:118738ms step_avg:95.07ms +step:1250/1750 train_time:118836ms step_avg:95.07ms +step:1250/1750 val_loss:3.4067 train_time:118938ms step_avg:95.15ms +step:1251/1750 train_time:118962ms step_avg:95.09ms +step:1252/1750 train_time:119048ms step_avg:95.09ms +step:1253/1750 train_time:119149ms step_avg:95.09ms +step:1254/1750 train_time:119248ms step_avg:95.09ms +step:1255/1750 train_time:119347ms step_avg:95.10ms +step:1256/1750 train_time:119445ms step_avg:95.10ms +step:1257/1750 train_time:119542ms step_avg:95.10ms +step:1258/1750 train_time:119640ms step_avg:95.10ms +step:1259/1750 train_time:119737ms step_avg:95.10ms +step:1260/1750 train_time:119834ms step_avg:95.11ms +step:1261/1750 train_time:119932ms step_avg:95.11ms +step:1262/1750 train_time:120033ms step_avg:95.11ms +step:1263/1750 train_time:120133ms step_avg:95.12ms +step:1264/1750 train_time:120232ms step_avg:95.12ms +step:1265/1750 train_time:120332ms step_avg:95.12ms +step:1266/1750 train_time:120430ms step_avg:95.13ms +step:1267/1750 train_time:120529ms step_avg:95.13ms +step:1268/1750 train_time:120627ms step_avg:95.13ms +step:1269/1750 train_time:120724ms step_avg:95.13ms +step:1270/1750 train_time:120822ms step_avg:95.14ms +step:1271/1750 train_time:120924ms step_avg:95.14ms +step:1272/1750 train_time:121023ms step_avg:95.14ms +step:1273/1750 train_time:121123ms step_avg:95.15ms +step:1274/1750 train_time:121221ms step_avg:95.15ms +step:1275/1750 train_time:121320ms step_avg:95.15ms +step:1276/1750 train_time:121420ms step_avg:95.16ms +step:1277/1750 train_time:121519ms step_avg:95.16ms +step:1278/1750 train_time:121619ms step_avg:95.16ms +step:1279/1750 train_time:121718ms step_avg:95.17ms +step:1280/1750 train_time:121817ms step_avg:95.17ms +step:1281/1750 train_time:121917ms step_avg:95.17ms +step:1282/1750 train_time:122016ms step_avg:95.18ms +step:1283/1750 train_time:122115ms step_avg:95.18ms +step:1284/1750 train_time:122213ms step_avg:95.18ms +step:1285/1750 train_time:122311ms step_avg:95.18ms +step:1286/1750 train_time:122410ms step_avg:95.19ms +step:1287/1750 train_time:122509ms step_avg:95.19ms +step:1288/1750 train_time:122609ms step_avg:95.19ms +step:1289/1750 train_time:122709ms step_avg:95.20ms +step:1290/1750 train_time:122807ms step_avg:95.20ms +step:1291/1750 train_time:122906ms step_avg:95.20ms +step:1292/1750 train_time:123006ms step_avg:95.21ms +step:1293/1750 train_time:123105ms step_avg:95.21ms +step:1294/1750 train_time:123206ms step_avg:95.21ms +step:1295/1750 train_time:123305ms step_avg:95.22ms +step:1296/1750 train_time:123405ms step_avg:95.22ms +step:1297/1750 train_time:123504ms step_avg:95.22ms +step:1298/1750 train_time:123602ms step_avg:95.23ms +step:1299/1750 train_time:123701ms step_avg:95.23ms +step:1300/1750 train_time:123800ms step_avg:95.23ms +step:1301/1750 train_time:123899ms step_avg:95.23ms +step:1302/1750 train_time:123997ms step_avg:95.24ms +step:1303/1750 
train_time:124097ms step_avg:95.24ms +step:1304/1750 train_time:124197ms step_avg:95.24ms +step:1305/1750 train_time:124296ms step_avg:95.25ms +step:1306/1750 train_time:124395ms step_avg:95.25ms +step:1307/1750 train_time:124496ms step_avg:95.25ms +step:1308/1750 train_time:124595ms step_avg:95.26ms +step:1309/1750 train_time:124694ms step_avg:95.26ms +step:1310/1750 train_time:124794ms step_avg:95.26ms +step:1311/1750 train_time:124894ms step_avg:95.27ms +step:1312/1750 train_time:124993ms step_avg:95.27ms +step:1313/1750 train_time:125093ms step_avg:95.27ms +step:1314/1750 train_time:125193ms step_avg:95.28ms +step:1315/1750 train_time:125292ms step_avg:95.28ms +step:1316/1750 train_time:125391ms step_avg:95.28ms +step:1317/1750 train_time:125490ms step_avg:95.28ms +step:1318/1750 train_time:125588ms step_avg:95.29ms +step:1319/1750 train_time:125687ms step_avg:95.29ms +step:1320/1750 train_time:125788ms step_avg:95.29ms +step:1321/1750 train_time:125887ms step_avg:95.30ms +step:1322/1750 train_time:125986ms step_avg:95.30ms +step:1323/1750 train_time:126085ms step_avg:95.30ms +step:1324/1750 train_time:126186ms step_avg:95.31ms +step:1325/1750 train_time:126285ms step_avg:95.31ms +step:1326/1750 train_time:126384ms step_avg:95.31ms +step:1327/1750 train_time:126482ms step_avg:95.31ms +step:1328/1750 train_time:126581ms step_avg:95.32ms +step:1329/1750 train_time:126680ms step_avg:95.32ms +step:1330/1750 train_time:126780ms step_avg:95.32ms +step:1331/1750 train_time:126878ms step_avg:95.33ms +step:1332/1750 train_time:126977ms step_avg:95.33ms +step:1333/1750 train_time:127078ms step_avg:95.33ms +step:1334/1750 train_time:127177ms step_avg:95.34ms +step:1335/1750 train_time:127277ms step_avg:95.34ms +step:1336/1750 train_time:127376ms step_avg:95.34ms +step:1337/1750 train_time:127475ms step_avg:95.34ms +step:1338/1750 train_time:127574ms step_avg:95.35ms +step:1339/1750 train_time:127673ms step_avg:95.35ms +step:1340/1750 train_time:127773ms step_avg:95.35ms +step:1341/1750 train_time:127872ms step_avg:95.36ms +step:1342/1750 train_time:127972ms step_avg:95.36ms +step:1343/1750 train_time:128072ms step_avg:95.36ms +step:1344/1750 train_time:128171ms step_avg:95.37ms +step:1345/1750 train_time:128271ms step_avg:95.37ms +step:1346/1750 train_time:128370ms step_avg:95.37ms +step:1347/1750 train_time:128469ms step_avg:95.37ms +step:1348/1750 train_time:128569ms step_avg:95.38ms +step:1349/1750 train_time:128666ms step_avg:95.38ms +step:1350/1750 train_time:128767ms step_avg:95.38ms +step:1351/1750 train_time:128865ms step_avg:95.38ms +step:1352/1750 train_time:128964ms step_avg:95.39ms +step:1353/1750 train_time:129064ms step_avg:95.39ms +step:1354/1750 train_time:129163ms step_avg:95.39ms +step:1355/1750 train_time:129262ms step_avg:95.40ms +step:1356/1750 train_time:129362ms step_avg:95.40ms +step:1357/1750 train_time:129460ms step_avg:95.40ms +step:1358/1750 train_time:129560ms step_avg:95.40ms +step:1359/1750 train_time:129658ms step_avg:95.41ms +step:1360/1750 train_time:129757ms step_avg:95.41ms +step:1361/1750 train_time:129856ms step_avg:95.41ms +step:1362/1750 train_time:129957ms step_avg:95.42ms +step:1363/1750 train_time:130058ms step_avg:95.42ms +step:1364/1750 train_time:130157ms step_avg:95.42ms +step:1365/1750 train_time:130257ms step_avg:95.43ms +step:1366/1750 train_time:130355ms step_avg:95.43ms +step:1367/1750 train_time:130453ms step_avg:95.43ms +step:1368/1750 train_time:130552ms step_avg:95.43ms +step:1369/1750 train_time:130650ms step_avg:95.43ms +step:1370/1750 
train_time:130749ms step_avg:95.44ms +step:1371/1750 train_time:130849ms step_avg:95.44ms +step:1372/1750 train_time:130948ms step_avg:95.44ms +step:1373/1750 train_time:131048ms step_avg:95.45ms +step:1374/1750 train_time:131147ms step_avg:95.45ms +step:1375/1750 train_time:131248ms step_avg:95.45ms +step:1375/1750 val_loss:3.3669 train_time:131351ms step_avg:95.53ms +step:1376/1750 train_time:131375ms step_avg:95.48ms +step:1377/1750 train_time:131458ms step_avg:95.47ms +step:1378/1750 train_time:131559ms step_avg:95.47ms +step:1379/1750 train_time:131659ms step_avg:95.47ms +step:1380/1750 train_time:131759ms step_avg:95.48ms +step:1381/1750 train_time:131857ms step_avg:95.48ms +step:1382/1750 train_time:131955ms step_avg:95.48ms +step:1383/1750 train_time:132053ms step_avg:95.48ms +step:1384/1750 train_time:132150ms step_avg:95.48ms +step:1385/1750 train_time:132248ms step_avg:95.49ms +step:1386/1750 train_time:132348ms step_avg:95.49ms +step:1387/1750 train_time:132449ms step_avg:95.49ms +step:1388/1750 train_time:132549ms step_avg:95.50ms +step:1389/1750 train_time:132649ms step_avg:95.50ms +step:1390/1750 train_time:132748ms step_avg:95.50ms +step:1391/1750 train_time:132846ms step_avg:95.50ms +step:1392/1750 train_time:132945ms step_avg:95.51ms +step:1393/1750 train_time:133043ms step_avg:95.51ms +step:1394/1750 train_time:133142ms step_avg:95.51ms +step:1395/1750 train_time:133241ms step_avg:95.51ms +step:1396/1750 train_time:133340ms step_avg:95.52ms +step:1397/1750 train_time:133440ms step_avg:95.52ms +step:1398/1750 train_time:133540ms step_avg:95.52ms +step:1399/1750 train_time:133640ms step_avg:95.53ms +step:1400/1750 train_time:133739ms step_avg:95.53ms +step:1401/1750 train_time:133838ms step_avg:95.53ms +step:1402/1750 train_time:133938ms step_avg:95.53ms +step:1403/1750 train_time:134038ms step_avg:95.54ms +step:1404/1750 train_time:134137ms step_avg:95.54ms +step:1405/1750 train_time:134235ms step_avg:95.54ms +step:1406/1750 train_time:134335ms step_avg:95.54ms +step:1407/1750 train_time:134435ms step_avg:95.55ms +step:1408/1750 train_time:134535ms step_avg:95.55ms +step:1409/1750 train_time:134635ms step_avg:95.55ms +step:1410/1750 train_time:134734ms step_avg:95.56ms +step:1411/1750 train_time:134834ms step_avg:95.56ms +step:1412/1750 train_time:134934ms step_avg:95.56ms +step:1413/1750 train_time:135032ms step_avg:95.56ms +step:1414/1750 train_time:135132ms step_avg:95.57ms +step:1415/1750 train_time:135232ms step_avg:95.57ms +step:1416/1750 train_time:135330ms step_avg:95.57ms +step:1417/1750 train_time:135429ms step_avg:95.57ms +step:1418/1750 train_time:135528ms step_avg:95.58ms +step:1419/1750 train_time:135627ms step_avg:95.58ms +step:1420/1750 train_time:135727ms step_avg:95.58ms +step:1421/1750 train_time:135826ms step_avg:95.58ms +step:1422/1750 train_time:135925ms step_avg:95.59ms +step:1423/1750 train_time:136025ms step_avg:95.59ms +step:1424/1750 train_time:136126ms step_avg:95.59ms +step:1425/1750 train_time:136225ms step_avg:95.60ms +step:1426/1750 train_time:136324ms step_avg:95.60ms +step:1427/1750 train_time:136423ms step_avg:95.60ms +step:1428/1750 train_time:136523ms step_avg:95.60ms +step:1429/1750 train_time:136622ms step_avg:95.61ms +step:1430/1750 train_time:136722ms step_avg:95.61ms +step:1431/1750 train_time:136822ms step_avg:95.61ms +step:1432/1750 train_time:136922ms step_avg:95.62ms +step:1433/1750 train_time:137022ms step_avg:95.62ms +step:1434/1750 train_time:137122ms step_avg:95.62ms +step:1435/1750 train_time:137223ms step_avg:95.63ms 
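Since every row follows the same fixed format, milestone extraction from this log is mechanical. A minimal parsing sketch (illustrative only; the regex, helper name, and sample string are assumptions, not part of the recorded script):

import re

# One log row: "step:K/T train_time:Xms step_avg:Yms",
# with an optional "val_loss:Z" field on validation rows.
ROW = re.compile(
    r"step:(\d+)/(\d+)"
    r"(?: val_loss:([\d.]+))?"
    r" train_time:(\d+)ms"
    r" step_avg:([\d.]+)ms"
)

def val_checkpoints(log_text: str) -> list[tuple[int, float]]:
    """Return (step, val_loss) pairs for every validation row found."""
    return [
        (int(m.group(1)), float(m.group(3)))
        for m in ROW.finditer(log_text)
        if m.group(3) is not None
    ]

sample = ("step:1375/1750 train_time:131248ms step_avg:95.45ms "
          "step:1375/1750 val_loss:3.3669 train_time:131351ms step_avg:95.53ms")
print(val_checkpoints(sample))  # [(1375, 3.3669)]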
+step:1436/1750 train_time:137324ms step_avg:95.63ms +step:1437/1750 train_time:137425ms step_avg:95.63ms +step:1438/1750 train_time:137524ms step_avg:95.64ms +step:1439/1750 train_time:137624ms step_avg:95.64ms +step:1440/1750 train_time:137726ms step_avg:95.64ms +step:1441/1750 train_time:137827ms step_avg:95.65ms +step:1442/1750 train_time:137925ms step_avg:95.65ms +step:1443/1750 train_time:138024ms step_avg:95.65ms +step:1444/1750 train_time:138125ms step_avg:95.65ms +step:1445/1750 train_time:138224ms step_avg:95.66ms +step:1446/1750 train_time:138324ms step_avg:95.66ms +step:1447/1750 train_time:138424ms step_avg:95.66ms +step:1448/1750 train_time:138524ms step_avg:95.67ms +step:1449/1750 train_time:138624ms step_avg:95.67ms +step:1450/1750 train_time:138723ms step_avg:95.67ms +step:1451/1750 train_time:138823ms step_avg:95.67ms +step:1452/1750 train_time:138922ms step_avg:95.68ms +step:1453/1750 train_time:139023ms step_avg:95.68ms +step:1454/1750 train_time:139125ms step_avg:95.68ms +step:1455/1750 train_time:139225ms step_avg:95.69ms +step:1456/1750 train_time:139324ms step_avg:95.69ms +step:1457/1750 train_time:139425ms step_avg:95.69ms +step:1458/1750 train_time:139525ms step_avg:95.70ms +step:1459/1750 train_time:139624ms step_avg:95.70ms +step:1460/1750 train_time:139724ms step_avg:95.70ms +step:1461/1750 train_time:139823ms step_avg:95.70ms +step:1462/1750 train_time:139922ms step_avg:95.71ms +step:1463/1750 train_time:140022ms step_avg:95.71ms +step:1464/1750 train_time:140122ms step_avg:95.71ms +step:1465/1750 train_time:140222ms step_avg:95.71ms +step:1466/1750 train_time:140321ms step_avg:95.72ms +step:1467/1750 train_time:140422ms step_avg:95.72ms +step:1468/1750 train_time:140522ms step_avg:95.72ms +step:1469/1750 train_time:140624ms step_avg:95.73ms +step:1470/1750 train_time:140724ms step_avg:95.73ms +step:1471/1750 train_time:140823ms step_avg:95.73ms +step:1472/1750 train_time:140923ms step_avg:95.74ms +step:1473/1750 train_time:141023ms step_avg:95.74ms +step:1474/1750 train_time:141123ms step_avg:95.74ms +step:1475/1750 train_time:141222ms step_avg:95.74ms +step:1476/1750 train_time:141323ms step_avg:95.75ms +step:1477/1750 train_time:141423ms step_avg:95.75ms +step:1478/1750 train_time:141524ms step_avg:95.75ms +step:1479/1750 train_time:141625ms step_avg:95.76ms +step:1480/1750 train_time:141725ms step_avg:95.76ms +step:1481/1750 train_time:141824ms step_avg:95.76ms +step:1482/1750 train_time:141925ms step_avg:95.77ms +step:1483/1750 train_time:142024ms step_avg:95.77ms +step:1484/1750 train_time:142125ms step_avg:95.77ms +step:1485/1750 train_time:142227ms step_avg:95.78ms +step:1486/1750 train_time:142327ms step_avg:95.78ms +step:1487/1750 train_time:142428ms step_avg:95.78ms +step:1488/1750 train_time:142530ms step_avg:95.79ms +step:1489/1750 train_time:142629ms step_avg:95.79ms +step:1490/1750 train_time:142729ms step_avg:95.79ms +step:1491/1750 train_time:142829ms step_avg:95.79ms +step:1492/1750 train_time:142929ms step_avg:95.80ms +step:1493/1750 train_time:143029ms step_avg:95.80ms +step:1494/1750 train_time:143129ms step_avg:95.80ms +step:1495/1750 train_time:143229ms step_avg:95.81ms +step:1496/1750 train_time:143329ms step_avg:95.81ms +step:1497/1750 train_time:143429ms step_avg:95.81ms +step:1498/1750 train_time:143529ms step_avg:95.81ms +step:1499/1750 train_time:143629ms step_avg:95.82ms +step:1500/1750 train_time:143730ms step_avg:95.82ms +step:1500/1750 val_loss:3.3314 train_time:143832ms step_avg:95.89ms +step:1501/1750 train_time:143856ms 
step_avg:95.84ms +step:1502/1750 train_time:143941ms step_avg:95.83ms +step:1503/1750 train_time:144043ms step_avg:95.84ms +step:1504/1750 train_time:144142ms step_avg:95.84ms +step:1505/1750 train_time:144241ms step_avg:95.84ms +step:1506/1750 train_time:144340ms step_avg:95.84ms +step:1507/1750 train_time:144438ms step_avg:95.84ms +step:1508/1750 train_time:144537ms step_avg:95.85ms +step:1509/1750 train_time:144635ms step_avg:95.85ms +step:1510/1750 train_time:144734ms step_avg:95.85ms +step:1511/1750 train_time:144835ms step_avg:95.85ms +step:1512/1750 train_time:144937ms step_avg:95.86ms +step:1513/1750 train_time:145038ms step_avg:95.86ms +step:1514/1750 train_time:145139ms step_avg:95.86ms +step:1515/1750 train_time:145243ms step_avg:95.87ms +step:1516/1750 train_time:145342ms step_avg:95.87ms +step:1517/1750 train_time:145441ms step_avg:95.87ms +step:1518/1750 train_time:145540ms step_avg:95.88ms +step:1519/1750 train_time:145640ms step_avg:95.88ms +step:1520/1750 train_time:145741ms step_avg:95.88ms +step:1521/1750 train_time:145842ms step_avg:95.89ms +step:1522/1750 train_time:145943ms step_avg:95.89ms +step:1523/1750 train_time:146042ms step_avg:95.89ms +step:1524/1750 train_time:146143ms step_avg:95.89ms +step:1525/1750 train_time:146243ms step_avg:95.90ms +step:1526/1750 train_time:146342ms step_avg:95.90ms +step:1527/1750 train_time:146442ms step_avg:95.90ms +step:1528/1750 train_time:146544ms step_avg:95.91ms +step:1529/1750 train_time:146644ms step_avg:95.91ms +step:1530/1750 train_time:146744ms step_avg:95.91ms +step:1531/1750 train_time:146844ms step_avg:95.91ms +step:1532/1750 train_time:146945ms step_avg:95.92ms +step:1533/1750 train_time:147044ms step_avg:95.92ms +step:1534/1750 train_time:147144ms step_avg:95.92ms +step:1535/1750 train_time:147244ms step_avg:95.92ms +step:1536/1750 train_time:147343ms step_avg:95.93ms +step:1537/1750 train_time:147442ms step_avg:95.93ms +step:1538/1750 train_time:147543ms step_avg:95.93ms +step:1539/1750 train_time:147642ms step_avg:95.93ms +step:1540/1750 train_time:147742ms step_avg:95.94ms +step:1541/1750 train_time:147844ms step_avg:95.94ms +step:1542/1750 train_time:147947ms step_avg:95.95ms +step:1543/1750 train_time:148049ms step_avg:95.95ms +step:1544/1750 train_time:148148ms step_avg:95.95ms +step:1545/1750 train_time:148249ms step_avg:95.95ms +step:1546/1750 train_time:148348ms step_avg:95.96ms +step:1547/1750 train_time:148452ms step_avg:95.96ms +step:1548/1750 train_time:148553ms step_avg:95.96ms +step:1549/1750 train_time:148654ms step_avg:95.97ms +step:1550/1750 train_time:148755ms step_avg:95.97ms +step:1551/1750 train_time:148855ms step_avg:95.97ms +step:1552/1750 train_time:148957ms step_avg:95.98ms +step:1553/1750 train_time:149056ms step_avg:95.98ms +step:1554/1750 train_time:149155ms step_avg:95.98ms +step:1555/1750 train_time:149256ms step_avg:95.98ms +step:1556/1750 train_time:149357ms step_avg:95.99ms +step:1557/1750 train_time:149457ms step_avg:95.99ms +step:1558/1750 train_time:149557ms step_avg:95.99ms +step:1559/1750 train_time:149657ms step_avg:96.00ms +step:1560/1750 train_time:149757ms step_avg:96.00ms +step:1561/1750 train_time:149858ms step_avg:96.00ms +step:1562/1750 train_time:149959ms step_avg:96.00ms +step:1563/1750 train_time:150062ms step_avg:96.01ms +step:1564/1750 train_time:150162ms step_avg:96.01ms +step:1565/1750 train_time:150262ms step_avg:96.01ms +step:1566/1750 train_time:150362ms step_avg:96.02ms +step:1567/1750 train_time:150461ms step_avg:96.02ms +step:1568/1750 train_time:150561ms 
step_avg:96.02ms +step:1569/1750 train_time:150662ms step_avg:96.02ms +step:1570/1750 train_time:150764ms step_avg:96.03ms +step:1571/1750 train_time:150864ms step_avg:96.03ms +step:1572/1750 train_time:150964ms step_avg:96.03ms +step:1573/1750 train_time:151065ms step_avg:96.04ms +step:1574/1750 train_time:151165ms step_avg:96.04ms +step:1575/1750 train_time:151266ms step_avg:96.04ms +step:1576/1750 train_time:151366ms step_avg:96.04ms +step:1577/1750 train_time:151468ms step_avg:96.05ms +step:1578/1750 train_time:151568ms step_avg:96.05ms +step:1579/1750 train_time:151668ms step_avg:96.05ms +step:1580/1750 train_time:151771ms step_avg:96.06ms +step:1581/1750 train_time:151871ms step_avg:96.06ms +step:1582/1750 train_time:151970ms step_avg:96.06ms +step:1583/1750 train_time:152072ms step_avg:96.07ms +step:1584/1750 train_time:152173ms step_avg:96.07ms +step:1585/1750 train_time:152272ms step_avg:96.07ms +step:1586/1750 train_time:152372ms step_avg:96.07ms +step:1587/1750 train_time:152472ms step_avg:96.08ms +step:1588/1750 train_time:152573ms step_avg:96.08ms +step:1589/1750 train_time:152672ms step_avg:96.08ms +step:1590/1750 train_time:152773ms step_avg:96.08ms +step:1591/1750 train_time:152873ms step_avg:96.09ms +step:1592/1750 train_time:152975ms step_avg:96.09ms +step:1593/1750 train_time:153074ms step_avg:96.09ms +step:1594/1750 train_time:153178ms step_avg:96.10ms +step:1595/1750 train_time:153277ms step_avg:96.10ms +step:1596/1750 train_time:153377ms step_avg:96.10ms +step:1597/1750 train_time:153476ms step_avg:96.10ms +step:1598/1750 train_time:153579ms step_avg:96.11ms +step:1599/1750 train_time:153679ms step_avg:96.11ms +step:1600/1750 train_time:153781ms step_avg:96.11ms +step:1601/1750 train_time:153882ms step_avg:96.12ms +step:1602/1750 train_time:153982ms step_avg:96.12ms +step:1603/1750 train_time:154082ms step_avg:96.12ms +step:1604/1750 train_time:154182ms step_avg:96.12ms +step:1605/1750 train_time:154282ms step_avg:96.13ms +step:1606/1750 train_time:154381ms step_avg:96.13ms +step:1607/1750 train_time:154482ms step_avg:96.13ms +step:1608/1750 train_time:154581ms step_avg:96.13ms +step:1609/1750 train_time:154681ms step_avg:96.13ms +step:1610/1750 train_time:154782ms step_avg:96.14ms +step:1611/1750 train_time:154883ms step_avg:96.14ms +step:1612/1750 train_time:154984ms step_avg:96.14ms +step:1613/1750 train_time:155084ms step_avg:96.15ms +step:1614/1750 train_time:155184ms step_avg:96.15ms +step:1615/1750 train_time:155285ms step_avg:96.15ms +step:1616/1750 train_time:155385ms step_avg:96.15ms +step:1617/1750 train_time:155485ms step_avg:96.16ms +step:1618/1750 train_time:155586ms step_avg:96.16ms +step:1619/1750 train_time:155686ms step_avg:96.16ms +step:1620/1750 train_time:155786ms step_avg:96.16ms +step:1621/1750 train_time:155887ms step_avg:96.17ms +step:1622/1750 train_time:155986ms step_avg:96.17ms +step:1623/1750 train_time:156086ms step_avg:96.17ms +step:1624/1750 train_time:156187ms step_avg:96.17ms +step:1625/1750 train_time:156290ms step_avg:96.18ms +step:1625/1750 val_loss:3.3014 train_time:156394ms step_avg:96.24ms +step:1626/1750 train_time:156420ms step_avg:96.20ms +step:1627/1750 train_time:156503ms step_avg:96.19ms +step:1628/1750 train_time:156603ms step_avg:96.19ms +step:1629/1750 train_time:156703ms step_avg:96.20ms +step:1630/1750 train_time:156802ms step_avg:96.20ms +step:1631/1750 train_time:156902ms step_avg:96.20ms +step:1632/1750 train_time:157002ms step_avg:96.20ms +step:1633/1750 train_time:157101ms step_avg:96.20ms +step:1634/1750 
train_time:157202ms step_avg:96.21ms +step:1635/1750 train_time:157302ms step_avg:96.21ms +step:1636/1750 train_time:157404ms step_avg:96.21ms +step:1637/1750 train_time:157507ms step_avg:96.22ms +step:1638/1750 train_time:157608ms step_avg:96.22ms +step:1639/1750 train_time:157709ms step_avg:96.22ms +step:1640/1750 train_time:157808ms step_avg:96.22ms +step:1641/1750 train_time:157907ms step_avg:96.23ms +step:1642/1750 train_time:158006ms step_avg:96.23ms +step:1643/1750 train_time:158105ms step_avg:96.23ms +step:1644/1750 train_time:158205ms step_avg:96.23ms +step:1645/1750 train_time:158305ms step_avg:96.23ms +step:1646/1750 train_time:158406ms step_avg:96.24ms +step:1647/1750 train_time:158509ms step_avg:96.24ms +step:1648/1750 train_time:158611ms step_avg:96.24ms +step:1649/1750 train_time:158712ms step_avg:96.25ms +step:1650/1750 train_time:158811ms step_avg:96.25ms +step:1651/1750 train_time:158910ms step_avg:96.25ms +step:1652/1750 train_time:159010ms step_avg:96.25ms +step:1653/1750 train_time:159111ms step_avg:96.26ms +step:1654/1750 train_time:159211ms step_avg:96.26ms +step:1655/1750 train_time:159311ms step_avg:96.26ms +step:1656/1750 train_time:159412ms step_avg:96.26ms +step:1657/1750 train_time:159512ms step_avg:96.27ms +step:1658/1750 train_time:159614ms step_avg:96.27ms +step:1659/1750 train_time:159719ms step_avg:96.27ms +step:1660/1750 train_time:159820ms step_avg:96.28ms +step:1661/1750 train_time:159921ms step_avg:96.28ms +step:1662/1750 train_time:160021ms step_avg:96.28ms +step:1663/1750 train_time:160122ms step_avg:96.29ms +step:1664/1750 train_time:160222ms step_avg:96.29ms +step:1665/1750 train_time:160323ms step_avg:96.29ms +step:1666/1750 train_time:160425ms step_avg:96.29ms +step:1667/1750 train_time:160525ms step_avg:96.30ms +step:1668/1750 train_time:160628ms step_avg:96.30ms +step:1669/1750 train_time:160729ms step_avg:96.30ms +step:1670/1750 train_time:160830ms step_avg:96.31ms +step:1671/1750 train_time:160930ms step_avg:96.31ms +step:1672/1750 train_time:161031ms step_avg:96.31ms +step:1673/1750 train_time:161130ms step_avg:96.31ms +step:1674/1750 train_time:161232ms step_avg:96.32ms +step:1675/1750 train_time:161334ms step_avg:96.32ms +step:1676/1750 train_time:161434ms step_avg:96.32ms +step:1677/1750 train_time:161535ms step_avg:96.32ms +step:1678/1750 train_time:161635ms step_avg:96.33ms +step:1679/1750 train_time:161736ms step_avg:96.33ms +step:1680/1750 train_time:161836ms step_avg:96.33ms +step:1681/1750 train_time:161938ms step_avg:96.33ms +step:1682/1750 train_time:162042ms step_avg:96.34ms +step:1683/1750 train_time:162142ms step_avg:96.34ms +step:1684/1750 train_time:162244ms step_avg:96.34ms +step:1685/1750 train_time:162343ms step_avg:96.35ms +step:1686/1750 train_time:162443ms step_avg:96.35ms +step:1687/1750 train_time:162543ms step_avg:96.35ms +step:1688/1750 train_time:162644ms step_avg:96.35ms +step:1689/1750 train_time:162744ms step_avg:96.36ms +step:1690/1750 train_time:162846ms step_avg:96.36ms +step:1691/1750 train_time:162948ms step_avg:96.36ms +step:1692/1750 train_time:163048ms step_avg:96.36ms +step:1693/1750 train_time:163149ms step_avg:96.37ms +step:1694/1750 train_time:163250ms step_avg:96.37ms +step:1695/1750 train_time:163353ms step_avg:96.37ms +step:1696/1750 train_time:163453ms step_avg:96.38ms +step:1697/1750 train_time:163556ms step_avg:96.38ms +step:1698/1750 train_time:163656ms step_avg:96.38ms +step:1699/1750 train_time:163757ms step_avg:96.38ms +step:1700/1750 train_time:163858ms step_avg:96.39ms +step:1701/1750 
train_time:163958ms step_avg:96.39ms +step:1702/1750 train_time:164063ms step_avg:96.39ms +step:1703/1750 train_time:164164ms step_avg:96.40ms +step:1704/1750 train_time:164266ms step_avg:96.40ms +step:1705/1750 train_time:164366ms step_avg:96.40ms +step:1706/1750 train_time:164466ms step_avg:96.40ms +step:1707/1750 train_time:164569ms step_avg:96.41ms +step:1708/1750 train_time:164672ms step_avg:96.41ms +step:1709/1750 train_time:164774ms step_avg:96.42ms +step:1710/1750 train_time:164875ms step_avg:96.42ms +step:1711/1750 train_time:164976ms step_avg:96.42ms +step:1712/1750 train_time:165077ms step_avg:96.42ms +step:1713/1750 train_time:165178ms step_avg:96.43ms +step:1714/1750 train_time:165277ms step_avg:96.43ms +step:1715/1750 train_time:165381ms step_avg:96.43ms +step:1716/1750 train_time:165484ms step_avg:96.44ms +step:1717/1750 train_time:165585ms step_avg:96.44ms +step:1718/1750 train_time:165686ms step_avg:96.44ms +step:1719/1750 train_time:165790ms step_avg:96.45ms +step:1720/1750 train_time:165891ms step_avg:96.45ms +step:1721/1750 train_time:165993ms step_avg:96.45ms +step:1722/1750 train_time:166094ms step_avg:96.45ms +step:1723/1750 train_time:166195ms step_avg:96.46ms +step:1724/1750 train_time:166297ms step_avg:96.46ms +step:1725/1750 train_time:166398ms step_avg:96.46ms +step:1726/1750 train_time:166500ms step_avg:96.47ms +step:1727/1750 train_time:166601ms step_avg:96.47ms +step:1728/1750 train_time:166705ms step_avg:96.47ms +step:1729/1750 train_time:166809ms step_avg:96.48ms +step:1730/1750 train_time:166908ms step_avg:96.48ms +step:1731/1750 train_time:167009ms step_avg:96.48ms +step:1732/1750 train_time:167110ms step_avg:96.48ms +step:1733/1750 train_time:167212ms step_avg:96.49ms +step:1734/1750 train_time:167314ms step_avg:96.49ms +step:1735/1750 train_time:167415ms step_avg:96.49ms +step:1736/1750 train_time:167516ms step_avg:96.50ms +step:1737/1750 train_time:167617ms step_avg:96.50ms +step:1738/1750 train_time:167718ms step_avg:96.50ms +step:1739/1750 train_time:167820ms step_avg:96.50ms +step:1740/1750 train_time:167921ms step_avg:96.51ms +step:1741/1750 train_time:168026ms step_avg:96.51ms +step:1742/1750 train_time:168126ms step_avg:96.51ms +step:1743/1750 train_time:168226ms step_avg:96.52ms +step:1744/1750 train_time:168327ms step_avg:96.52ms +step:1745/1750 train_time:168427ms step_avg:96.52ms +step:1746/1750 train_time:168528ms step_avg:96.52ms +step:1747/1750 train_time:168630ms step_avg:96.53ms +step:1748/1750 train_time:168733ms step_avg:96.53ms +step:1749/1750 train_time:168834ms step_avg:96.53ms +step:1750/1750 train_time:168935ms step_avg:96.53ms +step:1750/1750 val_loss:3.2780 train_time:169042ms step_avg:96.60ms +peak memory allocated: 34460 MiB reserved: 48914 MiB \ No newline at end of file diff --git a/records/082325_SparseAttnGate/020630eb-2191-4ba2-9ee4-4cdc94316943.txt b/records/082325_SparseAttnGate/020630eb-2191-4ba2-9ee4-4cdc94316943.txt new file mode 100644 index 000000000..70325646a --- /dev/null +++ b/records/082325_SparseAttnGate/020630eb-2191-4ba2-9ee4-4cdc94316943.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch 
import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + 
grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * 
c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + 
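# (after these two statements the accumulator holds alpha * (A @ A.T) + beta * A, +    # which is exactly the fused epilogue that distinguishes ns_line_2 from ns_line_1) +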
accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
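+ +    One Newton-Schulz step, as implemented in newton_schulz_triton above (5 iterations, +    coefficients a, b, c = 3.4445, -4.7750, 2.0315), computes: + +        A = X @ X.mT +        B = b * A + c * A @ A +        X = a * X + B @ X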
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
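+        # these lr_mul multipliers are read by both optimizers via getattr(p, "lr_mul", 1.0), +        # scaling the effective learning rate per tensor without needing extra param groups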
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + + # ----------------------------------------------------------------------------- + # Distributed data loader + + def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + + # find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap + def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + + def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
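# every rank now holds the same mean validation loss; print0 writes it to the log +            # from the master process only +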
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:51:59 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 31C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 321540 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 321541 C /usr/bin/python3 614MiB | +| 0 N/A N/A 321542 C /usr/bin/python3 614MiB | +| 0 N/A N/A 321543 C /usr/bin/python3 614MiB | +| 0 N/A N/A 321544 C /usr/bin/python3 614MiB | +| 0 N/A N/A 321545 C /usr/bin/python3 614MiB | +| 0 N/A N/A 321546 C /usr/bin/python3 614MiB | +| 0 N/A N/A 321547 C /usr/bin/python3 614MiB | +| 1 N/A N/A 321541 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 321542 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 321543 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 321544 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 321545 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 321546 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 321547 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:153ms step_avg:153.49ms +step:2/1695 train_time:179ms step_avg:89.47ms +step:3/1695 train_time:250ms step_avg:83.25ms +step:4/1695 train_time:342ms step_avg:85.39ms +step:5/1695 train_time:435ms step_avg:86.93ms +step:6/1695 train_time:527ms step_avg:87.77ms +step:7/1695 train_time:620ms step_avg:88.62ms +step:8/1695 
train_time:713ms step_avg:89.16ms +step:9/1695 train_time:806ms step_avg:89.56ms +step:10/1695 train_time:899ms step_avg:89.92ms +step:11/1695 train_time:992ms step_avg:90.20ms +step:12/1695 train_time:1086ms step_avg:90.53ms +step:13/1695 train_time:1183ms step_avg:91.00ms +step:14/1695 train_time:1278ms step_avg:91.26ms +step:15/1695 train_time:1371ms step_avg:91.40ms +step:16/1695 train_time:1465ms step_avg:91.55ms +step:17/1695 train_time:1558ms step_avg:91.67ms +step:18/1695 train_time:1651ms step_avg:91.74ms +step:19/1695 train_time:1744ms step_avg:91.81ms +step:20/1695 train_time:1839ms step_avg:91.97ms +step:21/1695 train_time:1934ms step_avg:92.08ms +step:22/1695 train_time:2027ms step_avg:92.15ms +step:23/1695 train_time:2122ms step_avg:92.25ms +step:24/1695 train_time:2216ms step_avg:92.35ms +step:25/1695 train_time:2310ms step_avg:92.41ms +step:26/1695 train_time:2404ms step_avg:92.47ms +step:27/1695 train_time:2498ms step_avg:92.50ms +step:28/1695 train_time:2591ms step_avg:92.55ms +step:29/1695 train_time:2684ms step_avg:92.57ms +step:30/1695 train_time:2778ms step_avg:92.59ms +step:31/1695 train_time:2872ms step_avg:92.63ms +step:32/1695 train_time:2966ms step_avg:92.68ms +step:33/1695 train_time:3060ms step_avg:92.73ms +step:34/1695 train_time:3154ms step_avg:92.78ms +step:35/1695 train_time:3247ms step_avg:92.79ms +step:36/1695 train_time:3342ms step_avg:92.84ms +step:37/1695 train_time:3437ms step_avg:92.89ms +step:38/1695 train_time:3531ms step_avg:92.91ms +step:39/1695 train_time:3624ms step_avg:92.93ms +step:40/1695 train_time:3719ms step_avg:92.96ms +step:41/1695 train_time:3813ms step_avg:93.01ms +step:42/1695 train_time:3907ms step_avg:93.03ms +step:43/1695 train_time:4001ms step_avg:93.05ms +step:44/1695 train_time:4095ms step_avg:93.06ms +step:45/1695 train_time:4188ms step_avg:93.06ms +step:46/1695 train_time:4281ms step_avg:93.06ms +step:47/1695 train_time:4374ms step_avg:93.07ms +step:48/1695 train_time:4469ms step_avg:93.11ms +step:49/1695 train_time:4563ms step_avg:93.12ms +step:50/1695 train_time:4658ms step_avg:93.16ms +step:51/1695 train_time:4751ms step_avg:93.16ms +step:52/1695 train_time:4845ms step_avg:93.17ms +step:53/1695 train_time:4939ms step_avg:93.20ms +step:54/1695 train_time:5034ms step_avg:93.22ms +step:55/1695 train_time:5127ms step_avg:93.22ms +step:56/1695 train_time:5220ms step_avg:93.22ms +step:57/1695 train_time:5314ms step_avg:93.24ms +step:58/1695 train_time:5409ms step_avg:93.25ms +step:59/1695 train_time:5502ms step_avg:93.26ms +step:60/1695 train_time:5596ms step_avg:93.27ms +step:61/1695 train_time:5690ms step_avg:93.27ms +step:62/1695 train_time:5783ms step_avg:93.28ms +step:63/1695 train_time:5877ms step_avg:93.28ms +step:64/1695 train_time:5970ms step_avg:93.29ms +step:65/1695 train_time:6065ms step_avg:93.30ms +step:66/1695 train_time:6159ms step_avg:93.31ms +step:67/1695 train_time:6252ms step_avg:93.31ms +step:68/1695 train_time:6346ms step_avg:93.32ms +step:69/1695 train_time:6440ms step_avg:93.34ms +step:70/1695 train_time:6535ms step_avg:93.36ms +step:71/1695 train_time:6629ms step_avg:93.36ms +step:72/1695 train_time:6724ms step_avg:93.38ms +step:73/1695 train_time:6818ms step_avg:93.39ms +step:74/1695 train_time:6912ms step_avg:93.40ms +step:75/1695 train_time:7005ms step_avg:93.40ms +step:76/1695 train_time:7099ms step_avg:93.41ms +step:77/1695 train_time:7193ms step_avg:93.42ms +step:78/1695 train_time:7287ms step_avg:93.42ms +step:79/1695 train_time:7380ms step_avg:93.42ms +step:80/1695 train_time:7475ms 
step_avg:93.44ms +step:81/1695 train_time:7568ms step_avg:93.44ms +step:82/1695 train_time:7662ms step_avg:93.44ms +step:83/1695 train_time:7756ms step_avg:93.45ms +step:84/1695 train_time:7850ms step_avg:93.45ms +step:85/1695 train_time:7943ms step_avg:93.45ms +step:86/1695 train_time:8037ms step_avg:93.46ms +step:87/1695 train_time:8130ms step_avg:93.45ms +step:88/1695 train_time:8224ms step_avg:93.46ms +step:89/1695 train_time:8319ms step_avg:93.47ms +step:90/1695 train_time:8413ms step_avg:93.48ms +step:91/1695 train_time:8507ms step_avg:93.48ms +step:92/1695 train_time:8600ms step_avg:93.48ms +step:93/1695 train_time:8694ms step_avg:93.48ms +step:94/1695 train_time:8787ms step_avg:93.48ms +step:95/1695 train_time:8880ms step_avg:93.47ms +step:96/1695 train_time:8974ms step_avg:93.48ms +step:97/1695 train_time:9068ms step_avg:93.48ms +step:98/1695 train_time:9161ms step_avg:93.48ms +step:99/1695 train_time:9254ms step_avg:93.48ms +step:100/1695 train_time:9348ms step_avg:93.48ms +step:101/1695 train_time:9442ms step_avg:93.48ms +step:102/1695 train_time:9536ms step_avg:93.49ms +step:103/1695 train_time:9630ms step_avg:93.49ms +step:104/1695 train_time:9723ms step_avg:93.49ms +step:105/1695 train_time:9817ms step_avg:93.49ms +step:106/1695 train_time:9910ms step_avg:93.49ms +step:107/1695 train_time:10004ms step_avg:93.49ms +step:108/1695 train_time:10098ms step_avg:93.50ms +step:109/1695 train_time:10191ms step_avg:93.49ms +step:110/1695 train_time:10284ms step_avg:93.49ms +step:111/1695 train_time:10378ms step_avg:93.50ms +step:112/1695 train_time:10471ms step_avg:93.49ms +step:113/1695 train_time:10565ms step_avg:93.50ms +step:114/1695 train_time:10658ms step_avg:93.49ms +step:115/1695 train_time:10752ms step_avg:93.49ms +step:116/1695 train_time:10845ms step_avg:93.49ms +step:117/1695 train_time:10940ms step_avg:93.50ms +step:118/1695 train_time:11035ms step_avg:93.51ms +step:119/1695 train_time:11128ms step_avg:93.52ms +step:120/1695 train_time:11222ms step_avg:93.51ms +step:121/1695 train_time:11315ms step_avg:93.51ms +step:122/1695 train_time:11409ms step_avg:93.51ms +step:123/1695 train_time:11502ms step_avg:93.51ms +step:124/1695 train_time:11596ms step_avg:93.51ms +step:125/1695 train_time:11689ms step_avg:93.51ms +step:125/1695 val_loss:4.6029 train_time:11781ms step_avg:94.25ms +step:126/1695 train_time:11809ms step_avg:93.72ms +step:127/1695 train_time:11886ms step_avg:93.59ms +step:128/1695 train_time:11989ms step_avg:93.67ms +step:129/1695 train_time:12085ms step_avg:93.68ms +step:130/1695 train_time:12179ms step_avg:93.68ms +step:131/1695 train_time:12272ms step_avg:93.68ms +step:132/1695 train_time:12365ms step_avg:93.68ms +step:133/1695 train_time:12459ms step_avg:93.67ms +step:134/1695 train_time:12552ms step_avg:93.67ms +step:135/1695 train_time:12646ms step_avg:93.67ms +step:136/1695 train_time:12739ms step_avg:93.67ms +step:137/1695 train_time:12833ms step_avg:93.67ms +step:138/1695 train_time:12931ms step_avg:93.70ms +step:139/1695 train_time:13028ms step_avg:93.73ms +step:140/1695 train_time:13124ms step_avg:93.74ms +step:141/1695 train_time:13218ms step_avg:93.74ms +step:142/1695 train_time:13311ms step_avg:93.74ms +step:143/1695 train_time:13405ms step_avg:93.74ms +step:144/1695 train_time:13498ms step_avg:93.74ms +step:145/1695 train_time:13592ms step_avg:93.74ms +step:146/1695 train_time:13685ms step_avg:93.73ms +step:147/1695 train_time:13779ms step_avg:93.73ms +step:148/1695 train_time:13874ms step_avg:93.74ms +step:149/1695 train_time:13968ms 
step_avg:93.74ms +step:150/1695 train_time:14063ms step_avg:93.75ms +step:151/1695 train_time:14157ms step_avg:93.76ms +step:152/1695 train_time:14251ms step_avg:93.76ms +step:153/1695 train_time:14345ms step_avg:93.76ms +step:154/1695 train_time:14439ms step_avg:93.76ms +step:155/1695 train_time:14533ms step_avg:93.76ms +step:156/1695 train_time:14627ms step_avg:93.76ms +step:157/1695 train_time:14721ms step_avg:93.77ms +step:158/1695 train_time:14816ms step_avg:93.77ms +step:159/1695 train_time:14910ms step_avg:93.77ms +step:160/1695 train_time:15005ms step_avg:93.78ms +step:161/1695 train_time:15100ms step_avg:93.79ms +step:162/1695 train_time:15195ms step_avg:93.79ms +step:163/1695 train_time:15289ms step_avg:93.80ms +step:164/1695 train_time:15384ms step_avg:93.80ms +step:165/1695 train_time:15478ms step_avg:93.81ms +step:166/1695 train_time:15572ms step_avg:93.81ms +step:167/1695 train_time:15666ms step_avg:93.81ms +step:168/1695 train_time:15760ms step_avg:93.81ms +step:169/1695 train_time:15853ms step_avg:93.80ms +step:170/1695 train_time:15947ms step_avg:93.81ms +step:171/1695 train_time:16041ms step_avg:93.81ms +step:172/1695 train_time:16135ms step_avg:93.81ms +step:173/1695 train_time:16230ms step_avg:93.81ms +step:174/1695 train_time:16324ms step_avg:93.82ms +step:175/1695 train_time:16419ms step_avg:93.82ms +step:176/1695 train_time:16513ms step_avg:93.82ms +step:177/1695 train_time:16606ms step_avg:93.82ms +step:178/1695 train_time:16700ms step_avg:93.82ms +step:179/1695 train_time:16795ms step_avg:93.83ms +step:180/1695 train_time:16888ms step_avg:93.82ms +step:181/1695 train_time:16983ms step_avg:93.83ms +step:182/1695 train_time:17077ms step_avg:93.83ms +step:183/1695 train_time:17171ms step_avg:93.83ms +step:184/1695 train_time:17265ms step_avg:93.83ms +step:185/1695 train_time:17359ms step_avg:93.83ms +step:186/1695 train_time:17453ms step_avg:93.83ms +step:187/1695 train_time:17547ms step_avg:93.83ms +step:188/1695 train_time:17642ms step_avg:93.84ms +step:189/1695 train_time:17736ms step_avg:93.84ms +step:190/1695 train_time:17829ms step_avg:93.84ms +step:191/1695 train_time:17923ms step_avg:93.84ms +step:192/1695 train_time:18016ms step_avg:93.84ms +step:193/1695 train_time:18110ms step_avg:93.84ms +step:194/1695 train_time:18205ms step_avg:93.84ms +step:195/1695 train_time:18299ms step_avg:93.84ms +step:196/1695 train_time:18392ms step_avg:93.84ms +step:197/1695 train_time:18486ms step_avg:93.84ms +step:198/1695 train_time:18581ms step_avg:93.84ms +step:199/1695 train_time:18676ms step_avg:93.85ms +step:200/1695 train_time:18770ms step_avg:93.85ms +step:201/1695 train_time:18864ms step_avg:93.85ms +step:202/1695 train_time:18958ms step_avg:93.85ms +step:203/1695 train_time:19052ms step_avg:93.85ms +step:204/1695 train_time:19145ms step_avg:93.85ms +step:205/1695 train_time:19239ms step_avg:93.85ms +step:206/1695 train_time:19333ms step_avg:93.85ms +step:207/1695 train_time:19427ms step_avg:93.85ms +step:208/1695 train_time:19521ms step_avg:93.85ms +step:209/1695 train_time:19616ms step_avg:93.86ms +step:210/1695 train_time:19710ms step_avg:93.86ms +step:211/1695 train_time:19805ms step_avg:93.86ms +step:212/1695 train_time:19899ms step_avg:93.87ms +step:213/1695 train_time:19992ms step_avg:93.86ms +step:214/1695 train_time:20086ms step_avg:93.86ms +step:215/1695 train_time:20181ms step_avg:93.86ms +step:216/1695 train_time:20275ms step_avg:93.86ms +step:217/1695 train_time:20370ms step_avg:93.87ms +step:218/1695 train_time:20464ms step_avg:93.87ms +step:219/1695 
train_time:20558ms step_avg:93.87ms +step:220/1695 train_time:20652ms step_avg:93.87ms +step:221/1695 train_time:20747ms step_avg:93.88ms +step:222/1695 train_time:20842ms step_avg:93.88ms +step:223/1695 train_time:20936ms step_avg:93.88ms +step:224/1695 train_time:21030ms step_avg:93.88ms +step:225/1695 train_time:21124ms step_avg:93.88ms +step:226/1695 train_time:21218ms step_avg:93.89ms +step:227/1695 train_time:21312ms step_avg:93.89ms +step:228/1695 train_time:21406ms step_avg:93.89ms +step:229/1695 train_time:21501ms step_avg:93.89ms +step:230/1695 train_time:21595ms step_avg:93.89ms +step:231/1695 train_time:21689ms step_avg:93.89ms +step:232/1695 train_time:21783ms step_avg:93.89ms +step:233/1695 train_time:21878ms step_avg:93.90ms +step:234/1695 train_time:21972ms step_avg:93.90ms +step:235/1695 train_time:22066ms step_avg:93.90ms +step:236/1695 train_time:22159ms step_avg:93.90ms +step:237/1695 train_time:22253ms step_avg:93.90ms +step:238/1695 train_time:22347ms step_avg:93.89ms +step:239/1695 train_time:22442ms step_avg:93.90ms +step:240/1695 train_time:22537ms step_avg:93.91ms +step:241/1695 train_time:22631ms step_avg:93.90ms +step:242/1695 train_time:22724ms step_avg:93.90ms +step:243/1695 train_time:22819ms step_avg:93.91ms +step:244/1695 train_time:22913ms step_avg:93.91ms +step:245/1695 train_time:23008ms step_avg:93.91ms +step:246/1695 train_time:23102ms step_avg:93.91ms +step:247/1695 train_time:23196ms step_avg:93.91ms +step:248/1695 train_time:23290ms step_avg:93.91ms +step:249/1695 train_time:23384ms step_avg:93.91ms +step:250/1695 train_time:23478ms step_avg:93.91ms +step:250/1695 val_loss:4.0788 train_time:23571ms step_avg:94.28ms +step:251/1695 train_time:23598ms step_avg:94.02ms +step:252/1695 train_time:23676ms step_avg:93.95ms +step:253/1695 train_time:23777ms step_avg:93.98ms +step:254/1695 train_time:23872ms step_avg:93.98ms +step:255/1695 train_time:23966ms step_avg:93.98ms +step:256/1695 train_time:24060ms step_avg:93.98ms +step:257/1695 train_time:24153ms step_avg:93.98ms +step:258/1695 train_time:24248ms step_avg:93.98ms +step:259/1695 train_time:24341ms step_avg:93.98ms +step:260/1695 train_time:24435ms step_avg:93.98ms +step:261/1695 train_time:24529ms step_avg:93.98ms +step:262/1695 train_time:24623ms step_avg:93.98ms +step:263/1695 train_time:24720ms step_avg:93.99ms +step:264/1695 train_time:24816ms step_avg:94.00ms +step:265/1695 train_time:24910ms step_avg:94.00ms +step:266/1695 train_time:25004ms step_avg:94.00ms +step:267/1695 train_time:25098ms step_avg:94.00ms +step:268/1695 train_time:25192ms step_avg:94.00ms +step:269/1695 train_time:25286ms step_avg:94.00ms +step:270/1695 train_time:25380ms step_avg:94.00ms +step:271/1695 train_time:25473ms step_avg:94.00ms +step:272/1695 train_time:25568ms step_avg:94.00ms +step:273/1695 train_time:25664ms step_avg:94.01ms +step:274/1695 train_time:25759ms step_avg:94.01ms +step:275/1695 train_time:25854ms step_avg:94.01ms +step:276/1695 train_time:25948ms step_avg:94.02ms +step:277/1695 train_time:26043ms step_avg:94.02ms +step:278/1695 train_time:26138ms step_avg:94.02ms +step:279/1695 train_time:26232ms step_avg:94.02ms +step:280/1695 train_time:26326ms step_avg:94.02ms +step:281/1695 train_time:26420ms step_avg:94.02ms +step:282/1695 train_time:26514ms step_avg:94.02ms +step:283/1695 train_time:26608ms step_avg:94.02ms +step:284/1695 train_time:26703ms step_avg:94.02ms +step:285/1695 train_time:26799ms step_avg:94.03ms +step:286/1695 train_time:26893ms step_avg:94.03ms +step:287/1695 train_time:26988ms 
step_avg:94.03ms +step:288/1695 train_time:27082ms step_avg:94.04ms +step:289/1695 train_time:27177ms step_avg:94.04ms +step:290/1695 train_time:27271ms step_avg:94.04ms +step:291/1695 train_time:27365ms step_avg:94.04ms +step:292/1695 train_time:27460ms step_avg:94.04ms +step:293/1695 train_time:27554ms step_avg:94.04ms +step:294/1695 train_time:27648ms step_avg:94.04ms +step:295/1695 train_time:27743ms step_avg:94.04ms +step:296/1695 train_time:27838ms step_avg:94.05ms +step:297/1695 train_time:27933ms step_avg:94.05ms +step:298/1695 train_time:28026ms step_avg:94.05ms +step:299/1695 train_time:28121ms step_avg:94.05ms +step:300/1695 train_time:28216ms step_avg:94.05ms +step:301/1695 train_time:28310ms step_avg:94.05ms +step:302/1695 train_time:28404ms step_avg:94.05ms +step:303/1695 train_time:28499ms step_avg:94.06ms +step:304/1695 train_time:28593ms step_avg:94.06ms +step:305/1695 train_time:28687ms step_avg:94.06ms +step:306/1695 train_time:28782ms step_avg:94.06ms +step:307/1695 train_time:28877ms step_avg:94.06ms +step:308/1695 train_time:28971ms step_avg:94.06ms +step:309/1695 train_time:29065ms step_avg:94.06ms +step:310/1695 train_time:29159ms step_avg:94.06ms +step:311/1695 train_time:29254ms step_avg:94.06ms +step:312/1695 train_time:29348ms step_avg:94.06ms +step:313/1695 train_time:29442ms step_avg:94.06ms +step:314/1695 train_time:29538ms step_avg:94.07ms +step:315/1695 train_time:29631ms step_avg:94.07ms +step:316/1695 train_time:29725ms step_avg:94.07ms +step:317/1695 train_time:29820ms step_avg:94.07ms +step:318/1695 train_time:29915ms step_avg:94.07ms +step:319/1695 train_time:30009ms step_avg:94.07ms +step:320/1695 train_time:30104ms step_avg:94.07ms +step:321/1695 train_time:30198ms step_avg:94.08ms +step:322/1695 train_time:30293ms step_avg:94.08ms +step:323/1695 train_time:30387ms step_avg:94.08ms +step:324/1695 train_time:30482ms step_avg:94.08ms +step:325/1695 train_time:30577ms step_avg:94.08ms +step:326/1695 train_time:30670ms step_avg:94.08ms +step:327/1695 train_time:30765ms step_avg:94.08ms +step:328/1695 train_time:30859ms step_avg:94.08ms +step:329/1695 train_time:30953ms step_avg:94.08ms +step:330/1695 train_time:31048ms step_avg:94.08ms +step:331/1695 train_time:31142ms step_avg:94.09ms +step:332/1695 train_time:31237ms step_avg:94.09ms +step:333/1695 train_time:31332ms step_avg:94.09ms +step:334/1695 train_time:31426ms step_avg:94.09ms +step:335/1695 train_time:31521ms step_avg:94.09ms +step:336/1695 train_time:31616ms step_avg:94.10ms +step:337/1695 train_time:31710ms step_avg:94.10ms +step:338/1695 train_time:31805ms step_avg:94.10ms +step:339/1695 train_time:31900ms step_avg:94.10ms +step:340/1695 train_time:31994ms step_avg:94.10ms +step:341/1695 train_time:32088ms step_avg:94.10ms +step:342/1695 train_time:32183ms step_avg:94.10ms +step:343/1695 train_time:32278ms step_avg:94.10ms +step:344/1695 train_time:32372ms step_avg:94.10ms +step:345/1695 train_time:32466ms step_avg:94.10ms +step:346/1695 train_time:32561ms step_avg:94.11ms +step:347/1695 train_time:32655ms step_avg:94.11ms +step:348/1695 train_time:32749ms step_avg:94.11ms +step:349/1695 train_time:32844ms step_avg:94.11ms +step:350/1695 train_time:32937ms step_avg:94.11ms +step:351/1695 train_time:33032ms step_avg:94.11ms +step:352/1695 train_time:33126ms step_avg:94.11ms +step:353/1695 train_time:33221ms step_avg:94.11ms +step:354/1695 train_time:33315ms step_avg:94.11ms +step:355/1695 train_time:33409ms step_avg:94.11ms +step:356/1695 train_time:33503ms step_avg:94.11ms +step:357/1695 
train_time:33598ms step_avg:94.11ms +step:358/1695 train_time:33692ms step_avg:94.11ms +step:359/1695 train_time:33787ms step_avg:94.11ms +step:360/1695 train_time:33882ms step_avg:94.12ms +step:361/1695 train_time:33977ms step_avg:94.12ms +step:362/1695 train_time:34071ms step_avg:94.12ms +step:363/1695 train_time:34165ms step_avg:94.12ms +step:364/1695 train_time:34261ms step_avg:94.12ms +step:365/1695 train_time:34356ms step_avg:94.13ms +step:366/1695 train_time:34450ms step_avg:94.13ms +step:367/1695 train_time:34545ms step_avg:94.13ms +step:368/1695 train_time:34640ms step_avg:94.13ms +step:369/1695 train_time:34735ms step_avg:94.13ms +step:370/1695 train_time:34829ms step_avg:94.13ms +step:371/1695 train_time:34924ms step_avg:94.13ms +step:372/1695 train_time:35018ms step_avg:94.13ms +step:373/1695 train_time:35112ms step_avg:94.13ms +step:374/1695 train_time:35206ms step_avg:94.13ms +step:375/1695 train_time:35300ms step_avg:94.13ms +step:375/1695 val_loss:3.8792 train_time:35392ms step_avg:94.38ms +step:376/1695 train_time:35419ms step_avg:94.20ms +step:377/1695 train_time:35496ms step_avg:94.15ms +step:378/1695 train_time:35596ms step_avg:94.17ms +step:379/1695 train_time:35694ms step_avg:94.18ms +step:380/1695 train_time:35789ms step_avg:94.18ms +step:381/1695 train_time:35884ms step_avg:94.18ms +step:382/1695 train_time:35980ms step_avg:94.19ms +step:383/1695 train_time:36076ms step_avg:94.19ms +step:384/1695 train_time:36172ms step_avg:94.20ms +step:385/1695 train_time:36267ms step_avg:94.20ms +step:386/1695 train_time:36363ms step_avg:94.20ms +step:387/1695 train_time:36460ms step_avg:94.21ms +step:388/1695 train_time:36557ms step_avg:94.22ms +step:389/1695 train_time:36654ms step_avg:94.23ms +step:390/1695 train_time:36752ms step_avg:94.23ms +step:391/1695 train_time:36849ms step_avg:94.24ms +step:392/1695 train_time:36944ms step_avg:94.25ms +step:393/1695 train_time:37040ms step_avg:94.25ms +step:394/1695 train_time:37136ms step_avg:94.25ms +step:395/1695 train_time:37231ms step_avg:94.26ms +step:396/1695 train_time:37327ms step_avg:94.26ms +step:397/1695 train_time:37422ms step_avg:94.26ms +step:398/1695 train_time:37519ms step_avg:94.27ms +step:399/1695 train_time:37615ms step_avg:94.27ms +step:400/1695 train_time:37713ms step_avg:94.28ms +step:401/1695 train_time:37811ms step_avg:94.29ms +step:402/1695 train_time:37908ms step_avg:94.30ms +step:403/1695 train_time:38003ms step_avg:94.30ms +step:404/1695 train_time:38099ms step_avg:94.31ms +step:405/1695 train_time:38196ms step_avg:94.31ms +step:406/1695 train_time:38291ms step_avg:94.31ms +step:407/1695 train_time:38387ms step_avg:94.32ms +step:408/1695 train_time:38483ms step_avg:94.32ms +step:409/1695 train_time:38579ms step_avg:94.33ms +step:410/1695 train_time:38675ms step_avg:94.33ms +step:411/1695 train_time:38772ms step_avg:94.34ms +step:412/1695 train_time:38869ms step_avg:94.34ms +step:413/1695 train_time:38967ms step_avg:94.35ms +step:414/1695 train_time:39063ms step_avg:94.35ms +step:415/1695 train_time:39158ms step_avg:94.36ms +step:416/1695 train_time:39255ms step_avg:94.36ms +step:417/1695 train_time:39350ms step_avg:94.36ms +step:418/1695 train_time:39446ms step_avg:94.37ms +step:419/1695 train_time:39541ms step_avg:94.37ms +step:420/1695 train_time:39638ms step_avg:94.38ms +step:421/1695 train_time:39734ms step_avg:94.38ms +step:422/1695 train_time:39830ms step_avg:94.38ms +step:423/1695 train_time:39926ms step_avg:94.39ms +step:424/1695 train_time:40022ms step_avg:94.39ms +step:425/1695 train_time:40118ms 
step_avg:94.40ms +step:426/1695 train_time:40214ms step_avg:94.40ms +step:427/1695 train_time:40312ms step_avg:94.41ms +step:428/1695 train_time:40408ms step_avg:94.41ms +step:429/1695 train_time:40504ms step_avg:94.41ms +step:430/1695 train_time:40599ms step_avg:94.42ms +step:431/1695 train_time:40696ms step_avg:94.42ms +step:432/1695 train_time:40792ms step_avg:94.42ms +step:433/1695 train_time:40887ms step_avg:94.43ms +step:434/1695 train_time:40983ms step_avg:94.43ms +step:435/1695 train_time:41080ms step_avg:94.44ms +step:436/1695 train_time:41177ms step_avg:94.44ms +step:437/1695 train_time:41274ms step_avg:94.45ms +step:438/1695 train_time:41370ms step_avg:94.45ms +step:439/1695 train_time:41467ms step_avg:94.46ms +step:440/1695 train_time:41563ms step_avg:94.46ms +step:441/1695 train_time:41658ms step_avg:94.46ms +step:442/1695 train_time:41754ms step_avg:94.47ms +step:443/1695 train_time:41850ms step_avg:94.47ms +step:444/1695 train_time:41947ms step_avg:94.47ms +step:445/1695 train_time:42042ms step_avg:94.48ms +step:446/1695 train_time:42138ms step_avg:94.48ms +step:447/1695 train_time:42234ms step_avg:94.48ms +step:448/1695 train_time:42330ms step_avg:94.49ms +step:449/1695 train_time:42426ms step_avg:94.49ms +step:450/1695 train_time:42522ms step_avg:94.49ms +step:451/1695 train_time:42618ms step_avg:94.50ms +step:452/1695 train_time:42714ms step_avg:94.50ms +step:453/1695 train_time:42810ms step_avg:94.50ms +step:454/1695 train_time:42906ms step_avg:94.51ms +step:455/1695 train_time:43002ms step_avg:94.51ms +step:456/1695 train_time:43098ms step_avg:94.51ms +step:457/1695 train_time:43194ms step_avg:94.52ms +step:458/1695 train_time:43290ms step_avg:94.52ms +step:459/1695 train_time:43385ms step_avg:94.52ms +step:460/1695 train_time:43481ms step_avg:94.52ms +step:461/1695 train_time:43578ms step_avg:94.53ms +step:462/1695 train_time:43674ms step_avg:94.53ms +step:463/1695 train_time:43770ms step_avg:94.54ms +step:464/1695 train_time:43866ms step_avg:94.54ms +step:465/1695 train_time:43962ms step_avg:94.54ms +step:466/1695 train_time:44058ms step_avg:94.54ms +step:467/1695 train_time:44154ms step_avg:94.55ms +step:468/1695 train_time:44250ms step_avg:94.55ms +step:469/1695 train_time:44346ms step_avg:94.55ms +step:470/1695 train_time:44442ms step_avg:94.56ms +step:471/1695 train_time:44538ms step_avg:94.56ms +step:472/1695 train_time:44634ms step_avg:94.56ms +step:473/1695 train_time:44730ms step_avg:94.57ms +step:474/1695 train_time:44826ms step_avg:94.57ms +step:475/1695 train_time:44922ms step_avg:94.57ms +step:476/1695 train_time:45019ms step_avg:94.58ms +step:477/1695 train_time:45115ms step_avg:94.58ms +step:478/1695 train_time:45211ms step_avg:94.58ms +step:479/1695 train_time:45307ms step_avg:94.59ms +step:480/1695 train_time:45403ms step_avg:94.59ms +step:481/1695 train_time:45499ms step_avg:94.59ms +step:482/1695 train_time:45595ms step_avg:94.60ms +step:483/1695 train_time:45692ms step_avg:94.60ms +step:484/1695 train_time:45788ms step_avg:94.60ms +step:485/1695 train_time:45884ms step_avg:94.61ms +step:486/1695 train_time:45980ms step_avg:94.61ms +step:487/1695 train_time:46076ms step_avg:94.61ms +step:488/1695 train_time:46173ms step_avg:94.62ms +step:489/1695 train_time:46270ms step_avg:94.62ms +step:490/1695 train_time:46365ms step_avg:94.62ms +step:491/1695 train_time:46462ms step_avg:94.63ms +step:492/1695 train_time:46557ms step_avg:94.63ms +step:493/1695 train_time:46654ms step_avg:94.63ms +step:494/1695 train_time:46750ms step_avg:94.64ms +step:495/1695 
train_time:46847ms step_avg:94.64ms +step:496/1695 train_time:46943ms step_avg:94.64ms +step:497/1695 train_time:47039ms step_avg:94.65ms +step:498/1695 train_time:47135ms step_avg:94.65ms +step:499/1695 train_time:47232ms step_avg:94.65ms +step:500/1695 train_time:47329ms step_avg:94.66ms +step:500/1695 val_loss:3.7326 train_time:47422ms step_avg:94.84ms +step:501/1695 train_time:47453ms step_avg:94.72ms +step:502/1695 train_time:47532ms step_avg:94.68ms +step:503/1695 train_time:47632ms step_avg:94.70ms +step:504/1695 train_time:47730ms step_avg:94.70ms +step:505/1695 train_time:47826ms step_avg:94.71ms +step:506/1695 train_time:47922ms step_avg:94.71ms +step:507/1695 train_time:48018ms step_avg:94.71ms +step:508/1695 train_time:48113ms step_avg:94.71ms +step:509/1695 train_time:48209ms step_avg:94.71ms +step:510/1695 train_time:48305ms step_avg:94.72ms +step:511/1695 train_time:48401ms step_avg:94.72ms +step:512/1695 train_time:48499ms step_avg:94.72ms +step:513/1695 train_time:48597ms step_avg:94.73ms +step:514/1695 train_time:48695ms step_avg:94.74ms +step:515/1695 train_time:48792ms step_avg:94.74ms +step:516/1695 train_time:48889ms step_avg:94.75ms +step:517/1695 train_time:48984ms step_avg:94.75ms +step:518/1695 train_time:49080ms step_avg:94.75ms +step:519/1695 train_time:49176ms step_avg:94.75ms +step:520/1695 train_time:49273ms step_avg:94.76ms +step:521/1695 train_time:49370ms step_avg:94.76ms +step:522/1695 train_time:49467ms step_avg:94.76ms +step:523/1695 train_time:49563ms step_avg:94.77ms +step:524/1695 train_time:49660ms step_avg:94.77ms +step:525/1695 train_time:49757ms step_avg:94.78ms +step:526/1695 train_time:49856ms step_avg:94.78ms +step:527/1695 train_time:49953ms step_avg:94.79ms +step:528/1695 train_time:50049ms step_avg:94.79ms +step:529/1695 train_time:50145ms step_avg:94.79ms +step:530/1695 train_time:50241ms step_avg:94.79ms +step:531/1695 train_time:50337ms step_avg:94.80ms +step:532/1695 train_time:50434ms step_avg:94.80ms +step:533/1695 train_time:50531ms step_avg:94.81ms +step:534/1695 train_time:50630ms step_avg:94.81ms +step:535/1695 train_time:50727ms step_avg:94.82ms +step:536/1695 train_time:50824ms step_avg:94.82ms +step:537/1695 train_time:50921ms step_avg:94.82ms +step:538/1695 train_time:51017ms step_avg:94.83ms +step:539/1695 train_time:51114ms step_avg:94.83ms +step:540/1695 train_time:51210ms step_avg:94.83ms +step:541/1695 train_time:51307ms step_avg:94.84ms +step:542/1695 train_time:51404ms step_avg:94.84ms +step:543/1695 train_time:51499ms step_avg:94.84ms +step:544/1695 train_time:51596ms step_avg:94.85ms +step:545/1695 train_time:51693ms step_avg:94.85ms +step:546/1695 train_time:51790ms step_avg:94.85ms +step:547/1695 train_time:51887ms step_avg:94.86ms +step:548/1695 train_time:51983ms step_avg:94.86ms +step:549/1695 train_time:52079ms step_avg:94.86ms +step:550/1695 train_time:52175ms step_avg:94.86ms +step:551/1695 train_time:52272ms step_avg:94.87ms +step:552/1695 train_time:52369ms step_avg:94.87ms +step:553/1695 train_time:52466ms step_avg:94.88ms +step:554/1695 train_time:52563ms step_avg:94.88ms +step:555/1695 train_time:52659ms step_avg:94.88ms +step:556/1695 train_time:52755ms step_avg:94.88ms +step:557/1695 train_time:52852ms step_avg:94.89ms +step:558/1695 train_time:52949ms step_avg:94.89ms +step:559/1695 train_time:53045ms step_avg:94.89ms +step:560/1695 train_time:53141ms step_avg:94.90ms +step:561/1695 train_time:53238ms step_avg:94.90ms +step:562/1695 train_time:53334ms step_avg:94.90ms +step:563/1695 train_time:53431ms 
step_avg:94.90ms +step:564/1695 train_time:53527ms step_avg:94.91ms +step:565/1695 train_time:53624ms step_avg:94.91ms +step:566/1695 train_time:53721ms step_avg:94.91ms +step:567/1695 train_time:53818ms step_avg:94.92ms +step:568/1695 train_time:53916ms step_avg:94.92ms +step:569/1695 train_time:54013ms step_avg:94.93ms +step:570/1695 train_time:54110ms step_avg:94.93ms +step:571/1695 train_time:54207ms step_avg:94.93ms +step:572/1695 train_time:54304ms step_avg:94.94ms +step:573/1695 train_time:54400ms step_avg:94.94ms +step:574/1695 train_time:54496ms step_avg:94.94ms +step:575/1695 train_time:54593ms step_avg:94.94ms +step:576/1695 train_time:54691ms step_avg:94.95ms +step:577/1695 train_time:54788ms step_avg:94.95ms +step:578/1695 train_time:54884ms step_avg:94.95ms +step:579/1695 train_time:54979ms step_avg:94.96ms +step:580/1695 train_time:55075ms step_avg:94.96ms +step:581/1695 train_time:55172ms step_avg:94.96ms +step:582/1695 train_time:55270ms step_avg:94.97ms +step:583/1695 train_time:55367ms step_avg:94.97ms +step:584/1695 train_time:55464ms step_avg:94.97ms +step:585/1695 train_time:55560ms step_avg:94.97ms +step:586/1695 train_time:55657ms step_avg:94.98ms +step:587/1695 train_time:55755ms step_avg:94.98ms +step:588/1695 train_time:55852ms step_avg:94.99ms +step:589/1695 train_time:55949ms step_avg:94.99ms +step:590/1695 train_time:56046ms step_avg:94.99ms +step:591/1695 train_time:56143ms step_avg:95.00ms +step:592/1695 train_time:56238ms step_avg:95.00ms +step:593/1695 train_time:56335ms step_avg:95.00ms +step:594/1695 train_time:56432ms step_avg:95.00ms +step:595/1695 train_time:56529ms step_avg:95.01ms +step:596/1695 train_time:56626ms step_avg:95.01ms +step:597/1695 train_time:56722ms step_avg:95.01ms +step:598/1695 train_time:56818ms step_avg:95.01ms +step:599/1695 train_time:56914ms step_avg:95.02ms +step:600/1695 train_time:57011ms step_avg:95.02ms +step:601/1695 train_time:57108ms step_avg:95.02ms +step:602/1695 train_time:57205ms step_avg:95.03ms +step:603/1695 train_time:57302ms step_avg:95.03ms +step:604/1695 train_time:57398ms step_avg:95.03ms +step:605/1695 train_time:57496ms step_avg:95.03ms +step:606/1695 train_time:57592ms step_avg:95.04ms +step:607/1695 train_time:57690ms step_avg:95.04ms +step:608/1695 train_time:57787ms step_avg:95.04ms +step:609/1695 train_time:57884ms step_avg:95.05ms +step:610/1695 train_time:57980ms step_avg:95.05ms +step:611/1695 train_time:58076ms step_avg:95.05ms +step:612/1695 train_time:58173ms step_avg:95.05ms +step:613/1695 train_time:58270ms step_avg:95.06ms +step:614/1695 train_time:58367ms step_avg:95.06ms +step:615/1695 train_time:58463ms step_avg:95.06ms +step:616/1695 train_time:58559ms step_avg:95.06ms +step:617/1695 train_time:58656ms step_avg:95.07ms +step:618/1695 train_time:58752ms step_avg:95.07ms +step:619/1695 train_time:58849ms step_avg:95.07ms +step:620/1695 train_time:58946ms step_avg:95.07ms +step:621/1695 train_time:59042ms step_avg:95.08ms +step:622/1695 train_time:59138ms step_avg:95.08ms +step:623/1695 train_time:59234ms step_avg:95.08ms +step:624/1695 train_time:59331ms step_avg:95.08ms +step:625/1695 train_time:59429ms step_avg:95.09ms +step:625/1695 val_loss:3.6468 train_time:59523ms step_avg:95.24ms +step:626/1695 train_time:59550ms step_avg:95.13ms +step:627/1695 train_time:59629ms step_avg:95.10ms +step:628/1695 train_time:59729ms step_avg:95.11ms +step:629/1695 train_time:60148ms step_avg:95.62ms +step:630/1695 train_time:60244ms step_avg:95.62ms +step:631/1695 train_time:60341ms step_avg:95.63ms 
+step:632/1695 train_time:60438ms step_avg:95.63ms +step:633/1695 train_time:60535ms step_avg:95.63ms +step:634/1695 train_time:60631ms step_avg:95.63ms +step:635/1695 train_time:60966ms step_avg:96.01ms +step:636/1695 train_time:61063ms step_avg:96.01ms +step:637/1695 train_time:61161ms step_avg:96.01ms +step:638/1695 train_time:61258ms step_avg:96.02ms +step:639/1695 train_time:61355ms step_avg:96.02ms +step:640/1695 train_time:61452ms step_avg:96.02ms +step:641/1695 train_time:61549ms step_avg:96.02ms +step:642/1695 train_time:61647ms step_avg:96.02ms +step:643/1695 train_time:61745ms step_avg:96.03ms +step:644/1695 train_time:61846ms step_avg:96.03ms +step:645/1695 train_time:61947ms step_avg:96.04ms +step:646/1695 train_time:62047ms step_avg:96.05ms +step:647/1695 train_time:62146ms step_avg:96.05ms +step:648/1695 train_time:62245ms step_avg:96.06ms +step:649/1695 train_time:62343ms step_avg:96.06ms +step:650/1695 train_time:62441ms step_avg:96.06ms +step:651/1695 train_time:62539ms step_avg:96.07ms +step:652/1695 train_time:62636ms step_avg:96.07ms +step:653/1695 train_time:62734ms step_avg:96.07ms +step:654/1695 train_time:62832ms step_avg:96.07ms +step:655/1695 train_time:62930ms step_avg:96.08ms +step:656/1695 train_time:63030ms step_avg:96.08ms +step:657/1695 train_time:63129ms step_avg:96.09ms +step:658/1695 train_time:63227ms step_avg:96.09ms +step:659/1695 train_time:63326ms step_avg:96.09ms +step:660/1695 train_time:63424ms step_avg:96.10ms +step:661/1695 train_time:63522ms step_avg:96.10ms +step:662/1695 train_time:63620ms step_avg:96.10ms +step:663/1695 train_time:63718ms step_avg:96.11ms +step:664/1695 train_time:63817ms step_avg:96.11ms +step:665/1695 train_time:63914ms step_avg:96.11ms +step:666/1695 train_time:64012ms step_avg:96.11ms +step:667/1695 train_time:64109ms step_avg:96.12ms +step:668/1695 train_time:64207ms step_avg:96.12ms +step:669/1695 train_time:64305ms step_avg:96.12ms +step:670/1695 train_time:64403ms step_avg:96.12ms +step:671/1695 train_time:64501ms step_avg:96.13ms +step:672/1695 train_time:64600ms step_avg:96.13ms +step:673/1695 train_time:64696ms step_avg:96.13ms +step:674/1695 train_time:64794ms step_avg:96.13ms +step:675/1695 train_time:64893ms step_avg:96.14ms +step:676/1695 train_time:64991ms step_avg:96.14ms +step:677/1695 train_time:65089ms step_avg:96.14ms +step:678/1695 train_time:65187ms step_avg:96.15ms +step:679/1695 train_time:65285ms step_avg:96.15ms +step:680/1695 train_time:65384ms step_avg:96.15ms +step:681/1695 train_time:65481ms step_avg:96.15ms +step:682/1695 train_time:65580ms step_avg:96.16ms +step:683/1695 train_time:65678ms step_avg:96.16ms +step:684/1695 train_time:65777ms step_avg:96.17ms +step:685/1695 train_time:65875ms step_avg:96.17ms +step:686/1695 train_time:65973ms step_avg:96.17ms +step:687/1695 train_time:66071ms step_avg:96.17ms +step:688/1695 train_time:66169ms step_avg:96.18ms +step:689/1695 train_time:66267ms step_avg:96.18ms +step:690/1695 train_time:66365ms step_avg:96.18ms +step:691/1695 train_time:66463ms step_avg:96.18ms +step:692/1695 train_time:66561ms step_avg:96.19ms +step:693/1695 train_time:66659ms step_avg:96.19ms +step:694/1695 train_time:66759ms step_avg:96.19ms +step:695/1695 train_time:66857ms step_avg:96.20ms +step:696/1695 train_time:66955ms step_avg:96.20ms +step:697/1695 train_time:67053ms step_avg:96.20ms +step:698/1695 train_time:67150ms step_avg:96.20ms +step:699/1695 train_time:67248ms step_avg:96.21ms +step:700/1695 train_time:67346ms step_avg:96.21ms +step:701/1695 train_time:67444ms 
step_avg:96.21ms +step:702/1695 train_time:67541ms step_avg:96.21ms +step:703/1695 train_time:67639ms step_avg:96.22ms +step:704/1695 train_time:67738ms step_avg:96.22ms +step:705/1695 train_time:67836ms step_avg:96.22ms +step:706/1695 train_time:67934ms step_avg:96.22ms +step:707/1695 train_time:68032ms step_avg:96.23ms +step:708/1695 train_time:68130ms step_avg:96.23ms +step:709/1695 train_time:68228ms step_avg:96.23ms +step:710/1695 train_time:68326ms step_avg:96.23ms +step:711/1695 train_time:68424ms step_avg:96.24ms +step:712/1695 train_time:68522ms step_avg:96.24ms +step:713/1695 train_time:68620ms step_avg:96.24ms +step:714/1695 train_time:68720ms step_avg:96.25ms +step:715/1695 train_time:68818ms step_avg:96.25ms +step:716/1695 train_time:68916ms step_avg:96.25ms +step:717/1695 train_time:69015ms step_avg:96.25ms +step:718/1695 train_time:69113ms step_avg:96.26ms +step:719/1695 train_time:69210ms step_avg:96.26ms +step:720/1695 train_time:69307ms step_avg:96.26ms +step:721/1695 train_time:69405ms step_avg:96.26ms +step:722/1695 train_time:69824ms step_avg:96.71ms +step:723/1695 train_time:69919ms step_avg:96.71ms +step:724/1695 train_time:70016ms step_avg:96.71ms +step:725/1695 train_time:70113ms step_avg:96.71ms +step:726/1695 train_time:70211ms step_avg:96.71ms +step:727/1695 train_time:70308ms step_avg:96.71ms +step:728/1695 train_time:70405ms step_avg:96.71ms +step:729/1695 train_time:70503ms step_avg:96.71ms +step:730/1695 train_time:70600ms step_avg:96.71ms +step:731/1695 train_time:70697ms step_avg:96.71ms +step:732/1695 train_time:70798ms step_avg:96.72ms +step:733/1695 train_time:70897ms step_avg:96.72ms +step:734/1695 train_time:70995ms step_avg:96.72ms +step:735/1695 train_time:71092ms step_avg:96.72ms +step:736/1695 train_time:71190ms step_avg:96.73ms +step:737/1695 train_time:71287ms step_avg:96.73ms +step:738/1695 train_time:71384ms step_avg:96.73ms +step:739/1695 train_time:71482ms step_avg:96.73ms +step:740/1695 train_time:71579ms step_avg:96.73ms +step:741/1695 train_time:71676ms step_avg:96.73ms +step:742/1695 train_time:71776ms step_avg:96.73ms +step:743/1695 train_time:71874ms step_avg:96.74ms +step:744/1695 train_time:71972ms step_avg:96.74ms +step:745/1695 train_time:72069ms step_avg:96.74ms +step:746/1695 train_time:72168ms step_avg:96.74ms +step:747/1695 train_time:72265ms step_avg:96.74ms +step:748/1695 train_time:72362ms step_avg:96.74ms +step:749/1695 train_time:72459ms step_avg:96.74ms +step:750/1695 train_time:72557ms step_avg:96.74ms +step:750/1695 val_loss:3.5854 train_time:72652ms step_avg:96.87ms +step:751/1695 train_time:72679ms step_avg:96.78ms +step:752/1695 train_time:72766ms step_avg:96.76ms +step:753/1695 train_time:72868ms step_avg:96.77ms +step:754/1695 train_time:72967ms step_avg:96.77ms +step:755/1695 train_time:73063ms step_avg:96.77ms +step:756/1695 train_time:73160ms step_avg:96.77ms +step:757/1695 train_time:73258ms step_avg:96.77ms +step:758/1695 train_time:73356ms step_avg:96.78ms +step:759/1695 train_time:73454ms step_avg:96.78ms +step:760/1695 train_time:73552ms step_avg:96.78ms +step:761/1695 train_time:73651ms step_avg:96.78ms +step:762/1695 train_time:73751ms step_avg:96.79ms +step:763/1695 train_time:73853ms step_avg:96.79ms +step:764/1695 train_time:73952ms step_avg:96.80ms +step:765/1695 train_time:74052ms step_avg:96.80ms +step:766/1695 train_time:74150ms step_avg:96.80ms +step:767/1695 train_time:74248ms step_avg:96.80ms +step:768/1695 train_time:74347ms step_avg:96.81ms +step:769/1695 train_time:74445ms step_avg:96.81ms 
+step:770/1695 train_time:74542ms step_avg:96.81ms +step:771/1695 train_time:74639ms step_avg:96.81ms +step:772/1695 train_time:74962ms step_avg:97.10ms +step:773/1695 train_time:75058ms step_avg:97.10ms +step:774/1695 train_time:75156ms step_avg:97.10ms +step:775/1695 train_time:75254ms step_avg:97.10ms +step:776/1695 train_time:75351ms step_avg:97.10ms +step:777/1695 train_time:75679ms step_avg:97.40ms +step:778/1695 train_time:75776ms step_avg:97.40ms +step:779/1695 train_time:75873ms step_avg:97.40ms +step:780/1695 train_time:75970ms step_avg:97.40ms +step:781/1695 train_time:76068ms step_avg:97.40ms +step:782/1695 train_time:76166ms step_avg:97.40ms +step:783/1695 train_time:76263ms step_avg:97.40ms +step:784/1695 train_time:76360ms step_avg:97.40ms +step:785/1695 train_time:76458ms step_avg:97.40ms +step:786/1695 train_time:76557ms step_avg:97.40ms +step:787/1695 train_time:76660ms step_avg:97.41ms +step:788/1695 train_time:76760ms step_avg:97.41ms +step:789/1695 train_time:76858ms step_avg:97.41ms +step:790/1695 train_time:76957ms step_avg:97.41ms +step:791/1695 train_time:77057ms step_avg:97.42ms +step:792/1695 train_time:77156ms step_avg:97.42ms +step:793/1695 train_time:77254ms step_avg:97.42ms +step:794/1695 train_time:77352ms step_avg:97.42ms +step:795/1695 train_time:77746ms step_avg:97.79ms +step:796/1695 train_time:77794ms step_avg:97.73ms +step:797/1695 train_time:77891ms step_avg:97.73ms +step:798/1695 train_time:77988ms step_avg:97.73ms +step:799/1695 train_time:78084ms step_avg:97.73ms +step:800/1695 train_time:78181ms step_avg:97.73ms +step:801/1695 train_time:78278ms step_avg:97.73ms +step:802/1695 train_time:78377ms step_avg:97.73ms +step:803/1695 train_time:78474ms step_avg:97.73ms +step:804/1695 train_time:78572ms step_avg:97.73ms +step:805/1695 train_time:78670ms step_avg:97.73ms +step:806/1695 train_time:78771ms step_avg:97.73ms +step:807/1695 train_time:78871ms step_avg:97.73ms +step:808/1695 train_time:78970ms step_avg:97.74ms +step:809/1695 train_time:79068ms step_avg:97.74ms +step:810/1695 train_time:79166ms step_avg:97.74ms +step:811/1695 train_time:79263ms step_avg:97.74ms +step:812/1695 train_time:79361ms step_avg:97.73ms +step:813/1695 train_time:79458ms step_avg:97.73ms +step:814/1695 train_time:79556ms step_avg:97.73ms +step:815/1695 train_time:79654ms step_avg:97.73ms +step:816/1695 train_time:79753ms step_avg:97.74ms +step:817/1695 train_time:79852ms step_avg:97.74ms +step:818/1695 train_time:79951ms step_avg:97.74ms +step:819/1695 train_time:80051ms step_avg:97.74ms +step:820/1695 train_time:80149ms step_avg:97.74ms +step:821/1695 train_time:80248ms step_avg:97.74ms +step:822/1695 train_time:80347ms step_avg:97.75ms +step:823/1695 train_time:80445ms step_avg:97.75ms +step:824/1695 train_time:80543ms step_avg:97.75ms +step:825/1695 train_time:80640ms step_avg:97.75ms +step:826/1695 train_time:80738ms step_avg:97.75ms +step:827/1695 train_time:80837ms step_avg:97.75ms +step:828/1695 train_time:80935ms step_avg:97.75ms +step:829/1695 train_time:81033ms step_avg:97.75ms +step:830/1695 train_time:81132ms step_avg:97.75ms +step:831/1695 train_time:81231ms step_avg:97.75ms +step:832/1695 train_time:81329ms step_avg:97.75ms +step:833/1695 train_time:81427ms step_avg:97.75ms +step:834/1695 train_time:81526ms step_avg:97.75ms +step:835/1695 train_time:81624ms step_avg:97.75ms +step:836/1695 train_time:81723ms step_avg:97.75ms +step:837/1695 train_time:81821ms step_avg:97.75ms +step:838/1695 train_time:81919ms step_avg:97.76ms +step:839/1695 train_time:82017ms 
step_avg:97.76ms +step:840/1695 train_time:82116ms step_avg:97.76ms +step:841/1695 train_time:82214ms step_avg:97.76ms +step:842/1695 train_time:82313ms step_avg:97.76ms +step:843/1695 train_time:82412ms step_avg:97.76ms +step:844/1695 train_time:82512ms step_avg:97.76ms +step:845/1695 train_time:82612ms step_avg:97.77ms +step:846/1695 train_time:82712ms step_avg:97.77ms +step:847/1695 train_time:82811ms step_avg:97.77ms +step:848/1695 train_time:82910ms step_avg:97.77ms +step:849/1695 train_time:83009ms step_avg:97.77ms +step:850/1695 train_time:83109ms step_avg:97.77ms +step:851/1695 train_time:83207ms step_avg:97.78ms +step:852/1695 train_time:83304ms step_avg:97.77ms +step:853/1695 train_time:83402ms step_avg:97.77ms +step:854/1695 train_time:83499ms step_avg:97.77ms +step:855/1695 train_time:83598ms step_avg:97.78ms +step:856/1695 train_time:83697ms step_avg:97.78ms +step:857/1695 train_time:83796ms step_avg:97.78ms +step:858/1695 train_time:83894ms step_avg:97.78ms +step:859/1695 train_time:83993ms step_avg:97.78ms +step:860/1695 train_time:84092ms step_avg:97.78ms +step:861/1695 train_time:84190ms step_avg:97.78ms +step:862/1695 train_time:84288ms step_avg:97.78ms +step:863/1695 train_time:84386ms step_avg:97.78ms +step:864/1695 train_time:84486ms step_avg:97.78ms +step:865/1695 train_time:84584ms step_avg:97.78ms +step:866/1695 train_time:84682ms step_avg:97.78ms +step:867/1695 train_time:84780ms step_avg:97.79ms +step:868/1695 train_time:84878ms step_avg:97.79ms +step:869/1695 train_time:84977ms step_avg:97.79ms +step:870/1695 train_time:85076ms step_avg:97.79ms +step:871/1695 train_time:85174ms step_avg:97.79ms +step:872/1695 train_time:85273ms step_avg:97.79ms +step:873/1695 train_time:85371ms step_avg:97.79ms +step:874/1695 train_time:85470ms step_avg:97.79ms +step:875/1695 train_time:85568ms step_avg:97.79ms +step:875/1695 val_loss:3.5355 train_time:85664ms step_avg:97.90ms +step:876/1695 train_time:85691ms step_avg:97.82ms +step:877/1695 train_time:85779ms step_avg:97.81ms +step:878/1695 train_time:85879ms step_avg:97.81ms +step:879/1695 train_time:86210ms step_avg:98.08ms +step:880/1695 train_time:86304ms step_avg:98.07ms +step:881/1695 train_time:86402ms step_avg:98.07ms +step:882/1695 train_time:86501ms step_avg:98.07ms +step:883/1695 train_time:86600ms step_avg:98.08ms +step:884/1695 train_time:86699ms step_avg:98.08ms +step:885/1695 train_time:86798ms step_avg:98.08ms +step:886/1695 train_time:86897ms step_avg:98.08ms +step:887/1695 train_time:86995ms step_avg:98.08ms +step:888/1695 train_time:87100ms step_avg:98.09ms +step:889/1695 train_time:87203ms step_avg:98.09ms +step:890/1695 train_time:87302ms step_avg:98.09ms +step:891/1695 train_time:87401ms step_avg:98.09ms +step:892/1695 train_time:87500ms step_avg:98.09ms +step:893/1695 train_time:87599ms step_avg:98.10ms +step:894/1695 train_time:87698ms step_avg:98.10ms +step:895/1695 train_time:87797ms step_avg:98.10ms +step:896/1695 train_time:87896ms step_avg:98.10ms +step:897/1695 train_time:87996ms step_avg:98.10ms +step:898/1695 train_time:88099ms step_avg:98.11ms +step:899/1695 train_time:88200ms step_avg:98.11ms +step:900/1695 train_time:88300ms step_avg:98.11ms +step:901/1695 train_time:88400ms step_avg:98.11ms +step:902/1695 train_time:88499ms step_avg:98.11ms +step:903/1695 train_time:88598ms step_avg:98.12ms +step:904/1695 train_time:88697ms step_avg:98.12ms +step:905/1695 train_time:88797ms step_avg:98.12ms +step:906/1695 train_time:88896ms step_avg:98.12ms +step:907/1695 train_time:88996ms step_avg:98.12ms 
+step:908/1695 train_time:89097ms step_avg:98.12ms +step:909/1695 train_time:89200ms step_avg:98.13ms +step:910/1695 train_time:89299ms step_avg:98.13ms +step:911/1695 train_time:89399ms step_avg:98.13ms +step:912/1695 train_time:89499ms step_avg:98.13ms +step:913/1695 train_time:89599ms step_avg:98.14ms +step:914/1695 train_time:89698ms step_avg:98.14ms +step:915/1695 train_time:89797ms step_avg:98.14ms +step:916/1695 train_time:89897ms step_avg:98.14ms +step:917/1695 train_time:89997ms step_avg:98.14ms +step:918/1695 train_time:90099ms step_avg:98.15ms +step:919/1695 train_time:90199ms step_avg:98.15ms +step:920/1695 train_time:90299ms step_avg:98.15ms +step:921/1695 train_time:90399ms step_avg:98.15ms +step:922/1695 train_time:90499ms step_avg:98.15ms +step:923/1695 train_time:90599ms step_avg:98.16ms +step:924/1695 train_time:90698ms step_avg:98.16ms +step:925/1695 train_time:90798ms step_avg:98.16ms +step:926/1695 train_time:90898ms step_avg:98.16ms +step:927/1695 train_time:90997ms step_avg:98.16ms +step:928/1695 train_time:91098ms step_avg:98.17ms +step:929/1695 train_time:91198ms step_avg:98.17ms +step:930/1695 train_time:91298ms step_avg:98.17ms +step:931/1695 train_time:91398ms step_avg:98.17ms +step:932/1695 train_time:91498ms step_avg:98.17ms +step:933/1695 train_time:91597ms step_avg:98.17ms +step:934/1695 train_time:91697ms step_avg:98.18ms +step:935/1695 train_time:91797ms step_avg:98.18ms +step:936/1695 train_time:91898ms step_avg:98.18ms +step:937/1695 train_time:91997ms step_avg:98.18ms +step:938/1695 train_time:92098ms step_avg:98.19ms +step:939/1695 train_time:92199ms step_avg:98.19ms +step:940/1695 train_time:92299ms step_avg:98.19ms +step:941/1695 train_time:92399ms step_avg:98.19ms +step:942/1695 train_time:92499ms step_avg:98.19ms +step:943/1695 train_time:92599ms step_avg:98.20ms +step:944/1695 train_time:92698ms step_avg:98.20ms +step:945/1695 train_time:92799ms step_avg:98.20ms +step:946/1695 train_time:92899ms step_avg:98.20ms +step:947/1695 train_time:92998ms step_avg:98.20ms +step:948/1695 train_time:93098ms step_avg:98.20ms +step:949/1695 train_time:93198ms step_avg:98.21ms +step:950/1695 train_time:93298ms step_avg:98.21ms +step:951/1695 train_time:93397ms step_avg:98.21ms +step:952/1695 train_time:93498ms step_avg:98.21ms +step:953/1695 train_time:93598ms step_avg:98.21ms +step:954/1695 train_time:93698ms step_avg:98.22ms +step:955/1695 train_time:93797ms step_avg:98.22ms +step:956/1695 train_time:93898ms step_avg:98.22ms +step:957/1695 train_time:93997ms step_avg:98.22ms +step:958/1695 train_time:94097ms step_avg:98.22ms +step:959/1695 train_time:94197ms step_avg:98.22ms +step:960/1695 train_time:94297ms step_avg:98.23ms +step:961/1695 train_time:94396ms step_avg:98.23ms +step:962/1695 train_time:94495ms step_avg:98.23ms +step:963/1695 train_time:94595ms step_avg:98.23ms +step:964/1695 train_time:94695ms step_avg:98.23ms +step:965/1695 train_time:94795ms step_avg:98.23ms +step:966/1695 train_time:94895ms step_avg:98.23ms +step:967/1695 train_time:94995ms step_avg:98.24ms +step:968/1695 train_time:95094ms step_avg:98.24ms +step:969/1695 train_time:95195ms step_avg:98.24ms +step:970/1695 train_time:95295ms step_avg:98.24ms +step:971/1695 train_time:95394ms step_avg:98.24ms +step:972/1695 train_time:95495ms step_avg:98.25ms +step:973/1695 train_time:95595ms step_avg:98.25ms +step:974/1695 train_time:95695ms step_avg:98.25ms +step:975/1695 train_time:95795ms step_avg:98.25ms +step:976/1695 train_time:95895ms step_avg:98.25ms +step:977/1695 train_time:95995ms 
step_avg:98.26ms +step:978/1695 train_time:96094ms step_avg:98.26ms +step:979/1695 train_time:96195ms step_avg:98.26ms +step:980/1695 train_time:96295ms step_avg:98.26ms +step:981/1695 train_time:96395ms step_avg:98.26ms +step:982/1695 train_time:96496ms step_avg:98.26ms +step:983/1695 train_time:96596ms step_avg:98.27ms +step:984/1695 train_time:96696ms step_avg:98.27ms +step:985/1695 train_time:96796ms step_avg:98.27ms +step:986/1695 train_time:96897ms step_avg:98.27ms +step:987/1695 train_time:96998ms step_avg:98.28ms +step:988/1695 train_time:97098ms step_avg:98.28ms +step:989/1695 train_time:97198ms step_avg:98.28ms +step:990/1695 train_time:97297ms step_avg:98.28ms +step:991/1695 train_time:97399ms step_avg:98.28ms +step:992/1695 train_time:97499ms step_avg:98.28ms +step:993/1695 train_time:97599ms step_avg:98.29ms +step:994/1695 train_time:97699ms step_avg:98.29ms +step:995/1695 train_time:97798ms step_avg:98.29ms +step:996/1695 train_time:97898ms step_avg:98.29ms +step:997/1695 train_time:97998ms step_avg:98.29ms +step:998/1695 train_time:98097ms step_avg:98.29ms +step:999/1695 train_time:98198ms step_avg:98.30ms +step:1000/1695 train_time:98297ms step_avg:98.30ms +step:1000/1695 val_loss:3.4893 train_time:98396ms step_avg:98.40ms +step:1001/1695 train_time:98423ms step_avg:98.32ms +step:1002/1695 train_time:98510ms step_avg:98.31ms +step:1003/1695 train_time:98610ms step_avg:98.31ms +step:1004/1695 train_time:98710ms step_avg:98.32ms +step:1005/1695 train_time:98810ms step_avg:98.32ms +step:1006/1695 train_time:98910ms step_avg:98.32ms +step:1007/1695 train_time:99009ms step_avg:98.32ms +step:1008/1695 train_time:99108ms step_avg:98.32ms +step:1009/1695 train_time:99208ms step_avg:98.32ms +step:1010/1695 train_time:99307ms step_avg:98.32ms +step:1011/1695 train_time:99410ms step_avg:98.33ms +step:1012/1695 train_time:99513ms step_avg:98.33ms +step:1013/1695 train_time:99614ms step_avg:98.34ms +step:1014/1695 train_time:99714ms step_avg:98.34ms +step:1015/1695 train_time:99813ms step_avg:98.34ms +step:1016/1695 train_time:99913ms step_avg:98.34ms +step:1017/1695 train_time:100012ms step_avg:98.34ms +step:1018/1695 train_time:100112ms step_avg:98.34ms +step:1019/1695 train_time:100211ms step_avg:98.34ms +step:1020/1695 train_time:100312ms step_avg:98.35ms +step:1021/1695 train_time:100414ms step_avg:98.35ms +step:1022/1695 train_time:100514ms step_avg:98.35ms +step:1023/1695 train_time:100615ms step_avg:98.35ms +step:1024/1695 train_time:100717ms step_avg:98.36ms +step:1025/1695 train_time:100816ms step_avg:98.36ms +step:1026/1695 train_time:100916ms step_avg:98.36ms +step:1027/1695 train_time:101015ms step_avg:98.36ms +step:1028/1695 train_time:101114ms step_avg:98.36ms +step:1029/1695 train_time:101215ms step_avg:98.36ms +step:1030/1695 train_time:101314ms step_avg:98.36ms +step:1031/1695 train_time:101414ms step_avg:98.37ms +step:1032/1695 train_time:101514ms step_avg:98.37ms +step:1033/1695 train_time:101614ms step_avg:98.37ms +step:1034/1695 train_time:101714ms step_avg:98.37ms +step:1035/1695 train_time:101815ms step_avg:98.37ms +step:1036/1695 train_time:101914ms step_avg:98.37ms +step:1037/1695 train_time:102015ms step_avg:98.38ms +step:1038/1695 train_time:102114ms step_avg:98.38ms +step:1039/1695 train_time:102213ms step_avg:98.38ms +step:1040/1695 train_time:102312ms step_avg:98.38ms +step:1041/1695 train_time:102413ms step_avg:98.38ms +step:1042/1695 train_time:102512ms step_avg:98.38ms +step:1043/1695 train_time:102613ms step_avg:98.38ms +step:1044/1695 
train_time:102713ms step_avg:98.38ms +step:1045/1695 train_time:102813ms step_avg:98.39ms +step:1046/1695 train_time:102913ms step_avg:98.39ms +step:1047/1695 train_time:103013ms step_avg:98.39ms +step:1048/1695 train_time:103113ms step_avg:98.39ms +step:1049/1695 train_time:103212ms step_avg:98.39ms +step:1050/1695 train_time:103312ms step_avg:98.39ms +step:1051/1695 train_time:103413ms step_avg:98.39ms +step:1052/1695 train_time:103513ms step_avg:98.40ms +step:1053/1695 train_time:103613ms step_avg:98.40ms +step:1054/1695 train_time:103713ms step_avg:98.40ms +step:1055/1695 train_time:103813ms step_avg:98.40ms +step:1056/1695 train_time:103913ms step_avg:98.40ms +step:1057/1695 train_time:104013ms step_avg:98.40ms +step:1058/1695 train_time:104113ms step_avg:98.41ms +step:1059/1695 train_time:104213ms step_avg:98.41ms +step:1060/1695 train_time:104312ms step_avg:98.41ms +step:1061/1695 train_time:104411ms step_avg:98.41ms +step:1062/1695 train_time:104511ms step_avg:98.41ms +step:1063/1695 train_time:104612ms step_avg:98.41ms +step:1064/1695 train_time:104713ms step_avg:98.41ms +step:1065/1695 train_time:104813ms step_avg:98.42ms +step:1066/1695 train_time:104913ms step_avg:98.42ms +step:1067/1695 train_time:105013ms step_avg:98.42ms +step:1068/1695 train_time:105113ms step_avg:98.42ms +step:1069/1695 train_time:105212ms step_avg:98.42ms +step:1070/1695 train_time:105312ms step_avg:98.42ms +step:1071/1695 train_time:105412ms step_avg:98.42ms +step:1072/1695 train_time:105512ms step_avg:98.43ms +step:1073/1695 train_time:105613ms step_avg:98.43ms +step:1074/1695 train_time:105713ms step_avg:98.43ms +step:1075/1695 train_time:105813ms step_avg:98.43ms +step:1076/1695 train_time:105912ms step_avg:98.43ms +step:1077/1695 train_time:106014ms step_avg:98.43ms +step:1078/1695 train_time:106113ms step_avg:98.44ms +step:1079/1695 train_time:106213ms step_avg:98.44ms +step:1080/1695 train_time:106312ms step_avg:98.44ms +step:1081/1695 train_time:106412ms step_avg:98.44ms +step:1082/1695 train_time:106512ms step_avg:98.44ms +step:1083/1695 train_time:106612ms step_avg:98.44ms +step:1084/1695 train_time:106712ms step_avg:98.44ms +step:1085/1695 train_time:106813ms step_avg:98.44ms +step:1086/1695 train_time:106913ms step_avg:98.45ms +step:1087/1695 train_time:107013ms step_avg:98.45ms +step:1088/1695 train_time:107113ms step_avg:98.45ms +step:1089/1695 train_time:107213ms step_avg:98.45ms +step:1090/1695 train_time:107313ms step_avg:98.45ms +step:1091/1695 train_time:107413ms step_avg:98.45ms +step:1092/1695 train_time:107513ms step_avg:98.45ms +step:1093/1695 train_time:107613ms step_avg:98.46ms +step:1094/1695 train_time:107713ms step_avg:98.46ms +step:1095/1695 train_time:107813ms step_avg:98.46ms +step:1096/1695 train_time:107914ms step_avg:98.46ms +step:1097/1695 train_time:108013ms step_avg:98.46ms +step:1098/1695 train_time:108113ms step_avg:98.46ms +step:1099/1695 train_time:108212ms step_avg:98.46ms +step:1100/1695 train_time:108313ms step_avg:98.47ms +step:1101/1695 train_time:108412ms step_avg:98.47ms +step:1102/1695 train_time:108512ms step_avg:98.47ms +step:1103/1695 train_time:108612ms step_avg:98.47ms +step:1104/1695 train_time:108712ms step_avg:98.47ms +step:1105/1695 train_time:108812ms step_avg:98.47ms +step:1106/1695 train_time:108913ms step_avg:98.48ms +step:1107/1695 train_time:109013ms step_avg:98.48ms +step:1108/1695 train_time:109113ms step_avg:98.48ms +step:1109/1695 train_time:109213ms step_avg:98.48ms +step:1110/1695 train_time:109313ms step_avg:98.48ms +step:1111/1695 
train_time:109413ms step_avg:98.48ms +step:1112/1695 train_time:109513ms step_avg:98.48ms +step:1113/1695 train_time:109613ms step_avg:98.48ms +step:1114/1695 train_time:109713ms step_avg:98.49ms +step:1115/1695 train_time:109813ms step_avg:98.49ms +step:1116/1695 train_time:109913ms step_avg:98.49ms +step:1117/1695 train_time:110012ms step_avg:98.49ms +step:1118/1695 train_time:110111ms step_avg:98.49ms +step:1119/1695 train_time:110211ms step_avg:98.49ms +step:1120/1695 train_time:110313ms step_avg:98.49ms +step:1121/1695 train_time:110413ms step_avg:98.50ms +step:1122/1695 train_time:110513ms step_avg:98.50ms +step:1123/1695 train_time:110613ms step_avg:98.50ms +step:1124/1695 train_time:110713ms step_avg:98.50ms +step:1125/1695 train_time:110813ms step_avg:98.50ms +step:1125/1695 val_loss:3.4387 train_time:110911ms step_avg:98.59ms +step:1126/1695 train_time:110938ms step_avg:98.52ms +step:1127/1695 train_time:111023ms step_avg:98.51ms +step:1128/1695 train_time:111124ms step_avg:98.51ms +step:1129/1695 train_time:111226ms step_avg:98.52ms +step:1130/1695 train_time:111327ms step_avg:98.52ms +step:1131/1695 train_time:111427ms step_avg:98.52ms +step:1132/1695 train_time:111526ms step_avg:98.52ms +step:1133/1695 train_time:111626ms step_avg:98.52ms +step:1134/1695 train_time:111726ms step_avg:98.52ms +step:1135/1695 train_time:111825ms step_avg:98.52ms +step:1136/1695 train_time:111927ms step_avg:98.53ms +step:1137/1695 train_time:112031ms step_avg:98.53ms +step:1138/1695 train_time:112132ms step_avg:98.53ms +step:1139/1695 train_time:112233ms step_avg:98.54ms +step:1140/1695 train_time:112333ms step_avg:98.54ms +step:1141/1695 train_time:112433ms step_avg:98.54ms +step:1142/1695 train_time:112533ms step_avg:98.54ms +step:1143/1695 train_time:112634ms step_avg:98.54ms +step:1144/1695 train_time:112734ms step_avg:98.54ms +step:1145/1695 train_time:112837ms step_avg:98.55ms +step:1146/1695 train_time:112939ms step_avg:98.55ms +step:1147/1695 train_time:113041ms step_avg:98.55ms +step:1148/1695 train_time:113142ms step_avg:98.56ms +step:1149/1695 train_time:113244ms step_avg:98.56ms +step:1150/1695 train_time:113346ms step_avg:98.56ms +step:1151/1695 train_time:113446ms step_avg:98.56ms +step:1152/1695 train_time:113547ms step_avg:98.57ms +step:1153/1695 train_time:113648ms step_avg:98.57ms +step:1154/1695 train_time:113749ms step_avg:98.57ms +step:1155/1695 train_time:113850ms step_avg:98.57ms +step:1156/1695 train_time:113951ms step_avg:98.57ms +step:1157/1695 train_time:114054ms step_avg:98.58ms +step:1158/1695 train_time:114154ms step_avg:98.58ms +step:1159/1695 train_time:114255ms step_avg:98.58ms +step:1160/1695 train_time:114355ms step_avg:98.58ms +step:1161/1695 train_time:114455ms step_avg:98.58ms +step:1162/1695 train_time:114554ms step_avg:98.58ms +step:1163/1695 train_time:114657ms step_avg:98.59ms +step:1164/1695 train_time:114757ms step_avg:98.59ms +step:1165/1695 train_time:114859ms step_avg:98.59ms +step:1166/1695 train_time:114961ms step_avg:98.59ms +step:1167/1695 train_time:115063ms step_avg:98.60ms +step:1168/1695 train_time:115165ms step_avg:98.60ms +step:1169/1695 train_time:115266ms step_avg:98.60ms +step:1170/1695 train_time:115367ms step_avg:98.60ms +step:1171/1695 train_time:115467ms step_avg:98.61ms +step:1172/1695 train_time:115570ms step_avg:98.61ms +step:1173/1695 train_time:115670ms step_avg:98.61ms +step:1174/1695 train_time:115771ms step_avg:98.61ms +step:1175/1695 train_time:115871ms step_avg:98.61ms +step:1176/1695 train_time:115973ms step_avg:98.62ms 
+step:1177/1695 train_time:116073ms step_avg:98.62ms +step:1178/1695 train_time:116173ms step_avg:98.62ms +step:1179/1695 train_time:116276ms step_avg:98.62ms +step:1180/1695 train_time:116375ms step_avg:98.62ms +step:1181/1695 train_time:116476ms step_avg:98.63ms +step:1182/1695 train_time:116577ms step_avg:98.63ms +step:1183/1695 train_time:116677ms step_avg:98.63ms +step:1184/1695 train_time:116780ms step_avg:98.63ms +step:1185/1695 train_time:116882ms step_avg:98.63ms +step:1186/1695 train_time:116984ms step_avg:98.64ms +step:1187/1695 train_time:117085ms step_avg:98.64ms +step:1188/1695 train_time:117186ms step_avg:98.64ms +step:1189/1695 train_time:117286ms step_avg:98.64ms +step:1190/1695 train_time:117387ms step_avg:98.64ms +step:1191/1695 train_time:117490ms step_avg:98.65ms +step:1192/1695 train_time:117591ms step_avg:98.65ms +step:1193/1695 train_time:117692ms step_avg:98.65ms +step:1194/1695 train_time:117794ms step_avg:98.65ms +step:1195/1695 train_time:117894ms step_avg:98.66ms +step:1196/1695 train_time:117994ms step_avg:98.66ms +step:1197/1695 train_time:118095ms step_avg:98.66ms +step:1198/1695 train_time:118195ms step_avg:98.66ms +step:1199/1695 train_time:118296ms step_avg:98.66ms +step:1200/1695 train_time:118396ms step_avg:98.66ms +step:1201/1695 train_time:118497ms step_avg:98.67ms +step:1202/1695 train_time:118600ms step_avg:98.67ms +step:1203/1695 train_time:118702ms step_avg:98.67ms +step:1204/1695 train_time:118804ms step_avg:98.67ms +step:1205/1695 train_time:118904ms step_avg:98.68ms +step:1206/1695 train_time:119005ms step_avg:98.68ms +step:1207/1695 train_time:119107ms step_avg:98.68ms +step:1208/1695 train_time:119209ms step_avg:98.68ms +step:1209/1695 train_time:119309ms step_avg:98.68ms +step:1210/1695 train_time:119410ms step_avg:98.69ms +step:1211/1695 train_time:119512ms step_avg:98.69ms +step:1212/1695 train_time:119613ms step_avg:98.69ms +step:1213/1695 train_time:119714ms step_avg:98.69ms +step:1214/1695 train_time:119814ms step_avg:98.69ms +step:1215/1695 train_time:119914ms step_avg:98.69ms +step:1216/1695 train_time:120016ms step_avg:98.70ms +step:1217/1695 train_time:120117ms step_avg:98.70ms +step:1218/1695 train_time:120218ms step_avg:98.70ms +step:1219/1695 train_time:120320ms step_avg:98.70ms +step:1220/1695 train_time:120422ms step_avg:98.71ms +step:1221/1695 train_time:120523ms step_avg:98.71ms +step:1222/1695 train_time:120623ms step_avg:98.71ms +step:1223/1695 train_time:120727ms step_avg:98.71ms +step:1224/1695 train_time:120827ms step_avg:98.72ms +step:1225/1695 train_time:120928ms step_avg:98.72ms +step:1226/1695 train_time:121029ms step_avg:98.72ms +step:1227/1695 train_time:121130ms step_avg:98.72ms +step:1228/1695 train_time:121231ms step_avg:98.72ms +step:1229/1695 train_time:121332ms step_avg:98.72ms +step:1230/1695 train_time:121432ms step_avg:98.73ms +step:1231/1695 train_time:121533ms step_avg:98.73ms +step:1232/1695 train_time:121634ms step_avg:98.73ms +step:1233/1695 train_time:121734ms step_avg:98.73ms +step:1234/1695 train_time:121836ms step_avg:98.73ms +step:1235/1695 train_time:121936ms step_avg:98.73ms +step:1236/1695 train_time:122038ms step_avg:98.74ms +step:1237/1695 train_time:122140ms step_avg:98.74ms +step:1238/1695 train_time:122242ms step_avg:98.74ms +step:1239/1695 train_time:122345ms step_avg:98.75ms +step:1240/1695 train_time:122445ms step_avg:98.75ms +step:1241/1695 train_time:122546ms step_avg:98.75ms +step:1242/1695 train_time:122647ms step_avg:98.75ms +step:1243/1695 train_time:122748ms step_avg:98.75ms 
+step:1244/1695 train_time:122850ms step_avg:98.75ms +step:1245/1695 train_time:122951ms step_avg:98.76ms +step:1246/1695 train_time:123053ms step_avg:98.76ms +step:1247/1695 train_time:123154ms step_avg:98.76ms +step:1248/1695 train_time:123256ms step_avg:98.76ms +step:1249/1695 train_time:123356ms step_avg:98.76ms +step:1250/1695 train_time:123455ms step_avg:98.76ms +step:1250/1695 val_loss:3.3926 train_time:123554ms step_avg:98.84ms +step:1251/1695 train_time:123581ms step_avg:98.79ms +step:1252/1695 train_time:123668ms step_avg:98.78ms +step:1253/1695 train_time:123770ms step_avg:98.78ms +step:1254/1695 train_time:123872ms step_avg:98.78ms +step:1255/1695 train_time:123973ms step_avg:98.78ms +step:1256/1695 train_time:124073ms step_avg:98.78ms +step:1257/1695 train_time:124173ms step_avg:98.79ms +step:1258/1695 train_time:124273ms step_avg:98.79ms +step:1259/1695 train_time:124373ms step_avg:98.79ms +step:1260/1695 train_time:124473ms step_avg:98.79ms +step:1261/1695 train_time:124576ms step_avg:98.79ms +step:1262/1695 train_time:124678ms step_avg:98.79ms +step:1263/1695 train_time:124779ms step_avg:98.80ms +step:1264/1695 train_time:124880ms step_avg:98.80ms +step:1265/1695 train_time:124981ms step_avg:98.80ms +step:1266/1695 train_time:125081ms step_avg:98.80ms +step:1267/1695 train_time:125182ms step_avg:98.80ms +step:1268/1695 train_time:125282ms step_avg:98.80ms +step:1269/1695 train_time:125383ms step_avg:98.80ms +step:1270/1695 train_time:125485ms step_avg:98.81ms +step:1271/1695 train_time:125588ms step_avg:98.81ms +step:1272/1695 train_time:125689ms step_avg:98.81ms +step:1273/1695 train_time:125790ms step_avg:98.81ms +step:1274/1695 train_time:125891ms step_avg:98.82ms +step:1275/1695 train_time:125992ms step_avg:98.82ms +step:1276/1695 train_time:126094ms step_avg:98.82ms +step:1277/1695 train_time:126195ms step_avg:98.82ms +step:1278/1695 train_time:126295ms step_avg:98.82ms +step:1279/1695 train_time:126396ms step_avg:98.82ms +step:1280/1695 train_time:126496ms step_avg:98.83ms +step:1281/1695 train_time:126597ms step_avg:98.83ms +step:1282/1695 train_time:126697ms step_avg:98.83ms +step:1283/1695 train_time:126797ms step_avg:98.83ms +step:1284/1695 train_time:126898ms step_avg:98.83ms +step:1285/1695 train_time:126999ms step_avg:98.83ms +step:1286/1695 train_time:127100ms step_avg:98.83ms +step:1287/1695 train_time:127200ms step_avg:98.83ms +step:1288/1695 train_time:127302ms step_avg:98.84ms +step:1289/1695 train_time:127404ms step_avg:98.84ms +step:1290/1695 train_time:127504ms step_avg:98.84ms +step:1291/1695 train_time:127605ms step_avg:98.84ms +step:1292/1695 train_time:127707ms step_avg:98.84ms +step:1293/1695 train_time:127809ms step_avg:98.85ms +step:1294/1695 train_time:127912ms step_avg:98.85ms +step:1295/1695 train_time:128014ms step_avg:98.85ms +step:1296/1695 train_time:128114ms step_avg:98.85ms +step:1297/1695 train_time:128216ms step_avg:98.86ms +step:1298/1695 train_time:128317ms step_avg:98.86ms +step:1299/1695 train_time:128418ms step_avg:98.86ms +step:1300/1695 train_time:128518ms step_avg:98.86ms +step:1301/1695 train_time:128619ms step_avg:98.86ms +step:1302/1695 train_time:128720ms step_avg:98.86ms +step:1303/1695 train_time:128821ms step_avg:98.87ms +step:1304/1695 train_time:128923ms step_avg:98.87ms +step:1305/1695 train_time:129024ms step_avg:98.87ms +step:1306/1695 train_time:129127ms step_avg:98.87ms +step:1307/1695 train_time:129229ms step_avg:98.87ms +step:1308/1695 train_time:129330ms step_avg:98.88ms +step:1309/1695 train_time:129433ms 
step_avg:98.88ms +step:1310/1695 train_time:129535ms step_avg:98.88ms +step:1311/1695 train_time:129636ms step_avg:98.88ms +step:1312/1695 train_time:129736ms step_avg:98.88ms +step:1313/1695 train_time:129838ms step_avg:98.89ms +step:1314/1695 train_time:129938ms step_avg:98.89ms +step:1315/1695 train_time:130038ms step_avg:98.89ms +step:1316/1695 train_time:130139ms step_avg:98.89ms +step:1317/1695 train_time:130239ms step_avg:98.89ms +step:1318/1695 train_time:130339ms step_avg:98.89ms +step:1319/1695 train_time:130441ms step_avg:98.89ms +step:1320/1695 train_time:130543ms step_avg:98.90ms +step:1321/1695 train_time:130646ms step_avg:98.90ms +step:1322/1695 train_time:130749ms step_avg:98.90ms +step:1323/1695 train_time:130850ms step_avg:98.90ms +step:1324/1695 train_time:130952ms step_avg:98.91ms +step:1325/1695 train_time:131053ms step_avg:98.91ms +step:1326/1695 train_time:131154ms step_avg:98.91ms +step:1327/1695 train_time:131255ms step_avg:98.91ms +step:1328/1695 train_time:131355ms step_avg:98.91ms +step:1329/1695 train_time:131456ms step_avg:98.91ms +step:1330/1695 train_time:131556ms step_avg:98.91ms +step:1331/1695 train_time:131657ms step_avg:98.92ms +step:1332/1695 train_time:131758ms step_avg:98.92ms +step:1333/1695 train_time:131859ms step_avg:98.92ms +step:1334/1695 train_time:131960ms step_avg:98.92ms +step:1335/1695 train_time:132062ms step_avg:98.92ms +step:1336/1695 train_time:132163ms step_avg:98.92ms +step:1337/1695 train_time:132265ms step_avg:98.93ms +step:1338/1695 train_time:132366ms step_avg:98.93ms +step:1339/1695 train_time:132467ms step_avg:98.93ms +step:1340/1695 train_time:132569ms step_avg:98.93ms +step:1341/1695 train_time:132670ms step_avg:98.93ms +step:1342/1695 train_time:132770ms step_avg:98.93ms +step:1343/1695 train_time:132873ms step_avg:98.94ms +step:1344/1695 train_time:132974ms step_avg:98.94ms +step:1345/1695 train_time:133074ms step_avg:98.94ms +step:1346/1695 train_time:133176ms step_avg:98.94ms +step:1347/1695 train_time:133277ms step_avg:98.94ms +step:1348/1695 train_time:133377ms step_avg:98.94ms +step:1349/1695 train_time:133477ms step_avg:98.95ms +step:1350/1695 train_time:133578ms step_avg:98.95ms +step:1351/1695 train_time:133678ms step_avg:98.95ms +step:1352/1695 train_time:133778ms step_avg:98.95ms +step:1353/1695 train_time:133879ms step_avg:98.95ms +step:1354/1695 train_time:133980ms step_avg:98.95ms +step:1355/1695 train_time:134081ms step_avg:98.95ms +step:1356/1695 train_time:134183ms step_avg:98.95ms +step:1357/1695 train_time:134283ms step_avg:98.96ms +step:1358/1695 train_time:134384ms step_avg:98.96ms +step:1359/1695 train_time:134485ms step_avg:98.96ms +step:1360/1695 train_time:134586ms step_avg:98.96ms +step:1361/1695 train_time:134686ms step_avg:98.96ms +step:1362/1695 train_time:134787ms step_avg:98.96ms +step:1363/1695 train_time:134889ms step_avg:98.96ms +step:1364/1695 train_time:134992ms step_avg:98.97ms +step:1365/1695 train_time:135094ms step_avg:98.97ms +step:1366/1695 train_time:135194ms step_avg:98.97ms +step:1367/1695 train_time:135295ms step_avg:98.97ms +step:1368/1695 train_time:135397ms step_avg:98.97ms +step:1369/1695 train_time:135496ms step_avg:98.97ms +step:1370/1695 train_time:135596ms step_avg:98.98ms +step:1371/1695 train_time:135697ms step_avg:98.98ms +step:1372/1695 train_time:135798ms step_avg:98.98ms +step:1373/1695 train_time:135899ms step_avg:98.98ms +step:1374/1695 train_time:136000ms step_avg:98.98ms +step:1375/1695 train_time:136101ms step_avg:98.98ms +step:1375/1695 val_loss:3.3538 
train_time:136200ms step_avg:99.05ms +step:1376/1695 train_time:136228ms step_avg:99.00ms +step:1377/1695 train_time:136317ms step_avg:99.00ms +step:1378/1695 train_time:136418ms step_avg:99.00ms +step:1379/1695 train_time:136518ms step_avg:99.00ms +step:1380/1695 train_time:136620ms step_avg:99.00ms +step:1381/1695 train_time:136720ms step_avg:99.00ms +step:1382/1695 train_time:136820ms step_avg:99.00ms +step:1383/1695 train_time:136919ms step_avg:99.00ms +step:1384/1695 train_time:137019ms step_avg:99.00ms +step:1385/1695 train_time:137122ms step_avg:99.01ms +step:1386/1695 train_time:137230ms step_avg:99.01ms +step:1387/1695 train_time:137331ms step_avg:99.01ms +step:1388/1695 train_time:137434ms step_avg:99.02ms +step:1389/1695 train_time:137537ms step_avg:99.02ms +step:1390/1695 train_time:137638ms step_avg:99.02ms +step:1391/1695 train_time:137739ms step_avg:99.02ms +step:1392/1695 train_time:137840ms step_avg:99.02ms +step:1393/1695 train_time:137941ms step_avg:99.02ms +step:1394/1695 train_time:138043ms step_avg:99.03ms +step:1395/1695 train_time:138146ms step_avg:99.03ms +step:1396/1695 train_time:138249ms step_avg:99.03ms +step:1397/1695 train_time:138353ms step_avg:99.04ms +step:1398/1695 train_time:138456ms step_avg:99.04ms +step:1399/1695 train_time:138559ms step_avg:99.04ms +step:1400/1695 train_time:138660ms step_avg:99.04ms +step:1401/1695 train_time:138762ms step_avg:99.04ms +step:1402/1695 train_time:138863ms step_avg:99.05ms +step:1403/1695 train_time:138965ms step_avg:99.05ms +step:1404/1695 train_time:139067ms step_avg:99.05ms +step:1405/1695 train_time:139170ms step_avg:99.05ms +step:1406/1695 train_time:139273ms step_avg:99.06ms +step:1407/1695 train_time:139376ms step_avg:99.06ms +step:1408/1695 train_time:139477ms step_avg:99.06ms +step:1409/1695 train_time:139582ms step_avg:99.06ms +step:1410/1695 train_time:139683ms step_avg:99.07ms +step:1411/1695 train_time:139785ms step_avg:99.07ms +step:1412/1695 train_time:139889ms step_avg:99.07ms +step:1413/1695 train_time:139989ms step_avg:99.07ms +step:1414/1695 train_time:140091ms step_avg:99.07ms +step:1415/1695 train_time:140194ms step_avg:99.08ms +step:1416/1695 train_time:140295ms step_avg:99.08ms +step:1417/1695 train_time:140397ms step_avg:99.08ms +step:1418/1695 train_time:140498ms step_avg:99.08ms +step:1419/1695 train_time:140601ms step_avg:99.08ms +step:1420/1695 train_time:140702ms step_avg:99.09ms +step:1421/1695 train_time:140804ms step_avg:99.09ms +step:1422/1695 train_time:140907ms step_avg:99.09ms +step:1423/1695 train_time:141009ms step_avg:99.09ms +step:1424/1695 train_time:141112ms step_avg:99.10ms +step:1425/1695 train_time:141213ms step_avg:99.10ms +step:1426/1695 train_time:141315ms step_avg:99.10ms +step:1427/1695 train_time:141417ms step_avg:99.10ms +step:1428/1695 train_time:141519ms step_avg:99.10ms +step:1429/1695 train_time:141620ms step_avg:99.10ms +step:1430/1695 train_time:141721ms step_avg:99.11ms +step:1431/1695 train_time:141824ms step_avg:99.11ms +step:1432/1695 train_time:141925ms step_avg:99.11ms +step:1433/1695 train_time:142028ms step_avg:99.11ms +step:1434/1695 train_time:142129ms step_avg:99.11ms +step:1435/1695 train_time:142233ms step_avg:99.12ms +step:1436/1695 train_time:142337ms step_avg:99.12ms +step:1437/1695 train_time:142439ms step_avg:99.12ms +step:1438/1695 train_time:142540ms step_avg:99.12ms +step:1439/1695 train_time:142643ms step_avg:99.13ms +step:1440/1695 train_time:142745ms step_avg:99.13ms +step:1441/1695 train_time:142848ms step_avg:99.13ms +step:1442/1695 
train_time:142948ms step_avg:99.13ms +step:1443/1695 train_time:143049ms step_avg:99.13ms +step:1444/1695 train_time:143152ms step_avg:99.14ms +step:1445/1695 train_time:143253ms step_avg:99.14ms +step:1446/1695 train_time:143354ms step_avg:99.14ms +step:1447/1695 train_time:143455ms step_avg:99.14ms +step:1448/1695 train_time:143558ms step_avg:99.14ms +step:1449/1695 train_time:143659ms step_avg:99.14ms +step:1450/1695 train_time:143760ms step_avg:99.14ms +step:1451/1695 train_time:143861ms step_avg:99.15ms +step:1452/1695 train_time:143964ms step_avg:99.15ms +step:1453/1695 train_time:144069ms step_avg:99.15ms +step:1454/1695 train_time:144171ms step_avg:99.15ms +step:1455/1695 train_time:144273ms step_avg:99.16ms +step:1456/1695 train_time:144375ms step_avg:99.16ms +step:1457/1695 train_time:144478ms step_avg:99.16ms +step:1458/1695 train_time:144580ms step_avg:99.16ms +step:1459/1695 train_time:144682ms step_avg:99.17ms +step:1460/1695 train_time:144784ms step_avg:99.17ms +step:1461/1695 train_time:144887ms step_avg:99.17ms +step:1462/1695 train_time:144988ms step_avg:99.17ms +step:1463/1695 train_time:145089ms step_avg:99.17ms +step:1464/1695 train_time:145192ms step_avg:99.17ms +step:1465/1695 train_time:145293ms step_avg:99.18ms +step:1466/1695 train_time:145395ms step_avg:99.18ms +step:1467/1695 train_time:145495ms step_avg:99.18ms +step:1468/1695 train_time:145598ms step_avg:99.18ms +step:1469/1695 train_time:145700ms step_avg:99.18ms +step:1470/1695 train_time:145801ms step_avg:99.18ms +step:1471/1695 train_time:145903ms step_avg:99.19ms +step:1472/1695 train_time:146005ms step_avg:99.19ms +step:1473/1695 train_time:146107ms step_avg:99.19ms +step:1474/1695 train_time:146209ms step_avg:99.19ms +step:1475/1695 train_time:146310ms step_avg:99.19ms +step:1476/1695 train_time:146413ms step_avg:99.20ms +step:1477/1695 train_time:146515ms step_avg:99.20ms +step:1478/1695 train_time:146617ms step_avg:99.20ms +step:1479/1695 train_time:146718ms step_avg:99.20ms +step:1480/1695 train_time:146819ms step_avg:99.20ms +step:1481/1695 train_time:146920ms step_avg:99.20ms +step:1482/1695 train_time:147022ms step_avg:99.21ms +step:1483/1695 train_time:147125ms step_avg:99.21ms +step:1484/1695 train_time:147227ms step_avg:99.21ms +step:1485/1695 train_time:147329ms step_avg:99.21ms +step:1486/1695 train_time:147431ms step_avg:99.21ms +step:1487/1695 train_time:147532ms step_avg:99.21ms +step:1488/1695 train_time:147635ms step_avg:99.22ms +step:1489/1695 train_time:147737ms step_avg:99.22ms +step:1490/1695 train_time:147838ms step_avg:99.22ms +step:1491/1695 train_time:147940ms step_avg:99.22ms +step:1492/1695 train_time:148041ms step_avg:99.22ms +step:1493/1695 train_time:148142ms step_avg:99.22ms +step:1494/1695 train_time:148245ms step_avg:99.23ms +step:1495/1695 train_time:148349ms step_avg:99.23ms +step:1496/1695 train_time:148451ms step_avg:99.23ms +step:1497/1695 train_time:148552ms step_avg:99.23ms +step:1498/1695 train_time:148654ms step_avg:99.23ms +step:1499/1695 train_time:148755ms step_avg:99.24ms +step:1500/1695 train_time:148857ms step_avg:99.24ms +step:1500/1695 val_loss:3.3189 train_time:148955ms step_avg:99.30ms +step:1501/1695 train_time:148982ms step_avg:99.26ms +step:1502/1695 train_time:149067ms step_avg:99.25ms +step:1503/1695 train_time:149169ms step_avg:99.25ms +step:1504/1695 train_time:149269ms step_avg:99.25ms +step:1505/1695 train_time:149370ms step_avg:99.25ms +step:1506/1695 train_time:149472ms step_avg:99.25ms +step:1507/1695 train_time:149572ms step_avg:99.25ms 
+step:1508/1695 train_time:149673ms step_avg:99.25ms +step:1509/1695 train_time:149776ms step_avg:99.26ms +step:1510/1695 train_time:149878ms step_avg:99.26ms +step:1511/1695 train_time:149982ms step_avg:99.26ms +step:1512/1695 train_time:150085ms step_avg:99.26ms +step:1513/1695 train_time:150187ms step_avg:99.26ms +step:1514/1695 train_time:150289ms step_avg:99.27ms +step:1515/1695 train_time:150395ms step_avg:99.27ms +step:1516/1695 train_time:150496ms step_avg:99.27ms +step:1517/1695 train_time:150597ms step_avg:99.27ms +step:1518/1695 train_time:150699ms step_avg:99.27ms +step:1519/1695 train_time:150803ms step_avg:99.28ms +step:1520/1695 train_time:150905ms step_avg:99.28ms +step:1521/1695 train_time:151006ms step_avg:99.28ms +step:1522/1695 train_time:151108ms step_avg:99.28ms +step:1523/1695 train_time:151210ms step_avg:99.28ms +step:1524/1695 train_time:151314ms step_avg:99.29ms +step:1525/1695 train_time:151417ms step_avg:99.29ms +step:1526/1695 train_time:151520ms step_avg:99.29ms +step:1527/1695 train_time:151623ms step_avg:99.29ms +step:1528/1695 train_time:151727ms step_avg:99.30ms +step:1529/1695 train_time:151828ms step_avg:99.30ms +step:1530/1695 train_time:151931ms step_avg:99.30ms +step:1531/1695 train_time:152032ms step_avg:99.30ms +step:1532/1695 train_time:152135ms step_avg:99.31ms +step:1533/1695 train_time:152238ms step_avg:99.31ms +step:1534/1695 train_time:152340ms step_avg:99.31ms +step:1535/1695 train_time:152442ms step_avg:99.31ms +step:1536/1695 train_time:152543ms step_avg:99.31ms +step:1537/1695 train_time:152644ms step_avg:99.31ms +step:1538/1695 train_time:152746ms step_avg:99.31ms +step:1539/1695 train_time:152847ms step_avg:99.32ms +step:1540/1695 train_time:152950ms step_avg:99.32ms +step:1541/1695 train_time:153052ms step_avg:99.32ms +step:1542/1695 train_time:153156ms step_avg:99.32ms +step:1543/1695 train_time:153258ms step_avg:99.32ms +step:1544/1695 train_time:153360ms step_avg:99.33ms +step:1545/1695 train_time:153462ms step_avg:99.33ms +step:1546/1695 train_time:153564ms step_avg:99.33ms +step:1547/1695 train_time:153666ms step_avg:99.33ms +step:1548/1695 train_time:153768ms step_avg:99.33ms +step:1549/1695 train_time:153870ms step_avg:99.34ms +step:1550/1695 train_time:153971ms step_avg:99.34ms +step:1551/1695 train_time:154074ms step_avg:99.34ms +step:1552/1695 train_time:154175ms step_avg:99.34ms +step:1553/1695 train_time:154280ms step_avg:99.34ms +step:1554/1695 train_time:154381ms step_avg:99.34ms +step:1555/1695 train_time:154482ms step_avg:99.35ms +step:1556/1695 train_time:154584ms step_avg:99.35ms +step:1557/1695 train_time:154688ms step_avg:99.35ms +step:1558/1695 train_time:154791ms step_avg:99.35ms +step:1559/1695 train_time:154893ms step_avg:99.35ms +step:1560/1695 train_time:154995ms step_avg:99.36ms +step:1561/1695 train_time:155097ms step_avg:99.36ms +step:1562/1695 train_time:155200ms step_avg:99.36ms +step:1563/1695 train_time:155304ms step_avg:99.36ms +step:1564/1695 train_time:155405ms step_avg:99.36ms +step:1565/1695 train_time:155506ms step_avg:99.37ms +step:1566/1695 train_time:155608ms step_avg:99.37ms +step:1567/1695 train_time:155709ms step_avg:99.37ms +step:1568/1695 train_time:155810ms step_avg:99.37ms +step:1569/1695 train_time:155911ms step_avg:99.37ms +step:1570/1695 train_time:156015ms step_avg:99.37ms +step:1571/1695 train_time:156117ms step_avg:99.37ms +step:1572/1695 train_time:156219ms step_avg:99.38ms +step:1573/1695 train_time:156321ms step_avg:99.38ms +step:1574/1695 train_time:156422ms step_avg:99.38ms 
+step:1575/1695 train_time:156525ms step_avg:99.38ms +step:1576/1695 train_time:156627ms step_avg:99.38ms +step:1577/1695 train_time:156730ms step_avg:99.38ms +step:1578/1695 train_time:156831ms step_avg:99.39ms +step:1579/1695 train_time:156932ms step_avg:99.39ms +step:1580/1695 train_time:157035ms step_avg:99.39ms +step:1581/1695 train_time:157137ms step_avg:99.39ms +step:1582/1695 train_time:157240ms step_avg:99.39ms +step:1583/1695 train_time:157343ms step_avg:99.40ms +step:1584/1695 train_time:157446ms step_avg:99.40ms +step:1585/1695 train_time:157547ms step_avg:99.40ms +step:1586/1695 train_time:157650ms step_avg:99.40ms +step:1587/1695 train_time:157751ms step_avg:99.40ms +step:1588/1695 train_time:157851ms step_avg:99.40ms +step:1589/1695 train_time:157952ms step_avg:99.40ms +step:1590/1695 train_time:158053ms step_avg:99.40ms +step:1591/1695 train_time:158155ms step_avg:99.41ms +step:1592/1695 train_time:158259ms step_avg:99.41ms +step:1593/1695 train_time:158361ms step_avg:99.41ms +step:1594/1695 train_time:158464ms step_avg:99.41ms +step:1595/1695 train_time:158566ms step_avg:99.41ms +step:1596/1695 train_time:158667ms step_avg:99.42ms +step:1597/1695 train_time:158769ms step_avg:99.42ms +step:1598/1695 train_time:158873ms step_avg:99.42ms +step:1599/1695 train_time:158974ms step_avg:99.42ms +step:1600/1695 train_time:159076ms step_avg:99.42ms +step:1601/1695 train_time:159179ms step_avg:99.42ms +step:1602/1695 train_time:159280ms step_avg:99.43ms +step:1603/1695 train_time:159382ms step_avg:99.43ms +step:1604/1695 train_time:159483ms step_avg:99.43ms +step:1605/1695 train_time:159587ms step_avg:99.43ms +step:1606/1695 train_time:159690ms step_avg:99.43ms +step:1607/1695 train_time:159790ms step_avg:99.43ms +step:1608/1695 train_time:159891ms step_avg:99.43ms +step:1609/1695 train_time:159992ms step_avg:99.44ms +step:1610/1695 train_time:160094ms step_avg:99.44ms +step:1611/1695 train_time:160196ms step_avg:99.44ms +step:1612/1695 train_time:160299ms step_avg:99.44ms +step:1613/1695 train_time:160401ms step_avg:99.44ms +step:1614/1695 train_time:160502ms step_avg:99.44ms +step:1615/1695 train_time:160604ms step_avg:99.45ms +step:1616/1695 train_time:160707ms step_avg:99.45ms +step:1617/1695 train_time:160809ms step_avg:99.45ms +step:1618/1695 train_time:160910ms step_avg:99.45ms +step:1619/1695 train_time:161011ms step_avg:99.45ms +step:1620/1695 train_time:161114ms step_avg:99.45ms +step:1621/1695 train_time:161215ms step_avg:99.45ms +step:1622/1695 train_time:161318ms step_avg:99.46ms +step:1623/1695 train_time:161420ms step_avg:99.46ms +step:1624/1695 train_time:161522ms step_avg:99.46ms +step:1625/1695 train_time:161626ms step_avg:99.46ms +step:1625/1695 val_loss:3.2901 train_time:161726ms step_avg:99.52ms +step:1626/1695 train_time:161754ms step_avg:99.48ms +step:1627/1695 train_time:161839ms step_avg:99.47ms +step:1628/1695 train_time:161940ms step_avg:99.47ms +step:1629/1695 train_time:162042ms step_avg:99.47ms +step:1630/1695 train_time:162143ms step_avg:99.47ms +step:1631/1695 train_time:162245ms step_avg:99.48ms +step:1632/1695 train_time:162347ms step_avg:99.48ms +step:1633/1695 train_time:162449ms step_avg:99.48ms +step:1634/1695 train_time:162551ms step_avg:99.48ms +step:1635/1695 train_time:162655ms step_avg:99.48ms +step:1636/1695 train_time:162758ms step_avg:99.49ms +step:1637/1695 train_time:162860ms step_avg:99.49ms +step:1638/1695 train_time:162963ms step_avg:99.49ms +step:1639/1695 train_time:163064ms step_avg:99.49ms +step:1640/1695 train_time:163167ms 
step_avg:99.49ms +step:1641/1695 train_time:163270ms step_avg:99.49ms +step:1642/1695 train_time:163371ms step_avg:99.50ms +step:1643/1695 train_time:163473ms step_avg:99.50ms +step:1644/1695 train_time:163575ms step_avg:99.50ms +step:1645/1695 train_time:163679ms step_avg:99.50ms +step:1646/1695 train_time:163781ms step_avg:99.50ms +step:1647/1695 train_time:163886ms step_avg:99.51ms +step:1648/1695 train_time:163990ms step_avg:99.51ms +step:1649/1695 train_time:164092ms step_avg:99.51ms +step:1650/1695 train_time:164195ms step_avg:99.51ms +step:1651/1695 train_time:164297ms step_avg:99.51ms +step:1652/1695 train_time:164400ms step_avg:99.52ms +step:1653/1695 train_time:164503ms step_avg:99.52ms +step:1654/1695 train_time:164605ms step_avg:99.52ms +step:1655/1695 train_time:164707ms step_avg:99.52ms +step:1656/1695 train_time:164809ms step_avg:99.52ms +step:1657/1695 train_time:164912ms step_avg:99.52ms +step:1658/1695 train_time:165016ms step_avg:99.53ms +step:1659/1695 train_time:165122ms step_avg:99.53ms +step:1660/1695 train_time:165224ms step_avg:99.53ms +step:1661/1695 train_time:165328ms step_avg:99.53ms +step:1662/1695 train_time:165432ms step_avg:99.54ms +step:1663/1695 train_time:165536ms step_avg:99.54ms +step:1664/1695 train_time:165638ms step_avg:99.54ms +step:1665/1695 train_time:165743ms step_avg:99.55ms +step:1666/1695 train_time:165846ms step_avg:99.55ms +step:1667/1695 train_time:165950ms step_avg:99.55ms +step:1668/1695 train_time:166057ms step_avg:99.55ms +step:1669/1695 train_time:166161ms step_avg:99.56ms +step:1670/1695 train_time:166264ms step_avg:99.56ms +step:1671/1695 train_time:166366ms step_avg:99.56ms +step:1672/1695 train_time:166469ms step_avg:99.56ms +step:1673/1695 train_time:166572ms step_avg:99.56ms +step:1674/1695 train_time:166674ms step_avg:99.57ms +step:1675/1695 train_time:166777ms step_avg:99.57ms +step:1676/1695 train_time:166881ms step_avg:99.57ms +step:1677/1695 train_time:166982ms step_avg:99.57ms +step:1678/1695 train_time:167088ms step_avg:99.58ms +step:1679/1695 train_time:167190ms step_avg:99.58ms +step:1680/1695 train_time:167292ms step_avg:99.58ms +step:1681/1695 train_time:167395ms step_avg:99.58ms +step:1682/1695 train_time:167502ms step_avg:99.58ms +step:1683/1695 train_time:167603ms step_avg:99.59ms +step:1684/1695 train_time:167707ms step_avg:99.59ms +step:1685/1695 train_time:167810ms step_avg:99.59ms +step:1686/1695 train_time:167913ms step_avg:99.59ms +step:1687/1695 train_time:168017ms step_avg:99.60ms +step:1688/1695 train_time:168119ms step_avg:99.60ms +step:1689/1695 train_time:168220ms step_avg:99.60ms +step:1690/1695 train_time:168322ms step_avg:99.60ms +step:1691/1695 train_time:168425ms step_avg:99.60ms +step:1692/1695 train_time:168528ms step_avg:99.60ms +step:1693/1695 train_time:168632ms step_avg:99.61ms +step:1694/1695 train_time:168736ms step_avg:99.61ms +step:1695/1695 train_time:168839ms step_avg:99.61ms +step:1695/1695 val_loss:3.2772 train_time:168938ms step_avg:99.67ms +peak memory allocated: 34004 MiB reserved: 49660 MiB diff --git a/records/082325_SparseAttnGate/21e732fb-4c4b-4db9-94bc-9fcd5d59b080.txt b/records/082325_SparseAttnGate/21e732fb-4c4b-4db9-94bc-9fcd5d59b080.txt new file mode 100644 index 000000000..6b4098881 --- /dev/null +++ b/records/082325_SparseAttnGate/21e732fb-4c4b-4db9-94bc-9fcd5d59b080.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
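+
+ For intuition, the orthogonalization is equivalent to this plain-PyTorch sketch
+ (a hypothetical reference helper, illustrative only; step() below actually calls the
+ fused Triton implementation newton_schulz_triton defined above, which additionally
+ transposes tall matrices first):
+
+     def newton_schulz_reference(G, steps=5):
+         a, b, c = (3.4445, -4.7750, 2.0315)                  # quintic iteration coefficients
+         X = G.bfloat16()
+         X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)  # ensure spectral norm is at most 1
+         for _ in range(steps):
+             A = X @ X.mT             # ns_line_1
+             B = b * A + c * A @ A    # ns_line_2
+             X = a * X + B @ X        # ns_line_3
+         return X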
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
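+ # Note: lr_mul / wd_mul are per-parameter multipliers; Muon.step and DistAdam.step
+ # above read them via getattr(p, "lr_mul", 1.0) / getattr(p, "wd_mul", 1.0) to scale
+ # each tensor's effective learning rate and weight decay.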
+        self.lm_head.weight.lr_mul = 1.0
+        self.scalars.lr_mul = 5.0
+
+    def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor):
+        BLOCK_SIZE = 128
+        docs = (input_seq == 50256).cumsum(0)
+        # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256])
+        # docs = increments.cumsum(0)
+
+        def document_causal(b, h, q_idx, kv_idx):
+            causal_mask = q_idx >= kv_idx
+            document_mask = docs[q_idx] == docs[kv_idx]
+            return causal_mask & document_mask
+
+        def dense_to_ordered(dense_blockmask: Tensor):
+            num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32)
+            indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32)
+            return num_blocks[None, None].contiguous(), indices[None, None].contiguous()
+
+        # manual block mask creation by @YouJiacheng
+        assert len(input_seq) % BLOCK_SIZE == 0
+        NUM_BLOCKS = len(input_seq) // BLOCK_SIZE
+        block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda")
+        causal_blockmask_any = block_idx[:, None] >= block_idx
+        causal_blockmask_all = block_idx[:, None] > block_idx
+        docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous()
+        docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous()
+        document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low)
+        document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low)
+        blockmask_any = causal_blockmask_any & document_blockmask_any
+        blockmask_all = causal_blockmask_all & document_blockmask_all
+        partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all)
+        full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all)
+        def build_bm(window_size_blocks: Tensor) -> BlockMask:
+            return BlockMask.from_kv_blocks(
+                torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)),
+                partial_kv_indices,
+                torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1),
+                full_kv_indices,
+                BLOCK_SIZE=BLOCK_SIZE,
+                mask_mod=document_causal,
+            )
+        # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from a suggestion by @Grad62304977, following the Gemma 2 paper
+        return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2)
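+    # Editor's note (sketch, not part of the original run): the long and short masks differ
+    # only in their block budget. With BLOCK_SIZE = 128 and a budget of w blocks, each query
+    # block keeps at most w - 1 fully-visible KV blocks plus at least one partial block:
+    #     full'    = torch.clamp_max(full, w - 1)
+    #     partial' = torch.clamp_max(partial, torch.clamp_min(w - full, 1))
+    # e.g. a query block with full = 7 and partial = 5 under w = 8 gets full' = 7 and
+    # partial' = 1, a reach of 8 blocks (~1024 tokens); the short mask halves w.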
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor):
+        assert input_seq.ndim == 1
+
+        ve = [value_embed(input_seq) for value_embed in self.value_embeds]
+        # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks)
+        block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(block_masks) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following the Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+# find world_size starting indices, such that each begins with token 50256 and the local batches don't overlap
+def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int):
+    boundary_mask = tokens[pos : pos + token_window] == 50256
+    boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos
+    start = boundary_positions[0].item()
+    starts = []
+    for i in range(1, len(boundary_positions)):
+        end = boundary_positions[i].item()
+        if end - start >= seq_len:
+            starts.append(start) # append start once end pos is confirmed
+            if len(starts) == dist.get_world_size():
+                return starts, end - pos
+            start = end
+    assert False # increase token_window if necessary
+
+def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool):
+    rank = dist.get_rank()
+    world_size = dist.get_world_size()
+    batch_size = seq_len * world_size
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:08:17 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 32C P0 118W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 296819 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 296820 C /usr/bin/python3 614MiB | +| 0 N/A N/A 296821 C /usr/bin/python3 614MiB | +| 0 N/A N/A 296822 C /usr/bin/python3 614MiB | +| 0 N/A N/A 296823 C /usr/bin/python3 614MiB | +| 0 N/A N/A 296824 C /usr/bin/python3 614MiB | +| 0 N/A N/A 296825 C /usr/bin/python3 614MiB | +| 0 N/A N/A 296826 C /usr/bin/python3 614MiB | +| 1 N/A N/A 296820 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 296821 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 296822 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 296823 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 296824 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 296825 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 296826 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.04ms +step:1/1695 train_time:157ms step_avg:156.91ms +step:2/1695 train_time:187ms step_avg:93.26ms +step:3/1695 train_time:254ms step_avg:84.82ms +step:4/1695 train_time:346ms step_avg:86.61ms +step:5/1695 train_time:439ms step_avg:87.84ms +step:6/1695 train_time:532ms step_avg:88.67ms +step:7/1695 train_time:626ms step_avg:89.39ms +step:8/1695 
train_time:719ms step_avg:89.89ms +step:9/1695 train_time:813ms step_avg:90.31ms +step:10/1695 train_time:906ms step_avg:90.55ms +step:11/1695 train_time:998ms step_avg:90.75ms +step:12/1695 train_time:1092ms step_avg:91.04ms +step:13/1695 train_time:1190ms step_avg:91.51ms +step:14/1695 train_time:1285ms step_avg:91.79ms +step:15/1695 train_time:1379ms step_avg:91.96ms +step:16/1695 train_time:1473ms step_avg:92.03ms +step:17/1695 train_time:1566ms step_avg:92.15ms +step:18/1695 train_time:1659ms step_avg:92.19ms +step:19/1695 train_time:1753ms step_avg:92.25ms +step:20/1695 train_time:1846ms step_avg:92.32ms +step:21/1695 train_time:1940ms step_avg:92.38ms +step:22/1695 train_time:2034ms step_avg:92.44ms +step:23/1695 train_time:2128ms step_avg:92.54ms +step:24/1695 train_time:2223ms step_avg:92.62ms +step:25/1695 train_time:2317ms step_avg:92.70ms +step:26/1695 train_time:2411ms step_avg:92.75ms +step:27/1695 train_time:2505ms step_avg:92.80ms +step:28/1695 train_time:2599ms step_avg:92.83ms +step:29/1695 train_time:2693ms step_avg:92.86ms +step:30/1695 train_time:2786ms step_avg:92.88ms +step:31/1695 train_time:2880ms step_avg:92.92ms +step:32/1695 train_time:2974ms step_avg:92.93ms +step:33/1695 train_time:3068ms step_avg:92.96ms +step:34/1695 train_time:3162ms step_avg:92.99ms +step:35/1695 train_time:3255ms step_avg:93.00ms +step:36/1695 train_time:3349ms step_avg:93.02ms +step:37/1695 train_time:3443ms step_avg:93.05ms +step:38/1695 train_time:3537ms step_avg:93.08ms +step:39/1695 train_time:3631ms step_avg:93.10ms +step:40/1695 train_time:3725ms step_avg:93.13ms +step:41/1695 train_time:3819ms step_avg:93.15ms +step:42/1695 train_time:3913ms step_avg:93.16ms +step:43/1695 train_time:4007ms step_avg:93.19ms +step:44/1695 train_time:4101ms step_avg:93.20ms +step:45/1695 train_time:4194ms step_avg:93.20ms +step:46/1695 train_time:4288ms step_avg:93.22ms +step:47/1695 train_time:4382ms step_avg:93.23ms +step:48/1695 train_time:4476ms step_avg:93.24ms +step:49/1695 train_time:4571ms step_avg:93.28ms +step:50/1695 train_time:4665ms step_avg:93.29ms +step:51/1695 train_time:4759ms step_avg:93.30ms +step:52/1695 train_time:4852ms step_avg:93.30ms +step:53/1695 train_time:4946ms step_avg:93.32ms +step:54/1695 train_time:5041ms step_avg:93.36ms +step:55/1695 train_time:5135ms step_avg:93.36ms +step:56/1695 train_time:5229ms step_avg:93.37ms +step:57/1695 train_time:5322ms step_avg:93.37ms +step:58/1695 train_time:5415ms step_avg:93.36ms +step:59/1695 train_time:5508ms step_avg:93.36ms +step:60/1695 train_time:5602ms step_avg:93.37ms +step:61/1695 train_time:5695ms step_avg:93.37ms +step:62/1695 train_time:5789ms step_avg:93.38ms +step:63/1695 train_time:5883ms step_avg:93.38ms +step:64/1695 train_time:5977ms step_avg:93.39ms +step:65/1695 train_time:6070ms step_avg:93.39ms +step:66/1695 train_time:6164ms step_avg:93.39ms +step:67/1695 train_time:6257ms step_avg:93.39ms +step:68/1695 train_time:6352ms step_avg:93.41ms +step:69/1695 train_time:6447ms step_avg:93.43ms +step:70/1695 train_time:6541ms step_avg:93.45ms +step:71/1695 train_time:6635ms step_avg:93.45ms +step:72/1695 train_time:6730ms step_avg:93.47ms +step:73/1695 train_time:6824ms step_avg:93.48ms +step:74/1695 train_time:6918ms step_avg:93.49ms +step:75/1695 train_time:7012ms step_avg:93.49ms +step:76/1695 train_time:7106ms step_avg:93.49ms +step:77/1695 train_time:7198ms step_avg:93.49ms +step:78/1695 train_time:7292ms step_avg:93.49ms +step:79/1695 train_time:7386ms step_avg:93.49ms +step:80/1695 train_time:7480ms 
step_avg:93.49ms +step:81/1695 train_time:7573ms step_avg:93.49ms +step:82/1695 train_time:7666ms step_avg:93.49ms +step:83/1695 train_time:7760ms step_avg:93.49ms +step:84/1695 train_time:7854ms step_avg:93.50ms +step:85/1695 train_time:7948ms step_avg:93.50ms +step:86/1695 train_time:8041ms step_avg:93.50ms +step:87/1695 train_time:8135ms step_avg:93.51ms +step:88/1695 train_time:8228ms step_avg:93.51ms +step:89/1695 train_time:8322ms step_avg:93.51ms +step:90/1695 train_time:8416ms step_avg:93.51ms +step:91/1695 train_time:8510ms step_avg:93.51ms +step:92/1695 train_time:8603ms step_avg:93.51ms +step:93/1695 train_time:8696ms step_avg:93.51ms +step:94/1695 train_time:8790ms step_avg:93.51ms +step:95/1695 train_time:8884ms step_avg:93.51ms +step:96/1695 train_time:8977ms step_avg:93.51ms +step:97/1695 train_time:9071ms step_avg:93.52ms +step:98/1695 train_time:9166ms step_avg:93.53ms +step:99/1695 train_time:9259ms step_avg:93.52ms +step:100/1695 train_time:9353ms step_avg:93.53ms +step:101/1695 train_time:9447ms step_avg:93.54ms +step:102/1695 train_time:9541ms step_avg:93.54ms +step:103/1695 train_time:9635ms step_avg:93.54ms +step:104/1695 train_time:9729ms step_avg:93.55ms +step:105/1695 train_time:9823ms step_avg:93.55ms +step:106/1695 train_time:9916ms step_avg:93.55ms +step:107/1695 train_time:10011ms step_avg:93.56ms +step:108/1695 train_time:10105ms step_avg:93.56ms +step:109/1695 train_time:10198ms step_avg:93.56ms +step:110/1695 train_time:10292ms step_avg:93.56ms +step:111/1695 train_time:10386ms step_avg:93.57ms +step:112/1695 train_time:10479ms step_avg:93.57ms +step:113/1695 train_time:10573ms step_avg:93.57ms +step:114/1695 train_time:10667ms step_avg:93.57ms +step:115/1695 train_time:10762ms step_avg:93.58ms +step:116/1695 train_time:10855ms step_avg:93.58ms +step:117/1695 train_time:10950ms step_avg:93.59ms +step:118/1695 train_time:11045ms step_avg:93.60ms +step:119/1695 train_time:11139ms step_avg:93.60ms +step:120/1695 train_time:11231ms step_avg:93.59ms +step:121/1695 train_time:11324ms step_avg:93.58ms +step:122/1695 train_time:11417ms step_avg:93.58ms +step:123/1695 train_time:11512ms step_avg:93.59ms +step:124/1695 train_time:11605ms step_avg:93.59ms +step:125/1695 train_time:11698ms step_avg:93.58ms +step:125/1695 val_loss:4.6063 train_time:11789ms step_avg:94.31ms +step:126/1695 train_time:11816ms step_avg:93.78ms +step:127/1695 train_time:11894ms step_avg:93.65ms +step:128/1695 train_time:11996ms step_avg:93.72ms +step:129/1695 train_time:12091ms step_avg:93.73ms +step:130/1695 train_time:12186ms step_avg:93.74ms +step:131/1695 train_time:12279ms step_avg:93.73ms +step:132/1695 train_time:12372ms step_avg:93.73ms +step:133/1695 train_time:12466ms step_avg:93.73ms +step:134/1695 train_time:12559ms step_avg:93.73ms +step:135/1695 train_time:12653ms step_avg:93.72ms +step:136/1695 train_time:12747ms step_avg:93.73ms +step:137/1695 train_time:12840ms step_avg:93.72ms +step:138/1695 train_time:12935ms step_avg:93.74ms +step:139/1695 train_time:13031ms step_avg:93.75ms +step:140/1695 train_time:13127ms step_avg:93.76ms +step:141/1695 train_time:13221ms step_avg:93.77ms +step:142/1695 train_time:13315ms step_avg:93.77ms +step:143/1695 train_time:13409ms step_avg:93.77ms +step:144/1695 train_time:13502ms step_avg:93.77ms +step:145/1695 train_time:13596ms step_avg:93.76ms +step:146/1695 train_time:13689ms step_avg:93.76ms +step:147/1695 train_time:13785ms step_avg:93.78ms +step:148/1695 train_time:13877ms step_avg:93.76ms +step:149/1695 train_time:13972ms 
step_avg:93.77ms +step:150/1695 train_time:14068ms step_avg:93.78ms +step:151/1695 train_time:14164ms step_avg:93.80ms +step:152/1695 train_time:14258ms step_avg:93.80ms +step:153/1695 train_time:14353ms step_avg:93.81ms +step:154/1695 train_time:14446ms step_avg:93.80ms +step:155/1695 train_time:14539ms step_avg:93.80ms +step:156/1695 train_time:14633ms step_avg:93.80ms +step:157/1695 train_time:14726ms step_avg:93.80ms +step:158/1695 train_time:14821ms step_avg:93.80ms +step:159/1695 train_time:14915ms step_avg:93.80ms +step:160/1695 train_time:15009ms step_avg:93.81ms +step:161/1695 train_time:15105ms step_avg:93.82ms +step:162/1695 train_time:15199ms step_avg:93.82ms +step:163/1695 train_time:15294ms step_avg:93.83ms +step:164/1695 train_time:15389ms step_avg:93.84ms +step:165/1695 train_time:15483ms step_avg:93.84ms +step:166/1695 train_time:15577ms step_avg:93.84ms +step:167/1695 train_time:15671ms step_avg:93.84ms +step:168/1695 train_time:15766ms step_avg:93.85ms +step:169/1695 train_time:15860ms step_avg:93.85ms +step:170/1695 train_time:15954ms step_avg:93.85ms +step:171/1695 train_time:16047ms step_avg:93.84ms +step:172/1695 train_time:16142ms step_avg:93.85ms +step:173/1695 train_time:16236ms step_avg:93.85ms +step:174/1695 train_time:16331ms step_avg:93.85ms +step:175/1695 train_time:16425ms step_avg:93.86ms +step:176/1695 train_time:16519ms step_avg:93.86ms +step:177/1695 train_time:16614ms step_avg:93.86ms +step:178/1695 train_time:16709ms step_avg:93.87ms +step:179/1695 train_time:16803ms step_avg:93.87ms +step:180/1695 train_time:16897ms step_avg:93.87ms +step:181/1695 train_time:16991ms step_avg:93.87ms +step:182/1695 train_time:17085ms step_avg:93.88ms +step:183/1695 train_time:17179ms step_avg:93.88ms +step:184/1695 train_time:17273ms step_avg:93.88ms +step:185/1695 train_time:17368ms step_avg:93.88ms +step:186/1695 train_time:17462ms step_avg:93.88ms +step:187/1695 train_time:17557ms step_avg:93.89ms +step:188/1695 train_time:17651ms step_avg:93.89ms +step:189/1695 train_time:17746ms step_avg:93.89ms +step:190/1695 train_time:17841ms step_avg:93.90ms +step:191/1695 train_time:17934ms step_avg:93.90ms +step:192/1695 train_time:18028ms step_avg:93.90ms +step:193/1695 train_time:18123ms step_avg:93.90ms +step:194/1695 train_time:18217ms step_avg:93.90ms +step:195/1695 train_time:18311ms step_avg:93.90ms +step:196/1695 train_time:18406ms step_avg:93.91ms +step:197/1695 train_time:18499ms step_avg:93.91ms +step:198/1695 train_time:18593ms step_avg:93.91ms +step:199/1695 train_time:18688ms step_avg:93.91ms +step:200/1695 train_time:18782ms step_avg:93.91ms +step:201/1695 train_time:18876ms step_avg:93.91ms +step:202/1695 train_time:18970ms step_avg:93.91ms +step:203/1695 train_time:19065ms step_avg:93.92ms +step:204/1695 train_time:19159ms step_avg:93.92ms +step:205/1695 train_time:19253ms step_avg:93.92ms +step:206/1695 train_time:19348ms step_avg:93.92ms +step:207/1695 train_time:19442ms step_avg:93.92ms +step:208/1695 train_time:19536ms step_avg:93.92ms +step:209/1695 train_time:19631ms step_avg:93.93ms +step:210/1695 train_time:19726ms step_avg:93.93ms +step:211/1695 train_time:19820ms step_avg:93.93ms +step:212/1695 train_time:19913ms step_avg:93.93ms +step:213/1695 train_time:20007ms step_avg:93.93ms +step:214/1695 train_time:20102ms step_avg:93.93ms +step:215/1695 train_time:20195ms step_avg:93.93ms +step:216/1695 train_time:20289ms step_avg:93.93ms +step:217/1695 train_time:20383ms step_avg:93.93ms +step:218/1695 train_time:20478ms step_avg:93.93ms +step:219/1695 
train_time:20572ms step_avg:93.93ms +step:220/1695 train_time:20667ms step_avg:93.94ms +step:221/1695 train_time:20760ms step_avg:93.94ms +step:222/1695 train_time:20854ms step_avg:93.94ms +step:223/1695 train_time:20948ms step_avg:93.94ms +step:224/1695 train_time:21042ms step_avg:93.94ms +step:225/1695 train_time:21136ms step_avg:93.94ms +step:226/1695 train_time:21230ms step_avg:93.94ms +step:227/1695 train_time:21324ms step_avg:93.94ms +step:228/1695 train_time:21418ms step_avg:93.94ms +step:229/1695 train_time:21513ms step_avg:93.94ms +step:230/1695 train_time:21607ms step_avg:93.94ms +step:231/1695 train_time:21702ms step_avg:93.95ms +step:232/1695 train_time:21795ms step_avg:93.94ms +step:233/1695 train_time:21889ms step_avg:93.95ms +step:234/1695 train_time:21983ms step_avg:93.95ms +step:235/1695 train_time:22077ms step_avg:93.94ms +step:236/1695 train_time:22172ms step_avg:93.95ms +step:237/1695 train_time:22265ms step_avg:93.95ms +step:238/1695 train_time:22359ms step_avg:93.95ms +step:239/1695 train_time:22454ms step_avg:93.95ms +step:240/1695 train_time:22548ms step_avg:93.95ms +step:241/1695 train_time:22642ms step_avg:93.95ms +step:242/1695 train_time:22736ms step_avg:93.95ms +step:243/1695 train_time:22830ms step_avg:93.95ms +step:244/1695 train_time:22925ms step_avg:93.95ms +step:245/1695 train_time:23018ms step_avg:93.95ms +step:246/1695 train_time:23113ms step_avg:93.95ms +step:247/1695 train_time:23207ms step_avg:93.95ms +step:248/1695 train_time:23301ms step_avg:93.96ms +step:249/1695 train_time:23395ms step_avg:93.96ms +step:250/1695 train_time:23489ms step_avg:93.96ms +step:250/1695 val_loss:4.0781 train_time:23582ms step_avg:94.33ms +step:251/1695 train_time:23610ms step_avg:94.06ms +step:252/1695 train_time:23686ms step_avg:93.99ms +step:253/1695 train_time:23785ms step_avg:94.01ms +step:254/1695 train_time:23880ms step_avg:94.02ms +step:255/1695 train_time:23975ms step_avg:94.02ms +step:256/1695 train_time:24069ms step_avg:94.02ms +step:257/1695 train_time:24162ms step_avg:94.02ms +step:258/1695 train_time:24257ms step_avg:94.02ms +step:259/1695 train_time:24350ms step_avg:94.02ms +step:260/1695 train_time:24444ms step_avg:94.01ms +step:261/1695 train_time:24540ms step_avg:94.02ms +step:262/1695 train_time:24636ms step_avg:94.03ms +step:263/1695 train_time:24731ms step_avg:94.03ms +step:264/1695 train_time:24826ms step_avg:94.04ms +step:265/1695 train_time:24921ms step_avg:94.04ms +step:266/1695 train_time:25016ms step_avg:94.04ms +step:267/1695 train_time:25110ms step_avg:94.05ms +step:268/1695 train_time:25204ms step_avg:94.05ms +step:269/1695 train_time:25298ms step_avg:94.05ms +step:270/1695 train_time:25393ms step_avg:94.05ms +step:271/1695 train_time:25486ms step_avg:94.05ms +step:272/1695 train_time:25582ms step_avg:94.05ms +step:273/1695 train_time:25678ms step_avg:94.06ms +step:274/1695 train_time:25774ms step_avg:94.07ms +step:275/1695 train_time:25868ms step_avg:94.07ms +step:276/1695 train_time:25963ms step_avg:94.07ms +step:277/1695 train_time:26058ms step_avg:94.07ms +step:278/1695 train_time:26153ms step_avg:94.08ms +step:279/1695 train_time:26247ms step_avg:94.08ms +step:280/1695 train_time:26341ms step_avg:94.07ms +step:281/1695 train_time:26435ms step_avg:94.07ms +step:282/1695 train_time:26529ms step_avg:94.07ms +step:283/1695 train_time:26625ms step_avg:94.08ms +step:284/1695 train_time:26719ms step_avg:94.08ms +step:285/1695 train_time:26814ms step_avg:94.09ms +step:286/1695 train_time:26908ms step_avg:94.08ms +step:287/1695 train_time:27002ms 
step_avg:94.08ms +step:288/1695 train_time:27098ms step_avg:94.09ms +step:289/1695 train_time:27193ms step_avg:94.09ms +step:290/1695 train_time:27287ms step_avg:94.09ms +step:291/1695 train_time:27381ms step_avg:94.09ms +step:292/1695 train_time:27476ms step_avg:94.10ms +step:293/1695 train_time:27570ms step_avg:94.10ms +step:294/1695 train_time:27665ms step_avg:94.10ms +step:295/1695 train_time:27760ms step_avg:94.10ms +step:296/1695 train_time:27854ms step_avg:94.10ms +step:297/1695 train_time:27948ms step_avg:94.10ms +step:298/1695 train_time:28043ms step_avg:94.10ms +step:299/1695 train_time:28138ms step_avg:94.11ms +step:300/1695 train_time:28233ms step_avg:94.11ms +step:301/1695 train_time:28326ms step_avg:94.11ms +step:302/1695 train_time:28421ms step_avg:94.11ms +step:303/1695 train_time:28516ms step_avg:94.11ms +step:304/1695 train_time:28610ms step_avg:94.11ms +step:305/1695 train_time:28704ms step_avg:94.11ms +step:306/1695 train_time:28800ms step_avg:94.12ms +step:307/1695 train_time:28894ms step_avg:94.12ms +step:308/1695 train_time:28989ms step_avg:94.12ms +step:309/1695 train_time:29084ms step_avg:94.12ms +step:310/1695 train_time:29179ms step_avg:94.12ms +step:311/1695 train_time:29274ms step_avg:94.13ms +step:312/1695 train_time:29368ms step_avg:94.13ms +step:313/1695 train_time:29463ms step_avg:94.13ms +step:314/1695 train_time:29558ms step_avg:94.13ms +step:315/1695 train_time:29653ms step_avg:94.14ms +step:316/1695 train_time:29747ms step_avg:94.14ms +step:317/1695 train_time:29842ms step_avg:94.14ms +step:318/1695 train_time:29937ms step_avg:94.14ms +step:319/1695 train_time:30031ms step_avg:94.14ms +step:320/1695 train_time:30125ms step_avg:94.14ms +step:321/1695 train_time:30220ms step_avg:94.14ms +step:322/1695 train_time:30315ms step_avg:94.15ms +step:323/1695 train_time:30409ms step_avg:94.15ms +step:324/1695 train_time:30504ms step_avg:94.15ms +step:325/1695 train_time:30598ms step_avg:94.15ms +step:326/1695 train_time:30694ms step_avg:94.15ms +step:327/1695 train_time:30788ms step_avg:94.15ms +step:328/1695 train_time:30882ms step_avg:94.15ms +step:329/1695 train_time:30978ms step_avg:94.16ms +step:330/1695 train_time:31072ms step_avg:94.16ms +step:331/1695 train_time:31166ms step_avg:94.16ms +step:332/1695 train_time:31261ms step_avg:94.16ms +step:333/1695 train_time:31356ms step_avg:94.16ms +step:334/1695 train_time:31450ms step_avg:94.16ms +step:335/1695 train_time:31544ms step_avg:94.16ms +step:336/1695 train_time:31638ms step_avg:94.16ms +step:337/1695 train_time:31734ms step_avg:94.17ms +step:338/1695 train_time:31828ms step_avg:94.16ms +step:339/1695 train_time:31922ms step_avg:94.17ms +step:340/1695 train_time:32017ms step_avg:94.17ms +step:341/1695 train_time:32113ms step_avg:94.17ms +step:342/1695 train_time:32206ms step_avg:94.17ms +step:343/1695 train_time:32301ms step_avg:94.17ms +step:344/1695 train_time:32396ms step_avg:94.18ms +step:345/1695 train_time:32490ms step_avg:94.18ms +step:346/1695 train_time:32585ms step_avg:94.18ms +step:347/1695 train_time:32680ms step_avg:94.18ms +step:348/1695 train_time:32775ms step_avg:94.18ms +step:349/1695 train_time:32869ms step_avg:94.18ms +step:350/1695 train_time:32964ms step_avg:94.18ms +step:351/1695 train_time:33059ms step_avg:94.18ms +step:352/1695 train_time:33154ms step_avg:94.19ms +step:353/1695 train_time:33248ms step_avg:94.19ms +step:354/1695 train_time:33342ms step_avg:94.19ms +step:355/1695 train_time:33436ms step_avg:94.19ms +step:356/1695 train_time:33530ms step_avg:94.19ms +step:357/1695 
train_time:33625ms step_avg:94.19ms +step:358/1695 train_time:33719ms step_avg:94.19ms +step:359/1695 train_time:33814ms step_avg:94.19ms +step:360/1695 train_time:33907ms step_avg:94.19ms +step:361/1695 train_time:34002ms step_avg:94.19ms +step:362/1695 train_time:34097ms step_avg:94.19ms +step:363/1695 train_time:34192ms step_avg:94.19ms +step:364/1695 train_time:34286ms step_avg:94.19ms +step:365/1695 train_time:34380ms step_avg:94.19ms +step:366/1695 train_time:34474ms step_avg:94.19ms +step:367/1695 train_time:34568ms step_avg:94.19ms +step:368/1695 train_time:34664ms step_avg:94.19ms +step:369/1695 train_time:34759ms step_avg:94.20ms +step:370/1695 train_time:34853ms step_avg:94.20ms +step:371/1695 train_time:34948ms step_avg:94.20ms +step:372/1695 train_time:35042ms step_avg:94.20ms +step:373/1695 train_time:35137ms step_avg:94.20ms +step:374/1695 train_time:35232ms step_avg:94.20ms +step:375/1695 train_time:35326ms step_avg:94.20ms +step:375/1695 val_loss:3.8822 train_time:35417ms step_avg:94.45ms +step:376/1695 train_time:35446ms step_avg:94.27ms +step:377/1695 train_time:35524ms step_avg:94.23ms +step:378/1695 train_time:35623ms step_avg:94.24ms +step:379/1695 train_time:35721ms step_avg:94.25ms +step:380/1695 train_time:35817ms step_avg:94.26ms +step:381/1695 train_time:35913ms step_avg:94.26ms +step:382/1695 train_time:36007ms step_avg:94.26ms +step:383/1695 train_time:36102ms step_avg:94.26ms +step:384/1695 train_time:36198ms step_avg:94.26ms +step:385/1695 train_time:36293ms step_avg:94.27ms +step:386/1695 train_time:36389ms step_avg:94.27ms +step:387/1695 train_time:36486ms step_avg:94.28ms +step:388/1695 train_time:36584ms step_avg:94.29ms +step:389/1695 train_time:36682ms step_avg:94.30ms +step:390/1695 train_time:36780ms step_avg:94.31ms +step:391/1695 train_time:36877ms step_avg:94.32ms +step:392/1695 train_time:36973ms step_avg:94.32ms +step:393/1695 train_time:37069ms step_avg:94.32ms +step:394/1695 train_time:37164ms step_avg:94.33ms +step:395/1695 train_time:37261ms step_avg:94.33ms +step:396/1695 train_time:37356ms step_avg:94.33ms +step:397/1695 train_time:37452ms step_avg:94.34ms +step:398/1695 train_time:37548ms step_avg:94.34ms +step:399/1695 train_time:37644ms step_avg:94.35ms +step:400/1695 train_time:37742ms step_avg:94.35ms +step:401/1695 train_time:37837ms step_avg:94.36ms +step:402/1695 train_time:37934ms step_avg:94.36ms +step:403/1695 train_time:38030ms step_avg:94.37ms +step:404/1695 train_time:38125ms step_avg:94.37ms +step:405/1695 train_time:38221ms step_avg:94.37ms +step:406/1695 train_time:38317ms step_avg:94.38ms +step:407/1695 train_time:38412ms step_avg:94.38ms +step:408/1695 train_time:38509ms step_avg:94.38ms +step:409/1695 train_time:38605ms step_avg:94.39ms +step:410/1695 train_time:38702ms step_avg:94.39ms +step:411/1695 train_time:38799ms step_avg:94.40ms +step:412/1695 train_time:38895ms step_avg:94.41ms +step:413/1695 train_time:38992ms step_avg:94.41ms +step:414/1695 train_time:39088ms step_avg:94.41ms +step:415/1695 train_time:39183ms step_avg:94.42ms +step:416/1695 train_time:39279ms step_avg:94.42ms +step:417/1695 train_time:39377ms step_avg:94.43ms +step:418/1695 train_time:39472ms step_avg:94.43ms +step:419/1695 train_time:39568ms step_avg:94.43ms +step:420/1695 train_time:39664ms step_avg:94.44ms +step:421/1695 train_time:39760ms step_avg:94.44ms +step:422/1695 train_time:39857ms step_avg:94.45ms +step:423/1695 train_time:39954ms step_avg:94.45ms +step:424/1695 train_time:40051ms step_avg:94.46ms +step:425/1695 train_time:40147ms 
step_avg:94.46ms +step:426/1695 train_time:40242ms step_avg:94.47ms +step:427/1695 train_time:40338ms step_avg:94.47ms +step:428/1695 train_time:40434ms step_avg:94.47ms +step:429/1695 train_time:40531ms step_avg:94.48ms +step:430/1695 train_time:40627ms step_avg:94.48ms +step:431/1695 train_time:40722ms step_avg:94.48ms +step:432/1695 train_time:40819ms step_avg:94.49ms +step:433/1695 train_time:40916ms step_avg:94.49ms +step:434/1695 train_time:41013ms step_avg:94.50ms +step:435/1695 train_time:41109ms step_avg:94.50ms +step:436/1695 train_time:41204ms step_avg:94.51ms +step:437/1695 train_time:41301ms step_avg:94.51ms +step:438/1695 train_time:41397ms step_avg:94.51ms +step:439/1695 train_time:41493ms step_avg:94.52ms +step:440/1695 train_time:41589ms step_avg:94.52ms +step:441/1695 train_time:41684ms step_avg:94.52ms +step:442/1695 train_time:41781ms step_avg:94.53ms +step:443/1695 train_time:41878ms step_avg:94.53ms +step:444/1695 train_time:41975ms step_avg:94.54ms +step:445/1695 train_time:42071ms step_avg:94.54ms +step:446/1695 train_time:42167ms step_avg:94.55ms +step:447/1695 train_time:42263ms step_avg:94.55ms +step:448/1695 train_time:42359ms step_avg:94.55ms +step:449/1695 train_time:42456ms step_avg:94.56ms +step:450/1695 train_time:42552ms step_avg:94.56ms +step:451/1695 train_time:42648ms step_avg:94.56ms +step:452/1695 train_time:42744ms step_avg:94.57ms +step:453/1695 train_time:42840ms step_avg:94.57ms +step:454/1695 train_time:42937ms step_avg:94.57ms +step:455/1695 train_time:43033ms step_avg:94.58ms +step:456/1695 train_time:43129ms step_avg:94.58ms +step:457/1695 train_time:43225ms step_avg:94.58ms +step:458/1695 train_time:43321ms step_avg:94.59ms +step:459/1695 train_time:43418ms step_avg:94.59ms +step:460/1695 train_time:43514ms step_avg:94.60ms +step:461/1695 train_time:43610ms step_avg:94.60ms +step:462/1695 train_time:43707ms step_avg:94.60ms +step:463/1695 train_time:43802ms step_avg:94.60ms +step:464/1695 train_time:43899ms step_avg:94.61ms +step:465/1695 train_time:43995ms step_avg:94.61ms +step:466/1695 train_time:44091ms step_avg:94.62ms +step:467/1695 train_time:44187ms step_avg:94.62ms +step:468/1695 train_time:44283ms step_avg:94.62ms +step:469/1695 train_time:44379ms step_avg:94.63ms +step:470/1695 train_time:44477ms step_avg:94.63ms +step:471/1695 train_time:44573ms step_avg:94.64ms +step:472/1695 train_time:44669ms step_avg:94.64ms +step:473/1695 train_time:44765ms step_avg:94.64ms +step:474/1695 train_time:44862ms step_avg:94.64ms +step:475/1695 train_time:44958ms step_avg:94.65ms +step:476/1695 train_time:45054ms step_avg:94.65ms +step:477/1695 train_time:45151ms step_avg:94.66ms +step:478/1695 train_time:45246ms step_avg:94.66ms +step:479/1695 train_time:45342ms step_avg:94.66ms +step:480/1695 train_time:45439ms step_avg:94.66ms +step:481/1695 train_time:45535ms step_avg:94.67ms +step:482/1695 train_time:45632ms step_avg:94.67ms +step:483/1695 train_time:45727ms step_avg:94.67ms +step:484/1695 train_time:45824ms step_avg:94.68ms +step:485/1695 train_time:45920ms step_avg:94.68ms +step:486/1695 train_time:46017ms step_avg:94.68ms +step:487/1695 train_time:46113ms step_avg:94.69ms +step:488/1695 train_time:46209ms step_avg:94.69ms +step:489/1695 train_time:46305ms step_avg:94.69ms +step:490/1695 train_time:46401ms step_avg:94.70ms +step:491/1695 train_time:46498ms step_avg:94.70ms +step:492/1695 train_time:46594ms step_avg:94.70ms +step:493/1695 train_time:46690ms step_avg:94.71ms +step:494/1695 train_time:46787ms step_avg:94.71ms +step:495/1695 
train_time:46883ms step_avg:94.71ms +step:496/1695 train_time:46979ms step_avg:94.72ms +step:497/1695 train_time:47077ms step_avg:94.72ms +step:498/1695 train_time:47173ms step_avg:94.72ms +step:499/1695 train_time:47270ms step_avg:94.73ms +step:500/1695 train_time:47366ms step_avg:94.73ms +step:500/1695 val_loss:3.7326 train_time:47459ms step_avg:94.92ms +step:501/1695 train_time:47487ms step_avg:94.78ms +step:502/1695 train_time:47571ms step_avg:94.76ms +step:503/1695 train_time:47672ms step_avg:94.78ms +step:504/1695 train_time:47768ms step_avg:94.78ms +step:505/1695 train_time:47863ms step_avg:94.78ms +step:506/1695 train_time:47959ms step_avg:94.78ms +step:507/1695 train_time:48055ms step_avg:94.78ms +step:508/1695 train_time:48151ms step_avg:94.78ms +step:509/1695 train_time:48246ms step_avg:94.79ms +step:510/1695 train_time:48342ms step_avg:94.79ms +step:511/1695 train_time:48438ms step_avg:94.79ms +step:512/1695 train_time:48537ms step_avg:94.80ms +step:513/1695 train_time:48635ms step_avg:94.80ms +step:514/1695 train_time:48732ms step_avg:94.81ms +step:515/1695 train_time:48829ms step_avg:94.81ms +step:516/1695 train_time:48925ms step_avg:94.82ms +step:517/1695 train_time:49021ms step_avg:94.82ms +step:518/1695 train_time:49117ms step_avg:94.82ms +step:519/1695 train_time:49213ms step_avg:94.82ms +step:520/1695 train_time:49309ms step_avg:94.82ms +step:521/1695 train_time:49404ms step_avg:94.83ms +step:522/1695 train_time:49501ms step_avg:94.83ms +step:523/1695 train_time:49598ms step_avg:94.83ms +step:524/1695 train_time:49696ms step_avg:94.84ms +step:525/1695 train_time:49794ms step_avg:94.85ms +step:526/1695 train_time:49890ms step_avg:94.85ms +step:527/1695 train_time:49986ms step_avg:94.85ms +step:528/1695 train_time:50082ms step_avg:94.85ms +step:529/1695 train_time:50179ms step_avg:94.86ms +step:530/1695 train_time:50275ms step_avg:94.86ms +step:531/1695 train_time:50371ms step_avg:94.86ms +step:532/1695 train_time:50467ms step_avg:94.86ms +step:533/1695 train_time:50564ms step_avg:94.87ms +step:534/1695 train_time:50661ms step_avg:94.87ms +step:535/1695 train_time:50760ms step_avg:94.88ms +step:536/1695 train_time:50858ms step_avg:94.88ms +step:537/1695 train_time:50954ms step_avg:94.89ms +step:538/1695 train_time:51050ms step_avg:94.89ms +step:539/1695 train_time:51146ms step_avg:94.89ms +step:540/1695 train_time:51243ms step_avg:94.90ms +step:541/1695 train_time:51340ms step_avg:94.90ms +step:542/1695 train_time:51436ms step_avg:94.90ms +step:543/1695 train_time:51533ms step_avg:94.90ms +step:544/1695 train_time:51629ms step_avg:94.91ms +step:545/1695 train_time:51725ms step_avg:94.91ms +step:546/1695 train_time:51822ms step_avg:94.91ms +step:547/1695 train_time:51920ms step_avg:94.92ms +step:548/1695 train_time:52017ms step_avg:94.92ms +step:549/1695 train_time:52113ms step_avg:94.92ms +step:550/1695 train_time:52210ms step_avg:94.93ms +step:551/1695 train_time:52306ms step_avg:94.93ms +step:552/1695 train_time:52402ms step_avg:94.93ms +step:553/1695 train_time:52499ms step_avg:94.94ms +step:554/1695 train_time:52596ms step_avg:94.94ms +step:555/1695 train_time:52693ms step_avg:94.94ms +step:556/1695 train_time:52789ms step_avg:94.94ms +step:557/1695 train_time:52887ms step_avg:94.95ms +step:558/1695 train_time:52985ms step_avg:94.95ms +step:559/1695 train_time:53083ms step_avg:94.96ms +step:560/1695 train_time:53180ms step_avg:94.96ms +step:561/1695 train_time:53277ms step_avg:94.97ms +step:562/1695 train_time:53373ms step_avg:94.97ms +step:563/1695 train_time:53469ms 
step_avg:94.97ms +step:564/1695 train_time:53565ms step_avg:94.97ms +step:565/1695 train_time:53662ms step_avg:94.98ms +step:566/1695 train_time:53758ms step_avg:94.98ms +step:567/1695 train_time:53855ms step_avg:94.98ms +step:568/1695 train_time:53951ms step_avg:94.98ms +step:569/1695 train_time:54047ms step_avg:94.99ms +step:570/1695 train_time:54143ms step_avg:94.99ms +step:571/1695 train_time:54240ms step_avg:94.99ms +step:572/1695 train_time:54337ms step_avg:94.99ms +step:573/1695 train_time:54433ms step_avg:95.00ms +step:574/1695 train_time:54528ms step_avg:95.00ms +step:575/1695 train_time:54624ms step_avg:95.00ms +step:576/1695 train_time:54721ms step_avg:95.00ms +step:577/1695 train_time:54818ms step_avg:95.01ms +step:578/1695 train_time:54915ms step_avg:95.01ms +step:579/1695 train_time:55011ms step_avg:95.01ms +step:580/1695 train_time:55107ms step_avg:95.01ms +step:581/1695 train_time:55204ms step_avg:95.02ms +step:582/1695 train_time:55300ms step_avg:95.02ms +step:583/1695 train_time:55397ms step_avg:95.02ms +step:584/1695 train_time:55495ms step_avg:95.03ms +step:585/1695 train_time:55591ms step_avg:95.03ms +step:586/1695 train_time:55687ms step_avg:95.03ms +step:587/1695 train_time:55784ms step_avg:95.03ms +step:588/1695 train_time:55882ms step_avg:95.04ms +step:589/1695 train_time:55980ms step_avg:95.04ms +step:590/1695 train_time:56076ms step_avg:95.04ms +step:591/1695 train_time:56173ms step_avg:95.05ms +step:592/1695 train_time:56269ms step_avg:95.05ms +step:593/1695 train_time:56366ms step_avg:95.05ms +step:594/1695 train_time:56463ms step_avg:95.06ms +step:595/1695 train_time:56560ms step_avg:95.06ms +step:596/1695 train_time:56656ms step_avg:95.06ms +step:597/1695 train_time:56752ms step_avg:95.06ms +step:598/1695 train_time:56850ms step_avg:95.07ms +step:599/1695 train_time:56945ms step_avg:95.07ms +step:600/1695 train_time:57042ms step_avg:95.07ms +step:601/1695 train_time:57139ms step_avg:95.07ms +step:602/1695 train_time:57236ms step_avg:95.08ms +step:603/1695 train_time:57331ms step_avg:95.08ms +step:604/1695 train_time:57427ms step_avg:95.08ms +step:605/1695 train_time:57523ms step_avg:95.08ms +step:606/1695 train_time:57619ms step_avg:95.08ms +step:607/1695 train_time:57717ms step_avg:95.09ms +step:608/1695 train_time:57813ms step_avg:95.09ms +step:609/1695 train_time:57909ms step_avg:95.09ms +step:610/1695 train_time:58005ms step_avg:95.09ms +step:611/1695 train_time:58102ms step_avg:95.09ms +step:612/1695 train_time:58200ms step_avg:95.10ms +step:613/1695 train_time:58298ms step_avg:95.10ms +step:614/1695 train_time:58394ms step_avg:95.11ms +step:615/1695 train_time:58490ms step_avg:95.11ms +step:616/1695 train_time:58586ms step_avg:95.11ms +step:617/1695 train_time:58684ms step_avg:95.11ms +step:618/1695 train_time:58781ms step_avg:95.12ms +step:619/1695 train_time:58878ms step_avg:95.12ms +step:620/1695 train_time:58974ms step_avg:95.12ms +step:621/1695 train_time:59070ms step_avg:95.12ms +step:622/1695 train_time:59166ms step_avg:95.12ms +step:623/1695 train_time:59262ms step_avg:95.12ms +step:624/1695 train_time:59358ms step_avg:95.13ms +step:625/1695 train_time:59455ms step_avg:95.13ms +step:625/1695 val_loss:3.6470 train_time:59549ms step_avg:95.28ms +step:626/1695 train_time:59577ms step_avg:95.17ms +step:627/1695 train_time:59658ms step_avg:95.15ms +step:628/1695 train_time:59759ms step_avg:95.16ms +step:629/1695 train_time:59857ms step_avg:95.16ms +step:630/1695 train_time:59954ms step_avg:95.17ms +step:631/1695 train_time:60051ms step_avg:95.17ms 
+step:632/1695 train_time:60148ms step_avg:95.17ms +step:633/1695 train_time:60245ms step_avg:95.17ms +step:634/1695 train_time:60342ms step_avg:95.18ms +step:635/1695 train_time:60669ms step_avg:95.54ms +step:636/1695 train_time:60764ms step_avg:95.54ms +step:637/1695 train_time:60861ms step_avg:95.54ms +step:638/1695 train_time:60959ms step_avg:95.55ms +step:639/1695 train_time:61056ms step_avg:95.55ms +step:640/1695 train_time:61153ms step_avg:95.55ms +step:641/1695 train_time:61547ms step_avg:96.02ms +step:642/1695 train_time:61643ms step_avg:96.02ms +step:643/1695 train_time:61741ms step_avg:96.02ms +step:644/1695 train_time:61838ms step_avg:96.02ms +step:645/1695 train_time:61935ms step_avg:96.02ms +step:646/1695 train_time:62032ms step_avg:96.03ms +step:647/1695 train_time:62129ms step_avg:96.03ms +step:648/1695 train_time:62226ms step_avg:96.03ms +step:649/1695 train_time:62323ms step_avg:96.03ms +step:650/1695 train_time:62421ms step_avg:96.03ms +step:651/1695 train_time:62522ms step_avg:96.04ms +step:652/1695 train_time:62917ms step_avg:96.50ms +step:653/1695 train_time:62966ms step_avg:96.43ms +step:654/1695 train_time:63062ms step_avg:96.43ms +step:655/1695 train_time:63160ms step_avg:96.43ms +step:656/1695 train_time:63258ms step_avg:96.43ms +step:657/1695 train_time:63355ms step_avg:96.43ms +step:658/1695 train_time:63452ms step_avg:96.43ms +step:659/1695 train_time:63549ms step_avg:96.43ms +step:660/1695 train_time:63646ms step_avg:96.43ms +step:661/1695 train_time:63743ms step_avg:96.43ms +step:662/1695 train_time:63844ms step_avg:96.44ms +step:663/1695 train_time:63943ms step_avg:96.45ms +step:664/1695 train_time:64042ms step_avg:96.45ms +step:665/1695 train_time:64140ms step_avg:96.45ms +step:666/1695 train_time:64238ms step_avg:96.45ms +step:667/1695 train_time:64336ms step_avg:96.46ms +step:668/1695 train_time:64434ms step_avg:96.46ms +step:669/1695 train_time:64532ms step_avg:96.46ms +step:670/1695 train_time:64629ms step_avg:96.46ms +step:671/1695 train_time:64727ms step_avg:96.46ms +step:672/1695 train_time:64824ms step_avg:96.46ms +step:673/1695 train_time:64922ms step_avg:96.47ms +step:674/1695 train_time:65020ms step_avg:96.47ms +step:675/1695 train_time:65118ms step_avg:96.47ms +step:676/1695 train_time:65216ms step_avg:96.47ms +step:677/1695 train_time:65314ms step_avg:96.47ms +step:678/1695 train_time:65411ms step_avg:96.48ms +step:679/1695 train_time:65508ms step_avg:96.48ms +step:680/1695 train_time:65606ms step_avg:96.48ms +step:681/1695 train_time:65703ms step_avg:96.48ms +step:682/1695 train_time:65802ms step_avg:96.48ms +step:683/1695 train_time:65899ms step_avg:96.48ms +step:684/1695 train_time:65997ms step_avg:96.49ms +step:685/1695 train_time:66094ms step_avg:96.49ms +step:686/1695 train_time:66192ms step_avg:96.49ms +step:687/1695 train_time:66290ms step_avg:96.49ms +step:688/1695 train_time:66387ms step_avg:96.49ms +step:689/1695 train_time:66485ms step_avg:96.49ms +step:690/1695 train_time:66583ms step_avg:96.50ms +step:691/1695 train_time:66682ms step_avg:96.50ms +step:692/1695 train_time:66780ms step_avg:96.50ms +step:693/1695 train_time:66878ms step_avg:96.50ms +step:694/1695 train_time:66975ms step_avg:96.51ms +step:695/1695 train_time:67073ms step_avg:96.51ms +step:696/1695 train_time:67171ms step_avg:96.51ms +step:697/1695 train_time:67269ms step_avg:96.51ms +step:698/1695 train_time:67366ms step_avg:96.51ms +step:699/1695 train_time:67464ms step_avg:96.51ms +step:700/1695 train_time:67562ms step_avg:96.52ms +step:701/1695 train_time:67661ms 
step_avg:96.52ms +step:702/1695 train_time:67758ms step_avg:96.52ms +step:703/1695 train_time:67856ms step_avg:96.52ms +step:704/1695 train_time:67954ms step_avg:96.53ms +step:705/1695 train_time:68051ms step_avg:96.53ms +step:706/1695 train_time:68149ms step_avg:96.53ms +step:707/1695 train_time:68247ms step_avg:96.53ms +step:708/1695 train_time:68344ms step_avg:96.53ms +step:709/1695 train_time:68442ms step_avg:96.53ms +step:710/1695 train_time:68541ms step_avg:96.54ms +step:711/1695 train_time:68638ms step_avg:96.54ms +step:712/1695 train_time:68735ms step_avg:96.54ms +step:713/1695 train_time:68833ms step_avg:96.54ms +step:714/1695 train_time:68930ms step_avg:96.54ms +step:715/1695 train_time:69028ms step_avg:96.54ms +step:716/1695 train_time:69125ms step_avg:96.54ms +step:717/1695 train_time:69224ms step_avg:96.55ms +step:718/1695 train_time:69322ms step_avg:96.55ms +step:719/1695 train_time:69420ms step_avg:96.55ms +step:720/1695 train_time:69518ms step_avg:96.55ms +step:721/1695 train_time:69616ms step_avg:96.56ms +step:722/1695 train_time:69714ms step_avg:96.56ms +step:723/1695 train_time:69811ms step_avg:96.56ms +step:724/1695 train_time:69909ms step_avg:96.56ms +step:725/1695 train_time:70007ms step_avg:96.56ms +step:726/1695 train_time:70105ms step_avg:96.56ms +step:727/1695 train_time:70203ms step_avg:96.57ms +step:728/1695 train_time:70302ms step_avg:96.57ms +step:729/1695 train_time:70684ms step_avg:96.96ms +step:730/1695 train_time:70778ms step_avg:96.96ms +step:731/1695 train_time:70875ms step_avg:96.96ms +step:732/1695 train_time:70973ms step_avg:96.96ms +step:733/1695 train_time:71069ms step_avg:96.96ms +step:734/1695 train_time:71166ms step_avg:96.96ms +step:735/1695 train_time:71263ms step_avg:96.96ms +step:736/1695 train_time:71361ms step_avg:96.96ms +step:737/1695 train_time:71457ms step_avg:96.96ms +step:738/1695 train_time:71554ms step_avg:96.96ms +step:739/1695 train_time:71657ms step_avg:96.96ms +step:740/1695 train_time:71756ms step_avg:96.97ms +step:741/1695 train_time:71854ms step_avg:96.97ms +step:742/1695 train_time:71952ms step_avg:96.97ms +step:743/1695 train_time:72049ms step_avg:96.97ms +step:744/1695 train_time:72146ms step_avg:96.97ms +step:745/1695 train_time:72244ms step_avg:96.97ms +step:746/1695 train_time:72342ms step_avg:96.97ms +step:747/1695 train_time:72440ms step_avg:96.97ms +step:748/1695 train_time:72537ms step_avg:96.98ms +step:749/1695 train_time:72636ms step_avg:96.98ms +step:750/1695 train_time:72735ms step_avg:96.98ms +step:750/1695 val_loss:3.5832 train_time:72831ms step_avg:97.11ms +step:751/1695 train_time:72860ms step_avg:97.02ms +step:752/1695 train_time:72941ms step_avg:97.00ms +step:753/1695 train_time:73042ms step_avg:97.00ms +step:754/1695 train_time:73141ms step_avg:97.00ms +step:755/1695 train_time:73237ms step_avg:97.00ms +step:756/1695 train_time:73335ms step_avg:97.00ms +step:757/1695 train_time:73432ms step_avg:97.00ms +step:758/1695 train_time:73531ms step_avg:97.01ms +step:759/1695 train_time:73629ms step_avg:97.01ms +step:760/1695 train_time:73727ms step_avg:97.01ms +step:761/1695 train_time:73824ms step_avg:97.01ms +step:762/1695 train_time:73923ms step_avg:97.01ms +step:763/1695 train_time:74023ms step_avg:97.02ms +step:764/1695 train_time:74121ms step_avg:97.02ms +step:765/1695 train_time:74219ms step_avg:97.02ms +step:766/1695 train_time:74317ms step_avg:97.02ms +step:767/1695 train_time:74415ms step_avg:97.02ms +step:768/1695 train_time:74512ms step_avg:97.02ms +step:769/1695 train_time:74610ms step_avg:97.02ms 
+step:770/1695 train_time:74708ms step_avg:97.02ms +step:771/1695 train_time:74807ms step_avg:97.03ms +step:772/1695 train_time:75220ms step_avg:97.43ms +step:773/1695 train_time:75315ms step_avg:97.43ms +step:774/1695 train_time:75412ms step_avg:97.43ms +step:775/1695 train_time:75509ms step_avg:97.43ms +step:776/1695 train_time:75607ms step_avg:97.43ms +step:777/1695 train_time:75704ms step_avg:97.43ms +step:778/1695 train_time:76026ms step_avg:97.72ms +step:779/1695 train_time:76122ms step_avg:97.72ms +step:780/1695 train_time:76219ms step_avg:97.72ms +step:781/1695 train_time:76316ms step_avg:97.72ms +step:782/1695 train_time:76413ms step_avg:97.71ms +step:783/1695 train_time:76511ms step_avg:97.71ms +step:784/1695 train_time:76608ms step_avg:97.71ms +step:785/1695 train_time:76705ms step_avg:97.71ms +step:786/1695 train_time:76802ms step_avg:97.71ms +step:787/1695 train_time:76901ms step_avg:97.71ms +step:788/1695 train_time:77000ms step_avg:97.72ms +step:789/1695 train_time:77099ms step_avg:97.72ms +step:790/1695 train_time:77197ms step_avg:97.72ms +step:791/1695 train_time:77610ms step_avg:98.12ms +step:792/1695 train_time:77707ms step_avg:98.11ms +step:793/1695 train_time:77803ms step_avg:98.11ms +step:794/1695 train_time:77900ms step_avg:98.11ms +step:795/1695 train_time:77997ms step_avg:98.11ms +step:796/1695 train_time:78095ms step_avg:98.11ms +step:797/1695 train_time:78192ms step_avg:98.11ms +step:798/1695 train_time:78290ms step_avg:98.11ms +step:799/1695 train_time:78387ms step_avg:98.11ms +step:800/1695 train_time:78490ms step_avg:98.11ms +step:801/1695 train_time:78592ms step_avg:98.12ms +step:802/1695 train_time:78691ms step_avg:98.12ms +step:803/1695 train_time:78789ms step_avg:98.12ms +step:804/1695 train_time:78888ms step_avg:98.12ms +step:805/1695 train_time:78987ms step_avg:98.12ms +step:806/1695 train_time:79085ms step_avg:98.12ms +step:807/1695 train_time:79183ms step_avg:98.12ms +step:808/1695 train_time:79281ms step_avg:98.12ms +step:809/1695 train_time:79378ms step_avg:98.12ms +step:810/1695 train_time:79478ms step_avg:98.12ms +step:811/1695 train_time:79578ms step_avg:98.12ms +step:812/1695 train_time:79677ms step_avg:98.12ms +step:813/1695 train_time:79774ms step_avg:98.12ms +step:814/1695 train_time:79872ms step_avg:98.12ms +step:815/1695 train_time:79970ms step_avg:98.12ms +step:816/1695 train_time:80069ms step_avg:98.12ms +step:817/1695 train_time:80168ms step_avg:98.12ms +step:818/1695 train_time:80266ms step_avg:98.12ms +step:819/1695 train_time:80364ms step_avg:98.12ms +step:820/1695 train_time:80463ms step_avg:98.13ms +step:821/1695 train_time:80562ms step_avg:98.13ms +step:822/1695 train_time:80661ms step_avg:98.13ms +step:823/1695 train_time:80761ms step_avg:98.13ms +step:824/1695 train_time:80859ms step_avg:98.13ms +step:825/1695 train_time:80957ms step_avg:98.13ms +step:826/1695 train_time:81055ms step_avg:98.13ms +step:827/1695 train_time:81153ms step_avg:98.13ms +step:828/1695 train_time:81251ms step_avg:98.13ms +step:829/1695 train_time:81350ms step_avg:98.13ms +step:830/1695 train_time:81449ms step_avg:98.13ms +step:831/1695 train_time:81548ms step_avg:98.13ms +step:832/1695 train_time:81648ms step_avg:98.13ms +step:833/1695 train_time:81747ms step_avg:98.14ms +step:834/1695 train_time:81846ms step_avg:98.14ms +step:835/1695 train_time:81945ms step_avg:98.14ms +step:836/1695 train_time:82043ms step_avg:98.14ms +step:837/1695 train_time:82141ms step_avg:98.14ms +step:838/1695 train_time:82240ms step_avg:98.14ms +step:839/1695 train_time:82338ms 
step_avg:98.14ms +step:840/1695 train_time:82436ms step_avg:98.14ms +step:841/1695 train_time:82534ms step_avg:98.14ms +step:842/1695 train_time:82632ms step_avg:98.14ms +step:843/1695 train_time:82730ms step_avg:98.14ms +step:844/1695 train_time:82829ms step_avg:98.14ms +step:845/1695 train_time:82928ms step_avg:98.14ms +step:846/1695 train_time:83028ms step_avg:98.14ms +step:847/1695 train_time:83126ms step_avg:98.14ms +step:848/1695 train_time:83226ms step_avg:98.14ms +step:849/1695 train_time:83326ms step_avg:98.15ms +step:850/1695 train_time:83425ms step_avg:98.15ms +step:851/1695 train_time:83524ms step_avg:98.15ms +step:852/1695 train_time:83622ms step_avg:98.15ms +step:853/1695 train_time:83721ms step_avg:98.15ms +step:854/1695 train_time:83819ms step_avg:98.15ms +step:855/1695 train_time:83916ms step_avg:98.15ms +step:856/1695 train_time:84014ms step_avg:98.15ms +step:857/1695 train_time:84112ms step_avg:98.15ms +step:858/1695 train_time:84210ms step_avg:98.15ms +step:859/1695 train_time:84309ms step_avg:98.15ms +step:860/1695 train_time:84408ms step_avg:98.15ms +step:861/1695 train_time:84507ms step_avg:98.15ms +step:862/1695 train_time:84605ms step_avg:98.15ms +step:863/1695 train_time:84704ms step_avg:98.15ms +step:864/1695 train_time:84802ms step_avg:98.15ms +step:865/1695 train_time:84900ms step_avg:98.15ms +step:866/1695 train_time:84999ms step_avg:98.15ms +step:867/1695 train_time:85097ms step_avg:98.15ms +step:868/1695 train_time:85195ms step_avg:98.15ms +step:869/1695 train_time:85293ms step_avg:98.15ms +step:870/1695 train_time:85391ms step_avg:98.15ms +step:871/1695 train_time:85490ms step_avg:98.15ms +step:872/1695 train_time:85589ms step_avg:98.15ms +step:873/1695 train_time:85689ms step_avg:98.15ms +step:874/1695 train_time:85788ms step_avg:98.16ms +step:875/1695 train_time:85887ms step_avg:98.16ms +step:875/1695 val_loss:3.5360 train_time:85984ms step_avg:98.27ms +step:876/1695 train_time:86012ms step_avg:98.19ms +step:877/1695 train_time:86096ms step_avg:98.17ms +step:878/1695 train_time:86196ms step_avg:98.17ms +step:879/1695 train_time:86296ms step_avg:98.17ms +step:880/1695 train_time:86394ms step_avg:98.17ms +step:881/1695 train_time:86493ms step_avg:98.18ms +step:882/1695 train_time:86592ms step_avg:98.18ms +step:883/1695 train_time:86691ms step_avg:98.18ms +step:884/1695 train_time:86791ms step_avg:98.18ms +step:885/1695 train_time:86889ms step_avg:98.18ms +step:886/1695 train_time:86990ms step_avg:98.18ms +step:887/1695 train_time:87092ms step_avg:98.19ms +step:888/1695 train_time:87195ms step_avg:98.19ms +step:889/1695 train_time:87295ms step_avg:98.19ms +step:890/1695 train_time:87395ms step_avg:98.20ms +step:891/1695 train_time:87494ms step_avg:98.20ms +step:892/1695 train_time:87593ms step_avg:98.20ms +step:893/1695 train_time:87694ms step_avg:98.20ms +step:894/1695 train_time:87793ms step_avg:98.20ms +step:895/1695 train_time:87891ms step_avg:98.20ms +step:896/1695 train_time:87992ms step_avg:98.21ms +step:897/1695 train_time:88093ms step_avg:98.21ms +step:898/1695 train_time:88193ms step_avg:98.21ms +step:899/1695 train_time:88294ms step_avg:98.21ms +step:900/1695 train_time:88394ms step_avg:98.22ms +step:901/1695 train_time:88493ms step_avg:98.22ms +step:902/1695 train_time:88592ms step_avg:98.22ms +step:903/1695 train_time:88691ms step_avg:98.22ms +step:904/1695 train_time:88791ms step_avg:98.22ms +step:905/1695 train_time:88890ms step_avg:98.22ms +step:906/1695 train_time:88989ms step_avg:98.22ms +step:907/1695 train_time:89089ms step_avg:98.22ms 
+step:908/1695 train_time:89190ms step_avg:98.23ms +step:909/1695 train_time:89290ms step_avg:98.23ms +step:910/1695 train_time:89390ms step_avg:98.23ms +step:911/1695 train_time:89490ms step_avg:98.23ms +step:912/1695 train_time:89589ms step_avg:98.23ms +step:913/1695 train_time:89687ms step_avg:98.23ms +step:914/1695 train_time:89787ms step_avg:98.24ms +step:915/1695 train_time:89885ms step_avg:98.24ms +step:916/1695 train_time:89984ms step_avg:98.24ms +step:917/1695 train_time:90083ms step_avg:98.24ms +step:918/1695 train_time:90182ms step_avg:98.24ms +step:919/1695 train_time:90281ms step_avg:98.24ms +step:920/1695 train_time:90382ms step_avg:98.24ms +step:921/1695 train_time:90481ms step_avg:98.24ms +step:922/1695 train_time:90582ms step_avg:98.24ms +step:923/1695 train_time:90681ms step_avg:98.25ms +step:924/1695 train_time:90782ms step_avg:98.25ms +step:925/1695 train_time:90882ms step_avg:98.25ms +step:926/1695 train_time:90982ms step_avg:98.25ms +step:927/1695 train_time:91081ms step_avg:98.25ms +step:928/1695 train_time:91180ms step_avg:98.25ms +step:929/1695 train_time:91279ms step_avg:98.26ms +step:930/1695 train_time:91380ms step_avg:98.26ms +step:931/1695 train_time:91479ms step_avg:98.26ms +step:932/1695 train_time:91579ms step_avg:98.26ms +step:933/1695 train_time:91680ms step_avg:98.26ms +step:934/1695 train_time:91780ms step_avg:98.27ms +step:935/1695 train_time:91879ms step_avg:98.27ms +step:936/1695 train_time:91980ms step_avg:98.27ms +step:937/1695 train_time:92080ms step_avg:98.27ms +step:938/1695 train_time:92181ms step_avg:98.27ms +step:939/1695 train_time:92280ms step_avg:98.27ms +step:940/1695 train_time:92380ms step_avg:98.28ms +step:941/1695 train_time:92480ms step_avg:98.28ms +step:942/1695 train_time:92579ms step_avg:98.28ms +step:943/1695 train_time:92679ms step_avg:98.28ms +step:944/1695 train_time:92780ms step_avg:98.28ms +step:945/1695 train_time:92880ms step_avg:98.29ms +step:946/1695 train_time:92980ms step_avg:98.29ms +step:947/1695 train_time:93080ms step_avg:98.29ms +step:948/1695 train_time:93179ms step_avg:98.29ms +step:949/1695 train_time:93279ms step_avg:98.29ms +step:950/1695 train_time:93379ms step_avg:98.29ms +step:951/1695 train_time:93479ms step_avg:98.30ms +step:952/1695 train_time:93579ms step_avg:98.30ms +step:953/1695 train_time:93679ms step_avg:98.30ms +step:954/1695 train_time:93779ms step_avg:98.30ms +step:955/1695 train_time:93879ms step_avg:98.30ms +step:956/1695 train_time:93980ms step_avg:98.30ms +step:957/1695 train_time:94079ms step_avg:98.31ms +step:958/1695 train_time:94180ms step_avg:98.31ms +step:959/1695 train_time:94279ms step_avg:98.31ms +step:960/1695 train_time:94379ms step_avg:98.31ms +step:961/1695 train_time:94480ms step_avg:98.31ms +step:962/1695 train_time:94580ms step_avg:98.32ms +step:963/1695 train_time:94681ms step_avg:98.32ms +step:964/1695 train_time:94781ms step_avg:98.32ms +step:965/1695 train_time:94880ms step_avg:98.32ms +step:966/1695 train_time:94980ms step_avg:98.32ms +step:967/1695 train_time:95080ms step_avg:98.32ms +step:968/1695 train_time:95181ms step_avg:98.33ms +step:969/1695 train_time:95281ms step_avg:98.33ms +step:970/1695 train_time:95380ms step_avg:98.33ms +step:971/1695 train_time:95480ms step_avg:98.33ms +step:972/1695 train_time:95580ms step_avg:98.33ms +step:973/1695 train_time:95680ms step_avg:98.33ms +step:974/1695 train_time:95779ms step_avg:98.34ms +step:975/1695 train_time:95879ms step_avg:98.34ms +step:976/1695 train_time:95978ms step_avg:98.34ms +step:977/1695 train_time:96079ms 
step_avg:98.34ms +step:978/1695 train_time:96180ms step_avg:98.34ms +step:979/1695 train_time:96280ms step_avg:98.34ms +step:980/1695 train_time:96380ms step_avg:98.35ms +step:981/1695 train_time:96479ms step_avg:98.35ms +step:982/1695 train_time:96578ms step_avg:98.35ms +step:983/1695 train_time:96680ms step_avg:98.35ms +step:984/1695 train_time:96780ms step_avg:98.35ms +step:985/1695 train_time:96880ms step_avg:98.36ms +step:986/1695 train_time:96980ms step_avg:98.36ms +step:987/1695 train_time:97080ms step_avg:98.36ms +step:988/1695 train_time:97181ms step_avg:98.36ms +step:989/1695 train_time:97281ms step_avg:98.36ms +step:990/1695 train_time:97381ms step_avg:98.36ms +step:991/1695 train_time:97480ms step_avg:98.37ms +step:992/1695 train_time:97579ms step_avg:98.37ms +step:993/1695 train_time:97679ms step_avg:98.37ms +step:994/1695 train_time:97779ms step_avg:98.37ms +step:995/1695 train_time:97879ms step_avg:98.37ms +step:996/1695 train_time:97979ms step_avg:98.37ms +step:997/1695 train_time:98079ms step_avg:98.37ms +step:998/1695 train_time:98178ms step_avg:98.38ms +step:999/1695 train_time:98279ms step_avg:98.38ms +step:1000/1695 train_time:98379ms step_avg:98.38ms +step:1000/1695 val_loss:3.4899 train_time:98475ms step_avg:98.48ms +step:1001/1695 train_time:98504ms step_avg:98.41ms +step:1002/1695 train_time:98585ms step_avg:98.39ms +step:1003/1695 train_time:98686ms step_avg:98.39ms +step:1004/1695 train_time:98786ms step_avg:98.39ms +step:1005/1695 train_time:98885ms step_avg:98.39ms +step:1006/1695 train_time:98984ms step_avg:98.39ms +step:1007/1695 train_time:99083ms step_avg:98.39ms +step:1008/1695 train_time:99182ms step_avg:98.39ms +step:1009/1695 train_time:99282ms step_avg:98.40ms +step:1010/1695 train_time:99381ms step_avg:98.40ms +step:1011/1695 train_time:99483ms step_avg:98.40ms +step:1012/1695 train_time:99585ms step_avg:98.40ms +step:1013/1695 train_time:99686ms step_avg:98.41ms +step:1014/1695 train_time:99788ms step_avg:98.41ms +step:1015/1695 train_time:99888ms step_avg:98.41ms +step:1016/1695 train_time:99987ms step_avg:98.41ms +step:1017/1695 train_time:100087ms step_avg:98.41ms +step:1018/1695 train_time:100188ms step_avg:98.42ms +step:1019/1695 train_time:100287ms step_avg:98.42ms +step:1020/1695 train_time:100389ms step_avg:98.42ms +step:1021/1695 train_time:100490ms step_avg:98.42ms +step:1022/1695 train_time:100591ms step_avg:98.43ms +step:1023/1695 train_time:100692ms step_avg:98.43ms +step:1024/1695 train_time:100794ms step_avg:98.43ms +step:1025/1695 train_time:100894ms step_avg:98.43ms +step:1026/1695 train_time:100994ms step_avg:98.43ms +step:1027/1695 train_time:101093ms step_avg:98.44ms +step:1028/1695 train_time:101194ms step_avg:98.44ms +step:1029/1695 train_time:101295ms step_avg:98.44ms +step:1030/1695 train_time:101394ms step_avg:98.44ms +step:1031/1695 train_time:101494ms step_avg:98.44ms +step:1032/1695 train_time:101594ms step_avg:98.44ms +step:1033/1695 train_time:101693ms step_avg:98.44ms +step:1034/1695 train_time:101792ms step_avg:98.45ms +step:1035/1695 train_time:101892ms step_avg:98.45ms +step:1036/1695 train_time:101991ms step_avg:98.45ms +step:1037/1695 train_time:102091ms step_avg:98.45ms +step:1038/1695 train_time:102192ms step_avg:98.45ms +step:1039/1695 train_time:102292ms step_avg:98.45ms +step:1040/1695 train_time:102392ms step_avg:98.45ms +step:1041/1695 train_time:102492ms step_avg:98.46ms +step:1042/1695 train_time:102593ms step_avg:98.46ms +step:1043/1695 train_time:102693ms step_avg:98.46ms +step:1044/1695 
train_time:102792ms step_avg:98.46ms +step:1045/1695 train_time:102892ms step_avg:98.46ms +step:1046/1695 train_time:102992ms step_avg:98.46ms +step:1047/1695 train_time:103091ms step_avg:98.46ms +step:1048/1695 train_time:103191ms step_avg:98.46ms +step:1049/1695 train_time:103291ms step_avg:98.47ms +step:1050/1695 train_time:103392ms step_avg:98.47ms +step:1051/1695 train_time:103493ms step_avg:98.47ms +step:1052/1695 train_time:103593ms step_avg:98.47ms +step:1053/1695 train_time:103693ms step_avg:98.47ms +step:1054/1695 train_time:103793ms step_avg:98.47ms +step:1055/1695 train_time:103892ms step_avg:98.48ms +step:1056/1695 train_time:103991ms step_avg:98.48ms +step:1057/1695 train_time:104091ms step_avg:98.48ms +step:1058/1695 train_time:104191ms step_avg:98.48ms +step:1059/1695 train_time:104290ms step_avg:98.48ms +step:1060/1695 train_time:104390ms step_avg:98.48ms +step:1061/1695 train_time:104491ms step_avg:98.48ms +step:1062/1695 train_time:104591ms step_avg:98.49ms +step:1063/1695 train_time:104691ms step_avg:98.49ms +step:1064/1695 train_time:104791ms step_avg:98.49ms +step:1065/1695 train_time:104891ms step_avg:98.49ms +step:1066/1695 train_time:104990ms step_avg:98.49ms +step:1067/1695 train_time:105091ms step_avg:98.49ms +step:1068/1695 train_time:105191ms step_avg:98.49ms +step:1069/1695 train_time:105292ms step_avg:98.50ms +step:1070/1695 train_time:105392ms step_avg:98.50ms +step:1071/1695 train_time:105492ms step_avg:98.50ms +step:1072/1695 train_time:105592ms step_avg:98.50ms +step:1073/1695 train_time:105691ms step_avg:98.50ms +step:1074/1695 train_time:105791ms step_avg:98.50ms +step:1075/1695 train_time:105890ms step_avg:98.50ms +step:1076/1695 train_time:105991ms step_avg:98.50ms +step:1077/1695 train_time:106091ms step_avg:98.51ms +step:1078/1695 train_time:106192ms step_avg:98.51ms +step:1079/1695 train_time:106292ms step_avg:98.51ms +step:1080/1695 train_time:106392ms step_avg:98.51ms +step:1081/1695 train_time:106491ms step_avg:98.51ms +step:1082/1695 train_time:106593ms step_avg:98.51ms +step:1083/1695 train_time:106692ms step_avg:98.52ms +step:1084/1695 train_time:106792ms step_avg:98.52ms +step:1085/1695 train_time:106891ms step_avg:98.52ms +step:1086/1695 train_time:106991ms step_avg:98.52ms +step:1087/1695 train_time:107092ms step_avg:98.52ms +step:1088/1695 train_time:107192ms step_avg:98.52ms +step:1089/1695 train_time:107291ms step_avg:98.52ms +step:1090/1695 train_time:107392ms step_avg:98.52ms +step:1091/1695 train_time:107492ms step_avg:98.53ms +step:1092/1695 train_time:107593ms step_avg:98.53ms +step:1093/1695 train_time:107692ms step_avg:98.53ms +step:1094/1695 train_time:107791ms step_avg:98.53ms +step:1095/1695 train_time:107891ms step_avg:98.53ms +step:1096/1695 train_time:107991ms step_avg:98.53ms +step:1097/1695 train_time:108090ms step_avg:98.53ms +step:1098/1695 train_time:108191ms step_avg:98.53ms +step:1099/1695 train_time:108289ms step_avg:98.53ms +step:1100/1695 train_time:108389ms step_avg:98.54ms +step:1101/1695 train_time:108490ms step_avg:98.54ms +step:1102/1695 train_time:108590ms step_avg:98.54ms +step:1103/1695 train_time:108690ms step_avg:98.54ms +step:1104/1695 train_time:108791ms step_avg:98.54ms +step:1105/1695 train_time:108892ms step_avg:98.54ms +step:1106/1695 train_time:108991ms step_avg:98.55ms +step:1107/1695 train_time:109092ms step_avg:98.55ms +step:1108/1695 train_time:109193ms step_avg:98.55ms +step:1109/1695 train_time:109291ms step_avg:98.55ms +step:1110/1695 train_time:109392ms step_avg:98.55ms +step:1111/1695 
train_time:109493ms step_avg:98.55ms +step:1112/1695 train_time:109593ms step_avg:98.55ms +step:1113/1695 train_time:109692ms step_avg:98.56ms +step:1114/1695 train_time:109792ms step_avg:98.56ms +step:1115/1695 train_time:109892ms step_avg:98.56ms +step:1116/1695 train_time:109991ms step_avg:98.56ms +step:1117/1695 train_time:110092ms step_avg:98.56ms +step:1118/1695 train_time:110193ms step_avg:98.56ms +step:1119/1695 train_time:110293ms step_avg:98.56ms +step:1120/1695 train_time:110392ms step_avg:98.56ms +step:1121/1695 train_time:110492ms step_avg:98.57ms +step:1122/1695 train_time:110593ms step_avg:98.57ms +step:1123/1695 train_time:110693ms step_avg:98.57ms +step:1124/1695 train_time:110792ms step_avg:98.57ms +step:1125/1695 train_time:110892ms step_avg:98.57ms +step:1125/1695 val_loss:3.4392 train_time:110988ms step_avg:98.66ms +step:1126/1695 train_time:111017ms step_avg:98.59ms +step:1127/1695 train_time:111099ms step_avg:98.58ms +step:1128/1695 train_time:111201ms step_avg:98.58ms +step:1129/1695 train_time:111301ms step_avg:98.58ms +step:1130/1695 train_time:111401ms step_avg:98.59ms +step:1131/1695 train_time:111502ms step_avg:98.59ms +step:1132/1695 train_time:111602ms step_avg:98.59ms +step:1133/1695 train_time:111702ms step_avg:98.59ms +step:1134/1695 train_time:111802ms step_avg:98.59ms +step:1135/1695 train_time:111903ms step_avg:98.59ms +step:1136/1695 train_time:112004ms step_avg:98.60ms +step:1137/1695 train_time:112106ms step_avg:98.60ms +step:1138/1695 train_time:112206ms step_avg:98.60ms +step:1139/1695 train_time:112306ms step_avg:98.60ms +step:1140/1695 train_time:112406ms step_avg:98.60ms +step:1141/1695 train_time:112506ms step_avg:98.60ms +step:1142/1695 train_time:112607ms step_avg:98.60ms +step:1143/1695 train_time:112706ms step_avg:98.61ms +step:1144/1695 train_time:112807ms step_avg:98.61ms +step:1145/1695 train_time:112908ms step_avg:98.61ms +step:1146/1695 train_time:113008ms step_avg:98.61ms +step:1147/1695 train_time:113108ms step_avg:98.61ms +step:1148/1695 train_time:113207ms step_avg:98.61ms +step:1149/1695 train_time:113308ms step_avg:98.61ms +step:1150/1695 train_time:113408ms step_avg:98.62ms +step:1151/1695 train_time:113509ms step_avg:98.62ms +step:1152/1695 train_time:113609ms step_avg:98.62ms +step:1153/1695 train_time:113710ms step_avg:98.62ms +step:1154/1695 train_time:113810ms step_avg:98.62ms +step:1155/1695 train_time:113910ms step_avg:98.62ms +step:1156/1695 train_time:114010ms step_avg:98.62ms +step:1157/1695 train_time:114112ms step_avg:98.63ms +step:1158/1695 train_time:114212ms step_avg:98.63ms +step:1159/1695 train_time:114313ms step_avg:98.63ms +step:1160/1695 train_time:114417ms step_avg:98.64ms +step:1161/1695 train_time:114518ms step_avg:98.64ms +step:1162/1695 train_time:114619ms step_avg:98.64ms +step:1163/1695 train_time:114723ms step_avg:98.64ms +step:1164/1695 train_time:114825ms step_avg:98.65ms +step:1165/1695 train_time:114926ms step_avg:98.65ms +step:1166/1695 train_time:115027ms step_avg:98.65ms +step:1167/1695 train_time:115127ms step_avg:98.65ms +step:1168/1695 train_time:115228ms step_avg:98.65ms +step:1169/1695 train_time:115328ms step_avg:98.66ms +step:1170/1695 train_time:115429ms step_avg:98.66ms +step:1171/1695 train_time:115532ms step_avg:98.66ms +step:1172/1695 train_time:115635ms step_avg:98.66ms +step:1173/1695 train_time:115736ms step_avg:98.67ms +step:1174/1695 train_time:115837ms step_avg:98.67ms +step:1175/1695 train_time:115938ms step_avg:98.67ms +step:1176/1695 train_time:116039ms step_avg:98.67ms 
+step:1177/1695 train_time:116140ms step_avg:98.67ms +step:1178/1695 train_time:116241ms step_avg:98.68ms +step:1179/1695 train_time:116346ms step_avg:98.68ms +step:1180/1695 train_time:116447ms step_avg:98.68ms +step:1181/1695 train_time:116547ms step_avg:98.69ms +step:1182/1695 train_time:116648ms step_avg:98.69ms +step:1183/1695 train_time:116748ms step_avg:98.69ms +step:1184/1695 train_time:116850ms step_avg:98.69ms +step:1185/1695 train_time:116951ms step_avg:98.69ms +step:1186/1695 train_time:117053ms step_avg:98.70ms +step:1187/1695 train_time:117154ms step_avg:98.70ms +step:1188/1695 train_time:117256ms step_avg:98.70ms +step:1189/1695 train_time:117357ms step_avg:98.70ms +step:1190/1695 train_time:117460ms step_avg:98.71ms +step:1191/1695 train_time:117560ms step_avg:98.71ms +step:1192/1695 train_time:117662ms step_avg:98.71ms +step:1193/1695 train_time:117762ms step_avg:98.71ms +step:1194/1695 train_time:117863ms step_avg:98.71ms +step:1195/1695 train_time:117964ms step_avg:98.71ms +step:1196/1695 train_time:118065ms step_avg:98.72ms +step:1197/1695 train_time:118166ms step_avg:98.72ms +step:1198/1695 train_time:118267ms step_avg:98.72ms +step:1199/1695 train_time:118367ms step_avg:98.72ms +step:1200/1695 train_time:118467ms step_avg:98.72ms +step:1201/1695 train_time:118569ms step_avg:98.72ms +step:1202/1695 train_time:118670ms step_avg:98.73ms +step:1203/1695 train_time:118778ms step_avg:98.74ms +step:1204/1695 train_time:118873ms step_avg:98.73ms +step:1205/1695 train_time:118975ms step_avg:98.73ms +step:1206/1695 train_time:119077ms step_avg:98.74ms +step:1207/1695 train_time:119179ms step_avg:98.74ms +step:1208/1695 train_time:119280ms step_avg:98.74ms +step:1209/1695 train_time:119381ms step_avg:98.74ms +step:1210/1695 train_time:119482ms step_avg:98.75ms +step:1211/1695 train_time:119583ms step_avg:98.75ms +step:1212/1695 train_time:119685ms step_avg:98.75ms +step:1213/1695 train_time:119785ms step_avg:98.75ms +step:1214/1695 train_time:119885ms step_avg:98.75ms +step:1215/1695 train_time:119986ms step_avg:98.75ms +step:1216/1695 train_time:120089ms step_avg:98.76ms +step:1217/1695 train_time:120189ms step_avg:98.76ms +step:1218/1695 train_time:120290ms step_avg:98.76ms +step:1219/1695 train_time:120390ms step_avg:98.76ms +step:1220/1695 train_time:120491ms step_avg:98.76ms +step:1221/1695 train_time:120592ms step_avg:98.77ms +step:1222/1695 train_time:120693ms step_avg:98.77ms +step:1223/1695 train_time:120795ms step_avg:98.77ms +step:1224/1695 train_time:120895ms step_avg:98.77ms +step:1225/1695 train_time:120998ms step_avg:98.77ms +step:1226/1695 train_time:121101ms step_avg:98.78ms +step:1227/1695 train_time:121201ms step_avg:98.78ms +step:1228/1695 train_time:121302ms step_avg:98.78ms +step:1229/1695 train_time:121404ms step_avg:98.78ms +step:1230/1695 train_time:121505ms step_avg:98.78ms +step:1231/1695 train_time:121606ms step_avg:98.79ms +step:1232/1695 train_time:121706ms step_avg:98.79ms +step:1233/1695 train_time:121806ms step_avg:98.79ms +step:1234/1695 train_time:121907ms step_avg:98.79ms +step:1235/1695 train_time:122008ms step_avg:98.79ms +step:1236/1695 train_time:122109ms step_avg:98.79ms +step:1237/1695 train_time:122209ms step_avg:98.79ms +step:1238/1695 train_time:122310ms step_avg:98.80ms +step:1239/1695 train_time:122410ms step_avg:98.80ms +step:1240/1695 train_time:122512ms step_avg:98.80ms +step:1241/1695 train_time:122613ms step_avg:98.80ms +step:1242/1695 train_time:122714ms step_avg:98.80ms +step:1243/1695 train_time:122815ms step_avg:98.81ms 
+step:1244/1695 train_time:122916ms step_avg:98.81ms +step:1245/1695 train_time:123018ms step_avg:98.81ms +step:1246/1695 train_time:123121ms step_avg:98.81ms +step:1247/1695 train_time:123222ms step_avg:98.81ms +step:1248/1695 train_time:123323ms step_avg:98.82ms +step:1249/1695 train_time:123424ms step_avg:98.82ms +step:1250/1695 train_time:123525ms step_avg:98.82ms +step:1250/1695 val_loss:3.3925 train_time:123622ms step_avg:98.90ms +step:1251/1695 train_time:123650ms step_avg:98.84ms +step:1252/1695 train_time:123734ms step_avg:98.83ms +step:1253/1695 train_time:123835ms step_avg:98.83ms +step:1254/1695 train_time:123936ms step_avg:98.83ms +step:1255/1695 train_time:124037ms step_avg:98.83ms +step:1256/1695 train_time:124138ms step_avg:98.84ms +step:1257/1695 train_time:124238ms step_avg:98.84ms +step:1258/1695 train_time:124339ms step_avg:98.84ms +step:1259/1695 train_time:124439ms step_avg:98.84ms +step:1260/1695 train_time:124540ms step_avg:98.84ms +step:1261/1695 train_time:124642ms step_avg:98.84ms +step:1262/1695 train_time:124744ms step_avg:98.85ms +step:1263/1695 train_time:124844ms step_avg:98.85ms +step:1264/1695 train_time:124944ms step_avg:98.85ms +step:1265/1695 train_time:125043ms step_avg:98.85ms +step:1266/1695 train_time:125143ms step_avg:98.85ms +step:1267/1695 train_time:125243ms step_avg:98.85ms +step:1268/1695 train_time:125344ms step_avg:98.85ms +step:1269/1695 train_time:125445ms step_avg:98.85ms +step:1270/1695 train_time:125546ms step_avg:98.86ms +step:1271/1695 train_time:125648ms step_avg:98.86ms +step:1272/1695 train_time:125748ms step_avg:98.86ms +step:1273/1695 train_time:125849ms step_avg:98.86ms +step:1274/1695 train_time:125949ms step_avg:98.86ms +step:1275/1695 train_time:126051ms step_avg:98.86ms +step:1276/1695 train_time:126154ms step_avg:98.87ms +step:1277/1695 train_time:126255ms step_avg:98.87ms +step:1278/1695 train_time:126357ms step_avg:98.87ms +step:1279/1695 train_time:126460ms step_avg:98.87ms +step:1280/1695 train_time:126560ms step_avg:98.88ms +step:1281/1695 train_time:126662ms step_avg:98.88ms +step:1282/1695 train_time:126762ms step_avg:98.88ms +step:1283/1695 train_time:126861ms step_avg:98.88ms +step:1284/1695 train_time:126961ms step_avg:98.88ms +step:1285/1695 train_time:127062ms step_avg:98.88ms +step:1286/1695 train_time:127161ms step_avg:98.88ms +step:1287/1695 train_time:127262ms step_avg:98.88ms +step:1288/1695 train_time:127363ms step_avg:98.88ms +step:1289/1695 train_time:127465ms step_avg:98.89ms +step:1290/1695 train_time:127565ms step_avg:98.89ms +step:1291/1695 train_time:127666ms step_avg:98.89ms +step:1292/1695 train_time:127766ms step_avg:98.89ms +step:1293/1695 train_time:127867ms step_avg:98.89ms +step:1294/1695 train_time:127969ms step_avg:98.89ms +step:1295/1695 train_time:128070ms step_avg:98.90ms +step:1296/1695 train_time:128171ms step_avg:98.90ms +step:1297/1695 train_time:128273ms step_avg:98.90ms +step:1298/1695 train_time:128374ms step_avg:98.90ms +step:1299/1695 train_time:128476ms step_avg:98.90ms +step:1300/1695 train_time:128578ms step_avg:98.91ms +step:1301/1695 train_time:128679ms step_avg:98.91ms +step:1302/1695 train_time:128780ms step_avg:98.91ms +step:1303/1695 train_time:128881ms step_avg:98.91ms +step:1304/1695 train_time:128982ms step_avg:98.91ms +step:1305/1695 train_time:129083ms step_avg:98.91ms +step:1306/1695 train_time:129182ms step_avg:98.91ms +step:1307/1695 train_time:129283ms step_avg:98.92ms +step:1308/1695 train_time:129385ms step_avg:98.92ms +step:1309/1695 train_time:129486ms 
step_avg:98.92ms +step:1310/1695 train_time:129587ms step_avg:98.92ms +step:1311/1695 train_time:129688ms step_avg:98.92ms +step:1312/1695 train_time:129788ms step_avg:98.92ms +step:1313/1695 train_time:129890ms step_avg:98.93ms +step:1314/1695 train_time:129992ms step_avg:98.93ms +step:1315/1695 train_time:130093ms step_avg:98.93ms +step:1316/1695 train_time:130194ms step_avg:98.93ms +step:1317/1695 train_time:130294ms step_avg:98.93ms +step:1318/1695 train_time:130396ms step_avg:98.93ms +step:1319/1695 train_time:130498ms step_avg:98.94ms +step:1320/1695 train_time:130600ms step_avg:98.94ms +step:1321/1695 train_time:130701ms step_avg:98.94ms +step:1322/1695 train_time:130802ms step_avg:98.94ms +step:1323/1695 train_time:130903ms step_avg:98.94ms +step:1324/1695 train_time:131003ms step_avg:98.95ms +step:1325/1695 train_time:131104ms step_avg:98.95ms +step:1326/1695 train_time:131205ms step_avg:98.95ms +step:1327/1695 train_time:131307ms step_avg:98.95ms +step:1328/1695 train_time:131406ms step_avg:98.95ms +step:1329/1695 train_time:131506ms step_avg:98.95ms +step:1330/1695 train_time:131606ms step_avg:98.95ms +step:1331/1695 train_time:131709ms step_avg:98.95ms +step:1332/1695 train_time:131811ms step_avg:98.96ms +step:1333/1695 train_time:131912ms step_avg:98.96ms +step:1334/1695 train_time:132013ms step_avg:98.96ms +step:1335/1695 train_time:132115ms step_avg:98.96ms +step:1336/1695 train_time:132216ms step_avg:98.96ms +step:1337/1695 train_time:132318ms step_avg:98.97ms +step:1338/1695 train_time:132419ms step_avg:98.97ms +step:1339/1695 train_time:132521ms step_avg:98.97ms +step:1340/1695 train_time:132621ms step_avg:98.97ms +step:1341/1695 train_time:132722ms step_avg:98.97ms +step:1342/1695 train_time:132822ms step_avg:98.97ms +step:1343/1695 train_time:132922ms step_avg:98.97ms +step:1344/1695 train_time:133022ms step_avg:98.97ms +step:1345/1695 train_time:133122ms step_avg:98.98ms +step:1346/1695 train_time:133223ms step_avg:98.98ms +step:1347/1695 train_time:133324ms step_avg:98.98ms +step:1348/1695 train_time:133425ms step_avg:98.98ms +step:1349/1695 train_time:133527ms step_avg:98.98ms +step:1350/1695 train_time:133628ms step_avg:98.98ms +step:1351/1695 train_time:133728ms step_avg:98.98ms +step:1352/1695 train_time:133829ms step_avg:98.99ms +step:1353/1695 train_time:133930ms step_avg:98.99ms +step:1354/1695 train_time:134031ms step_avg:98.99ms +step:1355/1695 train_time:134132ms step_avg:98.99ms +step:1356/1695 train_time:134234ms step_avg:98.99ms +step:1357/1695 train_time:134335ms step_avg:98.99ms +step:1358/1695 train_time:134436ms step_avg:99.00ms +step:1359/1695 train_time:134538ms step_avg:99.00ms +step:1360/1695 train_time:134638ms step_avg:99.00ms +step:1361/1695 train_time:134739ms step_avg:99.00ms +step:1362/1695 train_time:134839ms step_avg:99.00ms +step:1363/1695 train_time:134942ms step_avg:99.00ms +step:1364/1695 train_time:135042ms step_avg:99.00ms +step:1365/1695 train_time:135143ms step_avg:99.01ms +step:1366/1695 train_time:135243ms step_avg:99.01ms +step:1367/1695 train_time:135343ms step_avg:99.01ms +step:1368/1695 train_time:135444ms step_avg:99.01ms +step:1369/1695 train_time:135544ms step_avg:99.01ms +step:1370/1695 train_time:135646ms step_avg:99.01ms +step:1371/1695 train_time:135747ms step_avg:99.01ms +step:1372/1695 train_time:135848ms step_avg:99.01ms +step:1373/1695 train_time:135951ms step_avg:99.02ms +step:1374/1695 train_time:136051ms step_avg:99.02ms +step:1375/1695 train_time:136154ms step_avg:99.02ms +step:1375/1695 val_loss:3.3531 
train_time:136252ms step_avg:99.09ms +step:1376/1695 train_time:136281ms step_avg:99.04ms +step:1377/1695 train_time:136367ms step_avg:99.03ms +step:1378/1695 train_time:136471ms step_avg:99.04ms +step:1379/1695 train_time:136572ms step_avg:99.04ms +step:1380/1695 train_time:136675ms step_avg:99.04ms +step:1381/1695 train_time:136775ms step_avg:99.04ms +step:1382/1695 train_time:136874ms step_avg:99.04ms +step:1383/1695 train_time:136973ms step_avg:99.04ms +step:1384/1695 train_time:137074ms step_avg:99.04ms +step:1385/1695 train_time:137174ms step_avg:99.04ms +step:1386/1695 train_time:137276ms step_avg:99.04ms +step:1387/1695 train_time:137378ms step_avg:99.05ms +step:1388/1695 train_time:137481ms step_avg:99.05ms +step:1389/1695 train_time:137585ms step_avg:99.05ms +step:1390/1695 train_time:137687ms step_avg:99.06ms +step:1391/1695 train_time:137788ms step_avg:99.06ms +step:1392/1695 train_time:137891ms step_avg:99.06ms +step:1393/1695 train_time:137992ms step_avg:99.06ms +step:1394/1695 train_time:138093ms step_avg:99.06ms +step:1395/1695 train_time:138194ms step_avg:99.06ms +step:1396/1695 train_time:138296ms step_avg:99.07ms +step:1397/1695 train_time:138398ms step_avg:99.07ms +step:1398/1695 train_time:138500ms step_avg:99.07ms +step:1399/1695 train_time:138601ms step_avg:99.07ms +step:1400/1695 train_time:138704ms step_avg:99.07ms +step:1401/1695 train_time:138806ms step_avg:99.08ms +step:1402/1695 train_time:138909ms step_avg:99.08ms +step:1403/1695 train_time:139012ms step_avg:99.08ms +step:1404/1695 train_time:139114ms step_avg:99.08ms +step:1405/1695 train_time:139215ms step_avg:99.09ms +step:1406/1695 train_time:139317ms step_avg:99.09ms +step:1407/1695 train_time:139418ms step_avg:99.09ms +step:1408/1695 train_time:139519ms step_avg:99.09ms +step:1409/1695 train_time:139625ms step_avg:99.09ms +step:1410/1695 train_time:139726ms step_avg:99.10ms +step:1411/1695 train_time:139828ms step_avg:99.10ms +step:1412/1695 train_time:139931ms step_avg:99.10ms +step:1413/1695 train_time:140032ms step_avg:99.10ms +step:1414/1695 train_time:140135ms step_avg:99.11ms +step:1415/1695 train_time:140237ms step_avg:99.11ms +step:1416/1695 train_time:140337ms step_avg:99.11ms +step:1417/1695 train_time:140438ms step_avg:99.11ms +step:1418/1695 train_time:140539ms step_avg:99.11ms +step:1419/1695 train_time:140641ms step_avg:99.11ms +step:1420/1695 train_time:140743ms step_avg:99.12ms +step:1421/1695 train_time:140846ms step_avg:99.12ms +step:1422/1695 train_time:140947ms step_avg:99.12ms +step:1423/1695 train_time:141050ms step_avg:99.12ms +step:1424/1695 train_time:141152ms step_avg:99.12ms +step:1425/1695 train_time:141253ms step_avg:99.12ms +step:1426/1695 train_time:141356ms step_avg:99.13ms +step:1427/1695 train_time:141457ms step_avg:99.13ms +step:1428/1695 train_time:141559ms step_avg:99.13ms +step:1429/1695 train_time:141661ms step_avg:99.13ms +step:1430/1695 train_time:141762ms step_avg:99.13ms +step:1431/1695 train_time:141863ms step_avg:99.14ms +step:1432/1695 train_time:141966ms step_avg:99.14ms +step:1433/1695 train_time:142068ms step_avg:99.14ms +step:1434/1695 train_time:142169ms step_avg:99.14ms +step:1435/1695 train_time:142272ms step_avg:99.14ms +step:1436/1695 train_time:142375ms step_avg:99.15ms +step:1437/1695 train_time:142477ms step_avg:99.15ms +step:1438/1695 train_time:142577ms step_avg:99.15ms +step:1439/1695 train_time:142680ms step_avg:99.15ms +step:1440/1695 train_time:142782ms step_avg:99.15ms +step:1441/1695 train_time:142885ms step_avg:99.16ms +step:1442/1695 
train_time:142986ms step_avg:99.16ms +step:1443/1695 train_time:143088ms step_avg:99.16ms +step:1444/1695 train_time:143190ms step_avg:99.16ms +step:1445/1695 train_time:143292ms step_avg:99.16ms +step:1446/1695 train_time:143395ms step_avg:99.17ms +step:1447/1695 train_time:143496ms step_avg:99.17ms +step:1448/1695 train_time:143600ms step_avg:99.17ms +step:1449/1695 train_time:143701ms step_avg:99.17ms +step:1450/1695 train_time:143802ms step_avg:99.17ms +step:1451/1695 train_time:143903ms step_avg:99.18ms +step:1452/1695 train_time:144005ms step_avg:99.18ms +step:1453/1695 train_time:144109ms step_avg:99.18ms +step:1454/1695 train_time:144212ms step_avg:99.18ms +step:1455/1695 train_time:144314ms step_avg:99.19ms +step:1456/1695 train_time:144416ms step_avg:99.19ms +step:1457/1695 train_time:144519ms step_avg:99.19ms +step:1458/1695 train_time:144621ms step_avg:99.19ms +step:1459/1695 train_time:144723ms step_avg:99.19ms +step:1460/1695 train_time:144825ms step_avg:99.20ms +step:1461/1695 train_time:144928ms step_avg:99.20ms +step:1462/1695 train_time:145029ms step_avg:99.20ms +step:1463/1695 train_time:145130ms step_avg:99.20ms +step:1464/1695 train_time:145232ms step_avg:99.20ms +step:1465/1695 train_time:145333ms step_avg:99.20ms +step:1466/1695 train_time:145435ms step_avg:99.21ms +step:1467/1695 train_time:145535ms step_avg:99.21ms +step:1468/1695 train_time:145638ms step_avg:99.21ms +step:1469/1695 train_time:145740ms step_avg:99.21ms +step:1470/1695 train_time:145842ms step_avg:99.21ms +step:1471/1695 train_time:145944ms step_avg:99.21ms +step:1472/1695 train_time:146046ms step_avg:99.22ms +step:1473/1695 train_time:146148ms step_avg:99.22ms +step:1474/1695 train_time:146249ms step_avg:99.22ms +step:1475/1695 train_time:146351ms step_avg:99.22ms +step:1476/1695 train_time:146454ms step_avg:99.22ms +step:1477/1695 train_time:146556ms step_avg:99.23ms +step:1478/1695 train_time:146657ms step_avg:99.23ms +step:1479/1695 train_time:146759ms step_avg:99.23ms +step:1480/1695 train_time:146860ms step_avg:99.23ms +step:1481/1695 train_time:146962ms step_avg:99.23ms +step:1482/1695 train_time:147064ms step_avg:99.23ms +step:1483/1695 train_time:147166ms step_avg:99.24ms +step:1484/1695 train_time:147269ms step_avg:99.24ms +step:1485/1695 train_time:147371ms step_avg:99.24ms +step:1486/1695 train_time:147473ms step_avg:99.24ms +step:1487/1695 train_time:147574ms step_avg:99.24ms +step:1488/1695 train_time:147676ms step_avg:99.24ms +step:1489/1695 train_time:147778ms step_avg:99.25ms +step:1490/1695 train_time:147880ms step_avg:99.25ms +step:1491/1695 train_time:147982ms step_avg:99.25ms +step:1492/1695 train_time:148083ms step_avg:99.25ms +step:1493/1695 train_time:148185ms step_avg:99.25ms +step:1494/1695 train_time:148288ms step_avg:99.26ms +step:1495/1695 train_time:148391ms step_avg:99.26ms +step:1496/1695 train_time:148492ms step_avg:99.26ms +step:1497/1695 train_time:148593ms step_avg:99.26ms +step:1498/1695 train_time:148695ms step_avg:99.26ms +step:1499/1695 train_time:148796ms step_avg:99.26ms +step:1500/1695 train_time:148898ms step_avg:99.27ms +step:1500/1695 val_loss:3.3182 train_time:148996ms step_avg:99.33ms +step:1501/1695 train_time:149025ms step_avg:99.28ms +step:1502/1695 train_time:149112ms step_avg:99.28ms +step:1503/1695 train_time:149215ms step_avg:99.28ms +step:1504/1695 train_time:149315ms step_avg:99.28ms +step:1505/1695 train_time:149418ms step_avg:99.28ms +step:1506/1695 train_time:149519ms step_avg:99.28ms +step:1507/1695 train_time:149621ms step_avg:99.28ms 
+step:1508/1695 train_time:149721ms step_avg:99.28ms +step:1509/1695 train_time:149823ms step_avg:99.29ms +step:1510/1695 train_time:149925ms step_avg:99.29ms +step:1511/1695 train_time:150029ms step_avg:99.29ms +step:1512/1695 train_time:150131ms step_avg:99.29ms +step:1513/1695 train_time:150233ms step_avg:99.29ms +step:1514/1695 train_time:150335ms step_avg:99.30ms +step:1515/1695 train_time:150441ms step_avg:99.30ms +step:1516/1695 train_time:150542ms step_avg:99.30ms +step:1517/1695 train_time:150642ms step_avg:99.30ms +step:1518/1695 train_time:150743ms step_avg:99.30ms +step:1519/1695 train_time:150847ms step_avg:99.31ms +step:1520/1695 train_time:150949ms step_avg:99.31ms +step:1521/1695 train_time:151050ms step_avg:99.31ms +step:1522/1695 train_time:151151ms step_avg:99.31ms +step:1523/1695 train_time:151253ms step_avg:99.31ms +step:1524/1695 train_time:151357ms step_avg:99.32ms +step:1525/1695 train_time:151461ms step_avg:99.32ms +step:1526/1695 train_time:151563ms step_avg:99.32ms +step:1527/1695 train_time:151665ms step_avg:99.32ms +step:1528/1695 train_time:151770ms step_avg:99.33ms +step:1529/1695 train_time:151872ms step_avg:99.33ms +step:1530/1695 train_time:151976ms step_avg:99.33ms +step:1531/1695 train_time:152078ms step_avg:99.33ms +step:1532/1695 train_time:152180ms step_avg:99.33ms +step:1533/1695 train_time:152282ms step_avg:99.34ms +step:1534/1695 train_time:152383ms step_avg:99.34ms +step:1535/1695 train_time:152486ms step_avg:99.34ms +step:1536/1695 train_time:152587ms step_avg:99.34ms +step:1537/1695 train_time:152688ms step_avg:99.34ms +step:1538/1695 train_time:152790ms step_avg:99.34ms +step:1539/1695 train_time:152892ms step_avg:99.34ms +step:1540/1695 train_time:152996ms step_avg:99.35ms +step:1541/1695 train_time:153099ms step_avg:99.35ms +step:1542/1695 train_time:153203ms step_avg:99.35ms +step:1543/1695 train_time:153305ms step_avg:99.36ms +step:1544/1695 train_time:153407ms step_avg:99.36ms +step:1545/1695 train_time:153509ms step_avg:99.36ms +step:1546/1695 train_time:153610ms step_avg:99.36ms +step:1547/1695 train_time:153712ms step_avg:99.36ms +step:1548/1695 train_time:153814ms step_avg:99.36ms +step:1549/1695 train_time:153916ms step_avg:99.36ms +step:1550/1695 train_time:154019ms step_avg:99.37ms +step:1551/1695 train_time:154121ms step_avg:99.37ms +step:1552/1695 train_time:154223ms step_avg:99.37ms +step:1553/1695 train_time:154325ms step_avg:99.37ms +step:1554/1695 train_time:154426ms step_avg:99.37ms +step:1555/1695 train_time:154528ms step_avg:99.37ms +step:1556/1695 train_time:154630ms step_avg:99.38ms +step:1557/1695 train_time:154734ms step_avg:99.38ms +step:1558/1695 train_time:154837ms step_avg:99.38ms +step:1559/1695 train_time:154940ms step_avg:99.38ms +step:1560/1695 train_time:155042ms step_avg:99.39ms +step:1561/1695 train_time:155144ms step_avg:99.39ms +step:1562/1695 train_time:155246ms step_avg:99.39ms +step:1563/1695 train_time:155350ms step_avg:99.39ms +step:1564/1695 train_time:155451ms step_avg:99.39ms +step:1565/1695 train_time:155553ms step_avg:99.39ms +step:1566/1695 train_time:155655ms step_avg:99.40ms +step:1567/1695 train_time:155755ms step_avg:99.40ms +step:1568/1695 train_time:155857ms step_avg:99.40ms +step:1569/1695 train_time:155958ms step_avg:99.40ms +step:1570/1695 train_time:156062ms step_avg:99.40ms +step:1571/1695 train_time:156163ms step_avg:99.40ms +step:1572/1695 train_time:156265ms step_avg:99.41ms +step:1573/1695 train_time:156367ms step_avg:99.41ms +step:1574/1695 train_time:156468ms step_avg:99.41ms 
+step:1575/1695 train_time:156568ms step_avg:99.41ms +step:1576/1695 train_time:156670ms step_avg:99.41ms +step:1577/1695 train_time:156773ms step_avg:99.41ms +step:1578/1695 train_time:156875ms step_avg:99.41ms +step:1579/1695 train_time:156978ms step_avg:99.42ms +step:1580/1695 train_time:157082ms step_avg:99.42ms +step:1581/1695 train_time:157184ms step_avg:99.42ms +step:1582/1695 train_time:157285ms step_avg:99.42ms +step:1583/1695 train_time:157389ms step_avg:99.42ms +step:1584/1695 train_time:157492ms step_avg:99.43ms +step:1585/1695 train_time:157593ms step_avg:99.43ms +step:1586/1695 train_time:157696ms step_avg:99.43ms +step:1587/1695 train_time:157798ms step_avg:99.43ms +step:1588/1695 train_time:157899ms step_avg:99.43ms +step:1589/1695 train_time:158000ms step_avg:99.43ms +step:1590/1695 train_time:158102ms step_avg:99.44ms +step:1591/1695 train_time:158205ms step_avg:99.44ms +step:1592/1695 train_time:158307ms step_avg:99.44ms +step:1593/1695 train_time:158408ms step_avg:99.44ms +step:1594/1695 train_time:158512ms step_avg:99.44ms +step:1595/1695 train_time:158614ms step_avg:99.44ms +step:1596/1695 train_time:158715ms step_avg:99.45ms +step:1597/1695 train_time:158818ms step_avg:99.45ms +step:1598/1695 train_time:158922ms step_avg:99.45ms +step:1599/1695 train_time:159023ms step_avg:99.45ms +step:1600/1695 train_time:159124ms step_avg:99.45ms +step:1601/1695 train_time:159227ms step_avg:99.45ms +step:1602/1695 train_time:159329ms step_avg:99.46ms +step:1603/1695 train_time:159429ms step_avg:99.46ms +step:1604/1695 train_time:159531ms step_avg:99.46ms +step:1605/1695 train_time:159633ms step_avg:99.46ms +step:1606/1695 train_time:159735ms step_avg:99.46ms +step:1607/1695 train_time:159836ms step_avg:99.46ms +step:1608/1695 train_time:159939ms step_avg:99.46ms +step:1609/1695 train_time:160040ms step_avg:99.47ms +step:1610/1695 train_time:160143ms step_avg:99.47ms +step:1611/1695 train_time:160245ms step_avg:99.47ms +step:1612/1695 train_time:160347ms step_avg:99.47ms +step:1613/1695 train_time:160449ms step_avg:99.47ms +step:1614/1695 train_time:160549ms step_avg:99.47ms +step:1615/1695 train_time:160651ms step_avg:99.47ms +step:1616/1695 train_time:160752ms step_avg:99.48ms +step:1617/1695 train_time:160854ms step_avg:99.48ms +step:1618/1695 train_time:160956ms step_avg:99.48ms +step:1619/1695 train_time:161058ms step_avg:99.48ms +step:1620/1695 train_time:161162ms step_avg:99.48ms +step:1621/1695 train_time:161263ms step_avg:99.48ms +step:1622/1695 train_time:161365ms step_avg:99.49ms +step:1623/1695 train_time:161467ms step_avg:99.49ms +step:1624/1695 train_time:161569ms step_avg:99.49ms +step:1625/1695 train_time:161673ms step_avg:99.49ms +step:1625/1695 val_loss:3.2898 train_time:161771ms step_avg:99.55ms +step:1626/1695 train_time:161800ms step_avg:99.51ms +step:1627/1695 train_time:161886ms step_avg:99.50ms +step:1628/1695 train_time:161988ms step_avg:99.50ms +step:1629/1695 train_time:162092ms step_avg:99.50ms +step:1630/1695 train_time:162193ms step_avg:99.51ms +step:1631/1695 train_time:162295ms step_avg:99.51ms +step:1632/1695 train_time:162397ms step_avg:99.51ms +step:1633/1695 train_time:162498ms step_avg:99.51ms +step:1634/1695 train_time:162600ms step_avg:99.51ms +step:1635/1695 train_time:162702ms step_avg:99.51ms +step:1636/1695 train_time:162805ms step_avg:99.51ms +step:1637/1695 train_time:162908ms step_avg:99.52ms +step:1638/1695 train_time:163010ms step_avg:99.52ms +step:1639/1695 train_time:163113ms step_avg:99.52ms +step:1640/1695 train_time:163216ms 
step_avg:99.52ms +step:1641/1695 train_time:163318ms step_avg:99.52ms +step:1642/1695 train_time:163421ms step_avg:99.53ms +step:1643/1695 train_time:163522ms step_avg:99.53ms +step:1644/1695 train_time:163624ms step_avg:99.53ms +step:1645/1695 train_time:163727ms step_avg:99.53ms +step:1646/1695 train_time:163830ms step_avg:99.53ms +step:1647/1695 train_time:163934ms step_avg:99.54ms +step:1648/1695 train_time:164040ms step_avg:99.54ms +step:1649/1695 train_time:164142ms step_avg:99.54ms +step:1650/1695 train_time:164244ms step_avg:99.54ms +step:1651/1695 train_time:164345ms step_avg:99.54ms +step:1652/1695 train_time:164447ms step_avg:99.54ms +step:1653/1695 train_time:164550ms step_avg:99.55ms +step:1654/1695 train_time:164654ms step_avg:99.55ms +step:1655/1695 train_time:164756ms step_avg:99.55ms +step:1656/1695 train_time:164859ms step_avg:99.55ms +step:1657/1695 train_time:164961ms step_avg:99.55ms +step:1658/1695 train_time:165064ms step_avg:99.56ms +step:1659/1695 train_time:165170ms step_avg:99.56ms +step:1660/1695 train_time:165272ms step_avg:99.56ms +step:1661/1695 train_time:165377ms step_avg:99.56ms +step:1662/1695 train_time:165482ms step_avg:99.57ms +step:1663/1695 train_time:165584ms step_avg:99.57ms +step:1664/1695 train_time:165686ms step_avg:99.57ms +step:1665/1695 train_time:165795ms step_avg:99.58ms +step:1666/1695 train_time:165898ms step_avg:99.58ms +step:1667/1695 train_time:166001ms step_avg:99.58ms +step:1668/1695 train_time:166107ms step_avg:99.58ms +step:1669/1695 train_time:166211ms step_avg:99.59ms +step:1670/1695 train_time:166313ms step_avg:99.59ms +step:1671/1695 train_time:166416ms step_avg:99.59ms +step:1672/1695 train_time:166520ms step_avg:99.59ms +step:1673/1695 train_time:166622ms step_avg:99.59ms +step:1674/1695 train_time:166724ms step_avg:99.60ms +step:1675/1695 train_time:166826ms step_avg:99.60ms +step:1676/1695 train_time:166931ms step_avg:99.60ms +step:1677/1695 train_time:167033ms step_avg:99.60ms +step:1678/1695 train_time:167137ms step_avg:99.60ms +step:1679/1695 train_time:167240ms step_avg:99.61ms +step:1680/1695 train_time:167342ms step_avg:99.61ms +step:1681/1695 train_time:167444ms step_avg:99.61ms +step:1682/1695 train_time:167549ms step_avg:99.61ms +step:1683/1695 train_time:167651ms step_avg:99.61ms +step:1684/1695 train_time:167754ms step_avg:99.62ms +step:1685/1695 train_time:167857ms step_avg:99.62ms +step:1686/1695 train_time:167960ms step_avg:99.62ms +step:1687/1695 train_time:168062ms step_avg:99.62ms +step:1688/1695 train_time:168165ms step_avg:99.62ms +step:1689/1695 train_time:168266ms step_avg:99.62ms +step:1690/1695 train_time:168369ms step_avg:99.63ms +step:1691/1695 train_time:168472ms step_avg:99.63ms +step:1692/1695 train_time:168573ms step_avg:99.63ms +step:1693/1695 train_time:168676ms step_avg:99.63ms +step:1694/1695 train_time:168779ms step_avg:99.63ms +step:1695/1695 train_time:168882ms step_avg:99.64ms +step:1695/1695 val_loss:3.2767 train_time:168981ms step_avg:99.69ms +peak memory allocated: 34077 MiB reserved: 49660 MiB diff --git a/records/082325_SparseAttnGate/4518e917-cec2-4c81-9c1a-53b0644c2326.txt b/records/082325_SparseAttnGate/4518e917-cec2-4c81-9c1a-53b0644c2326.txt new file mode 100644 index 000000000..f79b4ae11 --- /dev/null +++ b/records/082325_SparseAttnGate/4518e917-cec2-4c81-9c1a-53b0644c2326.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
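# the A.T tile advances by the same K stride: both pointer grids walk the columns of A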
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
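+
+    Example (editor's sketch, mirroring how this file constructs the optimizer further below):
+
+        optimizer = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+        optimizer.step() # requires torch.distributed to be initialized; step() uses reduce_scatter/all_gather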
+    """
+    def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95):
+        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
+        params = list(params)
+        sizes = {p.shape for p in params}
+        # one param group per unique parameter shape, so the padding/dummy tensors below can reuse a single shape per group
+        param_groups = []
+        for size in sizes:
+            group_params = [p for p in params if p.shape == size]
+            param_groups.append(dict(params=group_params))
+        super().__init__(param_groups, defaults)
+
+    @torch.no_grad()
+    def step(self):
+        # Efficient systems-wise implementation of step developed by @YouJiacheng,
+        # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad,
+        # @ryanyang0, and @vagrawal.
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+        reduce_scatter_futures: list[torch.Future] = []
+        all_gather_futures: list[torch.Future] = []
+        for group in self.param_groups:
+            params: list[Tensor] = group["params"]
+            # dummy output buffer for ranks whose param index runs past the end of the
+            # group; the collective below must still be called on every rank
+            grad = torch.empty_like(params[-1])
+            grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size
+            for base_i in range(0, len(params), world_size):
+                if base_i + rank < len(params):
+                    grad = params[base_i + rank].grad
+                # This gives strange dynamo warnings
+                reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future())
+
+        idx = 0
+        for group in self.param_groups:
+            params: list[Tensor] = group["params"]
+            params_pad = params + [torch.empty_like(params[-1])] * world_size
+            momentum = group["momentum"]
+            for base_i in range(0, len(params), world_size):
+                reduce_scatter_futures[idx].wait()
+                if base_i + rank < len(params):
+                    p = params[base_i + rank]
+                    grad = p.grad
+                    eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0)
+                    eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0)
+                    state = self.state[p]
+                    if len(state) == 0:
+                        state["momentum_buffer"] = torch.zeros_like(grad)
+                    momentum_buffer = state["momentum_buffer"]
+                    p.mul_(1 - eff_weight_decay)
+                    momentum_buffer.lerp_(grad, 1 - momentum)
+                    grad = grad.lerp_(momentum_buffer, momentum)
+                    v = newton_schulz_triton(grad)
+                    p.add_(other=v, alpha=-eff_lr)
+                idx += 1
+                all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future())
+        torch.futures.collect_all(all_gather_futures).wait()
+
+class DistAdam(torch.optim.Optimizer):
+    def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01):
+        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
+        params = list(params)
+        sizes = {p.shape for p in params}
+        # one param group per unique parameter shape, matching the grouping used by Muon above
+        param_groups = []
+        for size in sizes:
+            group_params = [p for p in params if p.shape == size]
+            param_groups.append(dict(params=group_params))
+        super().__init__(param_groups, defaults)
+        # DistributedAdam implementation by @vagrawal
+
+    @torch.compile
+    @torch.no_grad()
+    def step(self):
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+        reduce_scatter_futures: list[torch.Future] = []
+        all_gather_futures: list[torch.Future] = []
+        grad_slices = []
+        for group in self.param_groups:
+            params: list[Tensor] = group["params"]
+            # each rank owns rows [rank*rank_size, (rank+1)*rank_size) of every parameter;
+            # reduce_scatter_tensor averages the grads and hands each rank only its slice
+            for base_i in range(len(params)):
+                grad = params[base_i].grad
+                rank_size = grad.shape[0] // world_size
+                grad_slice = torch.empty_like(grad[:rank_size])
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128):
+        super().__init__()
+        self.num_heads = num_heads
+        self.head_dim = head_dim
+        hdim = num_heads * head_dim
+        assert hdim == dim, "num_heads * head_dim must equal model_dim"
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng
+        # https://x.com/hi_tysam/status/1879699187107033311
+        self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim))
+        with torch.no_grad():
+            self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights
+            self.qkvo_w[3].zero_() # init output weights to zero
+        self.rotary = Rotary(head_dim, max_seq_len)
+        # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun
+        # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
+        self.attn_scale = 0.12
+        # per-head output gate ("dampen"): a linear layer over the first dim//dampen_factor
+        # channels of the residual stream, squashed to (0, 1) by a sigmoid in forward();
+        # zero init means every gate starts at 0.5
+        self.dampen = CastedLinear(dim//args.dampen_factor, num_heads)
+        self.dampen.weight.detach().zero_()
+
+    def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask):
+        B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length, model dim
+        assert B == 1, "Must use batch size = 1 for FlexAttention"
+        dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1)
+        q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2)
+        q, k = norm(q), norm(k) # QK norm @Grad62304977
+        q, k = self.rotary(q), self.rotary(k)
+        if ve is not None:
+            v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977
+        else: # skip mid-layers token value embeddings by @YouJiacheng
+            v = lambdas[0] * v
+        y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=self.attn_scale).transpose(1, 2)
+        y = y.view(B, T, self.num_heads, self.head_dim)
+        y = y * dampen_factor
+        y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side
+        y = F.linear(y, self.qkvo_w[3].type_as(y))
+        return y
+
+class MLP(nn.Module):
+    def __init__(self, dim: int):
+        super().__init__()
+        hdim = 4 * dim
+        # make both matrices have the same shape because optimizer sorts params by shape
+        # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size
+        self.c_fc = nn.Parameter(torch.empty(dim, hdim))
+        self.c_proj = nn.Parameter(torch.empty(dim, hdim))
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        with torch.no_grad():
+            self.c_fc.uniform_(-bound, bound)
+            self.c_proj.zero_() # zero init suggested by @Grad62304977
+
+    def forward(self, x: Tensor):
+        x = F.linear(x, self.c_fc.T.type_as(x))
+        x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
+        x = F.linear(x, self.c_proj.type_as(x))
+        return x
+
+class Block(nn.Module):
+    def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int):
+        super().__init__()
+        # skip attention of blocks.7 (the 8th layer) by @YouJiacheng
+        self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None
+        self.mlp = MLP(dim)
+
+    def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask):
+        x = lambdas[0] * x + lambdas[1] * x0
+        if self.attn is not None:
+            x = x + self.attn(norm(x),
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
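+        # lr_mul (and wd_mul, where set) are picked up via getattr in Muon.step and
+        # DistAdam.step as per-parameter multipliers on the group's lr / weight decay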
+        self.lm_head.weight.lr_mul = 1.0
+        self.scalars.lr_mul = 5.0
+
+    def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor):
+        BLOCK_SIZE = 128
+        docs = (input_seq == 50256).cumsum(0)
+        # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256])
+        # docs = increments.cumsum(0)
+
+        def document_causal(b, h, q_idx, kv_idx):
+            causal_mask = q_idx >= kv_idx
+            document_mask = docs[q_idx] == docs[kv_idx]
+            return causal_mask & document_mask
+
+        def dense_to_ordered(dense_blockmask: Tensor):
+            num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32)
+            indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32)
+            return num_blocks[None, None].contiguous(), indices[None, None].contiguous()
+
+        # manual block mask creation by @YouJiacheng
+        assert len(input_seq) % BLOCK_SIZE == 0
+        NUM_BLOCKS = len(input_seq) // BLOCK_SIZE
+        block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda")
+        causal_blockmask_any = block_idx[:, None] >= block_idx
+        causal_blockmask_all = block_idx[:, None] > block_idx
+        docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous()
+        docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous()
+        document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low)
+        document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low)
+        blockmask_any = causal_blockmask_any & document_blockmask_any
+        blockmask_all = causal_blockmask_all & document_blockmask_all
+        partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all)
+        full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all)
+        def build_bm(window_size_blocks: Tensor) -> BlockMask:
+            return BlockMask.from_kv_blocks(
+                torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)),
+                partial_kv_indices,
+                torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1),
+                full_kv_indices,
+                BLOCK_SIZE=BLOCK_SIZE,
+                mask_mod=document_causal,
+            )
+        # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper
+        return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2)
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor):
+        assert input_seq.ndim == 1
+
+        ve = [value_embed(input_seq) for value_embed in self.value_embeds]
+        # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks)
+        block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(block_masks) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        # i.e. 30*sigmoid(z/7.5) == 15*(tanh(z/15) + 1): a tanh soft cap at 15, shifted into (0, 30)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+# find world_size starting indices, such that each begins with token 50256 and local batches don't overlap
+def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int):
+    boundary_mask = tokens[pos : pos + token_window] == 50256
+    boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos
+    start = boundary_positions[0].item()
+    starts = []
+    for i in range(1, len(boundary_positions)):
+        end = boundary_positions[i].item()
+        if end - start >= seq_len:
+            starts.append(start) # append start once end pos is confirmed
+            if len(starts) == dist.get_world_size():
+                return starts, end - pos
+            start = end
+    assert False # increase token_window if necessary
+
+def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool):
+    rank = dist.get_rank()
+    world_size = dist.get_world_size()
+    batch_size = seq_len * world_size
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length seq_len
+        if pos + token_window + 1 >= len(tokens):
+            tokens = _load_data_shard(next(file_iter))
+            pos = 0
+        for _ in range(grad_accum_steps):
+            if align_to_bos:
+                batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window)
+                start_idx = batch_starts[rank]
+            else:
+                tokens_consumed = batch_size
+                start_idx = pos + rank * seq_len
+            buf = tokens[start_idx:][:seq_len + 1]
+            inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+            targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+            pos += tokens_consumed
+            token_window -= tokens_consumed
+            yield inputs, targets
+
+# -----------------------------------------------------------------------------
+# int main
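+
+# NOTE (editor): the hyperparameter block that defines `args` was missing from this record.
+# The sketch below is a reconstruction: the field names are recovered from how `args` is used
+# elsewhere in this file, num_iterations=1695 and val_loss_every=125 are inferred from the
+# step/val lines in the log below, and every other value is a placeholder, not necessarily
+# what this run used.
+@dataclass
+class Hyperparameters:
+    run_id: str = str(uuid.uuid4())
+    # data (placeholder paths; joined with $DATA_PATH below)
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin"
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin"
+    val_tokens: int = 10485760 # placeholder; must be divisible by world_size * val_seq_len
+    train_seq_len: int = 48 * 1024 # placeholder
+    val_seq_len: int = 4 * 64 * 1024 # placeholder
+    # optimization
+    num_iterations: int = 1695 # inferred from the training log below
+    cooldown_frac: float = 0.45 # placeholder
+    # architecture
+    dampen_factor: int = 8 # placeholder; consumed by CausalSelfAttention.dampen
+    # evaluation and logging
+    val_loss_every: int = 125 # inferred from the training log below
+    save_checkpoint: bool = False # placeholder
+args = Hyperparameters()
+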
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:44:03 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 32C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 37C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 124W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 31C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 38C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 317064 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 317065 C /usr/bin/python3 614MiB | +| 0 N/A N/A 317066 C /usr/bin/python3 614MiB | +| 0 N/A N/A 317067 C /usr/bin/python3 614MiB | +| 0 N/A N/A 317068 C /usr/bin/python3 614MiB | +| 0 N/A N/A 317069 C /usr/bin/python3 614MiB | +| 0 N/A N/A 317070 C /usr/bin/python3 614MiB | +| 0 N/A N/A 317071 C /usr/bin/python3 614MiB | +| 1 N/A N/A 317065 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 317066 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 317067 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 317068 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 317069 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 317070 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 317071 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:155ms step_avg:155.10ms +step:2/1695 train_time:182ms step_avg:91.07ms +step:3/1695 train_time:253ms step_avg:84.30ms +step:4/1695 train_time:345ms step_avg:86.24ms +step:5/1695 train_time:437ms step_avg:87.38ms +step:6/1695 train_time:530ms step_avg:88.29ms +step:7/1695 train_time:623ms step_avg:88.94ms +step:8/1695 
train_time:715ms step_avg:89.43ms +step:9/1695 train_time:808ms step_avg:89.82ms +step:10/1695 train_time:901ms step_avg:90.13ms +step:11/1695 train_time:994ms step_avg:90.37ms +step:12/1695 train_time:1088ms step_avg:90.68ms +step:13/1695 train_time:1184ms step_avg:91.05ms +step:14/1695 train_time:1278ms step_avg:91.30ms +step:15/1695 train_time:1372ms step_avg:91.47ms +step:16/1695 train_time:1466ms step_avg:91.61ms +step:17/1695 train_time:1559ms step_avg:91.71ms +step:18/1695 train_time:1652ms step_avg:91.77ms +step:19/1695 train_time:1746ms step_avg:91.88ms +step:20/1695 train_time:1839ms step_avg:91.93ms +step:21/1695 train_time:1932ms step_avg:91.98ms +step:22/1695 train_time:2025ms step_avg:92.03ms +step:23/1695 train_time:2120ms step_avg:92.16ms +step:24/1695 train_time:2213ms step_avg:92.20ms +step:25/1695 train_time:2307ms step_avg:92.29ms +step:26/1695 train_time:2401ms step_avg:92.35ms +step:27/1695 train_time:2495ms step_avg:92.40ms +step:28/1695 train_time:2589ms step_avg:92.45ms +step:29/1695 train_time:2682ms step_avg:92.49ms +step:30/1695 train_time:2776ms step_avg:92.53ms +step:31/1695 train_time:2870ms step_avg:92.59ms +step:32/1695 train_time:2964ms step_avg:92.62ms +step:33/1695 train_time:3057ms step_avg:92.65ms +step:34/1695 train_time:3152ms step_avg:92.70ms +step:35/1695 train_time:3247ms step_avg:92.76ms +step:36/1695 train_time:3341ms step_avg:92.81ms +step:37/1695 train_time:3436ms step_avg:92.85ms +step:38/1695 train_time:3530ms step_avg:92.89ms +step:39/1695 train_time:3624ms step_avg:92.91ms +step:40/1695 train_time:3718ms step_avg:92.95ms +step:41/1695 train_time:3812ms step_avg:92.98ms +step:42/1695 train_time:3906ms step_avg:93.01ms +step:43/1695 train_time:4001ms step_avg:93.04ms +step:44/1695 train_time:4094ms step_avg:93.04ms +step:45/1695 train_time:4187ms step_avg:93.05ms +step:46/1695 train_time:4282ms step_avg:93.08ms +step:47/1695 train_time:4376ms step_avg:93.10ms +step:48/1695 train_time:4471ms step_avg:93.14ms +step:49/1695 train_time:4565ms step_avg:93.16ms +step:50/1695 train_time:4658ms step_avg:93.17ms +step:51/1695 train_time:4752ms step_avg:93.18ms +step:52/1695 train_time:4846ms step_avg:93.18ms +step:53/1695 train_time:4940ms step_avg:93.20ms +step:54/1695 train_time:5033ms step_avg:93.20ms +step:55/1695 train_time:5126ms step_avg:93.21ms +step:56/1695 train_time:5220ms step_avg:93.21ms +step:57/1695 train_time:5313ms step_avg:93.21ms +step:58/1695 train_time:5407ms step_avg:93.22ms +step:59/1695 train_time:5501ms step_avg:93.23ms +step:60/1695 train_time:5594ms step_avg:93.24ms +step:61/1695 train_time:5689ms step_avg:93.26ms +step:62/1695 train_time:5782ms step_avg:93.27ms +step:63/1695 train_time:5876ms step_avg:93.27ms +step:64/1695 train_time:5971ms step_avg:93.29ms +step:65/1695 train_time:6064ms step_avg:93.29ms +step:66/1695 train_time:6157ms step_avg:93.29ms +step:67/1695 train_time:6251ms step_avg:93.30ms +step:68/1695 train_time:6345ms step_avg:93.31ms +step:69/1695 train_time:6438ms step_avg:93.31ms +step:70/1695 train_time:6531ms step_avg:93.31ms +step:71/1695 train_time:6625ms step_avg:93.31ms +step:72/1695 train_time:6719ms step_avg:93.32ms +step:73/1695 train_time:6812ms step_avg:93.31ms +step:74/1695 train_time:6906ms step_avg:93.32ms +step:75/1695 train_time:7000ms step_avg:93.33ms +step:76/1695 train_time:7093ms step_avg:93.33ms +step:77/1695 train_time:7188ms step_avg:93.35ms +step:78/1695 train_time:7280ms step_avg:93.33ms +step:79/1695 train_time:7373ms step_avg:93.33ms +step:80/1695 train_time:7468ms 
step_avg:93.35ms +step:81/1695 train_time:7562ms step_avg:93.36ms +step:82/1695 train_time:7656ms step_avg:93.36ms +step:83/1695 train_time:7750ms step_avg:93.37ms +step:84/1695 train_time:7844ms step_avg:93.38ms +step:85/1695 train_time:7937ms step_avg:93.38ms +step:86/1695 train_time:8031ms step_avg:93.38ms +step:87/1695 train_time:8124ms step_avg:93.38ms +step:88/1695 train_time:8217ms step_avg:93.38ms +step:89/1695 train_time:8311ms step_avg:93.38ms +step:90/1695 train_time:8405ms step_avg:93.39ms +step:91/1695 train_time:8498ms step_avg:93.39ms +step:92/1695 train_time:8591ms step_avg:93.39ms +step:93/1695 train_time:8685ms step_avg:93.39ms +step:94/1695 train_time:8780ms step_avg:93.40ms +step:95/1695 train_time:8873ms step_avg:93.40ms +step:96/1695 train_time:8967ms step_avg:93.41ms +step:97/1695 train_time:9062ms step_avg:93.42ms +step:98/1695 train_time:9155ms step_avg:93.41ms +step:99/1695 train_time:9248ms step_avg:93.41ms +step:100/1695 train_time:9342ms step_avg:93.42ms +step:101/1695 train_time:9435ms step_avg:93.42ms +step:102/1695 train_time:9530ms step_avg:93.43ms +step:103/1695 train_time:9624ms step_avg:93.43ms +step:104/1695 train_time:9717ms step_avg:93.43ms +step:105/1695 train_time:9810ms step_avg:93.43ms +step:106/1695 train_time:9903ms step_avg:93.43ms +step:107/1695 train_time:9996ms step_avg:93.42ms +step:108/1695 train_time:10090ms step_avg:93.42ms +step:109/1695 train_time:10184ms step_avg:93.43ms +step:110/1695 train_time:10278ms step_avg:93.44ms +step:111/1695 train_time:10371ms step_avg:93.43ms +step:112/1695 train_time:10465ms step_avg:93.44ms +step:113/1695 train_time:10558ms step_avg:93.43ms +step:114/1695 train_time:10652ms step_avg:93.43ms +step:115/1695 train_time:10746ms step_avg:93.45ms +step:116/1695 train_time:10840ms step_avg:93.45ms +step:117/1695 train_time:10934ms step_avg:93.45ms +step:118/1695 train_time:11028ms step_avg:93.46ms +step:119/1695 train_time:11122ms step_avg:93.46ms +step:120/1695 train_time:11215ms step_avg:93.46ms +step:121/1695 train_time:11309ms step_avg:93.46ms +step:122/1695 train_time:11402ms step_avg:93.46ms +step:123/1695 train_time:11495ms step_avg:93.46ms +step:124/1695 train_time:11589ms step_avg:93.46ms +step:125/1695 train_time:11683ms step_avg:93.46ms +step:125/1695 val_loss:4.6053 train_time:11775ms step_avg:94.20ms +step:126/1695 train_time:11803ms step_avg:93.67ms +step:127/1695 train_time:11879ms step_avg:93.54ms +step:128/1695 train_time:11981ms step_avg:93.60ms +step:129/1695 train_time:12078ms step_avg:93.63ms +step:130/1695 train_time:12172ms step_avg:93.63ms +step:131/1695 train_time:12265ms step_avg:93.63ms +step:132/1695 train_time:12358ms step_avg:93.62ms +step:133/1695 train_time:12452ms step_avg:93.62ms +step:134/1695 train_time:12545ms step_avg:93.62ms +step:135/1695 train_time:12638ms step_avg:93.62ms +step:136/1695 train_time:12732ms step_avg:93.61ms +step:137/1695 train_time:12826ms step_avg:93.62ms +step:138/1695 train_time:12921ms step_avg:93.63ms +step:139/1695 train_time:13017ms step_avg:93.64ms +step:140/1695 train_time:13112ms step_avg:93.66ms +step:141/1695 train_time:13207ms step_avg:93.66ms +step:142/1695 train_time:13300ms step_avg:93.66ms +step:143/1695 train_time:13394ms step_avg:93.66ms +step:144/1695 train_time:13488ms step_avg:93.67ms +step:145/1695 train_time:13581ms step_avg:93.67ms +step:146/1695 train_time:13675ms step_avg:93.66ms +step:147/1695 train_time:13769ms step_avg:93.66ms +step:148/1695 train_time:13862ms step_avg:93.66ms +step:149/1695 train_time:13957ms 
step_avg:93.67ms +step:150/1695 train_time:14053ms step_avg:93.68ms +step:151/1695 train_time:14148ms step_avg:93.69ms +step:152/1695 train_time:14242ms step_avg:93.70ms +step:153/1695 train_time:14335ms step_avg:93.69ms +step:154/1695 train_time:14430ms step_avg:93.70ms +step:155/1695 train_time:14523ms step_avg:93.70ms +step:156/1695 train_time:14617ms step_avg:93.70ms +step:157/1695 train_time:14712ms step_avg:93.70ms +step:158/1695 train_time:14806ms step_avg:93.71ms +step:159/1695 train_time:14900ms step_avg:93.71ms +step:160/1695 train_time:14993ms step_avg:93.71ms +step:161/1695 train_time:15087ms step_avg:93.71ms +step:162/1695 train_time:15182ms step_avg:93.72ms +step:163/1695 train_time:15276ms step_avg:93.72ms +step:164/1695 train_time:15370ms step_avg:93.72ms +step:165/1695 train_time:15464ms step_avg:93.72ms +step:166/1695 train_time:15558ms step_avg:93.72ms +step:167/1695 train_time:15653ms step_avg:93.73ms +step:168/1695 train_time:15747ms step_avg:93.73ms +step:169/1695 train_time:15840ms step_avg:93.73ms +step:170/1695 train_time:15935ms step_avg:93.73ms +step:171/1695 train_time:16029ms step_avg:93.74ms +step:172/1695 train_time:16124ms step_avg:93.74ms +step:173/1695 train_time:16218ms step_avg:93.74ms +step:174/1695 train_time:16312ms step_avg:93.75ms +step:175/1695 train_time:16405ms step_avg:93.74ms +step:176/1695 train_time:16499ms step_avg:93.75ms +step:177/1695 train_time:16593ms step_avg:93.75ms +step:178/1695 train_time:16687ms step_avg:93.75ms +step:179/1695 train_time:16781ms step_avg:93.75ms +step:180/1695 train_time:16875ms step_avg:93.75ms +step:181/1695 train_time:16969ms step_avg:93.75ms +step:182/1695 train_time:17064ms step_avg:93.76ms +step:183/1695 train_time:17157ms step_avg:93.76ms +step:184/1695 train_time:17252ms step_avg:93.76ms +step:185/1695 train_time:17348ms step_avg:93.77ms +step:186/1695 train_time:17441ms step_avg:93.77ms +step:187/1695 train_time:17536ms step_avg:93.77ms +step:188/1695 train_time:17629ms step_avg:93.77ms +step:189/1695 train_time:17723ms step_avg:93.77ms +step:190/1695 train_time:17817ms step_avg:93.78ms +step:191/1695 train_time:17911ms step_avg:93.78ms +step:192/1695 train_time:18005ms step_avg:93.78ms +step:193/1695 train_time:18099ms step_avg:93.78ms +step:194/1695 train_time:18193ms step_avg:93.78ms +step:195/1695 train_time:18288ms step_avg:93.78ms +step:196/1695 train_time:18381ms step_avg:93.78ms +step:197/1695 train_time:18476ms step_avg:93.78ms +step:198/1695 train_time:18570ms step_avg:93.79ms +step:199/1695 train_time:18664ms step_avg:93.79ms +step:200/1695 train_time:18758ms step_avg:93.79ms +step:201/1695 train_time:18853ms step_avg:93.79ms +step:202/1695 train_time:18948ms step_avg:93.80ms +step:203/1695 train_time:19043ms step_avg:93.81ms +step:204/1695 train_time:19136ms step_avg:93.80ms +step:205/1695 train_time:19230ms step_avg:93.81ms +step:206/1695 train_time:19325ms step_avg:93.81ms +step:207/1695 train_time:19418ms step_avg:93.81ms +step:208/1695 train_time:19512ms step_avg:93.81ms +step:209/1695 train_time:19606ms step_avg:93.81ms +step:210/1695 train_time:19700ms step_avg:93.81ms +step:211/1695 train_time:19795ms step_avg:93.81ms +step:212/1695 train_time:19888ms step_avg:93.81ms +step:213/1695 train_time:19982ms step_avg:93.81ms +step:214/1695 train_time:20077ms step_avg:93.82ms +step:215/1695 train_time:20171ms step_avg:93.82ms +step:216/1695 train_time:20266ms step_avg:93.82ms +step:217/1695 train_time:20359ms step_avg:93.82ms +step:218/1695 train_time:20454ms step_avg:93.82ms +step:219/1695 
train_time:20548ms step_avg:93.83ms +step:220/1695 train_time:20642ms step_avg:93.83ms +step:221/1695 train_time:20736ms step_avg:93.83ms +step:222/1695 train_time:20831ms step_avg:93.83ms +step:223/1695 train_time:20926ms step_avg:93.84ms +step:224/1695 train_time:21019ms step_avg:93.84ms +step:225/1695 train_time:21113ms step_avg:93.84ms +step:226/1695 train_time:21208ms step_avg:93.84ms +step:227/1695 train_time:21301ms step_avg:93.84ms +step:228/1695 train_time:21396ms step_avg:93.84ms +step:229/1695 train_time:21489ms step_avg:93.84ms +step:230/1695 train_time:21583ms step_avg:93.84ms +step:231/1695 train_time:21677ms step_avg:93.84ms +step:232/1695 train_time:21771ms step_avg:93.84ms +step:233/1695 train_time:21864ms step_avg:93.84ms +step:234/1695 train_time:21959ms step_avg:93.84ms +step:235/1695 train_time:22053ms step_avg:93.84ms +step:236/1695 train_time:22148ms step_avg:93.85ms +step:237/1695 train_time:22242ms step_avg:93.85ms +step:238/1695 train_time:22336ms step_avg:93.85ms +step:239/1695 train_time:22431ms step_avg:93.85ms +step:240/1695 train_time:22525ms step_avg:93.85ms +step:241/1695 train_time:22619ms step_avg:93.85ms +step:242/1695 train_time:22712ms step_avg:93.85ms +step:243/1695 train_time:22806ms step_avg:93.85ms +step:244/1695 train_time:22901ms step_avg:93.86ms +step:245/1695 train_time:22995ms step_avg:93.86ms +step:246/1695 train_time:23090ms step_avg:93.86ms +step:247/1695 train_time:23184ms step_avg:93.86ms +step:248/1695 train_time:23277ms step_avg:93.86ms +step:249/1695 train_time:23373ms step_avg:93.87ms +step:250/1695 train_time:23467ms step_avg:93.87ms +step:250/1695 val_loss:4.0744 train_time:23559ms step_avg:94.24ms +step:251/1695 train_time:23587ms step_avg:93.97ms +step:252/1695 train_time:23664ms step_avg:93.90ms +step:253/1695 train_time:23762ms step_avg:93.92ms +step:254/1695 train_time:23857ms step_avg:93.93ms +step:255/1695 train_time:23951ms step_avg:93.93ms +step:256/1695 train_time:24045ms step_avg:93.92ms +step:257/1695 train_time:24138ms step_avg:93.92ms +step:258/1695 train_time:24232ms step_avg:93.92ms +step:259/1695 train_time:24327ms step_avg:93.93ms +step:260/1695 train_time:24419ms step_avg:93.92ms +step:261/1695 train_time:24514ms step_avg:93.92ms +step:262/1695 train_time:24609ms step_avg:93.93ms +step:263/1695 train_time:24706ms step_avg:93.94ms +step:264/1695 train_time:24801ms step_avg:93.94ms +step:265/1695 train_time:24897ms step_avg:93.95ms +step:266/1695 train_time:24992ms step_avg:93.96ms +step:267/1695 train_time:25087ms step_avg:93.96ms +step:268/1695 train_time:25180ms step_avg:93.96ms +step:269/1695 train_time:25275ms step_avg:93.96ms +step:270/1695 train_time:25369ms step_avg:93.96ms +step:271/1695 train_time:25462ms step_avg:93.96ms +step:272/1695 train_time:25557ms step_avg:93.96ms +step:273/1695 train_time:25653ms step_avg:93.97ms +step:274/1695 train_time:25748ms step_avg:93.97ms +step:275/1695 train_time:25843ms step_avg:93.97ms +step:276/1695 train_time:25937ms step_avg:93.98ms +step:277/1695 train_time:26032ms step_avg:93.98ms +step:278/1695 train_time:26127ms step_avg:93.98ms +step:279/1695 train_time:26221ms step_avg:93.98ms +step:280/1695 train_time:26315ms step_avg:93.98ms +step:281/1695 train_time:26408ms step_avg:93.98ms +step:282/1695 train_time:26502ms step_avg:93.98ms +step:283/1695 train_time:26597ms step_avg:93.98ms +step:284/1695 train_time:26692ms step_avg:93.99ms +step:285/1695 train_time:26787ms step_avg:93.99ms +step:286/1695 train_time:26882ms step_avg:93.99ms +step:287/1695 train_time:26976ms 
step_avg:93.99ms +step:288/1695 train_time:27071ms step_avg:94.00ms +step:289/1695 train_time:27166ms step_avg:94.00ms +step:290/1695 train_time:27260ms step_avg:94.00ms +step:291/1695 train_time:27355ms step_avg:94.00ms +step:292/1695 train_time:27449ms step_avg:94.00ms +step:293/1695 train_time:27544ms step_avg:94.01ms +step:294/1695 train_time:27638ms step_avg:94.01ms +step:295/1695 train_time:27734ms step_avg:94.01ms +step:296/1695 train_time:27828ms step_avg:94.01ms +step:297/1695 train_time:27922ms step_avg:94.01ms +step:298/1695 train_time:28016ms step_avg:94.01ms +step:299/1695 train_time:28111ms step_avg:94.02ms +step:300/1695 train_time:28206ms step_avg:94.02ms +step:301/1695 train_time:28300ms step_avg:94.02ms +step:302/1695 train_time:28394ms step_avg:94.02ms +step:303/1695 train_time:28488ms step_avg:94.02ms +step:304/1695 train_time:28583ms step_avg:94.02ms +step:305/1695 train_time:28677ms step_avg:94.02ms +step:306/1695 train_time:28772ms step_avg:94.03ms +step:307/1695 train_time:28867ms step_avg:94.03ms +step:308/1695 train_time:28961ms step_avg:94.03ms +step:309/1695 train_time:29056ms step_avg:94.03ms +step:310/1695 train_time:29151ms step_avg:94.04ms +step:311/1695 train_time:29246ms step_avg:94.04ms +step:312/1695 train_time:29340ms step_avg:94.04ms +step:313/1695 train_time:29434ms step_avg:94.04ms +step:314/1695 train_time:29529ms step_avg:94.04ms +step:315/1695 train_time:29623ms step_avg:94.04ms +step:316/1695 train_time:29717ms step_avg:94.04ms +step:317/1695 train_time:29812ms step_avg:94.04ms +step:318/1695 train_time:29907ms step_avg:94.05ms +step:319/1695 train_time:30001ms step_avg:94.05ms +step:320/1695 train_time:30096ms step_avg:94.05ms +step:321/1695 train_time:30191ms step_avg:94.05ms +step:322/1695 train_time:30285ms step_avg:94.05ms +step:323/1695 train_time:30379ms step_avg:94.05ms +step:324/1695 train_time:30474ms step_avg:94.05ms +step:325/1695 train_time:30569ms step_avg:94.06ms +step:326/1695 train_time:30663ms step_avg:94.06ms +step:327/1695 train_time:30757ms step_avg:94.06ms +step:328/1695 train_time:30854ms step_avg:94.07ms +step:329/1695 train_time:30948ms step_avg:94.07ms +step:330/1695 train_time:31043ms step_avg:94.07ms +step:331/1695 train_time:31136ms step_avg:94.07ms +step:332/1695 train_time:31232ms step_avg:94.07ms +step:333/1695 train_time:31327ms step_avg:94.07ms +step:334/1695 train_time:31420ms step_avg:94.07ms +step:335/1695 train_time:31515ms step_avg:94.08ms +step:336/1695 train_time:31610ms step_avg:94.08ms +step:337/1695 train_time:31704ms step_avg:94.08ms +step:338/1695 train_time:31798ms step_avg:94.08ms +step:339/1695 train_time:31893ms step_avg:94.08ms +step:340/1695 train_time:31988ms step_avg:94.08ms +step:341/1695 train_time:32082ms step_avg:94.08ms +step:342/1695 train_time:32176ms step_avg:94.08ms +step:343/1695 train_time:32271ms step_avg:94.08ms +step:344/1695 train_time:32366ms step_avg:94.09ms +step:345/1695 train_time:32460ms step_avg:94.09ms +step:346/1695 train_time:32555ms step_avg:94.09ms +step:347/1695 train_time:32650ms step_avg:94.09ms +step:348/1695 train_time:32743ms step_avg:94.09ms +step:349/1695 train_time:32838ms step_avg:94.09ms +step:350/1695 train_time:32932ms step_avg:94.09ms +step:351/1695 train_time:33027ms step_avg:94.09ms +step:352/1695 train_time:33121ms step_avg:94.09ms +step:353/1695 train_time:33215ms step_avg:94.09ms +step:354/1695 train_time:33310ms step_avg:94.10ms +step:355/1695 train_time:33404ms step_avg:94.09ms +step:356/1695 train_time:33498ms step_avg:94.09ms +step:357/1695 
train_time:33593ms step_avg:94.10ms +step:358/1695 train_time:33687ms step_avg:94.10ms +step:359/1695 train_time:33781ms step_avg:94.10ms +step:360/1695 train_time:33876ms step_avg:94.10ms +step:361/1695 train_time:33971ms step_avg:94.10ms +step:362/1695 train_time:34066ms step_avg:94.10ms +step:363/1695 train_time:34160ms step_avg:94.10ms +step:364/1695 train_time:34255ms step_avg:94.11ms +step:365/1695 train_time:34350ms step_avg:94.11ms +step:366/1695 train_time:34444ms step_avg:94.11ms +step:367/1695 train_time:34538ms step_avg:94.11ms +step:368/1695 train_time:34633ms step_avg:94.11ms +step:369/1695 train_time:34728ms step_avg:94.11ms +step:370/1695 train_time:34822ms step_avg:94.11ms +step:371/1695 train_time:34917ms step_avg:94.12ms +step:372/1695 train_time:35012ms step_avg:94.12ms +step:373/1695 train_time:35107ms step_avg:94.12ms +step:374/1695 train_time:35202ms step_avg:94.12ms +step:375/1695 train_time:35296ms step_avg:94.12ms +step:375/1695 val_loss:3.8794 train_time:35390ms step_avg:94.37ms +step:376/1695 train_time:35418ms step_avg:94.20ms +step:377/1695 train_time:35494ms step_avg:94.15ms +step:378/1695 train_time:35595ms step_avg:94.17ms +step:379/1695 train_time:35692ms step_avg:94.17ms +step:380/1695 train_time:35789ms step_avg:94.18ms +step:381/1695 train_time:35885ms step_avg:94.19ms +step:382/1695 train_time:35980ms step_avg:94.19ms +step:383/1695 train_time:36075ms step_avg:94.19ms +step:384/1695 train_time:36172ms step_avg:94.20ms +step:385/1695 train_time:36267ms step_avg:94.20ms +step:386/1695 train_time:36363ms step_avg:94.20ms +step:387/1695 train_time:36459ms step_avg:94.21ms +step:388/1695 train_time:36556ms step_avg:94.22ms +step:389/1695 train_time:36653ms step_avg:94.22ms +step:390/1695 train_time:36750ms step_avg:94.23ms +step:391/1695 train_time:36846ms step_avg:94.24ms +step:392/1695 train_time:36943ms step_avg:94.24ms +step:393/1695 train_time:37039ms step_avg:94.25ms +step:394/1695 train_time:37135ms step_avg:94.25ms +step:395/1695 train_time:37231ms step_avg:94.26ms +step:396/1695 train_time:37327ms step_avg:94.26ms +step:397/1695 train_time:37424ms step_avg:94.27ms +step:398/1695 train_time:37521ms step_avg:94.27ms +step:399/1695 train_time:37618ms step_avg:94.28ms +step:400/1695 train_time:37714ms step_avg:94.29ms +step:401/1695 train_time:37811ms step_avg:94.29ms +step:402/1695 train_time:37909ms step_avg:94.30ms +step:403/1695 train_time:38005ms step_avg:94.31ms +step:404/1695 train_time:38101ms step_avg:94.31ms +step:405/1695 train_time:38196ms step_avg:94.31ms +step:406/1695 train_time:38292ms step_avg:94.32ms +step:407/1695 train_time:38388ms step_avg:94.32ms +step:408/1695 train_time:38485ms step_avg:94.33ms +step:409/1695 train_time:38582ms step_avg:94.33ms +step:410/1695 train_time:38679ms step_avg:94.34ms +step:411/1695 train_time:38776ms step_avg:94.34ms +step:412/1695 train_time:38872ms step_avg:94.35ms +step:413/1695 train_time:38969ms step_avg:94.36ms +step:414/1695 train_time:39065ms step_avg:94.36ms +step:415/1695 train_time:39161ms step_avg:94.36ms +step:416/1695 train_time:39256ms step_avg:94.37ms +step:417/1695 train_time:39351ms step_avg:94.37ms +step:418/1695 train_time:39448ms step_avg:94.37ms +step:419/1695 train_time:39545ms step_avg:94.38ms +step:420/1695 train_time:39643ms step_avg:94.39ms +step:421/1695 train_time:39739ms step_avg:94.39ms +step:422/1695 train_time:39834ms step_avg:94.39ms +step:423/1695 train_time:39931ms step_avg:94.40ms +step:424/1695 train_time:40027ms step_avg:94.40ms +step:425/1695 train_time:40124ms 
step_avg:94.41ms +step:426/1695 train_time:40221ms step_avg:94.42ms +step:427/1695 train_time:40317ms step_avg:94.42ms +step:428/1695 train_time:40413ms step_avg:94.42ms +step:429/1695 train_time:40509ms step_avg:94.43ms +step:430/1695 train_time:40607ms step_avg:94.44ms +step:431/1695 train_time:40703ms step_avg:94.44ms +step:432/1695 train_time:40800ms step_avg:94.44ms +step:433/1695 train_time:40896ms step_avg:94.45ms +step:434/1695 train_time:40991ms step_avg:94.45ms +step:435/1695 train_time:41087ms step_avg:94.45ms +step:436/1695 train_time:41184ms step_avg:94.46ms +step:437/1695 train_time:41280ms step_avg:94.46ms +step:438/1695 train_time:41376ms step_avg:94.47ms +step:439/1695 train_time:41472ms step_avg:94.47ms +step:440/1695 train_time:41568ms step_avg:94.47ms +step:441/1695 train_time:41665ms step_avg:94.48ms +step:442/1695 train_time:41760ms step_avg:94.48ms +step:443/1695 train_time:41856ms step_avg:94.48ms +step:444/1695 train_time:41952ms step_avg:94.49ms +step:445/1695 train_time:42048ms step_avg:94.49ms +step:446/1695 train_time:42144ms step_avg:94.49ms +step:447/1695 train_time:42241ms step_avg:94.50ms +step:448/1695 train_time:42337ms step_avg:94.50ms +step:449/1695 train_time:42433ms step_avg:94.50ms +step:450/1695 train_time:42530ms step_avg:94.51ms +step:451/1695 train_time:42626ms step_avg:94.51ms +step:452/1695 train_time:42721ms step_avg:94.52ms +step:453/1695 train_time:42817ms step_avg:94.52ms +step:454/1695 train_time:42913ms step_avg:94.52ms +step:455/1695 train_time:43010ms step_avg:94.53ms +step:456/1695 train_time:43107ms step_avg:94.53ms +step:457/1695 train_time:43203ms step_avg:94.54ms +step:458/1695 train_time:43300ms step_avg:94.54ms +step:459/1695 train_time:43396ms step_avg:94.54ms +step:460/1695 train_time:43492ms step_avg:94.55ms +step:461/1695 train_time:43589ms step_avg:94.55ms +step:462/1695 train_time:43685ms step_avg:94.56ms +step:463/1695 train_time:43782ms step_avg:94.56ms +step:464/1695 train_time:43878ms step_avg:94.56ms +step:465/1695 train_time:43973ms step_avg:94.57ms +step:466/1695 train_time:44069ms step_avg:94.57ms +step:467/1695 train_time:44165ms step_avg:94.57ms +step:468/1695 train_time:44262ms step_avg:94.58ms +step:469/1695 train_time:44358ms step_avg:94.58ms +step:470/1695 train_time:44453ms step_avg:94.58ms +step:471/1695 train_time:44549ms step_avg:94.58ms +step:472/1695 train_time:44646ms step_avg:94.59ms +step:473/1695 train_time:44742ms step_avg:94.59ms +step:474/1695 train_time:44838ms step_avg:94.60ms +step:475/1695 train_time:44934ms step_avg:94.60ms +step:476/1695 train_time:45031ms step_avg:94.60ms +step:477/1695 train_time:45128ms step_avg:94.61ms +step:478/1695 train_time:45224ms step_avg:94.61ms +step:479/1695 train_time:45321ms step_avg:94.62ms +step:480/1695 train_time:45417ms step_avg:94.62ms +step:481/1695 train_time:45513ms step_avg:94.62ms +step:482/1695 train_time:45609ms step_avg:94.63ms +step:483/1695 train_time:45707ms step_avg:94.63ms +step:484/1695 train_time:45804ms step_avg:94.64ms +step:485/1695 train_time:45900ms step_avg:94.64ms +step:486/1695 train_time:45996ms step_avg:94.64ms +step:487/1695 train_time:46092ms step_avg:94.64ms +step:488/1695 train_time:46188ms step_avg:94.65ms +step:489/1695 train_time:46284ms step_avg:94.65ms +step:490/1695 train_time:46381ms step_avg:94.65ms +step:491/1695 train_time:46477ms step_avg:94.66ms +step:492/1695 train_time:46572ms step_avg:94.66ms +step:493/1695 train_time:46669ms step_avg:94.66ms +step:494/1695 train_time:46766ms step_avg:94.67ms +step:495/1695 
train_time:46863ms step_avg:94.67ms +step:496/1695 train_time:46959ms step_avg:94.68ms +step:497/1695 train_time:47055ms step_avg:94.68ms +step:498/1695 train_time:47150ms step_avg:94.68ms +step:499/1695 train_time:47247ms step_avg:94.68ms +step:500/1695 train_time:47342ms step_avg:94.68ms +step:500/1695 val_loss:3.7347 train_time:47437ms step_avg:94.87ms +step:501/1695 train_time:47465ms step_avg:94.74ms +step:502/1695 train_time:47545ms step_avg:94.71ms +step:503/1695 train_time:47647ms step_avg:94.73ms +step:504/1695 train_time:47744ms step_avg:94.73ms +step:505/1695 train_time:47840ms step_avg:94.73ms +step:506/1695 train_time:47935ms step_avg:94.73ms +step:507/1695 train_time:48032ms step_avg:94.74ms +step:508/1695 train_time:48127ms step_avg:94.74ms +step:509/1695 train_time:48223ms step_avg:94.74ms +step:510/1695 train_time:48318ms step_avg:94.74ms +step:511/1695 train_time:48414ms step_avg:94.74ms +step:512/1695 train_time:48514ms step_avg:94.75ms +step:513/1695 train_time:48613ms step_avg:94.76ms +step:514/1695 train_time:48712ms step_avg:94.77ms +step:515/1695 train_time:48810ms step_avg:94.78ms +step:516/1695 train_time:48907ms step_avg:94.78ms +step:517/1695 train_time:49004ms step_avg:94.78ms +step:518/1695 train_time:49099ms step_avg:94.78ms +step:519/1695 train_time:49195ms step_avg:94.79ms +step:520/1695 train_time:49291ms step_avg:94.79ms +step:521/1695 train_time:49388ms step_avg:94.79ms +step:522/1695 train_time:49485ms step_avg:94.80ms +step:523/1695 train_time:49581ms step_avg:94.80ms +step:524/1695 train_time:49679ms step_avg:94.81ms +step:525/1695 train_time:49776ms step_avg:94.81ms +step:526/1695 train_time:49873ms step_avg:94.82ms +step:527/1695 train_time:49969ms step_avg:94.82ms +step:528/1695 train_time:50066ms step_avg:94.82ms +step:529/1695 train_time:50163ms step_avg:94.83ms +step:530/1695 train_time:50259ms step_avg:94.83ms +step:531/1695 train_time:50354ms step_avg:94.83ms +step:532/1695 train_time:50451ms step_avg:94.83ms +step:533/1695 train_time:50548ms step_avg:94.84ms +step:534/1695 train_time:50646ms step_avg:94.84ms +step:535/1695 train_time:50743ms step_avg:94.85ms +step:536/1695 train_time:50840ms step_avg:94.85ms +step:537/1695 train_time:50936ms step_avg:94.85ms +step:538/1695 train_time:51033ms step_avg:94.86ms +step:539/1695 train_time:51130ms step_avg:94.86ms +step:540/1695 train_time:51227ms step_avg:94.86ms +step:541/1695 train_time:51323ms step_avg:94.87ms +step:542/1695 train_time:51418ms step_avg:94.87ms +step:543/1695 train_time:51515ms step_avg:94.87ms +step:544/1695 train_time:51614ms step_avg:94.88ms +step:545/1695 train_time:51711ms step_avg:94.88ms +step:546/1695 train_time:51808ms step_avg:94.89ms +step:547/1695 train_time:51905ms step_avg:94.89ms +step:548/1695 train_time:52001ms step_avg:94.89ms +step:549/1695 train_time:52097ms step_avg:94.89ms +step:550/1695 train_time:52193ms step_avg:94.90ms +step:551/1695 train_time:52290ms step_avg:94.90ms +step:552/1695 train_time:52387ms step_avg:94.90ms +step:553/1695 train_time:52484ms step_avg:94.91ms +step:554/1695 train_time:52580ms step_avg:94.91ms +step:555/1695 train_time:52676ms step_avg:94.91ms +step:556/1695 train_time:52773ms step_avg:94.92ms +step:557/1695 train_time:52870ms step_avg:94.92ms +step:558/1695 train_time:52966ms step_avg:94.92ms +step:559/1695 train_time:53062ms step_avg:94.92ms +step:560/1695 train_time:53158ms step_avg:94.93ms +step:561/1695 train_time:53254ms step_avg:94.93ms +step:562/1695 train_time:53351ms step_avg:94.93ms +step:563/1695 train_time:53448ms 
step_avg:94.93ms +step:564/1695 train_time:53545ms step_avg:94.94ms +step:565/1695 train_time:53641ms step_avg:94.94ms +step:566/1695 train_time:53738ms step_avg:94.94ms +step:567/1695 train_time:53834ms step_avg:94.95ms +step:568/1695 train_time:53931ms step_avg:94.95ms +step:569/1695 train_time:54028ms step_avg:94.95ms +step:570/1695 train_time:54125ms step_avg:94.96ms +step:571/1695 train_time:54469ms step_avg:95.39ms +step:572/1695 train_time:54563ms step_avg:95.39ms +step:573/1695 train_time:54659ms step_avg:95.39ms +step:574/1695 train_time:54754ms step_avg:95.39ms +step:575/1695 train_time:54850ms step_avg:95.39ms +step:576/1695 train_time:54946ms step_avg:95.39ms +step:577/1695 train_time:55041ms step_avg:95.39ms +step:578/1695 train_time:55137ms step_avg:95.39ms +step:579/1695 train_time:55233ms step_avg:95.39ms +step:580/1695 train_time:55329ms step_avg:95.39ms +step:581/1695 train_time:55428ms step_avg:95.40ms +step:582/1695 train_time:55526ms step_avg:95.41ms +step:583/1695 train_time:55624ms step_avg:95.41ms +step:584/1695 train_time:55721ms step_avg:95.41ms +step:585/1695 train_time:55817ms step_avg:95.41ms +step:586/1695 train_time:55913ms step_avg:95.41ms +step:587/1695 train_time:56009ms step_avg:95.42ms +step:588/1695 train_time:56106ms step_avg:95.42ms +step:589/1695 train_time:56202ms step_avg:95.42ms +step:590/1695 train_time:56298ms step_avg:95.42ms +step:591/1695 train_time:56395ms step_avg:95.42ms +step:592/1695 train_time:56493ms step_avg:95.43ms +step:593/1695 train_time:56591ms step_avg:95.43ms +step:594/1695 train_time:56689ms step_avg:95.44ms +step:595/1695 train_time:56787ms step_avg:95.44ms +step:596/1695 train_time:56883ms step_avg:95.44ms +step:597/1695 train_time:56979ms step_avg:95.44ms +step:598/1695 train_time:57075ms step_avg:95.44ms +step:599/1695 train_time:57172ms step_avg:95.45ms +step:600/1695 train_time:57268ms step_avg:95.45ms +step:601/1695 train_time:57365ms step_avg:95.45ms +step:602/1695 train_time:57461ms step_avg:95.45ms +step:603/1695 train_time:57559ms step_avg:95.45ms +step:604/1695 train_time:57656ms step_avg:95.46ms +step:605/1695 train_time:57755ms step_avg:95.46ms +step:606/1695 train_time:57852ms step_avg:95.46ms +step:607/1695 train_time:57949ms step_avg:95.47ms +step:608/1695 train_time:58045ms step_avg:95.47ms +step:609/1695 train_time:58141ms step_avg:95.47ms +step:610/1695 train_time:58236ms step_avg:95.47ms +step:611/1695 train_time:58333ms step_avg:95.47ms +step:612/1695 train_time:58429ms step_avg:95.47ms +step:613/1695 train_time:58527ms step_avg:95.48ms +step:614/1695 train_time:58623ms step_avg:95.48ms +step:615/1695 train_time:58720ms step_avg:95.48ms +step:616/1695 train_time:58816ms step_avg:95.48ms +step:617/1695 train_time:58914ms step_avg:95.48ms +step:618/1695 train_time:59012ms step_avg:95.49ms +step:619/1695 train_time:59109ms step_avg:95.49ms +step:620/1695 train_time:59206ms step_avg:95.49ms +step:621/1695 train_time:59301ms step_avg:95.49ms +step:622/1695 train_time:59396ms step_avg:95.49ms +step:623/1695 train_time:59493ms step_avg:95.49ms +step:624/1695 train_time:59590ms step_avg:95.50ms +step:625/1695 train_time:59688ms step_avg:95.50ms +step:625/1695 val_loss:3.6510 train_time:59783ms step_avg:95.65ms +step:626/1695 train_time:59811ms step_avg:95.54ms +step:627/1695 train_time:59894ms step_avg:95.52ms +step:628/1695 train_time:59994ms step_avg:95.53ms +step:629/1695 train_time:60306ms step_avg:95.88ms +step:630/1695 train_time:60402ms step_avg:95.88ms +step:631/1695 train_time:60498ms step_avg:95.88ms 
+step:632/1695 train_time:60595ms step_avg:95.88ms +step:633/1695 train_time:60693ms step_avg:95.88ms +step:634/1695 train_time:60790ms step_avg:95.88ms +step:635/1695 train_time:60887ms step_avg:95.89ms +step:636/1695 train_time:60984ms step_avg:95.89ms +step:637/1695 train_time:61082ms step_avg:95.89ms +step:638/1695 train_time:61179ms step_avg:95.89ms +step:639/1695 train_time:61280ms step_avg:95.90ms +step:640/1695 train_time:61379ms step_avg:95.90ms +step:641/1695 train_time:61477ms step_avg:95.91ms +step:642/1695 train_time:61574ms step_avg:95.91ms +step:643/1695 train_time:61672ms step_avg:95.91ms +step:644/1695 train_time:61769ms step_avg:95.92ms +step:645/1695 train_time:61867ms step_avg:95.92ms +step:646/1695 train_time:61964ms step_avg:95.92ms +step:647/1695 train_time:62061ms step_avg:95.92ms +step:648/1695 train_time:62159ms step_avg:95.92ms +step:649/1695 train_time:62257ms step_avg:95.93ms +step:650/1695 train_time:62356ms step_avg:95.93ms +step:651/1695 train_time:62453ms step_avg:95.93ms +step:652/1695 train_time:62552ms step_avg:95.94ms +step:653/1695 train_time:62885ms step_avg:96.30ms +step:654/1695 train_time:62980ms step_avg:96.30ms +step:655/1695 train_time:63077ms step_avg:96.30ms +step:656/1695 train_time:63175ms step_avg:96.30ms +step:657/1695 train_time:63273ms step_avg:96.31ms +step:658/1695 train_time:63370ms step_avg:96.31ms +step:659/1695 train_time:63468ms step_avg:96.31ms +step:660/1695 train_time:63564ms step_avg:96.31ms +step:661/1695 train_time:63661ms step_avg:96.31ms +step:662/1695 train_time:63759ms step_avg:96.31ms +step:663/1695 train_time:63860ms step_avg:96.32ms +step:664/1695 train_time:63959ms step_avg:96.32ms +step:665/1695 train_time:64057ms step_avg:96.33ms +step:666/1695 train_time:64155ms step_avg:96.33ms +step:667/1695 train_time:64253ms step_avg:96.33ms +step:668/1695 train_time:64351ms step_avg:96.33ms +step:669/1695 train_time:64448ms step_avg:96.33ms +step:670/1695 train_time:64545ms step_avg:96.34ms +step:671/1695 train_time:64642ms step_avg:96.34ms +step:672/1695 train_time:64740ms step_avg:96.34ms +step:673/1695 train_time:64838ms step_avg:96.34ms +step:674/1695 train_time:64938ms step_avg:96.35ms +step:675/1695 train_time:65036ms step_avg:96.35ms +step:676/1695 train_time:65134ms step_avg:96.35ms +step:677/1695 train_time:65232ms step_avg:96.35ms +step:678/1695 train_time:65330ms step_avg:96.36ms +step:679/1695 train_time:65427ms step_avg:96.36ms +step:680/1695 train_time:65524ms step_avg:96.36ms +step:681/1695 train_time:65621ms step_avg:96.36ms +step:682/1695 train_time:65718ms step_avg:96.36ms +step:683/1695 train_time:65817ms step_avg:96.36ms +step:684/1695 train_time:65916ms step_avg:96.37ms +step:685/1695 train_time:66015ms step_avg:96.37ms +step:686/1695 train_time:66114ms step_avg:96.38ms +step:687/1695 train_time:66212ms step_avg:96.38ms +step:688/1695 train_time:66311ms step_avg:96.38ms +step:689/1695 train_time:66409ms step_avg:96.39ms +step:690/1695 train_time:66507ms step_avg:96.39ms +step:691/1695 train_time:66606ms step_avg:96.39ms +step:692/1695 train_time:66703ms step_avg:96.39ms +step:693/1695 train_time:66801ms step_avg:96.39ms +step:694/1695 train_time:66899ms step_avg:96.40ms +step:695/1695 train_time:66998ms step_avg:96.40ms +step:696/1695 train_time:67096ms step_avg:96.40ms +step:697/1695 train_time:67195ms step_avg:96.41ms +step:698/1695 train_time:67294ms step_avg:96.41ms +step:699/1695 train_time:67392ms step_avg:96.41ms +step:700/1695 train_time:67490ms step_avg:96.41ms +step:701/1695 train_time:67589ms 
step_avg:96.42ms +step:702/1695 train_time:67686ms step_avg:96.42ms +step:703/1695 train_time:67786ms step_avg:96.42ms +step:704/1695 train_time:67885ms step_avg:96.43ms +step:705/1695 train_time:67984ms step_avg:96.43ms +step:706/1695 train_time:68083ms step_avg:96.43ms +step:707/1695 train_time:68180ms step_avg:96.44ms +step:708/1695 train_time:68278ms step_avg:96.44ms +step:709/1695 train_time:68375ms step_avg:96.44ms +step:710/1695 train_time:68473ms step_avg:96.44ms +step:711/1695 train_time:68570ms step_avg:96.44ms +step:712/1695 train_time:68668ms step_avg:96.44ms +step:713/1695 train_time:68767ms step_avg:96.45ms +step:714/1695 train_time:69177ms step_avg:96.89ms +step:715/1695 train_time:69271ms step_avg:96.88ms +step:716/1695 train_time:69368ms step_avg:96.88ms +step:717/1695 train_time:69465ms step_avg:96.88ms +step:718/1695 train_time:69561ms step_avg:96.88ms +step:719/1695 train_time:69658ms step_avg:96.88ms +step:720/1695 train_time:70021ms step_avg:97.25ms +step:721/1695 train_time:70117ms step_avg:97.25ms +step:722/1695 train_time:70214ms step_avg:97.25ms +step:723/1695 train_time:70311ms step_avg:97.25ms +step:724/1695 train_time:70408ms step_avg:97.25ms +step:725/1695 train_time:70505ms step_avg:97.25ms +step:726/1695 train_time:70601ms step_avg:97.25ms +step:727/1695 train_time:70698ms step_avg:97.25ms +step:728/1695 train_time:70796ms step_avg:97.25ms +step:729/1695 train_time:70898ms step_avg:97.25ms +step:730/1695 train_time:70998ms step_avg:97.26ms +step:731/1695 train_time:71097ms step_avg:97.26ms +step:732/1695 train_time:71196ms step_avg:97.26ms +step:733/1695 train_time:71293ms step_avg:97.26ms +step:734/1695 train_time:71391ms step_avg:97.26ms +step:735/1695 train_time:71490ms step_avg:97.27ms +step:736/1695 train_time:71588ms step_avg:97.27ms +step:737/1695 train_time:71685ms step_avg:97.27ms +step:738/1695 train_time:71782ms step_avg:97.27ms +step:739/1695 train_time:71879ms step_avg:97.27ms +step:740/1695 train_time:71978ms step_avg:97.27ms +step:741/1695 train_time:72076ms step_avg:97.27ms +step:742/1695 train_time:72175ms step_avg:97.27ms +step:743/1695 train_time:72274ms step_avg:97.27ms +step:744/1695 train_time:72372ms step_avg:97.27ms +step:745/1695 train_time:72469ms step_avg:97.27ms +step:746/1695 train_time:72566ms step_avg:97.27ms +step:747/1695 train_time:72664ms step_avg:97.27ms +step:748/1695 train_time:72761ms step_avg:97.27ms +step:749/1695 train_time:72860ms step_avg:97.28ms +step:750/1695 train_time:72958ms step_avg:97.28ms +step:750/1695 val_loss:3.5898 train_time:73054ms step_avg:97.40ms +step:751/1695 train_time:73082ms step_avg:97.31ms +step:752/1695 train_time:73165ms step_avg:97.29ms +step:753/1695 train_time:73269ms step_avg:97.30ms +step:754/1695 train_time:73367ms step_avg:97.30ms +step:755/1695 train_time:73465ms step_avg:97.30ms +step:756/1695 train_time:73564ms step_avg:97.31ms +step:757/1695 train_time:73661ms step_avg:97.31ms +step:758/1695 train_time:73759ms step_avg:97.31ms +step:759/1695 train_time:73856ms step_avg:97.31ms +step:760/1695 train_time:73953ms step_avg:97.31ms +step:761/1695 train_time:74050ms step_avg:97.31ms +step:762/1695 train_time:74149ms step_avg:97.31ms +step:763/1695 train_time:74248ms step_avg:97.31ms +step:764/1695 train_time:74347ms step_avg:97.31ms +step:765/1695 train_time:74445ms step_avg:97.31ms +step:766/1695 train_time:74544ms step_avg:97.32ms +step:767/1695 train_time:74643ms step_avg:97.32ms +step:768/1695 train_time:74741ms step_avg:97.32ms +step:769/1695 train_time:74839ms step_avg:97.32ms 
+step:770/1695 train_time:74936ms step_avg:97.32ms +step:771/1695 train_time:75034ms step_avg:97.32ms +step:772/1695 train_time:75370ms step_avg:97.63ms +step:773/1695 train_time:75465ms step_avg:97.63ms +step:774/1695 train_time:75562ms step_avg:97.63ms +step:775/1695 train_time:75660ms step_avg:97.63ms +step:776/1695 train_time:75757ms step_avg:97.62ms +step:777/1695 train_time:75854ms step_avg:97.62ms +step:778/1695 train_time:75950ms step_avg:97.62ms +step:779/1695 train_time:76047ms step_avg:97.62ms +step:780/1695 train_time:76144ms step_avg:97.62ms +step:781/1695 train_time:76243ms step_avg:97.62ms +step:782/1695 train_time:76346ms step_avg:97.63ms +step:783/1695 train_time:76444ms step_avg:97.63ms +step:784/1695 train_time:76543ms step_avg:97.63ms +step:785/1695 train_time:76641ms step_avg:97.63ms +step:786/1695 train_time:76739ms step_avg:97.63ms +step:787/1695 train_time:76837ms step_avg:97.63ms +step:788/1695 train_time:76935ms step_avg:97.63ms +step:789/1695 train_time:77032ms step_avg:97.63ms +step:790/1695 train_time:77358ms step_avg:97.92ms +step:791/1695 train_time:77455ms step_avg:97.92ms +step:792/1695 train_time:77552ms step_avg:97.92ms +step:793/1695 train_time:77649ms step_avg:97.92ms +step:794/1695 train_time:77746ms step_avg:97.92ms +step:795/1695 train_time:78185ms step_avg:98.35ms +step:796/1695 train_time:78234ms step_avg:98.28ms +step:797/1695 train_time:78331ms step_avg:98.28ms +step:798/1695 train_time:78428ms step_avg:98.28ms +step:799/1695 train_time:78525ms step_avg:98.28ms +step:800/1695 train_time:78623ms step_avg:98.28ms +step:801/1695 train_time:78720ms step_avg:98.28ms +step:802/1695 train_time:78818ms step_avg:98.28ms +step:803/1695 train_time:78915ms step_avg:98.27ms +step:804/1695 train_time:79011ms step_avg:98.27ms +step:805/1695 train_time:79111ms step_avg:98.27ms +step:806/1695 train_time:79211ms step_avg:98.28ms +step:807/1695 train_time:79309ms step_avg:98.28ms +step:808/1695 train_time:79407ms step_avg:98.28ms +step:809/1695 train_time:79506ms step_avg:98.28ms +step:810/1695 train_time:79605ms step_avg:98.28ms +step:811/1695 train_time:79703ms step_avg:98.28ms +step:812/1695 train_time:79801ms step_avg:98.28ms +step:813/1695 train_time:79898ms step_avg:98.28ms +step:814/1695 train_time:79996ms step_avg:98.27ms +step:815/1695 train_time:80093ms step_avg:98.27ms +step:816/1695 train_time:80192ms step_avg:98.27ms +step:817/1695 train_time:80290ms step_avg:98.27ms +step:818/1695 train_time:80388ms step_avg:98.27ms +step:819/1695 train_time:80487ms step_avg:98.27ms +step:820/1695 train_time:80585ms step_avg:98.27ms +step:821/1695 train_time:80684ms step_avg:98.27ms +step:822/1695 train_time:80782ms step_avg:98.27ms +step:823/1695 train_time:80880ms step_avg:98.27ms +step:824/1695 train_time:80978ms step_avg:98.27ms +step:825/1695 train_time:81076ms step_avg:98.27ms +step:826/1695 train_time:81174ms step_avg:98.27ms +step:827/1695 train_time:81273ms step_avg:98.27ms +step:828/1695 train_time:81372ms step_avg:98.28ms +step:829/1695 train_time:81470ms step_avg:98.28ms +step:830/1695 train_time:81568ms step_avg:98.27ms +step:831/1695 train_time:81667ms step_avg:98.27ms +step:832/1695 train_time:81765ms step_avg:98.28ms +step:833/1695 train_time:81862ms step_avg:98.27ms +step:834/1695 train_time:81961ms step_avg:98.27ms +step:835/1695 train_time:82058ms step_avg:98.27ms +step:836/1695 train_time:82157ms step_avg:98.27ms +step:837/1695 train_time:82256ms step_avg:98.27ms +step:838/1695 train_time:82355ms step_avg:98.28ms +step:839/1695 train_time:82454ms 
step_avg:98.28ms +step:840/1695 train_time:82554ms step_avg:98.28ms +step:841/1695 train_time:82651ms step_avg:98.28ms +step:842/1695 train_time:82750ms step_avg:98.28ms +step:843/1695 train_time:82848ms step_avg:98.28ms +step:844/1695 train_time:82945ms step_avg:98.28ms +step:845/1695 train_time:83043ms step_avg:98.28ms +step:846/1695 train_time:83141ms step_avg:98.28ms +step:847/1695 train_time:83240ms step_avg:98.28ms +step:848/1695 train_time:83339ms step_avg:98.28ms +step:849/1695 train_time:83439ms step_avg:98.28ms +step:850/1695 train_time:83538ms step_avg:98.28ms +step:851/1695 train_time:83637ms step_avg:98.28ms +step:852/1695 train_time:83736ms step_avg:98.28ms +step:853/1695 train_time:83835ms step_avg:98.28ms +step:854/1695 train_time:83934ms step_avg:98.28ms +step:855/1695 train_time:84032ms step_avg:98.28ms +step:856/1695 train_time:84129ms step_avg:98.28ms +step:857/1695 train_time:84226ms step_avg:98.28ms +step:858/1695 train_time:84324ms step_avg:98.28ms +step:859/1695 train_time:84423ms step_avg:98.28ms +step:860/1695 train_time:84522ms step_avg:98.28ms +step:861/1695 train_time:84621ms step_avg:98.28ms +step:862/1695 train_time:84720ms step_avg:98.28ms +step:863/1695 train_time:84818ms step_avg:98.28ms +step:864/1695 train_time:84917ms step_avg:98.28ms +step:865/1695 train_time:85016ms step_avg:98.28ms +step:866/1695 train_time:85115ms step_avg:98.29ms +step:867/1695 train_time:85213ms step_avg:98.29ms +step:868/1695 train_time:85311ms step_avg:98.28ms +step:869/1695 train_time:85409ms step_avg:98.28ms +step:870/1695 train_time:85506ms step_avg:98.28ms +step:871/1695 train_time:85605ms step_avg:98.28ms +step:872/1695 train_time:85703ms step_avg:98.28ms +step:873/1695 train_time:85802ms step_avg:98.28ms +step:874/1695 train_time:85902ms step_avg:98.29ms +step:875/1695 train_time:86003ms step_avg:98.29ms +step:875/1695 val_loss:3.5387 train_time:86100ms step_avg:98.40ms +step:876/1695 train_time:86128ms step_avg:98.32ms +step:877/1695 train_time:86208ms step_avg:98.30ms +step:878/1695 train_time:86308ms step_avg:98.30ms +step:879/1695 train_time:86406ms step_avg:98.30ms +step:880/1695 train_time:86503ms step_avg:98.30ms +step:881/1695 train_time:86602ms step_avg:98.30ms +step:882/1695 train_time:86701ms step_avg:98.30ms +step:883/1695 train_time:86800ms step_avg:98.30ms +step:884/1695 train_time:86899ms step_avg:98.30ms +step:885/1695 train_time:86997ms step_avg:98.30ms +step:886/1695 train_time:87099ms step_avg:98.31ms +step:887/1695 train_time:87201ms step_avg:98.31ms +step:888/1695 train_time:87303ms step_avg:98.31ms +step:889/1695 train_time:87404ms step_avg:98.32ms +step:890/1695 train_time:87503ms step_avg:98.32ms +step:891/1695 train_time:87603ms step_avg:98.32ms +step:892/1695 train_time:87703ms step_avg:98.32ms +step:893/1695 train_time:87802ms step_avg:98.32ms +step:894/1695 train_time:87901ms step_avg:98.32ms +step:895/1695 train_time:88000ms step_avg:98.32ms +step:896/1695 train_time:88099ms step_avg:98.33ms +step:897/1695 train_time:88200ms step_avg:98.33ms +step:898/1695 train_time:88301ms step_avg:98.33ms +step:899/1695 train_time:88402ms step_avg:98.33ms +step:900/1695 train_time:88503ms step_avg:98.34ms +step:901/1695 train_time:88602ms step_avg:98.34ms +step:902/1695 train_time:88702ms step_avg:98.34ms +step:903/1695 train_time:88801ms step_avg:98.34ms +step:904/1695 train_time:88901ms step_avg:98.34ms +step:905/1695 train_time:89001ms step_avg:98.34ms +step:906/1695 train_time:89100ms step_avg:98.34ms +step:907/1695 train_time:89200ms step_avg:98.35ms 
+step:908/1695 train_time:89301ms step_avg:98.35ms +step:909/1695 train_time:89402ms step_avg:98.35ms +step:910/1695 train_time:89502ms step_avg:98.35ms +step:911/1695 train_time:89602ms step_avg:98.36ms +step:912/1695 train_time:89702ms step_avg:98.36ms +step:913/1695 train_time:89801ms step_avg:98.36ms +step:914/1695 train_time:89901ms step_avg:98.36ms +step:915/1695 train_time:90001ms step_avg:98.36ms +step:916/1695 train_time:90101ms step_avg:98.36ms +step:917/1695 train_time:90201ms step_avg:98.37ms +step:918/1695 train_time:90302ms step_avg:98.37ms +step:919/1695 train_time:90403ms step_avg:98.37ms +step:920/1695 train_time:90503ms step_avg:98.37ms +step:921/1695 train_time:90604ms step_avg:98.38ms +step:922/1695 train_time:90703ms step_avg:98.38ms +step:923/1695 train_time:90803ms step_avg:98.38ms +step:924/1695 train_time:90902ms step_avg:98.38ms +step:925/1695 train_time:91001ms step_avg:98.38ms +step:926/1695 train_time:91100ms step_avg:98.38ms +step:927/1695 train_time:91200ms step_avg:98.38ms +step:928/1695 train_time:91300ms step_avg:98.38ms +step:929/1695 train_time:91400ms step_avg:98.39ms +step:930/1695 train_time:91500ms step_avg:98.39ms +step:931/1695 train_time:91601ms step_avg:98.39ms +step:932/1695 train_time:91701ms step_avg:98.39ms +step:933/1695 train_time:91801ms step_avg:98.39ms +step:934/1695 train_time:91900ms step_avg:98.39ms +step:935/1695 train_time:91999ms step_avg:98.40ms +step:936/1695 train_time:92098ms step_avg:98.40ms +step:937/1695 train_time:92198ms step_avg:98.40ms +step:938/1695 train_time:92298ms step_avg:98.40ms +step:939/1695 train_time:92399ms step_avg:98.40ms +step:940/1695 train_time:92499ms step_avg:98.40ms +step:941/1695 train_time:92600ms step_avg:98.41ms +step:942/1695 train_time:92701ms step_avg:98.41ms +step:943/1695 train_time:92801ms step_avg:98.41ms +step:944/1695 train_time:92900ms step_avg:98.41ms +step:945/1695 train_time:93001ms step_avg:98.41ms +step:946/1695 train_time:93101ms step_avg:98.42ms +step:947/1695 train_time:93201ms step_avg:98.42ms +step:948/1695 train_time:93300ms step_avg:98.42ms +step:949/1695 train_time:93400ms step_avg:98.42ms +step:950/1695 train_time:93500ms step_avg:98.42ms +step:951/1695 train_time:93601ms step_avg:98.42ms +step:952/1695 train_time:93701ms step_avg:98.43ms +step:953/1695 train_time:93801ms step_avg:98.43ms +step:954/1695 train_time:93901ms step_avg:98.43ms +step:955/1695 train_time:94001ms step_avg:98.43ms +step:956/1695 train_time:94100ms step_avg:98.43ms +step:957/1695 train_time:94199ms step_avg:98.43ms +step:958/1695 train_time:94299ms step_avg:98.43ms +step:959/1695 train_time:94398ms step_avg:98.43ms +step:960/1695 train_time:94498ms step_avg:98.44ms +step:961/1695 train_time:94600ms step_avg:98.44ms +step:962/1695 train_time:94700ms step_avg:98.44ms +step:963/1695 train_time:94800ms step_avg:98.44ms +step:964/1695 train_time:94900ms step_avg:98.44ms +step:965/1695 train_time:95000ms step_avg:98.45ms +step:966/1695 train_time:95099ms step_avg:98.45ms +step:967/1695 train_time:95199ms step_avg:98.45ms +step:968/1695 train_time:95299ms step_avg:98.45ms +step:969/1695 train_time:95400ms step_avg:98.45ms +step:970/1695 train_time:95500ms step_avg:98.45ms +step:971/1695 train_time:95599ms step_avg:98.45ms +step:972/1695 train_time:95701ms step_avg:98.46ms +step:973/1695 train_time:95800ms step_avg:98.46ms +step:974/1695 train_time:95900ms step_avg:98.46ms +step:975/1695 train_time:96001ms step_avg:98.46ms +step:976/1695 train_time:96102ms step_avg:98.46ms +step:977/1695 train_time:96201ms 
step_avg:98.47ms +step:978/1695 train_time:96301ms step_avg:98.47ms +step:979/1695 train_time:96401ms step_avg:98.47ms +step:980/1695 train_time:96501ms step_avg:98.47ms +step:981/1695 train_time:96601ms step_avg:98.47ms +step:982/1695 train_time:96701ms step_avg:98.47ms +step:983/1695 train_time:96801ms step_avg:98.48ms +step:984/1695 train_time:96901ms step_avg:98.48ms +step:985/1695 train_time:97002ms step_avg:98.48ms +step:986/1695 train_time:97103ms step_avg:98.48ms +step:987/1695 train_time:97205ms step_avg:98.48ms +step:988/1695 train_time:97304ms step_avg:98.49ms +step:989/1695 train_time:97403ms step_avg:98.49ms +step:990/1695 train_time:97503ms step_avg:98.49ms +step:991/1695 train_time:97604ms step_avg:98.49ms +step:992/1695 train_time:97704ms step_avg:98.49ms +step:993/1695 train_time:97803ms step_avg:98.49ms +step:994/1695 train_time:97903ms step_avg:98.49ms +step:995/1695 train_time:98003ms step_avg:98.50ms +step:996/1695 train_time:98102ms step_avg:98.50ms +step:997/1695 train_time:98202ms step_avg:98.50ms +step:998/1695 train_time:98301ms step_avg:98.50ms +step:999/1695 train_time:98402ms step_avg:98.50ms +step:1000/1695 train_time:98501ms step_avg:98.50ms +step:1000/1695 val_loss:3.4933 train_time:98599ms step_avg:98.60ms +step:1001/1695 train_time:98627ms step_avg:98.53ms +step:1002/1695 train_time:98709ms step_avg:98.51ms +step:1003/1695 train_time:98815ms step_avg:98.52ms +step:1004/1695 train_time:98916ms step_avg:98.52ms +step:1005/1695 train_time:99015ms step_avg:98.52ms +step:1006/1695 train_time:99115ms step_avg:98.52ms +step:1007/1695 train_time:99214ms step_avg:98.52ms +step:1008/1695 train_time:99313ms step_avg:98.52ms +step:1009/1695 train_time:99412ms step_avg:98.53ms +step:1010/1695 train_time:99510ms step_avg:98.52ms +step:1011/1695 train_time:99611ms step_avg:98.53ms +step:1012/1695 train_time:99712ms step_avg:98.53ms +step:1013/1695 train_time:99813ms step_avg:98.53ms +step:1014/1695 train_time:99915ms step_avg:98.54ms +step:1015/1695 train_time:100016ms step_avg:98.54ms +step:1016/1695 train_time:100116ms step_avg:98.54ms +step:1017/1695 train_time:100216ms step_avg:98.54ms +step:1018/1695 train_time:100316ms step_avg:98.54ms +step:1019/1695 train_time:100415ms step_avg:98.54ms +step:1020/1695 train_time:100515ms step_avg:98.54ms +step:1021/1695 train_time:100616ms step_avg:98.55ms +step:1022/1695 train_time:100716ms step_avg:98.55ms +step:1023/1695 train_time:100818ms step_avg:98.55ms +step:1024/1695 train_time:100920ms step_avg:98.55ms +step:1025/1695 train_time:101021ms step_avg:98.56ms +step:1026/1695 train_time:101121ms step_avg:98.56ms +step:1027/1695 train_time:101220ms step_avg:98.56ms +step:1028/1695 train_time:101319ms step_avg:98.56ms +step:1029/1695 train_time:101419ms step_avg:98.56ms +step:1030/1695 train_time:101519ms step_avg:98.56ms +step:1031/1695 train_time:101620ms step_avg:98.56ms +step:1032/1695 train_time:101720ms step_avg:98.57ms +step:1033/1695 train_time:101821ms step_avg:98.57ms +step:1034/1695 train_time:101921ms step_avg:98.57ms +step:1035/1695 train_time:102021ms step_avg:98.57ms +step:1036/1695 train_time:102121ms step_avg:98.57ms +step:1037/1695 train_time:102221ms step_avg:98.57ms +step:1038/1695 train_time:102320ms step_avg:98.57ms +step:1039/1695 train_time:102419ms step_avg:98.57ms +step:1040/1695 train_time:102520ms step_avg:98.58ms +step:1041/1695 train_time:102620ms step_avg:98.58ms +step:1042/1695 train_time:102720ms step_avg:98.58ms +step:1043/1695 train_time:102820ms step_avg:98.58ms +step:1044/1695 
train_time:102920ms step_avg:98.58ms +step:1045/1695 train_time:103020ms step_avg:98.58ms +step:1046/1695 train_time:103120ms step_avg:98.59ms +step:1047/1695 train_time:103220ms step_avg:98.59ms +step:1048/1695 train_time:103320ms step_avg:98.59ms +step:1049/1695 train_time:103419ms step_avg:98.59ms +step:1050/1695 train_time:103519ms step_avg:98.59ms +step:1051/1695 train_time:103620ms step_avg:98.59ms +step:1052/1695 train_time:103719ms step_avg:98.59ms +step:1053/1695 train_time:103819ms step_avg:98.59ms +step:1054/1695 train_time:103918ms step_avg:98.59ms +step:1055/1695 train_time:104018ms step_avg:98.60ms +step:1056/1695 train_time:104117ms step_avg:98.60ms +step:1057/1695 train_time:104217ms step_avg:98.60ms +step:1058/1695 train_time:104316ms step_avg:98.60ms +step:1059/1695 train_time:104416ms step_avg:98.60ms +step:1060/1695 train_time:104516ms step_avg:98.60ms +step:1061/1695 train_time:104616ms step_avg:98.60ms +step:1062/1695 train_time:104717ms step_avg:98.60ms +step:1063/1695 train_time:104817ms step_avg:98.60ms +step:1064/1695 train_time:104917ms step_avg:98.61ms +step:1065/1695 train_time:105017ms step_avg:98.61ms +step:1066/1695 train_time:105117ms step_avg:98.61ms +step:1067/1695 train_time:105218ms step_avg:98.61ms +step:1068/1695 train_time:105317ms step_avg:98.61ms +step:1069/1695 train_time:105417ms step_avg:98.61ms +step:1070/1695 train_time:105517ms step_avg:98.61ms +step:1071/1695 train_time:105617ms step_avg:98.61ms +step:1072/1695 train_time:105716ms step_avg:98.62ms +step:1073/1695 train_time:105816ms step_avg:98.62ms +step:1074/1695 train_time:105916ms step_avg:98.62ms +step:1075/1695 train_time:106016ms step_avg:98.62ms +step:1076/1695 train_time:106116ms step_avg:98.62ms +step:1077/1695 train_time:106217ms step_avg:98.62ms +step:1078/1695 train_time:106317ms step_avg:98.62ms +step:1079/1695 train_time:106418ms step_avg:98.63ms +step:1080/1695 train_time:106517ms step_avg:98.63ms +step:1081/1695 train_time:106616ms step_avg:98.63ms +step:1082/1695 train_time:106717ms step_avg:98.63ms +step:1083/1695 train_time:106817ms step_avg:98.63ms +step:1084/1695 train_time:106917ms step_avg:98.63ms +step:1085/1695 train_time:107017ms step_avg:98.63ms +step:1086/1695 train_time:107117ms step_avg:98.63ms +step:1087/1695 train_time:107217ms step_avg:98.64ms +step:1088/1695 train_time:107317ms step_avg:98.64ms +step:1089/1695 train_time:107417ms step_avg:98.64ms +step:1090/1695 train_time:107518ms step_avg:98.64ms +step:1091/1695 train_time:107619ms step_avg:98.64ms +step:1092/1695 train_time:107719ms step_avg:98.64ms +step:1093/1695 train_time:107818ms step_avg:98.64ms +step:1094/1695 train_time:107918ms step_avg:98.64ms +step:1095/1695 train_time:108017ms step_avg:98.65ms +step:1096/1695 train_time:108117ms step_avg:98.65ms +step:1097/1695 train_time:108217ms step_avg:98.65ms +step:1098/1695 train_time:108317ms step_avg:98.65ms +step:1099/1695 train_time:108416ms step_avg:98.65ms +step:1100/1695 train_time:108516ms step_avg:98.65ms +step:1101/1695 train_time:108616ms step_avg:98.65ms +step:1102/1695 train_time:108716ms step_avg:98.65ms +step:1103/1695 train_time:108816ms step_avg:98.65ms +step:1104/1695 train_time:108916ms step_avg:98.66ms +step:1105/1695 train_time:109016ms step_avg:98.66ms +step:1106/1695 train_time:109116ms step_avg:98.66ms +step:1107/1695 train_time:109217ms step_avg:98.66ms +step:1108/1695 train_time:109317ms step_avg:98.66ms +step:1109/1695 train_time:109417ms step_avg:98.66ms +step:1110/1695 train_time:109517ms step_avg:98.66ms +step:1111/1695 
train_time:109617ms step_avg:98.66ms +step:1112/1695 train_time:109717ms step_avg:98.67ms +step:1113/1695 train_time:109817ms step_avg:98.67ms +step:1114/1695 train_time:109917ms step_avg:98.67ms +step:1115/1695 train_time:110017ms step_avg:98.67ms +step:1116/1695 train_time:110117ms step_avg:98.67ms +step:1117/1695 train_time:110217ms step_avg:98.67ms +step:1118/1695 train_time:110316ms step_avg:98.67ms +step:1119/1695 train_time:110416ms step_avg:98.67ms +step:1120/1695 train_time:110516ms step_avg:98.68ms +step:1121/1695 train_time:110616ms step_avg:98.68ms +step:1122/1695 train_time:110716ms step_avg:98.68ms +step:1123/1695 train_time:110816ms step_avg:98.68ms +step:1124/1695 train_time:110916ms step_avg:98.68ms +step:1125/1695 train_time:111016ms step_avg:98.68ms +step:1125/1695 val_loss:3.4421 train_time:111114ms step_avg:98.77ms +step:1126/1695 train_time:111142ms step_avg:98.70ms +step:1127/1695 train_time:111225ms step_avg:98.69ms +step:1128/1695 train_time:111328ms step_avg:98.70ms +step:1129/1695 train_time:111428ms step_avg:98.70ms +step:1130/1695 train_time:111526ms step_avg:98.70ms +step:1131/1695 train_time:111625ms step_avg:98.70ms +step:1132/1695 train_time:111724ms step_avg:98.70ms +step:1133/1695 train_time:111824ms step_avg:98.70ms +step:1134/1695 train_time:111923ms step_avg:98.70ms +step:1135/1695 train_time:112022ms step_avg:98.70ms +step:1136/1695 train_time:112124ms step_avg:98.70ms +step:1137/1695 train_time:112225ms step_avg:98.70ms +step:1138/1695 train_time:112328ms step_avg:98.71ms +step:1139/1695 train_time:112429ms step_avg:98.71ms +step:1140/1695 train_time:112530ms step_avg:98.71ms +step:1141/1695 train_time:112630ms step_avg:98.71ms +step:1142/1695 train_time:112731ms step_avg:98.71ms +step:1143/1695 train_time:112831ms step_avg:98.72ms +step:1144/1695 train_time:112933ms step_avg:98.72ms +step:1145/1695 train_time:113034ms step_avg:98.72ms +step:1146/1695 train_time:113135ms step_avg:98.72ms +step:1147/1695 train_time:113236ms step_avg:98.72ms +step:1148/1695 train_time:113337ms step_avg:98.73ms +step:1149/1695 train_time:113438ms step_avg:98.73ms +step:1150/1695 train_time:113539ms step_avg:98.73ms +step:1151/1695 train_time:113640ms step_avg:98.73ms +step:1152/1695 train_time:113740ms step_avg:98.73ms +step:1153/1695 train_time:113842ms step_avg:98.74ms +step:1154/1695 train_time:113942ms step_avg:98.74ms +step:1155/1695 train_time:114042ms step_avg:98.74ms +step:1156/1695 train_time:114142ms step_avg:98.74ms +step:1157/1695 train_time:114243ms step_avg:98.74ms +step:1158/1695 train_time:114342ms step_avg:98.74ms +step:1159/1695 train_time:114442ms step_avg:98.74ms +step:1160/1695 train_time:114544ms step_avg:98.74ms +step:1161/1695 train_time:114644ms step_avg:98.75ms +step:1162/1695 train_time:114743ms step_avg:98.75ms +step:1163/1695 train_time:114846ms step_avg:98.75ms +step:1164/1695 train_time:114947ms step_avg:98.75ms +step:1165/1695 train_time:115048ms step_avg:98.75ms +step:1166/1695 train_time:115149ms step_avg:98.76ms +step:1167/1695 train_time:115250ms step_avg:98.76ms +step:1168/1695 train_time:115353ms step_avg:98.76ms +step:1169/1695 train_time:115455ms step_avg:98.76ms +step:1170/1695 train_time:115555ms step_avg:98.77ms +step:1171/1695 train_time:115655ms step_avg:98.77ms +step:1172/1695 train_time:115756ms step_avg:98.77ms +step:1173/1695 train_time:115857ms step_avg:98.77ms +step:1174/1695 train_time:115959ms step_avg:98.77ms +step:1175/1695 train_time:116060ms step_avg:98.77ms +step:1176/1695 train_time:116161ms step_avg:98.78ms 
+step:1177/1695 train_time:116262ms step_avg:98.78ms +step:1178/1695 train_time:116363ms step_avg:98.78ms +step:1179/1695 train_time:116465ms step_avg:98.78ms +step:1180/1695 train_time:116565ms step_avg:98.78ms +step:1181/1695 train_time:116666ms step_avg:98.79ms +step:1182/1695 train_time:116765ms step_avg:98.79ms +step:1183/1695 train_time:116866ms step_avg:98.79ms +step:1184/1695 train_time:116971ms step_avg:98.79ms +step:1185/1695 train_time:117074ms step_avg:98.80ms +step:1186/1695 train_time:117175ms step_avg:98.80ms +step:1187/1695 train_time:117275ms step_avg:98.80ms +step:1188/1695 train_time:117376ms step_avg:98.80ms +step:1189/1695 train_time:117476ms step_avg:98.80ms +step:1190/1695 train_time:117576ms step_avg:98.80ms +step:1191/1695 train_time:117676ms step_avg:98.80ms +step:1192/1695 train_time:117777ms step_avg:98.81ms +step:1193/1695 train_time:117878ms step_avg:98.81ms +step:1194/1695 train_time:117980ms step_avg:98.81ms +step:1195/1695 train_time:118081ms step_avg:98.81ms +step:1196/1695 train_time:118181ms step_avg:98.81ms +step:1197/1695 train_time:118282ms step_avg:98.82ms +step:1198/1695 train_time:118382ms step_avg:98.82ms +step:1199/1695 train_time:118482ms step_avg:98.82ms +step:1200/1695 train_time:118582ms step_avg:98.82ms +step:1201/1695 train_time:118682ms step_avg:98.82ms +step:1202/1695 train_time:118783ms step_avg:98.82ms +step:1203/1695 train_time:118883ms step_avg:98.82ms +step:1204/1695 train_time:118984ms step_avg:98.82ms +step:1205/1695 train_time:119084ms step_avg:98.83ms +step:1206/1695 train_time:119185ms step_avg:98.83ms +step:1207/1695 train_time:119285ms step_avg:98.83ms +step:1208/1695 train_time:119385ms step_avg:98.83ms +step:1209/1695 train_time:119486ms step_avg:98.83ms +step:1210/1695 train_time:119586ms step_avg:98.83ms +step:1211/1695 train_time:119689ms step_avg:98.84ms +step:1212/1695 train_time:119790ms step_avg:98.84ms +step:1213/1695 train_time:119892ms step_avg:98.84ms +step:1214/1695 train_time:119993ms step_avg:98.84ms +step:1215/1695 train_time:120095ms step_avg:98.84ms +step:1216/1695 train_time:120198ms step_avg:98.85ms +step:1217/1695 train_time:120298ms step_avg:98.85ms +step:1218/1695 train_time:120400ms step_avg:98.85ms +step:1219/1695 train_time:120500ms step_avg:98.85ms +step:1220/1695 train_time:120601ms step_avg:98.85ms +step:1221/1695 train_time:120702ms step_avg:98.85ms +step:1222/1695 train_time:120803ms step_avg:98.86ms +step:1223/1695 train_time:120903ms step_avg:98.86ms +step:1224/1695 train_time:121003ms step_avg:98.86ms +step:1225/1695 train_time:121104ms step_avg:98.86ms +step:1226/1695 train_time:121204ms step_avg:98.86ms +step:1227/1695 train_time:121305ms step_avg:98.86ms +step:1228/1695 train_time:121405ms step_avg:98.86ms +step:1229/1695 train_time:121506ms step_avg:98.87ms +step:1230/1695 train_time:121606ms step_avg:98.87ms +step:1231/1695 train_time:121708ms step_avg:98.87ms +step:1232/1695 train_time:121809ms step_avg:98.87ms +step:1233/1695 train_time:121911ms step_avg:98.87ms +step:1234/1695 train_time:122013ms step_avg:98.88ms +step:1235/1695 train_time:122114ms step_avg:98.88ms +step:1236/1695 train_time:122215ms step_avg:98.88ms +step:1237/1695 train_time:122317ms step_avg:98.88ms +step:1238/1695 train_time:122418ms step_avg:98.88ms +step:1239/1695 train_time:122519ms step_avg:98.89ms +step:1240/1695 train_time:122620ms step_avg:98.89ms +step:1241/1695 train_time:122721ms step_avg:98.89ms +step:1242/1695 train_time:122823ms step_avg:98.89ms +step:1243/1695 train_time:122922ms step_avg:98.89ms 
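
Because step_avg is cumulative, it doubles as a running projection of the final wall clock: step_avg * num_iterations. The projection creeps upward over the run, since the isolated slow steps visible in this record (roughly 300-400ms against a ~100ms baseline, e.g. around steps 571, 653, 714, 720) keep pulling the mean up. A quick check using only numbers from the entries above:

num_iterations = 1695
step_avg_ms = 98.89  # from the step:1243/1695 entry above
print(f"projected total: {step_avg_ms * num_iterations / 1000:.1f}s")
# -> 167.6s; the run in fact finishes at 169.0s (train_time:168971ms at step 1695)
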
+step:1244/1695 train_time:123022ms step_avg:98.89ms +step:1245/1695 train_time:123122ms step_avg:98.89ms +step:1246/1695 train_time:123223ms step_avg:98.89ms +step:1247/1695 train_time:123323ms step_avg:98.90ms +step:1248/1695 train_time:123423ms step_avg:98.90ms +step:1249/1695 train_time:123523ms step_avg:98.90ms +step:1250/1695 train_time:123623ms step_avg:98.90ms +step:1250/1695 val_loss:3.3966 train_time:123721ms step_avg:98.98ms +step:1251/1695 train_time:123749ms step_avg:98.92ms +step:1252/1695 train_time:123832ms step_avg:98.91ms +step:1253/1695 train_time:123934ms step_avg:98.91ms +step:1254/1695 train_time:124035ms step_avg:98.91ms +step:1255/1695 train_time:124136ms step_avg:98.91ms +step:1256/1695 train_time:124235ms step_avg:98.91ms +step:1257/1695 train_time:124335ms step_avg:98.91ms +step:1258/1695 train_time:124436ms step_avg:98.92ms +step:1259/1695 train_time:124537ms step_avg:98.92ms +step:1260/1695 train_time:124637ms step_avg:98.92ms +step:1261/1695 train_time:124739ms step_avg:98.92ms +step:1262/1695 train_time:124844ms step_avg:98.93ms +step:1263/1695 train_time:124945ms step_avg:98.93ms +step:1264/1695 train_time:125045ms step_avg:98.93ms +step:1265/1695 train_time:125145ms step_avg:98.93ms +step:1266/1695 train_time:125245ms step_avg:98.93ms +step:1267/1695 train_time:125346ms step_avg:98.93ms +step:1268/1695 train_time:125447ms step_avg:98.93ms +step:1269/1695 train_time:125547ms step_avg:98.93ms +step:1270/1695 train_time:125647ms step_avg:98.93ms +step:1271/1695 train_time:125749ms step_avg:98.94ms +step:1272/1695 train_time:125849ms step_avg:98.94ms +step:1273/1695 train_time:125951ms step_avg:98.94ms +step:1274/1695 train_time:126051ms step_avg:98.94ms +step:1275/1695 train_time:126152ms step_avg:98.94ms +step:1276/1695 train_time:126255ms step_avg:98.95ms +step:1277/1695 train_time:126357ms step_avg:98.95ms +step:1278/1695 train_time:126459ms step_avg:98.95ms +step:1279/1695 train_time:126559ms step_avg:98.95ms +step:1280/1695 train_time:126660ms step_avg:98.95ms +step:1281/1695 train_time:126762ms step_avg:98.96ms +step:1282/1695 train_time:126863ms step_avg:98.96ms +step:1283/1695 train_time:126964ms step_avg:98.96ms +step:1284/1695 train_time:127064ms step_avg:98.96ms +step:1285/1695 train_time:127165ms step_avg:98.96ms +step:1286/1695 train_time:127266ms step_avg:98.96ms +step:1287/1695 train_time:127367ms step_avg:98.96ms +step:1288/1695 train_time:127467ms step_avg:98.97ms +step:1289/1695 train_time:127568ms step_avg:98.97ms +step:1290/1695 train_time:127668ms step_avg:98.97ms +step:1291/1695 train_time:127769ms step_avg:98.97ms +step:1292/1695 train_time:127869ms step_avg:98.97ms +step:1293/1695 train_time:127970ms step_avg:98.97ms +step:1294/1695 train_time:128071ms step_avg:98.97ms +step:1295/1695 train_time:128172ms step_avg:98.97ms +step:1296/1695 train_time:128274ms step_avg:98.98ms +step:1297/1695 train_time:128375ms step_avg:98.98ms +step:1298/1695 train_time:128476ms step_avg:98.98ms +step:1299/1695 train_time:128577ms step_avg:98.98ms +step:1300/1695 train_time:128678ms step_avg:98.98ms +step:1301/1695 train_time:128780ms step_avg:98.99ms +step:1302/1695 train_time:128882ms step_avg:98.99ms +step:1303/1695 train_time:128983ms step_avg:98.99ms +step:1304/1695 train_time:129084ms step_avg:98.99ms +step:1305/1695 train_time:129186ms step_avg:98.99ms +step:1306/1695 train_time:129286ms step_avg:98.99ms +step:1307/1695 train_time:129386ms step_avg:98.99ms +step:1308/1695 train_time:129486ms step_avg:99.00ms +step:1309/1695 train_time:129587ms 
step_avg:99.00ms +step:1310/1695 train_time:129688ms step_avg:99.00ms +step:1311/1695 train_time:129789ms step_avg:99.00ms +step:1312/1695 train_time:129890ms step_avg:99.00ms +step:1313/1695 train_time:129992ms step_avg:99.00ms +step:1314/1695 train_time:130093ms step_avg:99.01ms +step:1315/1695 train_time:130194ms step_avg:99.01ms +step:1316/1695 train_time:130297ms step_avg:99.01ms +step:1317/1695 train_time:130397ms step_avg:99.01ms +step:1318/1695 train_time:130498ms step_avg:99.01ms +step:1319/1695 train_time:130600ms step_avg:99.01ms +step:1320/1695 train_time:130703ms step_avg:99.02ms +step:1321/1695 train_time:130804ms step_avg:99.02ms +step:1322/1695 train_time:130905ms step_avg:99.02ms +step:1323/1695 train_time:131005ms step_avg:99.02ms +step:1324/1695 train_time:131106ms step_avg:99.02ms +step:1325/1695 train_time:131207ms step_avg:99.02ms +step:1326/1695 train_time:131307ms step_avg:99.03ms +step:1327/1695 train_time:131408ms step_avg:99.03ms +step:1328/1695 train_time:131507ms step_avg:99.03ms +step:1329/1695 train_time:131607ms step_avg:99.03ms +step:1330/1695 train_time:131708ms step_avg:99.03ms +step:1331/1695 train_time:131808ms step_avg:99.03ms +step:1332/1695 train_time:131909ms step_avg:99.03ms +step:1333/1695 train_time:132010ms step_avg:99.03ms +step:1334/1695 train_time:132112ms step_avg:99.03ms +step:1335/1695 train_time:132212ms step_avg:99.04ms +step:1336/1695 train_time:132314ms step_avg:99.04ms +step:1337/1695 train_time:132415ms step_avg:99.04ms +step:1338/1695 train_time:132516ms step_avg:99.04ms +step:1339/1695 train_time:132618ms step_avg:99.04ms +step:1340/1695 train_time:132718ms step_avg:99.04ms +step:1341/1695 train_time:132820ms step_avg:99.05ms +step:1342/1695 train_time:132921ms step_avg:99.05ms +step:1343/1695 train_time:133022ms step_avg:99.05ms +step:1344/1695 train_time:133122ms step_avg:99.05ms +step:1345/1695 train_time:133224ms step_avg:99.05ms +step:1346/1695 train_time:133326ms step_avg:99.05ms +step:1347/1695 train_time:133427ms step_avg:99.06ms +step:1348/1695 train_time:133527ms step_avg:99.06ms +step:1349/1695 train_time:133627ms step_avg:99.06ms +step:1350/1695 train_time:133728ms step_avg:99.06ms +step:1351/1695 train_time:133828ms step_avg:99.06ms +step:1352/1695 train_time:133928ms step_avg:99.06ms +step:1353/1695 train_time:134028ms step_avg:99.06ms +step:1354/1695 train_time:134129ms step_avg:99.06ms +step:1355/1695 train_time:134230ms step_avg:99.06ms +step:1356/1695 train_time:134332ms step_avg:99.06ms +step:1357/1695 train_time:134434ms step_avg:99.07ms +step:1358/1695 train_time:134536ms step_avg:99.07ms +step:1359/1695 train_time:134637ms step_avg:99.07ms +step:1360/1695 train_time:134737ms step_avg:99.07ms +step:1361/1695 train_time:134837ms step_avg:99.07ms +step:1362/1695 train_time:134938ms step_avg:99.07ms +step:1363/1695 train_time:135040ms step_avg:99.08ms +step:1364/1695 train_time:135142ms step_avg:99.08ms +step:1365/1695 train_time:135244ms step_avg:99.08ms +step:1366/1695 train_time:135344ms step_avg:99.08ms +step:1367/1695 train_time:135445ms step_avg:99.08ms +step:1368/1695 train_time:135546ms step_avg:99.08ms +step:1369/1695 train_time:135646ms step_avg:99.08ms +step:1370/1695 train_time:135745ms step_avg:99.08ms +step:1371/1695 train_time:135847ms step_avg:99.09ms +step:1372/1695 train_time:135948ms step_avg:99.09ms +step:1373/1695 train_time:136048ms step_avg:99.09ms +step:1374/1695 train_time:136148ms step_avg:99.09ms +step:1375/1695 train_time:136248ms step_avg:99.09ms +step:1375/1695 val_loss:3.3574 
train_time:136346ms step_avg:99.16ms +step:1376/1695 train_time:136374ms step_avg:99.11ms +step:1377/1695 train_time:136458ms step_avg:99.10ms +step:1378/1695 train_time:136564ms step_avg:99.10ms +step:1379/1695 train_time:136664ms step_avg:99.10ms +step:1380/1695 train_time:136767ms step_avg:99.11ms +step:1381/1695 train_time:136867ms step_avg:99.11ms +step:1382/1695 train_time:136966ms step_avg:99.11ms +step:1383/1695 train_time:137066ms step_avg:99.11ms +step:1384/1695 train_time:137167ms step_avg:99.11ms +step:1385/1695 train_time:137268ms step_avg:99.11ms +step:1386/1695 train_time:137372ms step_avg:99.11ms +step:1387/1695 train_time:137475ms step_avg:99.12ms +step:1388/1695 train_time:137577ms step_avg:99.12ms +step:1389/1695 train_time:137679ms step_avg:99.12ms +step:1390/1695 train_time:137780ms step_avg:99.12ms +step:1391/1695 train_time:137881ms step_avg:99.12ms +step:1392/1695 train_time:137983ms step_avg:99.13ms +step:1393/1695 train_time:138085ms step_avg:99.13ms +step:1394/1695 train_time:138187ms step_avg:99.13ms +step:1395/1695 train_time:138290ms step_avg:99.13ms +step:1396/1695 train_time:138391ms step_avg:99.13ms +step:1397/1695 train_time:138494ms step_avg:99.14ms +step:1398/1695 train_time:138595ms step_avg:99.14ms +step:1399/1695 train_time:138697ms step_avg:99.14ms +step:1400/1695 train_time:138799ms step_avg:99.14ms +step:1401/1695 train_time:138900ms step_avg:99.14ms +step:1402/1695 train_time:139001ms step_avg:99.14ms +step:1403/1695 train_time:139105ms step_avg:99.15ms +step:1404/1695 train_time:139207ms step_avg:99.15ms +step:1405/1695 train_time:139309ms step_avg:99.15ms +step:1406/1695 train_time:139411ms step_avg:99.15ms +step:1407/1695 train_time:139513ms step_avg:99.16ms +step:1408/1695 train_time:139614ms step_avg:99.16ms +step:1409/1695 train_time:139718ms step_avg:99.16ms +step:1410/1695 train_time:139819ms step_avg:99.16ms +step:1411/1695 train_time:139920ms step_avg:99.16ms +step:1412/1695 train_time:140024ms step_avg:99.17ms +step:1413/1695 train_time:140125ms step_avg:99.17ms +step:1414/1695 train_time:140227ms step_avg:99.17ms +step:1415/1695 train_time:140329ms step_avg:99.17ms +step:1416/1695 train_time:140431ms step_avg:99.17ms +step:1417/1695 train_time:140532ms step_avg:99.18ms +step:1418/1695 train_time:140633ms step_avg:99.18ms +step:1419/1695 train_time:140736ms step_avg:99.18ms +step:1420/1695 train_time:140837ms step_avg:99.18ms +step:1421/1695 train_time:140939ms step_avg:99.18ms +step:1422/1695 train_time:141041ms step_avg:99.18ms +step:1423/1695 train_time:141142ms step_avg:99.19ms +step:1424/1695 train_time:141245ms step_avg:99.19ms +step:1425/1695 train_time:141347ms step_avg:99.19ms +step:1426/1695 train_time:141450ms step_avg:99.19ms +step:1427/1695 train_time:141552ms step_avg:99.20ms +step:1428/1695 train_time:141653ms step_avg:99.20ms +step:1429/1695 train_time:141754ms step_avg:99.20ms +step:1430/1695 train_time:141856ms step_avg:99.20ms +step:1431/1695 train_time:141957ms step_avg:99.20ms +step:1432/1695 train_time:142059ms step_avg:99.20ms +step:1433/1695 train_time:142161ms step_avg:99.21ms +step:1434/1695 train_time:142263ms step_avg:99.21ms +step:1435/1695 train_time:142366ms step_avg:99.21ms +step:1436/1695 train_time:142469ms step_avg:99.21ms +step:1437/1695 train_time:142571ms step_avg:99.21ms +step:1438/1695 train_time:142672ms step_avg:99.22ms +step:1439/1695 train_time:142775ms step_avg:99.22ms +step:1440/1695 train_time:142877ms step_avg:99.22ms +step:1441/1695 train_time:142980ms step_avg:99.22ms +step:1442/1695 
train_time:143081ms step_avg:99.22ms +step:1443/1695 train_time:143182ms step_avg:99.23ms +step:1444/1695 train_time:143284ms step_avg:99.23ms +step:1445/1695 train_time:143384ms step_avg:99.23ms +step:1446/1695 train_time:143487ms step_avg:99.23ms +step:1447/1695 train_time:143588ms step_avg:99.23ms +step:1448/1695 train_time:143691ms step_avg:99.23ms +step:1449/1695 train_time:143792ms step_avg:99.24ms +step:1450/1695 train_time:143893ms step_avg:99.24ms +step:1451/1695 train_time:143994ms step_avg:99.24ms +step:1452/1695 train_time:144095ms step_avg:99.24ms +step:1453/1695 train_time:144196ms step_avg:99.24ms +step:1454/1695 train_time:144299ms step_avg:99.24ms +step:1455/1695 train_time:144403ms step_avg:99.25ms +step:1456/1695 train_time:144505ms step_avg:99.25ms +step:1457/1695 train_time:144607ms step_avg:99.25ms +step:1458/1695 train_time:144709ms step_avg:99.25ms +step:1459/1695 train_time:144811ms step_avg:99.25ms +step:1460/1695 train_time:144912ms step_avg:99.25ms +step:1461/1695 train_time:145014ms step_avg:99.26ms +step:1462/1695 train_time:145116ms step_avg:99.26ms +step:1463/1695 train_time:145217ms step_avg:99.26ms +step:1464/1695 train_time:145319ms step_avg:99.26ms +step:1465/1695 train_time:145419ms step_avg:99.26ms +step:1466/1695 train_time:145522ms step_avg:99.26ms +step:1467/1695 train_time:145624ms step_avg:99.27ms +step:1468/1695 train_time:145727ms step_avg:99.27ms +step:1469/1695 train_time:145831ms step_avg:99.27ms +step:1470/1695 train_time:145932ms step_avg:99.27ms +step:1471/1695 train_time:146032ms step_avg:99.27ms +step:1472/1695 train_time:146133ms step_avg:99.27ms +step:1473/1695 train_time:146234ms step_avg:99.28ms +step:1474/1695 train_time:146335ms step_avg:99.28ms +step:1475/1695 train_time:146438ms step_avg:99.28ms +step:1476/1695 train_time:146540ms step_avg:99.28ms +step:1477/1695 train_time:146644ms step_avg:99.28ms +step:1478/1695 train_time:146747ms step_avg:99.29ms +step:1479/1695 train_time:146848ms step_avg:99.29ms +step:1480/1695 train_time:146950ms step_avg:99.29ms +step:1481/1695 train_time:147057ms step_avg:99.30ms +step:1482/1695 train_time:147152ms step_avg:99.29ms +step:1483/1695 train_time:147253ms step_avg:99.29ms +step:1484/1695 train_time:147355ms step_avg:99.30ms +step:1485/1695 train_time:147456ms step_avg:99.30ms +step:1486/1695 train_time:147558ms step_avg:99.30ms +step:1487/1695 train_time:147661ms step_avg:99.30ms +step:1488/1695 train_time:147765ms step_avg:99.30ms +step:1489/1695 train_time:147867ms step_avg:99.31ms +step:1490/1695 train_time:147969ms step_avg:99.31ms +step:1491/1695 train_time:148070ms step_avg:99.31ms +step:1492/1695 train_time:148171ms step_avg:99.31ms +step:1493/1695 train_time:148272ms step_avg:99.31ms +step:1494/1695 train_time:148374ms step_avg:99.31ms +step:1495/1695 train_time:148475ms step_avg:99.31ms +step:1496/1695 train_time:148576ms step_avg:99.32ms +step:1497/1695 train_time:148676ms step_avg:99.32ms +step:1498/1695 train_time:148779ms step_avg:99.32ms +step:1499/1695 train_time:148881ms step_avg:99.32ms +step:1500/1695 train_time:148984ms step_avg:99.32ms +step:1500/1695 val_loss:3.3221 train_time:149083ms step_avg:99.39ms +step:1501/1695 train_time:149111ms step_avg:99.34ms +step:1502/1695 train_time:149199ms step_avg:99.33ms +step:1503/1695 train_time:149302ms step_avg:99.34ms +step:1504/1695 train_time:149404ms step_avg:99.34ms +step:1505/1695 train_time:149505ms step_avg:99.34ms +step:1506/1695 train_time:149606ms step_avg:99.34ms +step:1507/1695 train_time:149707ms step_avg:99.34ms 
+step:1508/1695 train_time:149808ms step_avg:99.34ms +step:1509/1695 train_time:149910ms step_avg:99.34ms +step:1510/1695 train_time:150012ms step_avg:99.35ms +step:1511/1695 train_time:150115ms step_avg:99.35ms +step:1512/1695 train_time:150217ms step_avg:99.35ms +step:1513/1695 train_time:150319ms step_avg:99.35ms +step:1514/1695 train_time:150421ms step_avg:99.35ms +step:1515/1695 train_time:150527ms step_avg:99.36ms +step:1516/1695 train_time:150629ms step_avg:99.36ms +step:1517/1695 train_time:150729ms step_avg:99.36ms +step:1518/1695 train_time:150830ms step_avg:99.36ms +step:1519/1695 train_time:150934ms step_avg:99.36ms +step:1520/1695 train_time:151035ms step_avg:99.37ms +step:1521/1695 train_time:151137ms step_avg:99.37ms +step:1522/1695 train_time:151238ms step_avg:99.37ms +step:1523/1695 train_time:151340ms step_avg:99.37ms +step:1524/1695 train_time:151446ms step_avg:99.37ms +step:1525/1695 train_time:151550ms step_avg:99.38ms +step:1526/1695 train_time:151652ms step_avg:99.38ms +step:1527/1695 train_time:151753ms step_avg:99.38ms +step:1528/1695 train_time:151858ms step_avg:99.38ms +step:1529/1695 train_time:151960ms step_avg:99.39ms +step:1530/1695 train_time:152063ms step_avg:99.39ms +step:1531/1695 train_time:152164ms step_avg:99.39ms +step:1532/1695 train_time:152267ms step_avg:99.39ms +step:1533/1695 train_time:152369ms step_avg:99.39ms +step:1534/1695 train_time:152470ms step_avg:99.39ms +step:1535/1695 train_time:152572ms step_avg:99.40ms +step:1536/1695 train_time:152673ms step_avg:99.40ms +step:1537/1695 train_time:152774ms step_avg:99.40ms +step:1538/1695 train_time:152875ms step_avg:99.40ms +step:1539/1695 train_time:152977ms step_avg:99.40ms +step:1540/1695 train_time:153081ms step_avg:99.40ms +step:1541/1695 train_time:153186ms step_avg:99.41ms +step:1542/1695 train_time:153290ms step_avg:99.41ms +step:1543/1695 train_time:153393ms step_avg:99.41ms +step:1544/1695 train_time:153495ms step_avg:99.41ms +step:1545/1695 train_time:153597ms step_avg:99.42ms +step:1546/1695 train_time:153698ms step_avg:99.42ms +step:1547/1695 train_time:153800ms step_avg:99.42ms +step:1548/1695 train_time:153902ms step_avg:99.42ms +step:1549/1695 train_time:154005ms step_avg:99.42ms +step:1550/1695 train_time:154106ms step_avg:99.42ms +step:1551/1695 train_time:154208ms step_avg:99.42ms +step:1552/1695 train_time:154311ms step_avg:99.43ms +step:1553/1695 train_time:154413ms step_avg:99.43ms +step:1554/1695 train_time:154515ms step_avg:99.43ms +step:1555/1695 train_time:154616ms step_avg:99.43ms +step:1556/1695 train_time:154718ms step_avg:99.43ms +step:1557/1695 train_time:154823ms step_avg:99.44ms +step:1558/1695 train_time:154926ms step_avg:99.44ms +step:1559/1695 train_time:155027ms step_avg:99.44ms +step:1560/1695 train_time:155129ms step_avg:99.44ms +step:1561/1695 train_time:155230ms step_avg:99.44ms +step:1562/1695 train_time:155333ms step_avg:99.44ms +step:1563/1695 train_time:155437ms step_avg:99.45ms +step:1564/1695 train_time:155538ms step_avg:99.45ms +step:1565/1695 train_time:155639ms step_avg:99.45ms +step:1566/1695 train_time:155740ms step_avg:99.45ms +step:1567/1695 train_time:155841ms step_avg:99.45ms +step:1568/1695 train_time:155942ms step_avg:99.45ms +step:1569/1695 train_time:156044ms step_avg:99.45ms +step:1570/1695 train_time:156148ms step_avg:99.46ms +step:1571/1695 train_time:156250ms step_avg:99.46ms +step:1572/1695 train_time:156351ms step_avg:99.46ms +step:1573/1695 train_time:156453ms step_avg:99.46ms +step:1574/1695 train_time:156556ms step_avg:99.46ms 
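
For reference, the validation checkpoints visible in this record (every 125 steps, plus the final step) collected into one literal; only the val_curve name is new, the numbers are transcribed from the val_loss entries:

val_curve = [  # (step, val_loss)
    (375, 3.8794), (500, 3.7347), (625, 3.6510), (750, 3.5898),
    (875, 3.5387), (1000, 3.4933), (1125, 3.4421), (1250, 3.3966),
    (1375, 3.3574), (1500, 3.3221), (1625, 3.2931), (1695, 3.2802),
]

The curve is still falling at the end of the run: 3.3221 -> 3.2931 -> 3.2802 over the final 195 steps.
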
+step:1575/1695 train_time:156657ms step_avg:99.46ms +step:1576/1695 train_time:156758ms step_avg:99.47ms +step:1577/1695 train_time:156861ms step_avg:99.47ms +step:1578/1695 train_time:156963ms step_avg:99.47ms +step:1579/1695 train_time:157065ms step_avg:99.47ms +step:1580/1695 train_time:157167ms step_avg:99.47ms +step:1581/1695 train_time:157270ms step_avg:99.47ms +step:1582/1695 train_time:157371ms step_avg:99.48ms +step:1583/1695 train_time:157475ms step_avg:99.48ms +step:1584/1695 train_time:157579ms step_avg:99.48ms +step:1585/1695 train_time:157680ms step_avg:99.48ms +step:1586/1695 train_time:157782ms step_avg:99.48ms +step:1587/1695 train_time:157884ms step_avg:99.49ms +step:1588/1695 train_time:157985ms step_avg:99.49ms +step:1589/1695 train_time:158087ms step_avg:99.49ms +step:1590/1695 train_time:158189ms step_avg:99.49ms +step:1591/1695 train_time:158290ms step_avg:99.49ms +step:1592/1695 train_time:158393ms step_avg:99.49ms +step:1593/1695 train_time:158494ms step_avg:99.49ms +step:1594/1695 train_time:158598ms step_avg:99.50ms +step:1595/1695 train_time:158699ms step_avg:99.50ms +step:1596/1695 train_time:158801ms step_avg:99.50ms +step:1597/1695 train_time:158904ms step_avg:99.50ms +step:1598/1695 train_time:159008ms step_avg:99.50ms +step:1599/1695 train_time:159109ms step_avg:99.51ms +step:1600/1695 train_time:159211ms step_avg:99.51ms +step:1601/1695 train_time:159313ms step_avg:99.51ms +step:1602/1695 train_time:159416ms step_avg:99.51ms +step:1603/1695 train_time:159517ms step_avg:99.51ms +step:1604/1695 train_time:159618ms step_avg:99.51ms +step:1605/1695 train_time:159721ms step_avg:99.51ms +step:1606/1695 train_time:159823ms step_avg:99.52ms +step:1607/1695 train_time:159925ms step_avg:99.52ms +step:1608/1695 train_time:160026ms step_avg:99.52ms +step:1609/1695 train_time:160127ms step_avg:99.52ms +step:1610/1695 train_time:160230ms step_avg:99.52ms +step:1611/1695 train_time:160332ms step_avg:99.52ms +step:1612/1695 train_time:160435ms step_avg:99.53ms +step:1613/1695 train_time:160535ms step_avg:99.53ms +step:1614/1695 train_time:160636ms step_avg:99.53ms +step:1615/1695 train_time:160738ms step_avg:99.53ms +step:1616/1695 train_time:160838ms step_avg:99.53ms +step:1617/1695 train_time:160942ms step_avg:99.53ms +step:1618/1695 train_time:161044ms step_avg:99.53ms +step:1619/1695 train_time:161147ms step_avg:99.53ms +step:1620/1695 train_time:161249ms step_avg:99.54ms +step:1621/1695 train_time:161351ms step_avg:99.54ms +step:1622/1695 train_time:161452ms step_avg:99.54ms +step:1623/1695 train_time:161552ms step_avg:99.54ms +step:1624/1695 train_time:161654ms step_avg:99.54ms +step:1625/1695 train_time:161756ms step_avg:99.54ms +step:1625/1695 val_loss:3.2931 train_time:161855ms step_avg:99.60ms +step:1626/1695 train_time:161883ms step_avg:99.56ms +step:1627/1695 train_time:161972ms step_avg:99.55ms +step:1628/1695 train_time:162075ms step_avg:99.55ms +step:1629/1695 train_time:162178ms step_avg:99.56ms +step:1630/1695 train_time:162279ms step_avg:99.56ms +step:1631/1695 train_time:162381ms step_avg:99.56ms +step:1632/1695 train_time:162482ms step_avg:99.56ms +step:1633/1695 train_time:162582ms step_avg:99.56ms +step:1634/1695 train_time:162686ms step_avg:99.56ms +step:1635/1695 train_time:162787ms step_avg:99.56ms +step:1636/1695 train_time:162892ms step_avg:99.57ms +step:1637/1695 train_time:162994ms step_avg:99.57ms +step:1638/1695 train_time:163097ms step_avg:99.57ms +step:1639/1695 train_time:163199ms step_avg:99.57ms +step:1640/1695 train_time:163301ms 
step_avg:99.57ms +step:1641/1695 train_time:163405ms step_avg:99.58ms +step:1642/1695 train_time:163506ms step_avg:99.58ms +step:1643/1695 train_time:163608ms step_avg:99.58ms +step:1644/1695 train_time:163709ms step_avg:99.58ms +step:1645/1695 train_time:163812ms step_avg:99.58ms +step:1646/1695 train_time:163914ms step_avg:99.58ms +step:1647/1695 train_time:164019ms step_avg:99.59ms +step:1648/1695 train_time:164123ms step_avg:99.59ms +step:1649/1695 train_time:164226ms step_avg:99.59ms +step:1650/1695 train_time:164328ms step_avg:99.59ms +step:1651/1695 train_time:164430ms step_avg:99.59ms +step:1652/1695 train_time:164533ms step_avg:99.60ms +step:1653/1695 train_time:164636ms step_avg:99.60ms +step:1654/1695 train_time:164739ms step_avg:99.60ms +step:1655/1695 train_time:164841ms step_avg:99.60ms +step:1656/1695 train_time:164944ms step_avg:99.60ms +step:1657/1695 train_time:165046ms step_avg:99.61ms +step:1658/1695 train_time:165148ms step_avg:99.61ms +step:1659/1695 train_time:165254ms step_avg:99.61ms +step:1660/1695 train_time:165357ms step_avg:99.61ms +step:1661/1695 train_time:165460ms step_avg:99.61ms +step:1662/1695 train_time:165565ms step_avg:99.62ms +step:1663/1695 train_time:165668ms step_avg:99.62ms +step:1664/1695 train_time:165771ms step_avg:99.62ms +step:1665/1695 train_time:165878ms step_avg:99.63ms +step:1666/1695 train_time:165981ms step_avg:99.63ms +step:1667/1695 train_time:166083ms step_avg:99.63ms +step:1668/1695 train_time:166187ms step_avg:99.63ms +step:1669/1695 train_time:166291ms step_avg:99.64ms +step:1670/1695 train_time:166393ms step_avg:99.64ms +step:1671/1695 train_time:166495ms step_avg:99.64ms +step:1672/1695 train_time:166600ms step_avg:99.64ms +step:1673/1695 train_time:166703ms step_avg:99.64ms +step:1674/1695 train_time:166806ms step_avg:99.64ms +step:1675/1695 train_time:166908ms step_avg:99.65ms +step:1676/1695 train_time:167013ms step_avg:99.65ms +step:1677/1695 train_time:167115ms step_avg:99.65ms +step:1678/1695 train_time:167220ms step_avg:99.65ms +step:1679/1695 train_time:167325ms step_avg:99.66ms +step:1680/1695 train_time:167427ms step_avg:99.66ms +step:1681/1695 train_time:167530ms step_avg:99.66ms +step:1682/1695 train_time:167636ms step_avg:99.66ms +step:1683/1695 train_time:167739ms step_avg:99.67ms +step:1684/1695 train_time:167842ms step_avg:99.67ms +step:1685/1695 train_time:167945ms step_avg:99.67ms +step:1686/1695 train_time:168047ms step_avg:99.67ms +step:1687/1695 train_time:168149ms step_avg:99.67ms +step:1688/1695 train_time:168251ms step_avg:99.67ms +step:1689/1695 train_time:168352ms step_avg:99.68ms +step:1690/1695 train_time:168455ms step_avg:99.68ms +step:1691/1695 train_time:168558ms step_avg:99.68ms +step:1692/1695 train_time:168661ms step_avg:99.68ms +step:1693/1695 train_time:168764ms step_avg:99.68ms +step:1694/1695 train_time:168868ms step_avg:99.69ms +step:1695/1695 train_time:168971ms step_avg:99.69ms +step:1695/1695 val_loss:3.2802 train_time:169070ms step_avg:99.75ms +peak memory allocated: 34004 MiB reserved: 49600 MiB diff --git a/records/082325_SparseAttnGate/48b19604-5049-48c9-956c-8ddc4d0781fb.txt b/records/082325_SparseAttnGate/48b19604-5049-48c9-956c-8ddc4d0781fb.txt new file mode 100644 index 000000000..c9fef39e2 --- /dev/null +++ b/records/082325_SparseAttnGate/48b19604-5049-48c9-956c-8ddc4d0781fb.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
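+        # at_ptrs advances by a_stride_c as well: both pointer arrays index A's own storage, where k is a column index of A, so stepping the K loop moves both views by BLOCK_SIZE_K columns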
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
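+
+    Example (a minimal usage sketch; assumes torch.distributed is already
+    initialized, since step() issues reduce-scatter/all-gather collectives):
+
+        muon = Muon(hidden_matrix_params, lr=0.05, momentum=0.95)
+        loss = model(inputs, targets, get_window_size_blocks(step))
+        loss.backward()
+        muon.step()
+        model.zero_grad(set_to_none=True)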
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
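+        # lr_mul (like wd_mul) is a plain attribute rather than optimizer state: DistAdam and
+        # Muon both read it via getattr(p, "lr_mul", 1.0) to scale that tensor's effective lr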
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from a suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
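+        # each rank averaged the loss over its own shard of the validation tokens; the
+        # all-reduce above turns those per-rank means into the global mean val_loss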
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:48:02 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 32C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 37C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 31C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 38C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 319303 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 319304 C /usr/bin/python3 614MiB | +| 0 N/A N/A 319305 C /usr/bin/python3 614MiB | +| 0 N/A N/A 319306 C /usr/bin/python3 614MiB | +| 0 N/A N/A 319307 C /usr/bin/python3 614MiB | +| 0 N/A N/A 319308 C /usr/bin/python3 614MiB | +| 0 N/A N/A 319309 C /usr/bin/python3 614MiB | +| 0 N/A N/A 319310 C /usr/bin/python3 614MiB | +| 1 N/A N/A 319304 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 319305 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 319306 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 319307 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 319308 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 319309 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 319310 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.06ms +step:1/1695 train_time:155ms step_avg:155.16ms +step:2/1695 train_time:182ms step_avg:90.82ms +step:3/1695 train_time:250ms step_avg:83.49ms +step:4/1695 train_time:342ms step_avg:85.55ms +step:5/1695 train_time:435ms step_avg:86.91ms +step:6/1695 train_time:527ms step_avg:87.78ms +step:7/1695 train_time:619ms step_avg:88.50ms +step:8/1695 
train_time:712ms step_avg:89.01ms +step:9/1695 train_time:805ms step_avg:89.42ms +step:10/1695 train_time:898ms step_avg:89.80ms +step:11/1695 train_time:990ms step_avg:90.03ms +step:12/1695 train_time:1086ms step_avg:90.46ms +step:13/1695 train_time:1182ms step_avg:90.92ms +step:14/1695 train_time:1277ms step_avg:91.23ms +step:15/1695 train_time:1371ms step_avg:91.38ms +step:16/1695 train_time:1465ms step_avg:91.54ms +step:17/1695 train_time:1558ms step_avg:91.66ms +step:18/1695 train_time:1651ms step_avg:91.75ms +step:19/1695 train_time:1744ms step_avg:91.79ms +step:20/1695 train_time:1838ms step_avg:91.88ms +step:21/1695 train_time:1931ms step_avg:91.93ms +step:22/1695 train_time:2025ms step_avg:92.02ms +step:23/1695 train_time:2119ms step_avg:92.11ms +step:24/1695 train_time:2213ms step_avg:92.19ms +step:25/1695 train_time:2306ms step_avg:92.25ms +step:26/1695 train_time:2401ms step_avg:92.34ms +step:27/1695 train_time:2494ms step_avg:92.38ms +step:28/1695 train_time:2588ms step_avg:92.43ms +step:29/1695 train_time:2682ms step_avg:92.48ms +step:30/1695 train_time:2776ms step_avg:92.52ms +step:31/1695 train_time:2869ms step_avg:92.54ms +step:32/1695 train_time:2962ms step_avg:92.57ms +step:33/1695 train_time:3056ms step_avg:92.61ms +step:34/1695 train_time:3149ms step_avg:92.62ms +step:35/1695 train_time:3243ms step_avg:92.66ms +step:36/1695 train_time:3338ms step_avg:92.73ms +step:37/1695 train_time:3431ms step_avg:92.73ms +step:38/1695 train_time:3525ms step_avg:92.76ms +step:39/1695 train_time:3619ms step_avg:92.79ms +step:40/1695 train_time:3713ms step_avg:92.82ms +step:41/1695 train_time:3806ms step_avg:92.83ms +step:42/1695 train_time:3900ms step_avg:92.85ms +step:43/1695 train_time:3993ms step_avg:92.86ms +step:44/1695 train_time:4086ms step_avg:92.87ms +step:45/1695 train_time:4181ms step_avg:92.90ms +step:46/1695 train_time:4274ms step_avg:92.92ms +step:47/1695 train_time:4368ms step_avg:92.93ms +step:48/1695 train_time:4461ms step_avg:92.95ms +step:49/1695 train_time:4555ms step_avg:92.96ms +step:50/1695 train_time:4648ms step_avg:92.96ms +step:51/1695 train_time:4743ms step_avg:92.99ms +step:52/1695 train_time:4835ms step_avg:92.98ms +step:53/1695 train_time:4929ms step_avg:93.00ms +step:54/1695 train_time:5023ms step_avg:93.01ms +step:55/1695 train_time:5116ms step_avg:93.02ms +step:56/1695 train_time:5210ms step_avg:93.03ms +step:57/1695 train_time:5304ms step_avg:93.06ms +step:58/1695 train_time:5399ms step_avg:93.08ms +step:59/1695 train_time:5494ms step_avg:93.12ms +step:60/1695 train_time:5588ms step_avg:93.13ms +step:61/1695 train_time:5681ms step_avg:93.13ms +step:62/1695 train_time:5775ms step_avg:93.14ms +step:63/1695 train_time:5868ms step_avg:93.14ms +step:64/1695 train_time:5961ms step_avg:93.15ms +step:65/1695 train_time:6054ms step_avg:93.14ms +step:66/1695 train_time:6147ms step_avg:93.14ms +step:67/1695 train_time:6240ms step_avg:93.13ms +step:68/1695 train_time:6333ms step_avg:93.14ms +step:69/1695 train_time:6427ms step_avg:93.14ms +step:70/1695 train_time:6522ms step_avg:93.17ms +step:71/1695 train_time:6616ms step_avg:93.18ms +step:72/1695 train_time:6710ms step_avg:93.19ms +step:73/1695 train_time:6804ms step_avg:93.21ms +step:74/1695 train_time:6899ms step_avg:93.23ms +step:75/1695 train_time:6993ms step_avg:93.24ms +step:76/1695 train_time:7086ms step_avg:93.24ms +step:77/1695 train_time:7181ms step_avg:93.25ms +step:78/1695 train_time:7275ms step_avg:93.27ms +step:79/1695 train_time:7369ms step_avg:93.28ms +step:80/1695 train_time:7462ms 
step_avg:93.28ms +step:81/1695 train_time:7556ms step_avg:93.29ms +step:82/1695 train_time:7650ms step_avg:93.29ms +step:83/1695 train_time:7743ms step_avg:93.29ms +step:84/1695 train_time:7837ms step_avg:93.30ms +step:85/1695 train_time:7931ms step_avg:93.30ms +step:86/1695 train_time:8025ms step_avg:93.31ms +step:87/1695 train_time:8121ms step_avg:93.34ms +step:88/1695 train_time:8213ms step_avg:93.33ms +step:89/1695 train_time:8306ms step_avg:93.32ms +step:90/1695 train_time:8400ms step_avg:93.33ms +step:91/1695 train_time:8494ms step_avg:93.34ms +step:92/1695 train_time:8588ms step_avg:93.35ms +step:93/1695 train_time:8682ms step_avg:93.35ms +step:94/1695 train_time:8775ms step_avg:93.35ms +step:95/1695 train_time:8868ms step_avg:93.35ms +step:96/1695 train_time:8962ms step_avg:93.35ms +step:97/1695 train_time:9055ms step_avg:93.36ms +step:98/1695 train_time:9149ms step_avg:93.35ms +step:99/1695 train_time:9242ms step_avg:93.35ms +step:100/1695 train_time:9336ms step_avg:93.36ms +step:101/1695 train_time:9429ms step_avg:93.36ms +step:102/1695 train_time:9524ms step_avg:93.37ms +step:103/1695 train_time:9619ms step_avg:93.38ms +step:104/1695 train_time:9712ms step_avg:93.38ms +step:105/1695 train_time:9806ms step_avg:93.39ms +step:106/1695 train_time:9900ms step_avg:93.40ms +step:107/1695 train_time:9994ms step_avg:93.40ms +step:108/1695 train_time:10087ms step_avg:93.40ms +step:109/1695 train_time:10180ms step_avg:93.40ms +step:110/1695 train_time:10274ms step_avg:93.40ms +step:111/1695 train_time:10368ms step_avg:93.41ms +step:112/1695 train_time:10462ms step_avg:93.41ms +step:113/1695 train_time:10556ms step_avg:93.42ms +step:114/1695 train_time:10649ms step_avg:93.41ms +step:115/1695 train_time:10742ms step_avg:93.41ms +step:116/1695 train_time:10835ms step_avg:93.41ms +step:117/1695 train_time:10929ms step_avg:93.41ms +step:118/1695 train_time:11023ms step_avg:93.41ms +step:119/1695 train_time:11116ms step_avg:93.41ms +step:120/1695 train_time:11210ms step_avg:93.41ms +step:121/1695 train_time:11305ms step_avg:93.43ms +step:122/1695 train_time:11397ms step_avg:93.42ms +step:123/1695 train_time:11490ms step_avg:93.42ms +step:124/1695 train_time:11584ms step_avg:93.42ms +step:125/1695 train_time:11678ms step_avg:93.43ms +step:125/1695 val_loss:4.5872 train_time:11770ms step_avg:94.16ms +step:126/1695 train_time:11798ms step_avg:93.64ms +step:127/1695 train_time:11875ms step_avg:93.50ms +step:128/1695 train_time:11974ms step_avg:93.55ms +step:129/1695 train_time:12070ms step_avg:93.56ms +step:130/1695 train_time:12165ms step_avg:93.58ms +step:131/1695 train_time:12257ms step_avg:93.57ms +step:132/1695 train_time:12351ms step_avg:93.57ms +step:133/1695 train_time:12444ms step_avg:93.56ms +step:134/1695 train_time:12537ms step_avg:93.56ms +step:135/1695 train_time:12630ms step_avg:93.56ms +step:136/1695 train_time:12724ms step_avg:93.56ms +step:137/1695 train_time:12818ms step_avg:93.56ms +step:138/1695 train_time:12913ms step_avg:93.57ms +step:139/1695 train_time:13008ms step_avg:93.58ms +step:140/1695 train_time:13104ms step_avg:93.60ms +step:141/1695 train_time:13199ms step_avg:93.61ms +step:142/1695 train_time:13292ms step_avg:93.61ms +step:143/1695 train_time:13386ms step_avg:93.61ms +step:144/1695 train_time:13481ms step_avg:93.62ms +step:145/1695 train_time:13574ms step_avg:93.61ms +step:146/1695 train_time:13668ms step_avg:93.62ms +step:147/1695 train_time:13762ms step_avg:93.62ms +step:148/1695 train_time:13857ms step_avg:93.63ms +step:149/1695 train_time:13951ms 
step_avg:93.63ms +step:150/1695 train_time:14047ms step_avg:93.64ms +step:151/1695 train_time:14142ms step_avg:93.65ms +step:152/1695 train_time:14236ms step_avg:93.66ms +step:153/1695 train_time:14330ms step_avg:93.66ms +step:154/1695 train_time:14424ms step_avg:93.66ms +step:155/1695 train_time:14518ms step_avg:93.66ms +step:156/1695 train_time:14613ms step_avg:93.67ms +step:157/1695 train_time:14706ms step_avg:93.67ms +step:158/1695 train_time:14801ms step_avg:93.67ms +step:159/1695 train_time:14895ms step_avg:93.68ms +step:160/1695 train_time:14989ms step_avg:93.68ms +step:161/1695 train_time:15084ms step_avg:93.69ms +step:162/1695 train_time:15179ms step_avg:93.69ms +step:163/1695 train_time:15273ms step_avg:93.70ms +step:164/1695 train_time:15367ms step_avg:93.70ms +step:165/1695 train_time:15461ms step_avg:93.70ms +step:166/1695 train_time:15554ms step_avg:93.70ms +step:167/1695 train_time:15648ms step_avg:93.70ms +step:168/1695 train_time:15742ms step_avg:93.71ms +step:169/1695 train_time:15838ms step_avg:93.71ms +step:170/1695 train_time:15931ms step_avg:93.71ms +step:171/1695 train_time:16026ms step_avg:93.72ms +step:172/1695 train_time:16121ms step_avg:93.73ms +step:173/1695 train_time:16214ms step_avg:93.73ms +step:174/1695 train_time:16308ms step_avg:93.73ms +step:175/1695 train_time:16403ms step_avg:93.73ms +step:176/1695 train_time:16496ms step_avg:93.73ms +step:177/1695 train_time:16590ms step_avg:93.73ms +step:178/1695 train_time:16684ms step_avg:93.73ms +step:179/1695 train_time:16778ms step_avg:93.73ms +step:180/1695 train_time:16873ms step_avg:93.74ms +step:181/1695 train_time:16967ms step_avg:93.74ms +step:182/1695 train_time:17061ms step_avg:93.74ms +step:183/1695 train_time:17155ms step_avg:93.74ms +step:184/1695 train_time:17249ms step_avg:93.74ms +step:185/1695 train_time:17343ms step_avg:93.75ms +step:186/1695 train_time:17437ms step_avg:93.75ms +step:187/1695 train_time:17530ms step_avg:93.74ms +step:188/1695 train_time:17624ms step_avg:93.75ms +step:189/1695 train_time:17718ms step_avg:93.74ms +step:190/1695 train_time:17811ms step_avg:93.74ms +step:191/1695 train_time:17906ms step_avg:93.75ms +step:192/1695 train_time:18000ms step_avg:93.75ms +step:193/1695 train_time:18094ms step_avg:93.75ms +step:194/1695 train_time:18188ms step_avg:93.75ms +step:195/1695 train_time:18282ms step_avg:93.75ms +step:196/1695 train_time:18377ms step_avg:93.76ms +step:197/1695 train_time:18470ms step_avg:93.76ms +step:198/1695 train_time:18564ms step_avg:93.76ms +step:199/1695 train_time:18658ms step_avg:93.76ms +step:200/1695 train_time:18752ms step_avg:93.76ms +step:201/1695 train_time:18846ms step_avg:93.76ms +step:202/1695 train_time:18941ms step_avg:93.77ms +step:203/1695 train_time:19035ms step_avg:93.77ms +step:204/1695 train_time:19130ms step_avg:93.77ms +step:205/1695 train_time:19224ms step_avg:93.77ms +step:206/1695 train_time:19318ms step_avg:93.78ms +step:207/1695 train_time:19412ms step_avg:93.78ms +step:208/1695 train_time:19506ms step_avg:93.78ms +step:209/1695 train_time:19600ms step_avg:93.78ms +step:210/1695 train_time:19694ms step_avg:93.78ms +step:211/1695 train_time:19788ms step_avg:93.78ms +step:212/1695 train_time:19883ms step_avg:93.79ms +step:213/1695 train_time:19977ms step_avg:93.79ms +step:214/1695 train_time:20070ms step_avg:93.79ms +step:215/1695 train_time:20164ms step_avg:93.79ms +step:216/1695 train_time:20258ms step_avg:93.79ms +step:217/1695 train_time:20352ms step_avg:93.79ms +step:218/1695 train_time:20446ms step_avg:93.79ms +step:219/1695 
+step:220/1695 train_time:20634ms step_avg:93.79ms
+step:221/1695 train_time:20728ms step_avg:93.79ms
+step:222/1695 train_time:20822ms step_avg:93.79ms
+step:223/1695 train_time:20916ms step_avg:93.79ms
+step:224/1695 train_time:21009ms step_avg:93.79ms
+step:225/1695 train_time:21104ms step_avg:93.80ms
+step:226/1695 train_time:21200ms step_avg:93.80ms
+step:227/1695 train_time:21294ms step_avg:93.80ms
+step:228/1695 train_time:21388ms step_avg:93.81ms
+step:229/1695 train_time:21481ms step_avg:93.80ms
+step:230/1695 train_time:21576ms step_avg:93.81ms
+step:231/1695 train_time:21669ms step_avg:93.81ms
+step:232/1695 train_time:21763ms step_avg:93.81ms
+step:233/1695 train_time:21857ms step_avg:93.81ms
+step:234/1695 train_time:21950ms step_avg:93.80ms
+step:235/1695 train_time:22044ms step_avg:93.80ms
+step:236/1695 train_time:22138ms step_avg:93.81ms
+step:237/1695 train_time:22232ms step_avg:93.81ms
+step:238/1695 train_time:22327ms step_avg:93.81ms
+step:239/1695 train_time:22421ms step_avg:93.81ms
+step:240/1695 train_time:22514ms step_avg:93.81ms
+step:241/1695 train_time:22608ms step_avg:93.81ms
+step:242/1695 train_time:22704ms step_avg:93.82ms
+step:243/1695 train_time:22798ms step_avg:93.82ms
+step:244/1695 train_time:22893ms step_avg:93.82ms
+step:245/1695 train_time:22986ms step_avg:93.82ms
+step:246/1695 train_time:23080ms step_avg:93.82ms
+step:247/1695 train_time:23174ms step_avg:93.82ms
+step:248/1695 train_time:23268ms step_avg:93.82ms
+step:249/1695 train_time:23362ms step_avg:93.82ms
+step:250/1695 train_time:23457ms step_avg:93.83ms
+step:250/1695 val_loss:4.0644 train_time:23548ms step_avg:94.19ms
+step:251/1695 train_time:23576ms step_avg:93.93ms
+step:252/1695 train_time:23651ms step_avg:93.85ms
+step:253/1695 train_time:23751ms step_avg:93.88ms
+step:254/1695 train_time:23846ms step_avg:93.88ms
+step:255/1695 train_time:23941ms step_avg:93.89ms
+step:256/1695 train_time:24035ms step_avg:93.89ms
+step:257/1695 train_time:24128ms step_avg:93.88ms
+step:258/1695 train_time:24222ms step_avg:93.88ms
+step:259/1695 train_time:24316ms step_avg:93.88ms
+step:260/1695 train_time:24411ms step_avg:93.89ms
+step:261/1695 train_time:24504ms step_avg:93.89ms
+step:262/1695 train_time:24600ms step_avg:93.89ms
+step:263/1695 train_time:24697ms step_avg:93.90ms
+step:264/1695 train_time:24794ms step_avg:93.92ms
+step:265/1695 train_time:24888ms step_avg:93.92ms
+step:266/1695 train_time:24983ms step_avg:93.92ms
+step:267/1695 train_time:25077ms step_avg:93.92ms
+step:268/1695 train_time:25171ms step_avg:93.92ms
+step:269/1695 train_time:25264ms step_avg:93.92ms
+step:270/1695 train_time:25358ms step_avg:93.92ms
+step:271/1695 train_time:25451ms step_avg:93.92ms
+step:272/1695 train_time:25546ms step_avg:93.92ms
+step:273/1695 train_time:25641ms step_avg:93.92ms
+step:274/1695 train_time:25736ms step_avg:93.93ms
+step:275/1695 train_time:25831ms step_avg:93.93ms
+step:276/1695 train_time:25927ms step_avg:93.94ms
+step:277/1695 train_time:26022ms step_avg:93.94ms
+step:278/1695 train_time:26117ms step_avg:93.95ms
+step:279/1695 train_time:26210ms step_avg:93.94ms
+step:280/1695 train_time:26304ms step_avg:93.94ms
+step:281/1695 train_time:26399ms step_avg:93.95ms
+step:282/1695 train_time:26492ms step_avg:93.94ms
+step:283/1695 train_time:26586ms step_avg:93.94ms
+step:284/1695 train_time:26682ms step_avg:93.95ms
+step:285/1695 train_time:26777ms step_avg:93.95ms
+step:286/1695 train_time:26871ms step_avg:93.95ms
+step:287/1695 train_time:26967ms step_avg:93.96ms
+step:288/1695 train_time:27062ms step_avg:93.97ms
+step:289/1695 train_time:27157ms step_avg:93.97ms
+step:290/1695 train_time:27251ms step_avg:93.97ms
+step:291/1695 train_time:27345ms step_avg:93.97ms
+step:292/1695 train_time:27440ms step_avg:93.97ms
+step:293/1695 train_time:27534ms step_avg:93.97ms
+step:294/1695 train_time:27629ms step_avg:93.97ms
+step:295/1695 train_time:27724ms step_avg:93.98ms
+step:296/1695 train_time:27821ms step_avg:93.99ms
+step:297/1695 train_time:27915ms step_avg:93.99ms
+step:298/1695 train_time:28009ms step_avg:93.99ms
+step:299/1695 train_time:28104ms step_avg:93.99ms
+step:300/1695 train_time:28199ms step_avg:94.00ms
+step:301/1695 train_time:28293ms step_avg:94.00ms
+step:302/1695 train_time:28387ms step_avg:94.00ms
+step:303/1695 train_time:28482ms step_avg:94.00ms
+step:304/1695 train_time:28576ms step_avg:94.00ms
+step:305/1695 train_time:28670ms step_avg:94.00ms
+step:306/1695 train_time:28765ms step_avg:94.00ms
+step:307/1695 train_time:28860ms step_avg:94.01ms
+step:308/1695 train_time:28955ms step_avg:94.01ms
+step:309/1695 train_time:29049ms step_avg:94.01ms
+step:310/1695 train_time:29143ms step_avg:94.01ms
+step:311/1695 train_time:29238ms step_avg:94.01ms
+step:312/1695 train_time:29332ms step_avg:94.01ms
+step:313/1695 train_time:29427ms step_avg:94.02ms
+step:314/1695 train_time:29522ms step_avg:94.02ms
+step:315/1695 train_time:29617ms step_avg:94.02ms
+step:316/1695 train_time:29710ms step_avg:94.02ms
+step:317/1695 train_time:29805ms step_avg:94.02ms
+step:318/1695 train_time:29901ms step_avg:94.03ms
+step:319/1695 train_time:29995ms step_avg:94.03ms
+step:320/1695 train_time:30089ms step_avg:94.03ms
+step:321/1695 train_time:30184ms step_avg:94.03ms
+step:322/1695 train_time:30279ms step_avg:94.04ms
+step:323/1695 train_time:30374ms step_avg:94.04ms
+step:324/1695 train_time:30468ms step_avg:94.04ms
+step:325/1695 train_time:30562ms step_avg:94.04ms
+step:326/1695 train_time:30656ms step_avg:94.04ms
+step:327/1695 train_time:30751ms step_avg:94.04ms
+step:328/1695 train_time:30846ms step_avg:94.04ms
+step:329/1695 train_time:30942ms step_avg:94.05ms
+step:330/1695 train_time:31036ms step_avg:94.05ms
+step:331/1695 train_time:31130ms step_avg:94.05ms
+step:332/1695 train_time:31224ms step_avg:94.05ms
+step:333/1695 train_time:31319ms step_avg:94.05ms
+step:334/1695 train_time:31414ms step_avg:94.05ms
+step:335/1695 train_time:31508ms step_avg:94.05ms
+step:336/1695 train_time:31602ms step_avg:94.05ms
+step:337/1695 train_time:31697ms step_avg:94.06ms
+step:338/1695 train_time:31791ms step_avg:94.06ms
+step:339/1695 train_time:31886ms step_avg:94.06ms
+step:340/1695 train_time:31981ms step_avg:94.06ms
+step:341/1695 train_time:32074ms step_avg:94.06ms
+step:342/1695 train_time:32168ms step_avg:94.06ms
+step:343/1695 train_time:32263ms step_avg:94.06ms
+step:344/1695 train_time:32358ms step_avg:94.06ms
+step:345/1695 train_time:32452ms step_avg:94.06ms
+step:346/1695 train_time:32547ms step_avg:94.07ms
+step:347/1695 train_time:32643ms step_avg:94.07ms
+step:348/1695 train_time:32738ms step_avg:94.07ms
+step:349/1695 train_time:32833ms step_avg:94.08ms
+step:350/1695 train_time:32926ms step_avg:94.08ms
+step:351/1695 train_time:33023ms step_avg:94.08ms
+step:352/1695 train_time:33117ms step_avg:94.08ms
+step:353/1695 train_time:33211ms step_avg:94.08ms
+step:354/1695 train_time:33307ms step_avg:94.09ms
+step:355/1695 train_time:33401ms step_avg:94.09ms
+step:356/1695 train_time:33496ms step_avg:94.09ms
+step:357/1695 train_time:33590ms step_avg:94.09ms
+step:358/1695 train_time:33686ms step_avg:94.09ms
+step:359/1695 train_time:33780ms step_avg:94.10ms
+step:360/1695 train_time:33874ms step_avg:94.09ms
+step:361/1695 train_time:33969ms step_avg:94.10ms
+step:362/1695 train_time:34064ms step_avg:94.10ms
+step:363/1695 train_time:34159ms step_avg:94.10ms
+step:364/1695 train_time:34253ms step_avg:94.10ms
+step:365/1695 train_time:34347ms step_avg:94.10ms
+step:366/1695 train_time:34441ms step_avg:94.10ms
+step:367/1695 train_time:34535ms step_avg:94.10ms
+step:368/1695 train_time:34628ms step_avg:94.10ms
+step:369/1695 train_time:34723ms step_avg:94.10ms
+step:370/1695 train_time:34818ms step_avg:94.10ms
+step:371/1695 train_time:34912ms step_avg:94.10ms
+step:372/1695 train_time:35007ms step_avg:94.10ms
+step:373/1695 train_time:35101ms step_avg:94.10ms
+step:374/1695 train_time:35195ms step_avg:94.10ms
+step:375/1695 train_time:35289ms step_avg:94.10ms
+step:375/1695 val_loss:3.8654 train_time:35382ms step_avg:94.35ms
+step:376/1695 train_time:35410ms step_avg:94.17ms
+step:377/1695 train_time:35486ms step_avg:94.13ms
+step:378/1695 train_time:35588ms step_avg:94.15ms
+step:379/1695 train_time:35685ms step_avg:94.15ms
+step:380/1695 train_time:35781ms step_avg:94.16ms
+step:381/1695 train_time:35877ms step_avg:94.17ms
+step:382/1695 train_time:35972ms step_avg:94.17ms
+step:383/1695 train_time:36068ms step_avg:94.17ms
+step:384/1695 train_time:36163ms step_avg:94.18ms
+step:385/1695 train_time:36260ms step_avg:94.18ms
+step:386/1695 train_time:36354ms step_avg:94.18ms
+step:387/1695 train_time:36450ms step_avg:94.19ms
+step:388/1695 train_time:36547ms step_avg:94.19ms
+step:389/1695 train_time:36645ms step_avg:94.20ms
+step:390/1695 train_time:36741ms step_avg:94.21ms
+step:391/1695 train_time:36838ms step_avg:94.21ms
+step:392/1695 train_time:36934ms step_avg:94.22ms
+step:393/1695 train_time:37030ms step_avg:94.22ms
+step:394/1695 train_time:37125ms step_avg:94.23ms
+step:395/1695 train_time:37221ms step_avg:94.23ms
+step:396/1695 train_time:37317ms step_avg:94.24ms
+step:397/1695 train_time:37413ms step_avg:94.24ms
+step:398/1695 train_time:37509ms step_avg:94.24ms
+step:399/1695 train_time:37606ms step_avg:94.25ms
+step:400/1695 train_time:37702ms step_avg:94.25ms
+step:401/1695 train_time:37799ms step_avg:94.26ms
+step:402/1695 train_time:37895ms step_avg:94.27ms
+step:403/1695 train_time:37991ms step_avg:94.27ms
+step:404/1695 train_time:38087ms step_avg:94.27ms
+step:405/1695 train_time:38182ms step_avg:94.28ms
+step:406/1695 train_time:38278ms step_avg:94.28ms
+step:407/1695 train_time:38375ms step_avg:94.29ms
+step:408/1695 train_time:38472ms step_avg:94.29ms
+step:409/1695 train_time:38568ms step_avg:94.30ms
+step:410/1695 train_time:38664ms step_avg:94.30ms
+step:411/1695 train_time:38760ms step_avg:94.31ms
+step:412/1695 train_time:38857ms step_avg:94.31ms
+step:413/1695 train_time:38953ms step_avg:94.32ms
+step:414/1695 train_time:39049ms step_avg:94.32ms
+step:415/1695 train_time:39145ms step_avg:94.33ms
+step:416/1695 train_time:39241ms step_avg:94.33ms
+step:417/1695 train_time:39337ms step_avg:94.33ms
+step:418/1695 train_time:39434ms step_avg:94.34ms
+step:419/1695 train_time:39529ms step_avg:94.34ms
+step:420/1695 train_time:39625ms step_avg:94.34ms
+step:421/1695 train_time:39721ms step_avg:94.35ms
+step:422/1695 train_time:39817ms step_avg:94.35ms
+step:423/1695 train_time:39915ms step_avg:94.36ms
+step:424/1695 train_time:40011ms step_avg:94.36ms
+step:425/1695 train_time:40106ms step_avg:94.37ms
+step:426/1695 train_time:40202ms step_avg:94.37ms
+step:427/1695 train_time:40299ms step_avg:94.38ms
+step:428/1695 train_time:40394ms step_avg:94.38ms
+step:429/1695 train_time:40490ms step_avg:94.38ms
+step:430/1695 train_time:40586ms step_avg:94.39ms
+step:431/1695 train_time:40682ms step_avg:94.39ms
+step:432/1695 train_time:40778ms step_avg:94.39ms
+step:433/1695 train_time:40874ms step_avg:94.40ms
+step:434/1695 train_time:40970ms step_avg:94.40ms
+step:435/1695 train_time:41065ms step_avg:94.40ms
+step:436/1695 train_time:41161ms step_avg:94.41ms
+step:437/1695 train_time:41257ms step_avg:94.41ms
+step:438/1695 train_time:41354ms step_avg:94.41ms
+step:439/1695 train_time:41449ms step_avg:94.42ms
+step:440/1695 train_time:41545ms step_avg:94.42ms
+step:441/1695 train_time:41641ms step_avg:94.42ms
+step:442/1695 train_time:41737ms step_avg:94.43ms
+step:443/1695 train_time:41834ms step_avg:94.43ms
+step:444/1695 train_time:41929ms step_avg:94.44ms
+step:445/1695 train_time:42025ms step_avg:94.44ms
+step:446/1695 train_time:42121ms step_avg:94.44ms
+step:447/1695 train_time:42218ms step_avg:94.45ms
+step:448/1695 train_time:42314ms step_avg:94.45ms
+step:449/1695 train_time:42410ms step_avg:94.45ms
+step:450/1695 train_time:42505ms step_avg:94.46ms
+step:451/1695 train_time:42602ms step_avg:94.46ms
+step:452/1695 train_time:42698ms step_avg:94.47ms
+step:453/1695 train_time:42794ms step_avg:94.47ms
+step:454/1695 train_time:42890ms step_avg:94.47ms
+step:455/1695 train_time:42986ms step_avg:94.47ms
+step:456/1695 train_time:43082ms step_avg:94.48ms
+step:457/1695 train_time:43179ms step_avg:94.48ms
+step:458/1695 train_time:43275ms step_avg:94.49ms
+step:459/1695 train_time:43370ms step_avg:94.49ms
+step:460/1695 train_time:43466ms step_avg:94.49ms
+step:461/1695 train_time:43562ms step_avg:94.50ms
+step:462/1695 train_time:43658ms step_avg:94.50ms
+step:463/1695 train_time:43755ms step_avg:94.50ms
+step:464/1695 train_time:43852ms step_avg:94.51ms
+step:465/1695 train_time:43947ms step_avg:94.51ms
+step:466/1695 train_time:44044ms step_avg:94.51ms
+step:467/1695 train_time:44140ms step_avg:94.52ms
+step:468/1695 train_time:44236ms step_avg:94.52ms
+step:469/1695 train_time:44333ms step_avg:94.53ms
+step:470/1695 train_time:44430ms step_avg:94.53ms
+step:471/1695 train_time:44526ms step_avg:94.53ms
+step:472/1695 train_time:44622ms step_avg:94.54ms
+step:473/1695 train_time:44718ms step_avg:94.54ms
+step:474/1695 train_time:44814ms step_avg:94.54ms
+step:475/1695 train_time:44911ms step_avg:94.55ms
+step:476/1695 train_time:45006ms step_avg:94.55ms
+step:477/1695 train_time:45103ms step_avg:94.56ms
+step:478/1695 train_time:45199ms step_avg:94.56ms
+step:479/1695 train_time:45296ms step_avg:94.56ms
+step:480/1695 train_time:45393ms step_avg:94.57ms
+step:481/1695 train_time:45489ms step_avg:94.57ms
+step:482/1695 train_time:45585ms step_avg:94.58ms
+step:483/1695 train_time:45682ms step_avg:94.58ms
+step:484/1695 train_time:45777ms step_avg:94.58ms
+step:485/1695 train_time:45874ms step_avg:94.59ms
+step:486/1695 train_time:45970ms step_avg:94.59ms
+step:487/1695 train_time:46066ms step_avg:94.59ms
+step:488/1695 train_time:46162ms step_avg:94.59ms
+step:489/1695 train_time:46259ms step_avg:94.60ms
+step:490/1695 train_time:46356ms step_avg:94.60ms
+step:491/1695 train_time:46452ms step_avg:94.61ms
+step:492/1695 train_time:46547ms step_avg:94.61ms
+step:493/1695 train_time:46643ms step_avg:94.61ms
+step:494/1695 train_time:46739ms step_avg:94.61ms
+step:495/1695 train_time:46835ms step_avg:94.62ms
+step:496/1695 train_time:46931ms step_avg:94.62ms
+step:497/1695 train_time:47027ms step_avg:94.62ms
+step:498/1695 train_time:47123ms step_avg:94.63ms
+step:499/1695 train_time:47219ms step_avg:94.63ms
+step:500/1695 train_time:47316ms step_avg:94.63ms
+step:500/1695 val_loss:3.7236 train_time:47410ms step_avg:94.82ms
+step:501/1695 train_time:47439ms step_avg:94.69ms
+step:502/1695 train_time:47522ms step_avg:94.66ms
+step:503/1695 train_time:47620ms step_avg:94.67ms
+step:504/1695 train_time:47717ms step_avg:94.68ms
+step:505/1695 train_time:47813ms step_avg:94.68ms
+step:506/1695 train_time:47909ms step_avg:94.68ms
+step:507/1695 train_time:48005ms step_avg:94.68ms
+step:508/1695 train_time:48100ms step_avg:94.69ms
+step:509/1695 train_time:48197ms step_avg:94.69ms
+step:510/1695 train_time:48293ms step_avg:94.69ms
+step:511/1695 train_time:48389ms step_avg:94.69ms
+step:512/1695 train_time:48485ms step_avg:94.70ms
+step:513/1695 train_time:48583ms step_avg:94.70ms
+step:514/1695 train_time:48681ms step_avg:94.71ms
+step:515/1695 train_time:48778ms step_avg:94.72ms
+step:516/1695 train_time:48875ms step_avg:94.72ms
+step:517/1695 train_time:48971ms step_avg:94.72ms
+step:518/1695 train_time:49067ms step_avg:94.72ms
+step:519/1695 train_time:49163ms step_avg:94.73ms
+step:520/1695 train_time:49259ms step_avg:94.73ms
+step:521/1695 train_time:49356ms step_avg:94.73ms
+step:522/1695 train_time:49453ms step_avg:94.74ms
+step:523/1695 train_time:49550ms step_avg:94.74ms
+step:524/1695 train_time:49647ms step_avg:94.75ms
+step:525/1695 train_time:49743ms step_avg:94.75ms
+step:526/1695 train_time:49841ms step_avg:94.75ms
+step:527/1695 train_time:49937ms step_avg:94.76ms
+step:528/1695 train_time:50034ms step_avg:94.76ms
+step:529/1695 train_time:50129ms step_avg:94.76ms
+step:530/1695 train_time:50225ms step_avg:94.76ms
+step:531/1695 train_time:50321ms step_avg:94.77ms
+step:532/1695 train_time:50418ms step_avg:94.77ms
+step:533/1695 train_time:50514ms step_avg:94.77ms
+step:534/1695 train_time:50611ms step_avg:94.78ms
+step:535/1695 train_time:50708ms step_avg:94.78ms
+step:536/1695 train_time:50805ms step_avg:94.79ms
+step:537/1695 train_time:50901ms step_avg:94.79ms
+step:538/1695 train_time:50998ms step_avg:94.79ms
+step:539/1695 train_time:51095ms step_avg:94.80ms
+step:540/1695 train_time:51191ms step_avg:94.80ms
+step:541/1695 train_time:51287ms step_avg:94.80ms
+step:542/1695 train_time:51383ms step_avg:94.80ms
+step:543/1695 train_time:51480ms step_avg:94.81ms
+step:544/1695 train_time:51576ms step_avg:94.81ms
+step:545/1695 train_time:51673ms step_avg:94.81ms
+step:546/1695 train_time:51770ms step_avg:94.82ms
+step:547/1695 train_time:51866ms step_avg:94.82ms
+step:548/1695 train_time:51962ms step_avg:94.82ms
+step:549/1695 train_time:52059ms step_avg:94.83ms
+step:550/1695 train_time:52156ms step_avg:94.83ms
+step:551/1695 train_time:52253ms step_avg:94.83ms
+step:552/1695 train_time:52349ms step_avg:94.83ms
+step:553/1695 train_time:52444ms step_avg:94.84ms
+step:554/1695 train_time:52542ms step_avg:94.84ms
+step:555/1695 train_time:52639ms step_avg:94.85ms
+step:556/1695 train_time:52736ms step_avg:94.85ms
+step:557/1695 train_time:52833ms step_avg:94.85ms
+step:558/1695 train_time:52930ms step_avg:94.86ms
+step:559/1695 train_time:53026ms step_avg:94.86ms
+step:560/1695 train_time:53123ms step_avg:94.86ms
+step:561/1695 train_time:53220ms step_avg:94.87ms
+step:562/1695 train_time:53317ms step_avg:94.87ms
+step:563/1695 train_time:53413ms step_avg:94.87ms
+step:564/1695 train_time:53510ms step_avg:94.88ms
+step:565/1695 train_time:53606ms step_avg:94.88ms
+step:566/1695 train_time:53704ms step_avg:94.88ms
+step:567/1695 train_time:53801ms step_avg:94.89ms
+step:568/1695 train_time:53898ms step_avg:94.89ms
+step:569/1695 train_time:53993ms step_avg:94.89ms
+step:570/1695 train_time:54089ms step_avg:94.89ms
+step:571/1695 train_time:54185ms step_avg:94.89ms
+step:572/1695 train_time:54281ms step_avg:94.90ms
+step:573/1695 train_time:54378ms step_avg:94.90ms
+step:574/1695 train_time:54474ms step_avg:94.90ms
+step:575/1695 train_time:54571ms step_avg:94.91ms
+step:576/1695 train_time:54668ms step_avg:94.91ms
+step:577/1695 train_time:54764ms step_avg:94.91ms
+step:578/1695 train_time:54860ms step_avg:94.91ms
+step:579/1695 train_time:54957ms step_avg:94.92ms
+step:580/1695 train_time:55053ms step_avg:94.92ms
+step:581/1695 train_time:55149ms step_avg:94.92ms
+step:582/1695 train_time:55245ms step_avg:94.92ms
+step:583/1695 train_time:55341ms step_avg:94.93ms
+step:584/1695 train_time:55439ms step_avg:94.93ms
+step:585/1695 train_time:55537ms step_avg:94.93ms
+step:586/1695 train_time:55636ms step_avg:94.94ms
+step:587/1695 train_time:55733ms step_avg:94.95ms
+step:588/1695 train_time:55830ms step_avg:94.95ms
+step:589/1695 train_time:55925ms step_avg:94.95ms
+step:590/1695 train_time:56022ms step_avg:94.95ms
+step:591/1695 train_time:56119ms step_avg:94.96ms
+step:592/1695 train_time:56215ms step_avg:94.96ms
+step:593/1695 train_time:56312ms step_avg:94.96ms
+step:594/1695 train_time:56409ms step_avg:94.96ms
+step:595/1695 train_time:56505ms step_avg:94.97ms
+step:596/1695 train_time:56602ms step_avg:94.97ms
+step:597/1695 train_time:56699ms step_avg:94.97ms
+step:598/1695 train_time:56796ms step_avg:94.98ms
+step:599/1695 train_time:56892ms step_avg:94.98ms
+step:600/1695 train_time:56988ms step_avg:94.98ms
+step:601/1695 train_time:57084ms step_avg:94.98ms
+step:602/1695 train_time:57180ms step_avg:94.98ms
+step:603/1695 train_time:57276ms step_avg:94.99ms
+step:604/1695 train_time:57374ms step_avg:94.99ms
+step:605/1695 train_time:57471ms step_avg:94.99ms
+step:606/1695 train_time:57567ms step_avg:95.00ms
+step:607/1695 train_time:57663ms step_avg:95.00ms
+step:608/1695 train_time:57760ms step_avg:95.00ms
+step:609/1695 train_time:57857ms step_avg:95.00ms
+step:610/1695 train_time:57953ms step_avg:95.01ms
+step:611/1695 train_time:58049ms step_avg:95.01ms
+step:612/1695 train_time:58144ms step_avg:95.01ms
+step:613/1695 train_time:58240ms step_avg:95.01ms
+step:614/1695 train_time:58338ms step_avg:95.01ms
+step:615/1695 train_time:58436ms step_avg:95.02ms
+step:616/1695 train_time:58533ms step_avg:95.02ms
+step:617/1695 train_time:58629ms step_avg:95.02ms
+step:618/1695 train_time:58725ms step_avg:95.02ms
+step:619/1695 train_time:58821ms step_avg:95.03ms
+step:620/1695 train_time:58918ms step_avg:95.03ms
+step:621/1695 train_time:59015ms step_avg:95.03ms
+step:622/1695 train_time:59112ms step_avg:95.03ms
+step:623/1695 train_time:59207ms step_avg:95.04ms
+step:624/1695 train_time:59303ms step_avg:95.04ms
+step:625/1695 train_time:59401ms step_avg:95.04ms
+step:625/1695 val_loss:3.6385 train_time:59496ms step_avg:95.19ms
+step:626/1695 train_time:59524ms step_avg:95.09ms
+step:627/1695 train_time:59604ms step_avg:95.06ms
+step:628/1695 train_time:59709ms step_avg:95.08ms
+step:629/1695 train_time:60139ms step_avg:95.61ms
+step:630/1695 train_time:60235ms step_avg:95.61ms
+step:631/1695 train_time:60332ms step_avg:95.61ms
+step:632/1695 train_time:60429ms step_avg:95.62ms
+step:633/1695 train_time:60525ms step_avg:95.62ms
+step:634/1695 train_time:60623ms step_avg:95.62ms
+step:635/1695 train_time:60720ms step_avg:95.62ms
+step:636/1695 train_time:60817ms step_avg:95.62ms
+step:637/1695 train_time:60914ms step_avg:95.63ms
+step:638/1695 train_time:61015ms step_avg:95.63ms
+step:639/1695 train_time:61115ms step_avg:95.64ms
+step:640/1695 train_time:61215ms step_avg:95.65ms
+step:641/1695 train_time:61313ms step_avg:95.65ms
+step:642/1695 train_time:61771ms step_avg:96.22ms
+step:643/1695 train_time:61820ms step_avg:96.14ms
+step:644/1695 train_time:61916ms step_avg:96.14ms
+step:645/1695 train_time:62013ms step_avg:96.14ms
+step:646/1695 train_time:62110ms step_avg:96.15ms
+step:647/1695 train_time:62207ms step_avg:96.15ms
+step:648/1695 train_time:62305ms step_avg:96.15ms
+step:649/1695 train_time:62402ms step_avg:96.15ms
+step:650/1695 train_time:62499ms step_avg:96.15ms
+step:651/1695 train_time:62596ms step_avg:96.15ms
+step:652/1695 train_time:62696ms step_avg:96.16ms
+step:653/1695 train_time:62796ms step_avg:96.17ms
+step:654/1695 train_time:62896ms step_avg:96.17ms
+step:655/1695 train_time:62994ms step_avg:96.17ms
+step:656/1695 train_time:63092ms step_avg:96.18ms
+step:657/1695 train_time:63189ms step_avg:96.18ms
+step:658/1695 train_time:63286ms step_avg:96.18ms
+step:659/1695 train_time:63383ms step_avg:96.18ms
+step:660/1695 train_time:63480ms step_avg:96.18ms
+step:661/1695 train_time:63577ms step_avg:96.18ms
+step:662/1695 train_time:63676ms step_avg:96.19ms
+step:663/1695 train_time:63775ms step_avg:96.19ms
+step:664/1695 train_time:63873ms step_avg:96.19ms
+step:665/1695 train_time:63971ms step_avg:96.20ms
+step:666/1695 train_time:64070ms step_avg:96.20ms
+step:667/1695 train_time:64168ms step_avg:96.20ms
+step:668/1695 train_time:64265ms step_avg:96.21ms
+step:669/1695 train_time:64363ms step_avg:96.21ms
+step:670/1695 train_time:64460ms step_avg:96.21ms
+step:671/1695 train_time:64558ms step_avg:96.21ms
+step:672/1695 train_time:64655ms step_avg:96.21ms
+step:673/1695 train_time:64753ms step_avg:96.22ms
+step:674/1695 train_time:64851ms step_avg:96.22ms
+step:675/1695 train_time:64949ms step_avg:96.22ms
+step:676/1695 train_time:65046ms step_avg:96.22ms
+step:677/1695 train_time:65144ms step_avg:96.22ms
+step:678/1695 train_time:65241ms step_avg:96.23ms
+step:679/1695 train_time:65339ms step_avg:96.23ms
+step:680/1695 train_time:65438ms step_avg:96.23ms
+step:681/1695 train_time:65535ms step_avg:96.23ms
+step:682/1695 train_time:65633ms step_avg:96.24ms
+step:683/1695 train_time:65731ms step_avg:96.24ms
+step:684/1695 train_time:65829ms step_avg:96.24ms
+step:685/1695 train_time:65926ms step_avg:96.24ms
+step:686/1695 train_time:66024ms step_avg:96.24ms
+step:687/1695 train_time:66122ms step_avg:96.25ms
+step:688/1695 train_time:66220ms step_avg:96.25ms
+step:689/1695 train_time:66319ms step_avg:96.25ms
+step:690/1695 train_time:66417ms step_avg:96.26ms
+step:691/1695 train_time:66515ms step_avg:96.26ms
+step:692/1695 train_time:66613ms step_avg:96.26ms
+step:693/1695 train_time:66712ms step_avg:96.27ms
+step:694/1695 train_time:66811ms step_avg:96.27ms
+step:695/1695 train_time:66910ms step_avg:96.27ms
+step:696/1695 train_time:67008ms step_avg:96.28ms
+step:697/1695 train_time:67105ms step_avg:96.28ms
+step:698/1695 train_time:67202ms step_avg:96.28ms
+step:699/1695 train_time:67299ms step_avg:96.28ms
+step:700/1695 train_time:67397ms step_avg:96.28ms
+step:701/1695 train_time:67496ms step_avg:96.28ms
+step:702/1695 train_time:67593ms step_avg:96.29ms
+step:703/1695 train_time:67692ms step_avg:96.29ms
+step:704/1695 train_time:67789ms step_avg:96.29ms
+step:705/1695 train_time:67887ms step_avg:96.29ms
+step:706/1695 train_time:67985ms step_avg:96.30ms
+step:707/1695 train_time:68082ms step_avg:96.30ms
+step:708/1695 train_time:68179ms step_avg:96.30ms
+step:709/1695 train_time:68277ms step_avg:96.30ms
+step:710/1695 train_time:68375ms step_avg:96.30ms
+step:711/1695 train_time:68474ms step_avg:96.31ms
+step:712/1695 train_time:68572ms step_avg:96.31ms
+step:713/1695 train_time:68669ms step_avg:96.31ms
+step:714/1695 train_time:68767ms step_avg:96.31ms
+step:715/1695 train_time:69087ms step_avg:96.63ms
+step:716/1695 train_time:69182ms step_avg:96.62ms
+step:717/1695 train_time:69280ms step_avg:96.62ms
+step:718/1695 train_time:69376ms step_avg:96.62ms
+step:719/1695 train_time:69767ms step_avg:97.03ms
+step:720/1695 train_time:69863ms step_avg:97.03ms
+step:721/1695 train_time:69960ms step_avg:97.03ms
+step:722/1695 train_time:70058ms step_avg:97.03ms
+step:723/1695 train_time:70154ms step_avg:97.03ms
+step:724/1695 train_time:70251ms step_avg:97.03ms
+step:725/1695 train_time:70348ms step_avg:97.03ms
+step:726/1695 train_time:70444ms step_avg:97.03ms
+step:727/1695 train_time:70542ms step_avg:97.03ms
+step:728/1695 train_time:70640ms step_avg:97.03ms
+step:729/1695 train_time:70742ms step_avg:97.04ms
+step:730/1695 train_time:70844ms step_avg:97.05ms
+step:731/1695 train_time:70943ms step_avg:97.05ms
+step:732/1695 train_time:71043ms step_avg:97.05ms
+step:733/1695 train_time:71142ms step_avg:97.06ms
+step:734/1695 train_time:71240ms step_avg:97.06ms
+step:735/1695 train_time:71336ms step_avg:97.06ms
+step:736/1695 train_time:71434ms step_avg:97.06ms
+step:737/1695 train_time:71531ms step_avg:97.06ms
+step:738/1695 train_time:71628ms step_avg:97.06ms
+step:739/1695 train_time:71726ms step_avg:97.06ms
+step:740/1695 train_time:71824ms step_avg:97.06ms
+step:741/1695 train_time:71923ms step_avg:97.06ms
+step:742/1695 train_time:72021ms step_avg:97.06ms
+step:743/1695 train_time:72119ms step_avg:97.07ms
+step:744/1695 train_time:72217ms step_avg:97.07ms
+step:745/1695 train_time:72314ms step_avg:97.07ms
+step:746/1695 train_time:72412ms step_avg:97.07ms
+step:747/1695 train_time:72509ms step_avg:97.07ms
+step:748/1695 train_time:72606ms step_avg:97.07ms
+step:749/1695 train_time:72704ms step_avg:97.07ms
+step:750/1695 train_time:72802ms step_avg:97.07ms
+step:750/1695 val_loss:3.5787 train_time:72898ms step_avg:97.20ms
+step:751/1695 train_time:72926ms step_avg:97.11ms
+step:752/1695 train_time:73011ms step_avg:97.09ms
+step:753/1695 train_time:73110ms step_avg:97.09ms
+step:754/1695 train_time:73208ms step_avg:97.09ms
+step:755/1695 train_time:73306ms step_avg:97.09ms
+step:756/1695 train_time:73405ms step_avg:97.10ms
+step:757/1695 train_time:73503ms step_avg:97.10ms
+step:758/1695 train_time:73600ms step_avg:97.10ms
+step:759/1695 train_time:73697ms step_avg:97.10ms
+step:760/1695 train_time:73795ms step_avg:97.10ms
+step:761/1695 train_time:73893ms step_avg:97.10ms
+step:762/1695 train_time:73993ms step_avg:97.10ms
+step:763/1695 train_time:74092ms step_avg:97.11ms
+step:764/1695 train_time:74190ms step_avg:97.11ms
+step:765/1695 train_time:74288ms step_avg:97.11ms
+step:766/1695 train_time:74387ms step_avg:97.11ms
+step:767/1695 train_time:74486ms step_avg:97.11ms
+step:768/1695 train_time:74584ms step_avg:97.12ms
+step:769/1695 train_time:74683ms step_avg:97.12ms
+step:770/1695 train_time:74781ms step_avg:97.12ms
+step:771/1695 train_time:74880ms step_avg:97.12ms
+step:772/1695 train_time:74978ms step_avg:97.12ms
+step:773/1695 train_time:75078ms step_avg:97.13ms
+step:774/1695 train_time:75177ms step_avg:97.13ms
+step:775/1695 train_time:75276ms step_avg:97.13ms
+step:776/1695 train_time:75374ms step_avg:97.13ms
+step:777/1695 train_time:75473ms step_avg:97.13ms
+step:778/1695 train_time:75570ms step_avg:97.13ms
+step:779/1695 train_time:75667ms step_avg:97.13ms
+step:780/1695 train_time:75766ms step_avg:97.14ms
+step:781/1695 train_time:75864ms step_avg:97.14ms
+step:782/1695 train_time:75964ms step_avg:97.14ms
+step:783/1695 train_time:76062ms step_avg:97.14ms
+step:784/1695 train_time:76160ms step_avg:97.14ms
+step:785/1695 train_time:76259ms step_avg:97.14ms
+step:786/1695 train_time:76358ms step_avg:97.15ms
+step:787/1695 train_time:76457ms step_avg:97.15ms
+step:788/1695 train_time:76555ms step_avg:97.15ms
+step:789/1695 train_time:76653ms step_avg:97.15ms
+step:790/1695 train_time:76751ms step_avg:97.15ms
+step:791/1695 train_time:76848ms step_avg:97.15ms
+step:792/1695 train_time:76946ms step_avg:97.15ms
+step:793/1695 train_time:77045ms step_avg:97.16ms
+step:794/1695 train_time:77144ms step_avg:97.16ms
+step:795/1695 train_time:77244ms step_avg:97.16ms
+step:796/1695 train_time:77585ms step_avg:97.47ms
+step:797/1695 train_time:77682ms step_avg:97.47ms
+step:798/1695 train_time:77779ms step_avg:97.47ms
+step:799/1695 train_time:77877ms step_avg:97.47ms
+step:800/1695 train_time:77974ms step_avg:97.47ms
+step:801/1695 train_time:78071ms step_avg:97.47ms
+step:802/1695 train_time:78168ms step_avg:97.47ms
+step:803/1695 train_time:78265ms step_avg:97.47ms
+step:804/1695 train_time:78603ms step_avg:97.76ms
+step:805/1695 train_time:78699ms step_avg:97.76ms
+step:806/1695 train_time:78796ms step_avg:97.76ms
+step:807/1695 train_time:78894ms step_avg:97.76ms
+step:808/1695 train_time:78991ms step_avg:97.76ms
+step:809/1695 train_time:79089ms step_avg:97.76ms
+step:810/1695 train_time:79187ms step_avg:97.76ms
+step:811/1695 train_time:79285ms step_avg:97.76ms
+step:812/1695 train_time:79382ms step_avg:97.76ms
+step:813/1695 train_time:79483ms step_avg:97.77ms
+step:814/1695 train_time:79584ms step_avg:97.77ms
+step:815/1695 train_time:79683ms step_avg:97.77ms
+step:816/1695 train_time:79782ms step_avg:97.77ms
+step:817/1695 train_time:79881ms step_avg:97.77ms
+step:818/1695 train_time:79979ms step_avg:97.77ms
+step:819/1695 train_time:80077ms step_avg:97.77ms
+step:820/1695 train_time:80176ms step_avg:97.78ms
+step:821/1695 train_time:80274ms step_avg:97.78ms
+step:822/1695 train_time:80371ms step_avg:97.78ms
+step:823/1695 train_time:80469ms step_avg:97.77ms
+step:824/1695 train_time:80566ms step_avg:97.77ms
+step:825/1695 train_time:80664ms step_avg:97.78ms
+step:826/1695 train_time:80762ms step_avg:97.78ms
+step:827/1695 train_time:80861ms step_avg:97.78ms
+step:828/1695 train_time:80959ms step_avg:97.78ms
+step:829/1695 train_time:81057ms step_avg:97.78ms
+step:830/1695 train_time:81155ms step_avg:97.78ms
+step:831/1695 train_time:81253ms step_avg:97.78ms
+step:832/1695 train_time:81352ms step_avg:97.78ms
+step:833/1695 train_time:81450ms step_avg:97.78ms
+step:834/1695 train_time:81547ms step_avg:97.78ms
+step:835/1695 train_time:81645ms step_avg:97.78ms
+step:836/1695 train_time:81745ms step_avg:97.78ms
+step:837/1695 train_time:81843ms step_avg:97.78ms
+step:838/1695 train_time:81941ms step_avg:97.78ms
+step:839/1695 train_time:82039ms step_avg:97.78ms
+step:840/1695 train_time:82138ms step_avg:97.78ms
+step:841/1695 train_time:82237ms step_avg:97.79ms
+step:842/1695 train_time:82336ms step_avg:97.79ms
+step:843/1695 train_time:82435ms step_avg:97.79ms
+step:844/1695 train_time:82534ms step_avg:97.79ms
+step:845/1695 train_time:82631ms step_avg:97.79ms
+step:846/1695 train_time:82729ms step_avg:97.79ms
+step:847/1695 train_time:82827ms step_avg:97.79ms
+step:848/1695 train_time:82925ms step_avg:97.79ms
+step:849/1695 train_time:83023ms step_avg:97.79ms
+step:850/1695 train_time:83120ms step_avg:97.79ms
+step:851/1695 train_time:83218ms step_avg:97.79ms
+step:852/1695 train_time:83316ms step_avg:97.79ms
+step:853/1695 train_time:83414ms step_avg:97.79ms
+step:854/1695 train_time:83513ms step_avg:97.79ms
+step:855/1695 train_time:83610ms step_avg:97.79ms
+step:856/1695 train_time:83708ms step_avg:97.79ms
+step:857/1695 train_time:83806ms step_avg:97.79ms
+step:858/1695 train_time:83904ms step_avg:97.79ms
+step:859/1695 train_time:84002ms step_avg:97.79ms
+step:860/1695 train_time:84101ms step_avg:97.79ms
+step:861/1695 train_time:84199ms step_avg:97.79ms
+step:862/1695 train_time:84298ms step_avg:97.79ms
+step:863/1695 train_time:84396ms step_avg:97.79ms
+step:864/1695 train_time:84495ms step_avg:97.80ms
+step:865/1695 train_time:84594ms step_avg:97.80ms
+step:866/1695 train_time:84693ms step_avg:97.80ms
+step:867/1695 train_time:84791ms step_avg:97.80ms
+step:868/1695 train_time:84889ms step_avg:97.80ms
+step:869/1695 train_time:84986ms step_avg:97.80ms
+step:870/1695 train_time:85084ms step_avg:97.80ms
+step:871/1695 train_time:85182ms step_avg:97.80ms
+step:872/1695 train_time:85282ms step_avg:97.80ms
+step:873/1695 train_time:85382ms step_avg:97.80ms
+step:874/1695 train_time:85481ms step_avg:97.80ms
+step:875/1695 train_time:85581ms step_avg:97.81ms
+step:875/1695 val_loss:3.5322 train_time:85677ms step_avg:97.92ms
+step:876/1695 train_time:85705ms step_avg:97.84ms
+step:877/1695 train_time:85791ms step_avg:97.82ms
+step:878/1695 train_time:85890ms step_avg:97.82ms
+step:879/1695 train_time:85989ms step_avg:97.83ms
+step:880/1695 train_time:86086ms step_avg:97.82ms
+step:881/1695 train_time:86185ms step_avg:97.83ms
+step:882/1695 train_time:86285ms step_avg:97.83ms
+step:883/1695 train_time:86384ms step_avg:97.83ms
+step:884/1695 train_time:86483ms step_avg:97.83ms
+step:885/1695 train_time:86581ms step_avg:97.83ms
+step:886/1695 train_time:86682ms step_avg:97.84ms
+step:887/1695 train_time:86785ms step_avg:97.84ms
+step:888/1695 train_time:86887ms step_avg:97.85ms
+step:889/1695 train_time:86988ms step_avg:97.85ms
+step:890/1695 train_time:87087ms step_avg:97.85ms
+step:891/1695 train_time:87186ms step_avg:97.85ms
+step:892/1695 train_time:87285ms step_avg:97.85ms
+step:893/1695 train_time:87384ms step_avg:97.85ms
+step:894/1695 train_time:87483ms step_avg:97.86ms
+step:895/1695 train_time:87583ms step_avg:97.86ms
+step:896/1695 train_time:87682ms step_avg:97.86ms
+step:897/1695 train_time:87782ms step_avg:97.86ms
+step:898/1695 train_time:87883ms step_avg:97.87ms
+step:899/1695 train_time:87984ms step_avg:97.87ms
+step:900/1695 train_time:88085ms step_avg:97.87ms
+step:901/1695 train_time:88185ms step_avg:97.87ms
+step:902/1695 train_time:88284ms step_avg:97.88ms
+step:903/1695 train_time:88384ms step_avg:97.88ms
+step:904/1695 train_time:88483ms step_avg:97.88ms
+step:905/1695 train_time:88582ms step_avg:97.88ms
+step:906/1695 train_time:88682ms step_avg:97.88ms
+step:907/1695 train_time:88782ms step_avg:97.89ms
+step:908/1695 train_time:88883ms step_avg:97.89ms
+step:909/1695 train_time:88983ms step_avg:97.89ms
+step:910/1695 train_time:89083ms step_avg:97.89ms
+step:911/1695 train_time:89183ms step_avg:97.90ms
+step:912/1695 train_time:89283ms step_avg:97.90ms
+step:913/1695 train_time:89382ms step_avg:97.90ms
+step:914/1695 train_time:89481ms step_avg:97.90ms
+step:915/1695 train_time:89580ms step_avg:97.90ms
+step:916/1695 train_time:89680ms step_avg:97.90ms
+step:917/1695 train_time:89778ms step_avg:97.90ms
+step:918/1695 train_time:89878ms step_avg:97.91ms
+step:919/1695 train_time:89977ms step_avg:97.91ms
+step:920/1695 train_time:90077ms step_avg:97.91ms
+step:921/1695 train_time:90176ms step_avg:97.91ms
+step:922/1695 train_time:90275ms step_avg:97.91ms
+step:923/1695 train_time:90374ms step_avg:97.91ms
+step:924/1695 train_time:90473ms step_avg:97.91ms
+step:925/1695 train_time:90574ms step_avg:97.92ms
+step:926/1695 train_time:90674ms step_avg:97.92ms
+step:927/1695 train_time:90773ms step_avg:97.92ms
+step:928/1695 train_time:90871ms step_avg:97.92ms
+step:929/1695 train_time:90971ms step_avg:97.92ms
+step:930/1695 train_time:91071ms step_avg:97.93ms
+step:931/1695 train_time:91171ms step_avg:97.93ms
+step:932/1695 train_time:91271ms step_avg:97.93ms
+step:933/1695 train_time:91370ms step_avg:97.93ms
+step:934/1695 train_time:91469ms step_avg:97.93ms
+step:935/1695 train_time:91568ms step_avg:97.93ms
+step:936/1695 train_time:91667ms step_avg:97.93ms
+step:937/1695 train_time:91767ms step_avg:97.94ms
+step:938/1695 train_time:91866ms step_avg:97.94ms
+step:939/1695 train_time:91967ms step_avg:97.94ms
+step:940/1695 train_time:92067ms step_avg:97.94ms
+step:941/1695 train_time:92168ms step_avg:97.95ms
+step:942/1695 train_time:92267ms step_avg:97.95ms
+step:943/1695 train_time:92367ms step_avg:97.95ms
+step:944/1695 train_time:92466ms step_avg:97.95ms
+step:945/1695 train_time:92567ms step_avg:97.95ms
+step:946/1695 train_time:92666ms step_avg:97.96ms
+step:947/1695 train_time:92766ms step_avg:97.96ms
+step:948/1695 train_time:92865ms step_avg:97.96ms
+step:949/1695 train_time:92965ms step_avg:97.96ms
+step:950/1695 train_time:93065ms step_avg:97.96ms
+step:951/1695 train_time:93166ms step_avg:97.97ms
+step:952/1695 train_time:93266ms step_avg:97.97ms
+step:953/1695 train_time:93366ms step_avg:97.97ms
+step:954/1695 train_time:93466ms step_avg:97.97ms
+step:955/1695 train_time:93567ms step_avg:97.98ms
+step:956/1695 train_time:93666ms step_avg:97.98ms
+step:957/1695 train_time:93766ms step_avg:97.98ms
+step:958/1695 train_time:93865ms step_avg:97.98ms
+step:959/1695 train_time:93965ms step_avg:97.98ms
+step:960/1695 train_time:94065ms step_avg:97.98ms
+step:961/1695 train_time:94165ms step_avg:97.99ms
+step:962/1695 train_time:94265ms step_avg:97.99ms
+step:963/1695 train_time:94365ms step_avg:97.99ms
+step:964/1695 train_time:94466ms step_avg:97.99ms
+step:965/1695 train_time:94566ms step_avg:98.00ms
+step:966/1695 train_time:94669ms step_avg:98.00ms
+step:967/1695 train_time:94769ms step_avg:98.00ms
+step:968/1695 train_time:94867ms step_avg:98.00ms
+step:969/1695 train_time:94967ms step_avg:98.01ms
+step:970/1695 train_time:95067ms step_avg:98.01ms
+step:971/1695 train_time:95167ms step_avg:98.01ms
+step:972/1695 train_time:95266ms step_avg:98.01ms
+step:973/1695 train_time:95366ms step_avg:98.01ms
+step:974/1695 train_time:95465ms step_avg:98.01ms
+step:975/1695 train_time:95565ms step_avg:98.02ms
+step:976/1695 train_time:95665ms step_avg:98.02ms
+step:977/1695 train_time:95765ms step_avg:98.02ms
+step:978/1695 train_time:95864ms step_avg:98.02ms
+step:979/1695 train_time:95965ms step_avg:98.02ms
+step:980/1695 train_time:96065ms step_avg:98.03ms
+step:981/1695 train_time:96164ms step_avg:98.03ms
+step:982/1695 train_time:96265ms step_avg:98.03ms
+step:983/1695 train_time:96366ms step_avg:98.03ms
+step:984/1695 train_time:96465ms step_avg:98.03ms
+step:985/1695 train_time:96565ms step_avg:98.04ms
+step:986/1695 train_time:96666ms step_avg:98.04ms
+step:987/1695 train_time:96766ms step_avg:98.04ms
+step:988/1695 train_time:96866ms step_avg:98.04ms
+step:989/1695 train_time:96966ms step_avg:98.04ms
+step:990/1695 train_time:97065ms step_avg:98.05ms
+step:991/1695 train_time:97166ms step_avg:98.05ms
+step:992/1695 train_time:97265ms step_avg:98.05ms
+step:993/1695 train_time:97365ms step_avg:98.05ms
+step:994/1695 train_time:97465ms step_avg:98.05ms
+step:995/1695 train_time:97565ms step_avg:98.06ms
+step:996/1695 train_time:97664ms step_avg:98.06ms
+step:997/1695 train_time:97764ms step_avg:98.06ms
+step:998/1695 train_time:97864ms step_avg:98.06ms
+step:999/1695 train_time:97965ms step_avg:98.06ms
+step:1000/1695 train_time:98064ms step_avg:98.06ms
+step:1000/1695 val_loss:3.4877 train_time:98160ms step_avg:98.16ms
+step:1001/1695 train_time:98189ms step_avg:98.09ms
+step:1002/1695 train_time:98270ms step_avg:98.07ms
+step:1003/1695 train_time:98372ms step_avg:98.08ms
+step:1004/1695 train_time:98471ms step_avg:98.08ms
+step:1005/1695 train_time:98570ms step_avg:98.08ms
+step:1006/1695 train_time:98668ms step_avg:98.08ms
+step:1007/1695 train_time:98767ms step_avg:98.08ms
+step:1008/1695 train_time:98865ms step_avg:98.08ms
+step:1009/1695 train_time:98965ms step_avg:98.08ms
+step:1010/1695 train_time:99063ms step_avg:98.08ms
+step:1011/1695 train_time:99164ms step_avg:98.08ms
+step:1012/1695 train_time:99265ms step_avg:98.09ms
+step:1013/1695 train_time:99366ms step_avg:98.09ms
+step:1014/1695 train_time:99466ms step_avg:98.09ms
+step:1015/1695 train_time:99567ms step_avg:98.10ms
+step:1016/1695 train_time:99666ms step_avg:98.10ms
+step:1017/1695 train_time:99766ms step_avg:98.10ms
+step:1018/1695 train_time:99865ms step_avg:98.10ms
+step:1019/1695 train_time:99963ms step_avg:98.10ms
+step:1020/1695 train_time:100063ms step_avg:98.10ms
+step:1021/1695 train_time:100164ms step_avg:98.10ms
+step:1022/1695 train_time:100264ms step_avg:98.11ms
+step:1023/1695 train_time:100365ms step_avg:98.11ms
+step:1024/1695 train_time:100467ms step_avg:98.11ms
+step:1025/1695 train_time:100567ms step_avg:98.11ms
+step:1026/1695 train_time:100667ms step_avg:98.12ms
+step:1027/1695 train_time:100766ms step_avg:98.12ms
+step:1028/1695 train_time:100865ms step_avg:98.12ms
+step:1029/1695 train_time:100965ms step_avg:98.12ms
+step:1030/1695 train_time:101064ms step_avg:98.12ms
+step:1031/1695 train_time:101165ms step_avg:98.12ms
+step:1032/1695 train_time:101266ms step_avg:98.13ms
+step:1033/1695 train_time:101366ms step_avg:98.13ms
+step:1034/1695 train_time:101466ms step_avg:98.13ms
+step:1035/1695 train_time:101566ms step_avg:98.13ms
+step:1036/1695 train_time:101665ms step_avg:98.13ms
+step:1037/1695 train_time:101766ms step_avg:98.13ms
+step:1038/1695 train_time:101865ms step_avg:98.14ms
+step:1039/1695 train_time:101964ms step_avg:98.14ms
+step:1040/1695 train_time:102063ms step_avg:98.14ms
+step:1041/1695 train_time:102163ms step_avg:98.14ms
+step:1042/1695 train_time:102264ms step_avg:98.14ms
+step:1043/1695 train_time:102365ms step_avg:98.14ms
+step:1044/1695 train_time:102465ms step_avg:98.15ms
+step:1045/1695 train_time:102565ms step_avg:98.15ms
+step:1046/1695 train_time:102665ms step_avg:98.15ms
+step:1047/1695 train_time:102765ms step_avg:98.15ms
+step:1048/1695 train_time:102865ms step_avg:98.15ms
+step:1049/1695 train_time:102964ms step_avg:98.15ms
+step:1050/1695 train_time:103064ms step_avg:98.16ms
+step:1051/1695 train_time:103164ms step_avg:98.16ms
+step:1052/1695 train_time:103264ms step_avg:98.16ms
+step:1053/1695 train_time:103366ms step_avg:98.16ms
+step:1054/1695 train_time:103466ms step_avg:98.17ms
+step:1055/1695 train_time:103566ms step_avg:98.17ms
+step:1056/1695 train_time:103666ms step_avg:98.17ms
+step:1057/1695 train_time:103765ms step_avg:98.17ms
+step:1058/1695 train_time:103864ms step_avg:98.17ms
+step:1059/1695 train_time:103963ms step_avg:98.17ms
+step:1060/1695 train_time:104063ms step_avg:98.17ms
+step:1061/1695 train_time:104162ms step_avg:98.17ms
+step:1062/1695 train_time:104262ms step_avg:98.18ms
+step:1063/1695 train_time:104362ms step_avg:98.18ms
+step:1064/1695 train_time:104462ms step_avg:98.18ms
+step:1065/1695 train_time:104563ms step_avg:98.18ms
+step:1066/1695 train_time:104663ms step_avg:98.18ms
+step:1067/1695 train_time:104763ms step_avg:98.18ms
+step:1068/1695 train_time:104863ms step_avg:98.19ms
+step:1069/1695 train_time:104962ms step_avg:98.19ms
+step:1070/1695 train_time:105062ms step_avg:98.19ms
+step:1071/1695 train_time:105161ms step_avg:98.19ms
+step:1072/1695 train_time:105261ms step_avg:98.19ms
+step:1073/1695 train_time:105360ms step_avg:98.19ms
+step:1074/1695 train_time:105459ms step_avg:98.19ms
+step:1075/1695 train_time:105559ms step_avg:98.19ms
+step:1076/1695 train_time:105659ms step_avg:98.20ms
+step:1077/1695 train_time:105760ms step_avg:98.20ms
+step:1078/1695 train_time:105860ms step_avg:98.20ms
+step:1079/1695 train_time:105961ms step_avg:98.20ms
+step:1080/1695 train_time:106060ms step_avg:98.20ms
+step:1081/1695 train_time:106159ms step_avg:98.20ms
+step:1082/1695 train_time:106260ms step_avg:98.21ms
+step:1083/1695 train_time:106359ms step_avg:98.21ms
+step:1084/1695 train_time:106460ms step_avg:98.21ms
+step:1085/1695 train_time:106559ms step_avg:98.21ms
+step:1086/1695 train_time:106660ms step_avg:98.21ms
+step:1087/1695 train_time:106760ms step_avg:98.22ms
+step:1088/1695 train_time:106861ms step_avg:98.22ms
+step:1089/1695 train_time:106961ms step_avg:98.22ms
+step:1090/1695 train_time:107062ms step_avg:98.22ms
+step:1091/1695 train_time:107161ms step_avg:98.22ms
+step:1092/1695 train_time:107261ms step_avg:98.22ms
+step:1093/1695 train_time:107360ms step_avg:98.23ms
+step:1094/1695 train_time:107460ms step_avg:98.23ms
+step:1095/1695 train_time:107560ms step_avg:98.23ms
+step:1096/1695 train_time:107661ms step_avg:98.23ms
+step:1097/1695 train_time:107761ms step_avg:98.23ms
+step:1098/1695 train_time:107860ms step_avg:98.23ms
+step:1099/1695 train_time:107959ms step_avg:98.23ms
+step:1100/1695 train_time:108059ms step_avg:98.24ms
+step:1101/1695 train_time:108159ms step_avg:98.24ms
+step:1102/1695 train_time:108259ms step_avg:98.24ms
+step:1103/1695 train_time:108359ms step_avg:98.24ms
+step:1104/1695 train_time:108460ms step_avg:98.24ms
+step:1105/1695 train_time:108559ms step_avg:98.24ms
+step:1106/1695 train_time:108660ms step_avg:98.25ms
+step:1107/1695 train_time:108760ms step_avg:98.25ms
+step:1108/1695 train_time:108860ms step_avg:98.25ms
+step:1109/1695 train_time:108959ms step_avg:98.25ms
+step:1110/1695 train_time:109060ms step_avg:98.25ms
+step:1111/1695 train_time:109160ms step_avg:98.25ms
+step:1112/1695 train_time:109260ms step_avg:98.26ms
+step:1113/1695 train_time:109361ms step_avg:98.26ms
+step:1114/1695 train_time:109461ms step_avg:98.26ms
+step:1115/1695 train_time:109561ms step_avg:98.26ms
+step:1116/1695 train_time:109661ms step_avg:98.26ms
+step:1117/1695 train_time:109763ms step_avg:98.27ms
+step:1118/1695 train_time:109863ms step_avg:98.27ms
+step:1119/1695 train_time:109964ms step_avg:98.27ms
+step:1120/1695 train_time:110064ms step_avg:98.27ms
+step:1121/1695 train_time:110164ms step_avg:98.27ms
+step:1122/1695 train_time:110264ms step_avg:98.27ms
+step:1123/1695 train_time:110365ms step_avg:98.28ms
+step:1124/1695 train_time:110465ms step_avg:98.28ms
+step:1125/1695 train_time:110565ms step_avg:98.28ms
+step:1125/1695 val_loss:3.4364 train_time:110663ms step_avg:98.37ms
+step:1126/1695 train_time:110690ms step_avg:98.30ms
+step:1127/1695 train_time:110776ms step_avg:98.29ms
+step:1128/1695 train_time:110878ms step_avg:98.30ms
+step:1129/1695 train_time:110978ms step_avg:98.30ms
+step:1130/1695 train_time:111078ms step_avg:98.30ms
+step:1131/1695 train_time:111177ms step_avg:98.30ms
+step:1132/1695 train_time:111277ms step_avg:98.30ms
+step:1133/1695 train_time:111376ms step_avg:98.30ms
+step:1134/1695 train_time:111477ms step_avg:98.30ms
+step:1135/1695 train_time:111576ms step_avg:98.30ms
+step:1136/1695 train_time:111680ms step_avg:98.31ms
+step:1137/1695 train_time:111782ms step_avg:98.31ms
+step:1138/1695 train_time:111883ms step_avg:98.32ms
+step:1139/1695 train_time:111982ms step_avg:98.32ms
+step:1140/1695 train_time:112083ms step_avg:98.32ms
+step:1141/1695 train_time:112182ms step_avg:98.32ms
+step:1142/1695 train_time:112283ms step_avg:98.32ms
+step:1143/1695 train_time:112382ms step_avg:98.32ms
+step:1144/1695 train_time:112483ms step_avg:98.32ms
+step:1145/1695 train_time:112586ms step_avg:98.33ms
+step:1146/1695 train_time:112687ms step_avg:98.33ms
+step:1147/1695 train_time:112789ms step_avg:98.33ms
+step:1148/1695 train_time:112889ms step_avg:98.34ms
+step:1149/1695 train_time:112991ms step_avg:98.34ms
+step:1150/1695 train_time:113093ms step_avg:98.34ms
+step:1151/1695 train_time:113193ms step_avg:98.34ms
+step:1152/1695 train_time:113295ms step_avg:98.35ms
+step:1153/1695 train_time:113395ms step_avg:98.35ms
+step:1154/1695 train_time:113496ms step_avg:98.35ms
+step:1155/1695 train_time:113597ms step_avg:98.35ms
+step:1156/1695 train_time:113698ms step_avg:98.35ms
+step:1157/1695 train_time:113800ms step_avg:98.36ms
+step:1158/1695 train_time:113900ms step_avg:98.36ms
+step:1159/1695 train_time:114001ms step_avg:98.36ms
+step:1160/1695 train_time:114102ms step_avg:98.36ms
+step:1161/1695 train_time:114202ms step_avg:98.36ms
+step:1162/1695 train_time:114301ms step_avg:98.37ms
+step:1163/1695 train_time:114404ms step_avg:98.37ms
+step:1164/1695 train_time:114506ms step_avg:98.37ms
+step:1165/1695 train_time:114609ms step_avg:98.38ms
+step:1166/1695 train_time:114710ms step_avg:98.38ms
+step:1167/1695 train_time:114813ms step_avg:98.38ms
+step:1168/1695 train_time:114914ms step_avg:98.39ms
+step:1169/1695 train_time:115015ms step_avg:98.39ms
+step:1170/1695 train_time:115117ms step_avg:98.39ms
+step:1171/1695 train_time:115218ms step_avg:98.39ms
+step:1172/1695 train_time:115319ms step_avg:98.40ms
+step:1173/1695 train_time:115420ms step_avg:98.40ms
+step:1174/1695 train_time:115522ms step_avg:98.40ms
+step:1175/1695 train_time:115622ms step_avg:98.40ms
+step:1176/1695 train_time:115722ms step_avg:98.40ms
+step:1177/1695 train_time:115823ms step_avg:98.41ms
+step:1178/1695 train_time:115923ms step_avg:98.41ms
+step:1179/1695 train_time:116026ms step_avg:98.41ms
+step:1180/1695 train_time:116126ms step_avg:98.41ms
+step:1181/1695 train_time:116227ms step_avg:98.41ms
+step:1182/1695 train_time:116329ms step_avg:98.42ms
+step:1183/1695 train_time:116431ms step_avg:98.42ms
+step:1184/1695 train_time:116533ms step_avg:98.42ms
+step:1185/1695 train_time:116635ms step_avg:98.43ms
+step:1186/1695 train_time:116736ms step_avg:98.43ms
+step:1187/1695 train_time:116837ms step_avg:98.43ms
+step:1188/1695 train_time:116938ms step_avg:98.43ms
+step:1189/1695 train_time:117038ms step_avg:98.43ms
+step:1190/1695 train_time:117139ms step_avg:98.44ms
+step:1191/1695 train_time:117241ms step_avg:98.44ms
+step:1192/1695 train_time:117340ms step_avg:98.44ms
+step:1193/1695 train_time:117440ms step_avg:98.44ms
+step:1194/1695 train_time:117541ms step_avg:98.44ms
+step:1195/1695 train_time:117641ms step_avg:98.44ms
+step:1196/1695 train_time:117741ms step_avg:98.45ms
+step:1197/1695 train_time:117842ms step_avg:98.45ms
+step:1198/1695 train_time:117941ms step_avg:98.45ms
+step:1199/1695 train_time:118041ms step_avg:98.45ms
+step:1200/1695 train_time:118142ms step_avg:98.45ms
+step:1201/1695 train_time:118242ms step_avg:98.45ms
+step:1202/1695 train_time:118343ms step_avg:98.46ms
+step:1203/1695 train_time:118443ms step_avg:98.46ms
+step:1204/1695 train_time:118544ms step_avg:98.46ms
+step:1205/1695 train_time:118645ms step_avg:98.46ms
+step:1206/1695 train_time:118746ms step_avg:98.46ms
+step:1207/1695 train_time:118846ms step_avg:98.46ms
+step:1208/1695 train_time:118946ms step_avg:98.47ms
+step:1209/1695 train_time:119047ms step_avg:98.47ms
+step:1210/1695 train_time:119148ms step_avg:98.47ms
+step:1211/1695 train_time:119250ms step_avg:98.47ms
+step:1212/1695 train_time:119352ms step_avg:98.48ms
+step:1213/1695 train_time:119453ms step_avg:98.48ms
+step:1214/1695 train_time:119554ms step_avg:98.48ms
+step:1215/1695 train_time:119656ms step_avg:98.48ms
+step:1216/1695 train_time:119758ms step_avg:98.49ms
+step:1217/1695 train_time:119859ms step_avg:98.49ms
+step:1218/1695 train_time:119961ms step_avg:98.49ms
+step:1219/1695 train_time:120062ms step_avg:98.49ms
+step:1220/1695 train_time:120162ms step_avg:98.49ms
+step:1221/1695 train_time:120261ms step_avg:98.49ms
+step:1222/1695 train_time:120362ms step_avg:98.50ms
+step:1223/1695 train_time:120462ms step_avg:98.50ms
+step:1224/1695 train_time:120562ms step_avg:98.50ms
+step:1225/1695 train_time:120664ms step_avg:98.50ms
+step:1226/1695 train_time:120764ms step_avg:98.50ms
+step:1227/1695 train_time:120866ms step_avg:98.50ms
+step:1228/1695 train_time:120967ms step_avg:98.51ms
+step:1229/1695 train_time:121068ms step_avg:98.51ms
+step:1230/1695 train_time:121169ms step_avg:98.51ms
+step:1231/1695 train_time:121270ms step_avg:98.51ms
+step:1232/1695 train_time:121371ms step_avg:98.52ms
+step:1233/1695 train_time:121472ms step_avg:98.52ms
+step:1234/1695 train_time:121575ms step_avg:98.52ms
+step:1235/1695 train_time:121675ms step_avg:98.52ms
+step:1236/1695 train_time:121779ms step_avg:98.53ms
+step:1237/1695 train_time:121880ms step_avg:98.53ms
+step:1238/1695 train_time:121980ms step_avg:98.53ms
+step:1239/1695 train_time:122081ms step_avg:98.53ms
+step:1240/1695 train_time:122181ms step_avg:98.53ms
+step:1241/1695 train_time:122283ms step_avg:98.54ms
+step:1242/1695 train_time:122383ms step_avg:98.54ms
+step:1243/1695 train_time:122484ms step_avg:98.54ms
+step:1244/1695 train_time:122584ms step_avg:98.54ms
+step:1245/1695 train_time:122686ms step_avg:98.54ms
+step:1246/1695 train_time:122787ms step_avg:98.54ms
+step:1247/1695 train_time:122888ms step_avg:98.55ms
+step:1248/1695 train_time:122990ms step_avg:98.55ms
+step:1249/1695 train_time:123091ms step_avg:98.55ms
+step:1250/1695 train_time:123192ms step_avg:98.55ms
+step:1250/1695 val_loss:3.3910 train_time:123290ms step_avg:98.63ms
+step:1251/1695 train_time:123319ms step_avg:98.58ms
+step:1252/1695 train_time:123407ms step_avg:98.57ms
+step:1253/1695 train_time:123508ms step_avg:98.57ms
+step:1254/1695 train_time:123608ms step_avg:98.57ms
+step:1255/1695 train_time:123708ms step_avg:98.57ms
+step:1256/1695 train_time:123808ms step_avg:98.57ms
+step:1257/1695 train_time:123907ms step_avg:98.57ms
+step:1258/1695 train_time:124007ms step_avg:98.58ms
+step:1259/1695 train_time:124107ms step_avg:98.58ms
+step:1260/1695 train_time:124206ms step_avg:98.58ms
+step:1261/1695 train_time:124308ms step_avg:98.58ms
+step:1262/1695 train_time:124411ms step_avg:98.58ms
+step:1263/1695 train_time:124513ms step_avg:98.59ms
+step:1264/1695 train_time:124614ms step_avg:98.59ms
+step:1265/1695 train_time:124715ms step_avg:98.59ms
+step:1266/1695 train_time:124816ms step_avg:98.59ms
+step:1267/1695 train_time:124917ms step_avg:98.59ms
+step:1268/1695 train_time:125019ms step_avg:98.60ms
+step:1269/1695 train_time:125120ms step_avg:98.60ms
+step:1270/1695 train_time:125221ms step_avg:98.60ms
+step:1271/1695 train_time:125324ms step_avg:98.60ms
+step:1272/1695 train_time:125424ms step_avg:98.60ms
+step:1273/1695 train_time:125525ms step_avg:98.61ms
+step:1274/1695 train_time:125625ms step_avg:98.61ms
+step:1275/1695 train_time:125726ms step_avg:98.61ms
+step:1276/1695 train_time:125827ms step_avg:98.61ms
+step:1277/1695 train_time:125927ms step_avg:98.61ms
+step:1278/1695 train_time:126027ms step_avg:98.61ms
+step:1279/1695 train_time:126127ms step_avg:98.61ms
+step:1280/1695 train_time:126228ms step_avg:98.62ms
+step:1281/1695 train_time:126330ms step_avg:98.62ms
+step:1282/1695 train_time:126432ms step_avg:98.62ms
+step:1283/1695 train_time:126535ms step_avg:98.62ms
+step:1284/1695 train_time:126637ms step_avg:98.63ms
+step:1285/1695 train_time:126737ms step_avg:98.63ms
+step:1286/1695 train_time:126838ms step_avg:98.63ms
+step:1287/1695 train_time:126939ms step_avg:98.63ms
+step:1288/1695 train_time:127040ms step_avg:98.63ms
+step:1289/1695 train_time:127141ms step_avg:98.64ms
+step:1290/1695 train_time:127243ms step_avg:98.64ms
+step:1291/1695 train_time:127344ms step_avg:98.64ms
+step:1292/1695 train_time:127446ms step_avg:98.64ms
+step:1293/1695 train_time:127546ms step_avg:98.64ms
+step:1294/1695 train_time:127648ms step_avg:98.65ms
+step:1295/1695 train_time:127749ms step_avg:98.65ms
+step:1296/1695 train_time:127848ms step_avg:98.65ms
+step:1297/1695 train_time:127948ms step_avg:98.65ms
+step:1298/1695 train_time:128049ms step_avg:98.65ms
+step:1299/1695 train_time:128151ms step_avg:98.65ms
+step:1300/1695 train_time:128253ms step_avg:98.66ms
+step:1301/1695 train_time:128355ms step_avg:98.66ms
+step:1302/1695 train_time:128457ms step_avg:98.66ms
+step:1303/1695 train_time:128559ms step_avg:98.66ms
+step:1304/1695 train_time:128660ms step_avg:98.67ms
+step:1305/1695 train_time:128761ms step_avg:98.67ms
+step:1306/1695 train_time:128862ms step_avg:98.67ms
+step:1307/1695 train_time:128963ms step_avg:98.67ms
+step:1308/1695 train_time:129064ms step_avg:98.67ms
+step:1309/1695 train_time:129164ms step_avg:98.67ms
+step:1310/1695 train_time:129265ms step_avg:98.68ms
+step:1311/1695 train_time:129366ms step_avg:98.68ms
+step:1312/1695 train_time:129466ms step_avg:98.68ms
+step:1313/1695 train_time:129567ms step_avg:98.68ms
+step:1314/1695 train_time:129667ms step_avg:98.68ms
+step:1315/1695 train_time:129768ms step_avg:98.68ms
+step:1316/1695 train_time:129869ms step_avg:98.68ms
+step:1317/1695 train_time:129969ms step_avg:98.69ms
+step:1318/1695 train_time:130069ms step_avg:98.69ms
+step:1319/1695 train_time:130170ms step_avg:98.69ms
+step:1320/1695 train_time:130271ms step_avg:98.69ms
+step:1321/1695 train_time:130373ms step_avg:98.69ms
+step:1322/1695 train_time:130475ms step_avg:98.70ms
+step:1323/1695 train_time:130576ms step_avg:98.70ms
+step:1324/1695 train_time:130677ms step_avg:98.70ms
+step:1325/1695 train_time:130778ms step_avg:98.70ms
+step:1326/1695 train_time:130880ms step_avg:98.70ms
+step:1327/1695 train_time:130981ms step_avg:98.70ms
+step:1328/1695 train_time:131082ms step_avg:98.71ms
+step:1329/1695 train_time:131183ms step_avg:98.71ms
+step:1330/1695 train_time:131284ms step_avg:98.71ms
+step:1331/1695 train_time:131385ms step_avg:98.71ms
+step:1332/1695 train_time:131486ms step_avg:98.71ms
+step:1333/1695 train_time:131588ms step_avg:98.72ms
+step:1334/1695 train_time:131688ms step_avg:98.72ms
+step:1335/1695 train_time:131787ms step_avg:98.72ms
+step:1336/1695 train_time:131889ms step_avg:98.72ms
+step:1337/1695 train_time:131989ms step_avg:98.72ms
+step:1338/1695 train_time:132090ms step_avg:98.72ms
+step:1339/1695 train_time:132192ms step_avg:98.72ms
+step:1340/1695 train_time:132293ms step_avg:98.73ms
+step:1341/1695 train_time:132394ms step_avg:98.73ms
+step:1342/1695 train_time:132496ms step_avg:98.73ms
+step:1343/1695 train_time:132597ms step_avg:98.73ms
+step:1344/1695 train_time:132697ms step_avg:98.73ms
+step:1345/1695 train_time:132799ms step_avg:98.74ms
+step:1346/1695 train_time:132901ms step_avg:98.74ms
+step:1347/1695 train_time:133001ms step_avg:98.74ms
+step:1348/1695 train_time:133103ms step_avg:98.74ms
+step:1349/1695 train_time:133204ms step_avg:98.74ms
+step:1350/1695 train_time:133305ms step_avg:98.74ms
+step:1351/1695 train_time:133405ms step_avg:98.75ms
+step:1352/1695 train_time:133506ms step_avg:98.75ms
+step:1353/1695 train_time:133606ms step_avg:98.75ms
+step:1354/1695 train_time:133705ms step_avg:98.75ms
+step:1355/1695 train_time:133805ms step_avg:98.75ms
+step:1356/1695 train_time:133905ms step_avg:98.75ms
+step:1357/1695 train_time:134006ms step_avg:98.75ms
+step:1358/1695 train_time:134106ms step_avg:98.75ms
+step:1359/1695 train_time:134206ms step_avg:98.75ms
+step:1360/1695 train_time:134307ms step_avg:98.76ms
+step:1361/1695 train_time:134409ms step_avg:98.76ms
+step:1362/1695 train_time:134509ms step_avg:98.76ms
+step:1363/1695 train_time:134610ms step_avg:98.76ms
+step:1364/1695 train_time:134712ms step_avg:98.76ms
+step:1365/1695 train_time:134813ms step_avg:98.76ms
+step:1366/1695 train_time:134915ms step_avg:98.77ms
+step:1367/1695 train_time:135015ms step_avg:98.77ms
+step:1368/1695 train_time:135116ms step_avg:98.77ms
+step:1369/1695 train_time:135217ms step_avg:98.77ms
+step:1370/1695 train_time:135317ms step_avg:98.77ms
+step:1371/1695 train_time:135418ms step_avg:98.77ms
+step:1372/1695 train_time:135520ms step_avg:98.78ms
+step:1373/1695 train_time:135622ms step_avg:98.78ms
+step:1374/1695 train_time:135723ms step_avg:98.78ms
+step:1375/1695 train_time:135825ms step_avg:98.78ms
+step:1375/1695 val_loss:3.3517 train_time:135923ms step_avg:98.85ms
step_avg:98.85ms +step:1376/1695 train_time:135951ms step_avg:98.80ms +step:1377/1695 train_time:136040ms step_avg:98.79ms +step:1378/1695 train_time:136141ms step_avg:98.80ms +step:1379/1695 train_time:136242ms step_avg:98.80ms +step:1380/1695 train_time:136344ms step_avg:98.80ms +step:1381/1695 train_time:136445ms step_avg:98.80ms +step:1382/1695 train_time:136544ms step_avg:98.80ms +step:1383/1695 train_time:136644ms step_avg:98.80ms +step:1384/1695 train_time:136744ms step_avg:98.80ms +step:1385/1695 train_time:136845ms step_avg:98.81ms +step:1386/1695 train_time:136949ms step_avg:98.81ms +step:1387/1695 train_time:137052ms step_avg:98.81ms +step:1388/1695 train_time:137155ms step_avg:98.81ms +step:1389/1695 train_time:137257ms step_avg:98.82ms +step:1390/1695 train_time:137358ms step_avg:98.82ms +step:1391/1695 train_time:137459ms step_avg:98.82ms +step:1392/1695 train_time:137561ms step_avg:98.82ms +step:1393/1695 train_time:137663ms step_avg:98.82ms +step:1394/1695 train_time:137765ms step_avg:98.83ms +step:1395/1695 train_time:137867ms step_avg:98.83ms +step:1396/1695 train_time:137969ms step_avg:98.83ms +step:1397/1695 train_time:138071ms step_avg:98.83ms +step:1398/1695 train_time:138172ms step_avg:98.84ms +step:1399/1695 train_time:138273ms step_avg:98.84ms +step:1400/1695 train_time:138375ms step_avg:98.84ms +step:1401/1695 train_time:138475ms step_avg:98.84ms +step:1402/1695 train_time:138578ms step_avg:98.84ms +step:1403/1695 train_time:138682ms step_avg:98.85ms +step:1404/1695 train_time:138784ms step_avg:98.85ms +step:1405/1695 train_time:138886ms step_avg:98.85ms +step:1406/1695 train_time:138988ms step_avg:98.85ms +step:1407/1695 train_time:139090ms step_avg:98.86ms +step:1408/1695 train_time:139191ms step_avg:98.86ms +step:1409/1695 train_time:139296ms step_avg:98.86ms +step:1410/1695 train_time:139397ms step_avg:98.86ms +step:1411/1695 train_time:139498ms step_avg:98.86ms +step:1412/1695 train_time:139602ms step_avg:98.87ms +step:1413/1695 train_time:139702ms step_avg:98.87ms +step:1414/1695 train_time:139805ms step_avg:98.87ms +step:1415/1695 train_time:139908ms step_avg:98.88ms +step:1416/1695 train_time:140010ms step_avg:98.88ms +step:1417/1695 train_time:140110ms step_avg:98.88ms +step:1418/1695 train_time:140211ms step_avg:98.88ms +step:1419/1695 train_time:140314ms step_avg:98.88ms +step:1420/1695 train_time:140414ms step_avg:98.88ms +step:1421/1695 train_time:140516ms step_avg:98.89ms +step:1422/1695 train_time:140617ms step_avg:98.89ms +step:1423/1695 train_time:140718ms step_avg:98.89ms +step:1424/1695 train_time:140820ms step_avg:98.89ms +step:1425/1695 train_time:140924ms step_avg:98.89ms +step:1426/1695 train_time:141027ms step_avg:98.90ms +step:1427/1695 train_time:141129ms step_avg:98.90ms +step:1428/1695 train_time:141231ms step_avg:98.90ms +step:1429/1695 train_time:141332ms step_avg:98.90ms +step:1430/1695 train_time:141433ms step_avg:98.90ms +step:1431/1695 train_time:141534ms step_avg:98.91ms +step:1432/1695 train_time:141635ms step_avg:98.91ms +step:1433/1695 train_time:141736ms step_avg:98.91ms +step:1434/1695 train_time:141838ms step_avg:98.91ms +step:1435/1695 train_time:141941ms step_avg:98.91ms +step:1436/1695 train_time:142044ms step_avg:98.92ms +step:1437/1695 train_time:142146ms step_avg:98.92ms +step:1438/1695 train_time:142247ms step_avg:98.92ms +step:1439/1695 train_time:142350ms step_avg:98.92ms +step:1440/1695 train_time:142453ms step_avg:98.93ms +step:1441/1695 train_time:142555ms step_avg:98.93ms +step:1442/1695 train_time:142655ms 
step_avg:98.93ms +step:1443/1695 train_time:142755ms step_avg:98.93ms +step:1444/1695 train_time:142856ms step_avg:98.93ms +step:1445/1695 train_time:142958ms step_avg:98.93ms +step:1446/1695 train_time:143061ms step_avg:98.94ms +step:1447/1695 train_time:143164ms step_avg:98.94ms +step:1448/1695 train_time:143268ms step_avg:98.94ms +step:1449/1695 train_time:143368ms step_avg:98.94ms +step:1450/1695 train_time:143469ms step_avg:98.94ms +step:1451/1695 train_time:143571ms step_avg:98.95ms +step:1452/1695 train_time:143672ms step_avg:98.95ms +step:1453/1695 train_time:143773ms step_avg:98.95ms +step:1454/1695 train_time:143877ms step_avg:98.95ms +step:1455/1695 train_time:143979ms step_avg:98.95ms +step:1456/1695 train_time:144082ms step_avg:98.96ms +step:1457/1695 train_time:144184ms step_avg:98.96ms +step:1458/1695 train_time:144287ms step_avg:98.96ms +step:1459/1695 train_time:144389ms step_avg:98.96ms +step:1460/1695 train_time:144490ms step_avg:98.97ms +step:1461/1695 train_time:144593ms step_avg:98.97ms +step:1462/1695 train_time:144694ms step_avg:98.97ms +step:1463/1695 train_time:144794ms step_avg:98.97ms +step:1464/1695 train_time:144896ms step_avg:98.97ms +step:1465/1695 train_time:144996ms step_avg:98.97ms +step:1466/1695 train_time:145100ms step_avg:98.98ms +step:1467/1695 train_time:145203ms step_avg:98.98ms +step:1468/1695 train_time:145305ms step_avg:98.98ms +step:1469/1695 train_time:145408ms step_avg:98.98ms +step:1470/1695 train_time:145509ms step_avg:98.99ms +step:1471/1695 train_time:145610ms step_avg:98.99ms +step:1472/1695 train_time:145711ms step_avg:98.99ms +step:1473/1695 train_time:145812ms step_avg:98.99ms +step:1474/1695 train_time:145913ms step_avg:98.99ms +step:1475/1695 train_time:146014ms step_avg:98.99ms +step:1476/1695 train_time:146116ms step_avg:98.99ms +step:1477/1695 train_time:146221ms step_avg:99.00ms +step:1478/1695 train_time:146323ms step_avg:99.00ms +step:1479/1695 train_time:146424ms step_avg:99.00ms +step:1480/1695 train_time:146527ms step_avg:99.00ms +step:1481/1695 train_time:146629ms step_avg:99.01ms +step:1482/1695 train_time:146731ms step_avg:99.01ms +step:1483/1695 train_time:146832ms step_avg:99.01ms +step:1484/1695 train_time:146934ms step_avg:99.01ms +step:1485/1695 train_time:147036ms step_avg:99.01ms +step:1486/1695 train_time:147138ms step_avg:99.02ms +step:1487/1695 train_time:147239ms step_avg:99.02ms +step:1488/1695 train_time:147342ms step_avg:99.02ms +step:1489/1695 train_time:147445ms step_avg:99.02ms +step:1490/1695 train_time:147548ms step_avg:99.03ms +step:1491/1695 train_time:147650ms step_avg:99.03ms +step:1492/1695 train_time:147751ms step_avg:99.03ms +step:1493/1695 train_time:147852ms step_avg:99.03ms +step:1494/1695 train_time:147954ms step_avg:99.03ms +step:1495/1695 train_time:148055ms step_avg:99.03ms +step:1496/1695 train_time:148156ms step_avg:99.03ms +step:1497/1695 train_time:148257ms step_avg:99.04ms +step:1498/1695 train_time:148360ms step_avg:99.04ms +step:1499/1695 train_time:148462ms step_avg:99.04ms +step:1500/1695 train_time:148564ms step_avg:99.04ms +step:1500/1695 val_loss:3.3169 train_time:148663ms step_avg:99.11ms +step:1501/1695 train_time:148691ms step_avg:99.06ms +step:1502/1695 train_time:148780ms step_avg:99.05ms +step:1503/1695 train_time:148882ms step_avg:99.06ms +step:1504/1695 train_time:148983ms step_avg:99.06ms +step:1505/1695 train_time:149083ms step_avg:99.06ms +step:1506/1695 train_time:149184ms step_avg:99.06ms +step:1507/1695 train_time:149285ms step_avg:99.06ms +step:1508/1695 
train_time:149385ms step_avg:99.06ms +step:1509/1695 train_time:149488ms step_avg:99.06ms +step:1510/1695 train_time:149589ms step_avg:99.07ms +step:1511/1695 train_time:149694ms step_avg:99.07ms +step:1512/1695 train_time:149796ms step_avg:99.07ms +step:1513/1695 train_time:149899ms step_avg:99.07ms +step:1514/1695 train_time:150001ms step_avg:99.08ms +step:1515/1695 train_time:150106ms step_avg:99.08ms +step:1516/1695 train_time:150207ms step_avg:99.08ms +step:1517/1695 train_time:150307ms step_avg:99.08ms +step:1518/1695 train_time:150408ms step_avg:99.08ms +step:1519/1695 train_time:150512ms step_avg:99.09ms +step:1520/1695 train_time:150614ms step_avg:99.09ms +step:1521/1695 train_time:150716ms step_avg:99.09ms +step:1522/1695 train_time:150818ms step_avg:99.09ms +step:1523/1695 train_time:150920ms step_avg:99.09ms +step:1524/1695 train_time:151025ms step_avg:99.10ms +step:1525/1695 train_time:151129ms step_avg:99.10ms +step:1526/1695 train_time:151231ms step_avg:99.10ms +step:1527/1695 train_time:151332ms step_avg:99.10ms +step:1528/1695 train_time:151438ms step_avg:99.11ms +step:1529/1695 train_time:151539ms step_avg:99.11ms +step:1530/1695 train_time:151642ms step_avg:99.11ms +step:1531/1695 train_time:151742ms step_avg:99.11ms +step:1532/1695 train_time:151845ms step_avg:99.12ms +step:1533/1695 train_time:151945ms step_avg:99.12ms +step:1534/1695 train_time:152046ms step_avg:99.12ms +step:1535/1695 train_time:152147ms step_avg:99.12ms +step:1536/1695 train_time:152248ms step_avg:99.12ms +step:1537/1695 train_time:152350ms step_avg:99.12ms +step:1538/1695 train_time:152452ms step_avg:99.12ms +step:1539/1695 train_time:152555ms step_avg:99.13ms +step:1540/1695 train_time:152659ms step_avg:99.13ms +step:1541/1695 train_time:152762ms step_avg:99.13ms +step:1542/1695 train_time:152868ms step_avg:99.14ms +step:1543/1695 train_time:152969ms step_avg:99.14ms +step:1544/1695 train_time:153071ms step_avg:99.14ms +step:1545/1695 train_time:153173ms step_avg:99.14ms +step:1546/1695 train_time:153274ms step_avg:99.14ms +step:1547/1695 train_time:153377ms step_avg:99.14ms +step:1548/1695 train_time:153479ms step_avg:99.15ms +step:1549/1695 train_time:153581ms step_avg:99.15ms +step:1550/1695 train_time:153683ms step_avg:99.15ms +step:1551/1695 train_time:153786ms step_avg:99.15ms +step:1552/1695 train_time:153887ms step_avg:99.15ms +step:1553/1695 train_time:153990ms step_avg:99.16ms +step:1554/1695 train_time:154091ms step_avg:99.16ms +step:1555/1695 train_time:154193ms step_avg:99.16ms +step:1556/1695 train_time:154294ms step_avg:99.16ms +step:1557/1695 train_time:154398ms step_avg:99.16ms +step:1558/1695 train_time:154501ms step_avg:99.17ms +step:1559/1695 train_time:154603ms step_avg:99.17ms +step:1560/1695 train_time:154704ms step_avg:99.17ms +step:1561/1695 train_time:154806ms step_avg:99.17ms +step:1562/1695 train_time:154909ms step_avg:99.17ms +step:1563/1695 train_time:155013ms step_avg:99.18ms +step:1564/1695 train_time:155114ms step_avg:99.18ms +step:1565/1695 train_time:155215ms step_avg:99.18ms +step:1566/1695 train_time:155317ms step_avg:99.18ms +step:1567/1695 train_time:155418ms step_avg:99.18ms +step:1568/1695 train_time:155519ms step_avg:99.18ms +step:1569/1695 train_time:155621ms step_avg:99.19ms +step:1570/1695 train_time:155726ms step_avg:99.19ms +step:1571/1695 train_time:155827ms step_avg:99.19ms +step:1572/1695 train_time:155929ms step_avg:99.19ms +step:1573/1695 train_time:156031ms step_avg:99.19ms +step:1574/1695 train_time:156132ms step_avg:99.19ms +step:1575/1695 
train_time:156233ms step_avg:99.20ms +step:1576/1695 train_time:156336ms step_avg:99.20ms +step:1577/1695 train_time:156439ms step_avg:99.20ms +step:1578/1695 train_time:156541ms step_avg:99.20ms +step:1579/1695 train_time:156643ms step_avg:99.20ms +step:1580/1695 train_time:156746ms step_avg:99.21ms +step:1581/1695 train_time:156849ms step_avg:99.21ms +step:1582/1695 train_time:156950ms step_avg:99.21ms +step:1583/1695 train_time:157054ms step_avg:99.21ms +step:1584/1695 train_time:157156ms step_avg:99.21ms +step:1585/1695 train_time:157257ms step_avg:99.22ms +step:1586/1695 train_time:157360ms step_avg:99.22ms +step:1587/1695 train_time:157462ms step_avg:99.22ms +step:1588/1695 train_time:157563ms step_avg:99.22ms +step:1589/1695 train_time:157664ms step_avg:99.22ms +step:1590/1695 train_time:157766ms step_avg:99.22ms +step:1591/1695 train_time:157867ms step_avg:99.23ms +step:1592/1695 train_time:157969ms step_avg:99.23ms +step:1593/1695 train_time:158070ms step_avg:99.23ms +step:1594/1695 train_time:158174ms step_avg:99.23ms +step:1595/1695 train_time:158277ms step_avg:99.23ms +step:1596/1695 train_time:158378ms step_avg:99.23ms +step:1597/1695 train_time:158481ms step_avg:99.24ms +step:1598/1695 train_time:158584ms step_avg:99.24ms +step:1599/1695 train_time:158691ms step_avg:99.24ms +step:1600/1695 train_time:158787ms step_avg:99.24ms +step:1601/1695 train_time:158890ms step_avg:99.24ms +step:1602/1695 train_time:158991ms step_avg:99.25ms +step:1603/1695 train_time:159093ms step_avg:99.25ms +step:1604/1695 train_time:159194ms step_avg:99.25ms +step:1605/1695 train_time:159296ms step_avg:99.25ms +step:1606/1695 train_time:159399ms step_avg:99.25ms +step:1607/1695 train_time:159500ms step_avg:99.25ms +step:1608/1695 train_time:159601ms step_avg:99.25ms +step:1609/1695 train_time:159703ms step_avg:99.26ms +step:1610/1695 train_time:159806ms step_avg:99.26ms +step:1611/1695 train_time:159908ms step_avg:99.26ms +step:1612/1695 train_time:160010ms step_avg:99.26ms +step:1613/1695 train_time:160111ms step_avg:99.26ms +step:1614/1695 train_time:160212ms step_avg:99.26ms +step:1615/1695 train_time:160314ms step_avg:99.27ms +step:1616/1695 train_time:160415ms step_avg:99.27ms +step:1617/1695 train_time:160518ms step_avg:99.27ms +step:1618/1695 train_time:160620ms step_avg:99.27ms +step:1619/1695 train_time:160723ms step_avg:99.27ms +step:1620/1695 train_time:160826ms step_avg:99.28ms +step:1621/1695 train_time:160927ms step_avg:99.28ms +step:1622/1695 train_time:161028ms step_avg:99.28ms +step:1623/1695 train_time:161130ms step_avg:99.28ms +step:1624/1695 train_time:161231ms step_avg:99.28ms +step:1625/1695 train_time:161335ms step_avg:99.28ms +step:1625/1695 val_loss:3.2888 train_time:161435ms step_avg:99.34ms +step:1626/1695 train_time:161464ms step_avg:99.30ms +step:1627/1695 train_time:161550ms step_avg:99.29ms +step:1628/1695 train_time:161655ms step_avg:99.30ms +step:1629/1695 train_time:161757ms step_avg:99.30ms +step:1630/1695 train_time:161859ms step_avg:99.30ms +step:1631/1695 train_time:161961ms step_avg:99.30ms +step:1632/1695 train_time:162062ms step_avg:99.30ms +step:1633/1695 train_time:162162ms step_avg:99.30ms +step:1634/1695 train_time:162265ms step_avg:99.31ms +step:1635/1695 train_time:162367ms step_avg:99.31ms +step:1636/1695 train_time:162470ms step_avg:99.31ms +step:1637/1695 train_time:162574ms step_avg:99.31ms +step:1638/1695 train_time:162677ms step_avg:99.31ms +step:1639/1695 train_time:162779ms step_avg:99.32ms +step:1640/1695 train_time:162881ms step_avg:99.32ms 
+step:1641/1695 train_time:162984ms step_avg:99.32ms +step:1642/1695 train_time:163085ms step_avg:99.32ms +step:1643/1695 train_time:163187ms step_avg:99.32ms +step:1644/1695 train_time:163293ms step_avg:99.33ms +step:1645/1695 train_time:163393ms step_avg:99.33ms +step:1646/1695 train_time:163496ms step_avg:99.33ms +step:1647/1695 train_time:163601ms step_avg:99.33ms +step:1648/1695 train_time:163704ms step_avg:99.33ms +step:1649/1695 train_time:163807ms step_avg:99.34ms +step:1650/1695 train_time:163910ms step_avg:99.34ms +step:1651/1695 train_time:164012ms step_avg:99.34ms +step:1652/1695 train_time:164115ms step_avg:99.34ms +step:1653/1695 train_time:164219ms step_avg:99.35ms +step:1654/1695 train_time:164321ms step_avg:99.35ms +step:1655/1695 train_time:164423ms step_avg:99.35ms +step:1656/1695 train_time:164525ms step_avg:99.35ms +step:1657/1695 train_time:164627ms step_avg:99.35ms +step:1658/1695 train_time:164730ms step_avg:99.35ms +step:1659/1695 train_time:164836ms step_avg:99.36ms +step:1660/1695 train_time:164938ms step_avg:99.36ms +step:1661/1695 train_time:165042ms step_avg:99.36ms +step:1662/1695 train_time:165147ms step_avg:99.37ms +step:1663/1695 train_time:165249ms step_avg:99.37ms +step:1664/1695 train_time:165351ms step_avg:99.37ms +step:1665/1695 train_time:165456ms step_avg:99.37ms +step:1666/1695 train_time:165560ms step_avg:99.38ms +step:1667/1695 train_time:165661ms step_avg:99.38ms +step:1668/1695 train_time:165766ms step_avg:99.38ms +step:1669/1695 train_time:165871ms step_avg:99.38ms +step:1670/1695 train_time:165973ms step_avg:99.39ms +step:1671/1695 train_time:166077ms step_avg:99.39ms +step:1672/1695 train_time:166181ms step_avg:99.39ms +step:1673/1695 train_time:166282ms step_avg:99.39ms +step:1674/1695 train_time:166383ms step_avg:99.39ms +step:1675/1695 train_time:166486ms step_avg:99.39ms +step:1676/1695 train_time:166594ms step_avg:99.40ms +step:1677/1695 train_time:166695ms step_avg:99.40ms +step:1678/1695 train_time:166798ms step_avg:99.40ms +step:1679/1695 train_time:166901ms step_avg:99.40ms +step:1680/1695 train_time:167002ms step_avg:99.41ms +step:1681/1695 train_time:167105ms step_avg:99.41ms +step:1682/1695 train_time:167213ms step_avg:99.41ms +step:1683/1695 train_time:167315ms step_avg:99.41ms +step:1684/1695 train_time:167419ms step_avg:99.42ms +step:1685/1695 train_time:167522ms step_avg:99.42ms +step:1686/1695 train_time:167624ms step_avg:99.42ms +step:1687/1695 train_time:167726ms step_avg:99.42ms +step:1688/1695 train_time:167829ms step_avg:99.42ms +step:1689/1695 train_time:167931ms step_avg:99.43ms +step:1690/1695 train_time:168033ms step_avg:99.43ms +step:1691/1695 train_time:168136ms step_avg:99.43ms +step:1692/1695 train_time:168238ms step_avg:99.43ms +step:1693/1695 train_time:168341ms step_avg:99.43ms +step:1694/1695 train_time:168444ms step_avg:99.44ms +step:1695/1695 train_time:168548ms step_avg:99.44ms +step:1695/1695 val_loss:3.2760 train_time:168647ms step_avg:99.50ms +peak memory allocated: 34761 MiB reserved: 49140 MiB diff --git a/records/082325_SparseAttnGate/50524dcb-cf95-4b75-bf89-ba8ff3c5e1af.txt b/records/082325_SparseAttnGate/50524dcb-cf95-4b75-bf89-ba8ff3c5e1af.txt new file mode 100644 index 000000000..d12ded416 --- /dev/null +++ b/records/082325_SparseAttnGate/50524dcb-cf95-4b75-bf89-ba8ff3c5e1af.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import 
dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, 
w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += 
BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) 
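+ # The epilogue below is what distinguishes this kernel from ns_line_1_kernel:
+ # the tile of A matching the current output tile is loaded once and fused with
+ # the accumulator as alpha * acc + beta * a_add, so computing
+ # alpha * A @ A.T + beta * A needs no separate elementwise kernel launch.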
+ a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
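+
+ A plain-PyTorch sketch of one Newton-Schulz iteration, matching the fused Triton
+ kernels above with coefficients a, b, c = 3.4445, -4.7750, 2.0315:
+
+ A = X @ X.mT # ns_line_1
+ B = b * A + c * A @ A # ns_line_2
+ X = a * X + B @ X # ns_line_3 (addmm/baddbmm)
+
+ Usage as in this script (note that step() assumes torch.distributed is already
+ initialized, since it reduce-scatters gradients and all-gathers updated params):
+
+ optimizer = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+ loss.backward()
+ optimizer.step()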
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
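+ # Note: lr_mul is a per-parameter learning-rate multiplier; both DistAdam and Muon
+ # read it via getattr(p, "lr_mul", 1.0) when forming the effective step size (and
+ # wd_mul likewise scales weight decay). So the embedding tables above step at 75x
+ # the base lr, while lm_head and the scalars below get 1x and 5x respectively.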
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from a suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:12:16 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
+====================================================================================================
+Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]
+Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6
+Running Triton version 3.4.0
+Sat Aug 23 13:12:16 2025
++-----------------------------------------------------------------------------------------+
+| NVIDIA-SMI 570.148.08             Driver Version: 570.148.08     CUDA Version: 12.8     |
+|-----------------------------------------+------------------------+----------------------+
+| GPU  Name                 Persistence-M | Bus-Id          Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp   Perf          Pwr:Usage/Cap |           Memory-Usage | GPU-Util  Compute M. |
+|                                         |                        |               MIG M. |
+|=========================================+========================+======================|
+|   0  NVIDIA H100 80GB HBM3          On  |   00000000:61:00.0 Off |                    0 |
+| N/A   32C    P0            117W /  700W |    5858MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+|   1  NVIDIA H100 80GB HBM3          On  |   00000000:62:00.0 Off |                    0 |
+| N/A   37C    P0            121W /  700W |    1519MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+|   2  NVIDIA H100 80GB HBM3          On  |   00000000:63:00.0 Off |                    0 |
+| N/A   38C    P0            122W /  700W |    1519MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+|   3  NVIDIA H100 80GB HBM3          On  |   00000000:64:00.0 Off |                    0 |
+| N/A   31C    P0            113W /  700W |    1519MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+|   4  NVIDIA H100 80GB HBM3          On  |   00000000:6A:00.0 Off |                    0 |
+| N/A   32C    P0            120W /  700W |    1519MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+|   5  NVIDIA H100 80GB HBM3          On  |   00000000:6B:00.0 Off |                    0 |
+| N/A   38C    P0            121W /  700W |    1519MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+|   6  NVIDIA H100 80GB HBM3          On  |   00000000:6C:00.0 Off |                    0 |
+| N/A   36C    P0            120W /  700W |    1519MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+|   7  NVIDIA H100 80GB HBM3          On  |   00000000:6D:00.0 Off |                    0 |
+| N/A   34C    P0            118W /  700W |    1519MiB /  81559MiB |      0%      Default |
+|                                         |                        |             Disabled |
++-----------------------------------------+------------------------+----------------------+
+
++-----------------------------------------------------------------------------------------+
+| Processes:                                                                              |
+|  GPU   GI   CI              PID   Type   Process name                        GPU Memory |
+|        ID   ID                                                               Usage      |
+|=========================================================================================|
+|    0   N/A  N/A          299071      C   /usr/bin/python3                       1510MiB |
+|    0   N/A  N/A          299072      C   /usr/bin/python3                        614MiB |
+|    0   N/A  N/A          299073      C   /usr/bin/python3                        614MiB |
+|    0   N/A  N/A          299074      C   /usr/bin/python3                        614MiB |
+|    0   N/A  N/A          299075      C   /usr/bin/python3                        614MiB |
+|    0   N/A  N/A          299076      C   /usr/bin/python3                        614MiB |
+|    0   N/A  N/A          299077      C   /usr/bin/python3                        614MiB |
+|    0   N/A  N/A          299078      C   /usr/bin/python3                        614MiB |
+|    1   N/A  N/A          299072      C   /usr/bin/python3                       1510MiB |
+|    2   N/A  N/A          299073      C   /usr/bin/python3                       1510MiB |
+|    3   N/A  N/A          299074      C   /usr/bin/python3                       1510MiB |
+|    4   N/A  N/A          299075      C   /usr/bin/python3                       1510MiB |
+|    5   N/A  N/A          299076      C   /usr/bin/python3                       1510MiB |
+|    6   N/A  N/A          299077      C   /usr/bin/python3                       1510MiB |
+|    7   N/A  N/A          299078      C   /usr/bin/python3                       1510MiB |
++-----------------------------------------------------------------------------------------+
+
+====================================================================================================
+step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.03ms
+step:1/1695 train_time:157ms step_avg:157.11ms
+step:2/1695 train_time:184ms step_avg:92.16ms
+step:3/1695 train_time:253ms step_avg:84.19ms
+step:4/1695 train_time:345ms step_avg:86.24ms
+step:5/1695 train_time:438ms step_avg:87.58ms
+step:6/1695 train_time:531ms step_avg:88.50ms
+step:7/1695 train_time:624ms step_avg:89.17ms
+step:8/1695
train_time:718ms step_avg:89.74ms +step:9/1695 train_time:811ms step_avg:90.06ms +step:10/1695 train_time:903ms step_avg:90.34ms +step:11/1695 train_time:997ms step_avg:90.61ms +step:12/1695 train_time:1090ms step_avg:90.82ms +step:13/1695 train_time:1184ms step_avg:91.09ms +step:14/1695 train_time:1279ms step_avg:91.33ms +step:15/1695 train_time:1373ms step_avg:91.54ms +step:16/1695 train_time:1467ms step_avg:91.66ms +step:17/1695 train_time:1561ms step_avg:91.80ms +step:18/1695 train_time:1655ms step_avg:91.92ms +step:19/1695 train_time:1748ms step_avg:91.98ms +step:20/1695 train_time:1842ms step_avg:92.10ms +step:21/1695 train_time:1936ms step_avg:92.18ms +step:22/1695 train_time:2030ms step_avg:92.27ms +step:23/1695 train_time:2124ms step_avg:92.34ms +step:24/1695 train_time:2218ms step_avg:92.41ms +step:25/1695 train_time:2311ms step_avg:92.46ms +step:26/1695 train_time:2405ms step_avg:92.50ms +step:27/1695 train_time:2498ms step_avg:92.53ms +step:28/1695 train_time:2593ms step_avg:92.60ms +step:29/1695 train_time:2686ms step_avg:92.64ms +step:30/1695 train_time:2780ms step_avg:92.68ms +step:31/1695 train_time:2874ms step_avg:92.71ms +step:32/1695 train_time:2967ms step_avg:92.72ms +step:33/1695 train_time:3061ms step_avg:92.74ms +step:34/1695 train_time:3154ms step_avg:92.77ms +step:35/1695 train_time:3248ms step_avg:92.80ms +step:36/1695 train_time:3342ms step_avg:92.84ms +step:37/1695 train_time:3437ms step_avg:92.88ms +step:38/1695 train_time:3532ms step_avg:92.96ms +step:39/1695 train_time:3624ms step_avg:92.93ms +step:40/1695 train_time:3719ms step_avg:92.97ms +step:41/1695 train_time:3813ms step_avg:93.01ms +step:42/1695 train_time:3908ms step_avg:93.04ms +step:43/1695 train_time:4002ms step_avg:93.07ms +step:44/1695 train_time:4096ms step_avg:93.08ms +step:45/1695 train_time:4188ms step_avg:93.08ms +step:46/1695 train_time:4282ms step_avg:93.10ms +step:47/1695 train_time:4376ms step_avg:93.11ms +step:48/1695 train_time:4471ms step_avg:93.14ms +step:49/1695 train_time:4564ms step_avg:93.14ms +step:50/1695 train_time:4658ms step_avg:93.17ms +step:51/1695 train_time:4753ms step_avg:93.20ms +step:52/1695 train_time:4847ms step_avg:93.21ms +step:53/1695 train_time:4941ms step_avg:93.23ms +step:54/1695 train_time:5035ms step_avg:93.24ms +step:55/1695 train_time:5128ms step_avg:93.24ms +step:56/1695 train_time:5221ms step_avg:93.24ms +step:57/1695 train_time:5315ms step_avg:93.25ms +step:58/1695 train_time:5410ms step_avg:93.28ms +step:59/1695 train_time:5503ms step_avg:93.27ms +step:60/1695 train_time:5597ms step_avg:93.28ms +step:61/1695 train_time:5690ms step_avg:93.28ms +step:62/1695 train_time:5784ms step_avg:93.29ms +step:63/1695 train_time:5879ms step_avg:93.32ms +step:64/1695 train_time:5973ms step_avg:93.33ms +step:65/1695 train_time:6067ms step_avg:93.34ms +step:66/1695 train_time:6161ms step_avg:93.35ms +step:67/1695 train_time:6254ms step_avg:93.35ms +step:68/1695 train_time:6348ms step_avg:93.35ms +step:69/1695 train_time:6442ms step_avg:93.36ms +step:70/1695 train_time:6536ms step_avg:93.37ms +step:71/1695 train_time:6629ms step_avg:93.36ms +step:72/1695 train_time:6723ms step_avg:93.37ms +step:73/1695 train_time:6817ms step_avg:93.38ms +step:74/1695 train_time:6911ms step_avg:93.39ms +step:75/1695 train_time:7004ms step_avg:93.39ms +step:76/1695 train_time:7099ms step_avg:93.41ms +step:77/1695 train_time:7193ms step_avg:93.41ms +step:78/1695 train_time:7286ms step_avg:93.41ms +step:79/1695 train_time:7380ms step_avg:93.41ms +step:80/1695 train_time:7474ms 
step_avg:93.42ms +step:81/1695 train_time:7567ms step_avg:93.42ms +step:82/1695 train_time:7662ms step_avg:93.44ms +step:83/1695 train_time:7756ms step_avg:93.45ms +step:84/1695 train_time:7851ms step_avg:93.46ms +step:85/1695 train_time:7944ms step_avg:93.46ms +step:86/1695 train_time:8039ms step_avg:93.47ms +step:87/1695 train_time:8132ms step_avg:93.48ms +step:88/1695 train_time:8227ms step_avg:93.48ms +step:89/1695 train_time:8320ms step_avg:93.49ms +step:90/1695 train_time:8414ms step_avg:93.49ms +step:91/1695 train_time:8507ms step_avg:93.48ms +step:92/1695 train_time:8601ms step_avg:93.49ms +step:93/1695 train_time:8694ms step_avg:93.49ms +step:94/1695 train_time:8788ms step_avg:93.49ms +step:95/1695 train_time:8882ms step_avg:93.50ms +step:96/1695 train_time:8976ms step_avg:93.50ms +step:97/1695 train_time:9070ms step_avg:93.51ms +step:98/1695 train_time:9164ms step_avg:93.51ms +step:99/1695 train_time:9258ms step_avg:93.52ms +step:100/1695 train_time:9352ms step_avg:93.52ms +step:101/1695 train_time:9446ms step_avg:93.53ms +step:102/1695 train_time:9541ms step_avg:93.54ms +step:103/1695 train_time:9635ms step_avg:93.54ms +step:104/1695 train_time:9729ms step_avg:93.54ms +step:105/1695 train_time:9822ms step_avg:93.55ms +step:106/1695 train_time:9917ms step_avg:93.55ms +step:107/1695 train_time:10010ms step_avg:93.55ms +step:108/1695 train_time:10104ms step_avg:93.56ms +step:109/1695 train_time:10199ms step_avg:93.57ms +step:110/1695 train_time:10292ms step_avg:93.57ms +step:111/1695 train_time:10385ms step_avg:93.56ms +step:112/1695 train_time:10479ms step_avg:93.56ms +step:113/1695 train_time:10573ms step_avg:93.57ms +step:114/1695 train_time:10667ms step_avg:93.57ms +step:115/1695 train_time:10761ms step_avg:93.57ms +step:116/1695 train_time:10855ms step_avg:93.58ms +step:117/1695 train_time:10949ms step_avg:93.58ms +step:118/1695 train_time:11043ms step_avg:93.59ms +step:119/1695 train_time:11136ms step_avg:93.58ms +step:120/1695 train_time:11231ms step_avg:93.59ms +step:121/1695 train_time:11324ms step_avg:93.59ms +step:122/1695 train_time:11419ms step_avg:93.60ms +step:123/1695 train_time:11512ms step_avg:93.60ms +step:124/1695 train_time:11605ms step_avg:93.59ms +step:125/1695 train_time:11698ms step_avg:93.59ms +step:125/1695 val_loss:4.6089 train_time:11790ms step_avg:94.32ms +step:126/1695 train_time:11820ms step_avg:93.81ms +step:127/1695 train_time:11894ms step_avg:93.65ms +step:128/1695 train_time:11995ms step_avg:93.71ms +step:129/1695 train_time:12092ms step_avg:93.73ms +step:130/1695 train_time:12186ms step_avg:93.74ms +step:131/1695 train_time:12279ms step_avg:93.74ms +step:132/1695 train_time:12373ms step_avg:93.73ms +step:133/1695 train_time:12467ms step_avg:93.73ms +step:134/1695 train_time:12560ms step_avg:93.73ms +step:135/1695 train_time:12653ms step_avg:93.73ms +step:136/1695 train_time:12746ms step_avg:93.72ms +step:137/1695 train_time:12840ms step_avg:93.73ms +step:138/1695 train_time:12935ms step_avg:93.74ms +step:139/1695 train_time:13032ms step_avg:93.76ms +step:140/1695 train_time:13128ms step_avg:93.77ms +step:141/1695 train_time:13222ms step_avg:93.77ms +step:142/1695 train_time:13315ms step_avg:93.77ms +step:143/1695 train_time:13409ms step_avg:93.77ms +step:144/1695 train_time:13503ms step_avg:93.77ms +step:145/1695 train_time:13596ms step_avg:93.77ms +step:146/1695 train_time:13690ms step_avg:93.77ms +step:147/1695 train_time:13784ms step_avg:93.77ms +step:148/1695 train_time:13878ms step_avg:93.77ms +step:149/1695 train_time:13972ms 
step_avg:93.77ms +step:150/1695 train_time:14067ms step_avg:93.78ms +step:151/1695 train_time:14162ms step_avg:93.79ms +step:152/1695 train_time:14257ms step_avg:93.79ms +step:153/1695 train_time:14351ms step_avg:93.80ms +step:154/1695 train_time:14446ms step_avg:93.80ms +step:155/1695 train_time:14539ms step_avg:93.80ms +step:156/1695 train_time:14633ms step_avg:93.80ms +step:157/1695 train_time:14727ms step_avg:93.80ms +step:158/1695 train_time:14820ms step_avg:93.80ms +step:159/1695 train_time:14914ms step_avg:93.80ms +step:160/1695 train_time:15008ms step_avg:93.80ms +step:161/1695 train_time:15102ms step_avg:93.80ms +step:162/1695 train_time:15196ms step_avg:93.80ms +step:163/1695 train_time:15290ms step_avg:93.80ms +step:164/1695 train_time:15384ms step_avg:93.81ms +step:165/1695 train_time:15479ms step_avg:93.81ms +step:166/1695 train_time:15572ms step_avg:93.81ms +step:167/1695 train_time:15667ms step_avg:93.82ms +step:168/1695 train_time:15762ms step_avg:93.82ms +step:169/1695 train_time:15855ms step_avg:93.82ms +step:170/1695 train_time:15949ms step_avg:93.82ms +step:171/1695 train_time:16044ms step_avg:93.82ms +step:172/1695 train_time:16141ms step_avg:93.84ms +step:173/1695 train_time:16232ms step_avg:93.83ms +step:174/1695 train_time:16326ms step_avg:93.83ms +step:175/1695 train_time:16421ms step_avg:93.83ms +step:176/1695 train_time:16515ms step_avg:93.84ms +step:177/1695 train_time:16610ms step_avg:93.84ms +step:178/1695 train_time:16704ms step_avg:93.85ms +step:179/1695 train_time:16798ms step_avg:93.84ms +step:180/1695 train_time:16892ms step_avg:93.84ms +step:181/1695 train_time:16986ms step_avg:93.85ms +step:182/1695 train_time:17080ms step_avg:93.85ms +step:183/1695 train_time:17174ms step_avg:93.85ms +step:184/1695 train_time:17270ms step_avg:93.86ms +step:185/1695 train_time:17365ms step_avg:93.86ms +step:186/1695 train_time:17458ms step_avg:93.86ms +step:187/1695 train_time:17553ms step_avg:93.87ms +step:188/1695 train_time:17648ms step_avg:93.87ms +step:189/1695 train_time:17742ms step_avg:93.87ms +step:190/1695 train_time:17835ms step_avg:93.87ms +step:191/1695 train_time:17930ms step_avg:93.87ms +step:192/1695 train_time:18024ms step_avg:93.88ms +step:193/1695 train_time:18119ms step_avg:93.88ms +step:194/1695 train_time:18213ms step_avg:93.88ms +step:195/1695 train_time:18308ms step_avg:93.89ms +step:196/1695 train_time:18402ms step_avg:93.89ms +step:197/1695 train_time:18496ms step_avg:93.89ms +step:198/1695 train_time:18590ms step_avg:93.89ms +step:199/1695 train_time:18684ms step_avg:93.89ms +step:200/1695 train_time:18777ms step_avg:93.89ms +step:201/1695 train_time:18871ms step_avg:93.89ms +step:202/1695 train_time:18967ms step_avg:93.90ms +step:203/1695 train_time:19062ms step_avg:93.90ms +step:204/1695 train_time:19156ms step_avg:93.90ms +step:205/1695 train_time:19250ms step_avg:93.90ms +step:206/1695 train_time:19344ms step_avg:93.90ms +step:207/1695 train_time:19438ms step_avg:93.90ms +step:208/1695 train_time:19532ms step_avg:93.90ms +step:209/1695 train_time:19626ms step_avg:93.91ms +step:210/1695 train_time:19721ms step_avg:93.91ms +step:211/1695 train_time:19815ms step_avg:93.91ms +step:212/1695 train_time:19908ms step_avg:93.91ms +step:213/1695 train_time:20003ms step_avg:93.91ms +step:214/1695 train_time:20098ms step_avg:93.92ms +step:215/1695 train_time:20192ms step_avg:93.92ms +step:216/1695 train_time:20286ms step_avg:93.92ms +step:217/1695 train_time:20380ms step_avg:93.92ms +step:218/1695 train_time:20475ms step_avg:93.92ms +step:219/1695 
train_time:20569ms step_avg:93.92ms +step:220/1695 train_time:20662ms step_avg:93.92ms +step:221/1695 train_time:20756ms step_avg:93.92ms +step:222/1695 train_time:20850ms step_avg:93.92ms +step:223/1695 train_time:20943ms step_avg:93.92ms +step:224/1695 train_time:21037ms step_avg:93.92ms +step:225/1695 train_time:21131ms step_avg:93.92ms +step:226/1695 train_time:21226ms step_avg:93.92ms +step:227/1695 train_time:21320ms step_avg:93.92ms +step:228/1695 train_time:21414ms step_avg:93.92ms +step:229/1695 train_time:21508ms step_avg:93.92ms +step:230/1695 train_time:21602ms step_avg:93.92ms +step:231/1695 train_time:21695ms step_avg:93.92ms +step:232/1695 train_time:21789ms step_avg:93.92ms +step:233/1695 train_time:21883ms step_avg:93.92ms +step:234/1695 train_time:21977ms step_avg:93.92ms +step:235/1695 train_time:22071ms step_avg:93.92ms +step:236/1695 train_time:22165ms step_avg:93.92ms +step:237/1695 train_time:22258ms step_avg:93.92ms +step:238/1695 train_time:22352ms step_avg:93.92ms +step:239/1695 train_time:22447ms step_avg:93.92ms +step:240/1695 train_time:22541ms step_avg:93.92ms +step:241/1695 train_time:22635ms step_avg:93.92ms +step:242/1695 train_time:22729ms step_avg:93.92ms +step:243/1695 train_time:22824ms step_avg:93.93ms +step:244/1695 train_time:22918ms step_avg:93.93ms +step:245/1695 train_time:23012ms step_avg:93.93ms +step:246/1695 train_time:23106ms step_avg:93.93ms +step:247/1695 train_time:23199ms step_avg:93.92ms +step:248/1695 train_time:23293ms step_avg:93.92ms +step:249/1695 train_time:23387ms step_avg:93.92ms +step:250/1695 train_time:23482ms step_avg:93.93ms +step:250/1695 val_loss:4.0734 train_time:23574ms step_avg:94.30ms +step:251/1695 train_time:23603ms step_avg:94.04ms +step:252/1695 train_time:23679ms step_avg:93.97ms +step:253/1695 train_time:23779ms step_avg:93.99ms +step:254/1695 train_time:23874ms step_avg:93.99ms +step:255/1695 train_time:23968ms step_avg:93.99ms +step:256/1695 train_time:24063ms step_avg:94.00ms +step:257/1695 train_time:24157ms step_avg:93.99ms +step:258/1695 train_time:24250ms step_avg:93.99ms +step:259/1695 train_time:24345ms step_avg:93.99ms +step:260/1695 train_time:24439ms step_avg:93.99ms +step:261/1695 train_time:24532ms step_avg:93.99ms +step:262/1695 train_time:24628ms step_avg:94.00ms +step:263/1695 train_time:24724ms step_avg:94.01ms +step:264/1695 train_time:24819ms step_avg:94.01ms +step:265/1695 train_time:24914ms step_avg:94.02ms +step:266/1695 train_time:25009ms step_avg:94.02ms +step:267/1695 train_time:25104ms step_avg:94.02ms +step:268/1695 train_time:25199ms step_avg:94.03ms +step:269/1695 train_time:25292ms step_avg:94.02ms +step:270/1695 train_time:25386ms step_avg:94.02ms +step:271/1695 train_time:25480ms step_avg:94.02ms +step:272/1695 train_time:25573ms step_avg:94.02ms +step:273/1695 train_time:25669ms step_avg:94.03ms +step:274/1695 train_time:25765ms step_avg:94.03ms +step:275/1695 train_time:25860ms step_avg:94.04ms +step:276/1695 train_time:25955ms step_avg:94.04ms +step:277/1695 train_time:26049ms step_avg:94.04ms +step:278/1695 train_time:26145ms step_avg:94.05ms +step:279/1695 train_time:26239ms step_avg:94.05ms +step:280/1695 train_time:26333ms step_avg:94.05ms +step:281/1695 train_time:26427ms step_avg:94.05ms +step:282/1695 train_time:26521ms step_avg:94.05ms +step:283/1695 train_time:26614ms step_avg:94.04ms +step:284/1695 train_time:26709ms step_avg:94.05ms +step:285/1695 train_time:26805ms step_avg:94.05ms +step:286/1695 train_time:26899ms step_avg:94.05ms +step:287/1695 train_time:26994ms 
step_avg:94.05ms +step:288/1695 train_time:27089ms step_avg:94.06ms +step:289/1695 train_time:27185ms step_avg:94.07ms +step:290/1695 train_time:27280ms step_avg:94.07ms +step:291/1695 train_time:27374ms step_avg:94.07ms +step:292/1695 train_time:27469ms step_avg:94.07ms +step:293/1695 train_time:27564ms step_avg:94.07ms +step:294/1695 train_time:27659ms step_avg:94.08ms +step:295/1695 train_time:27753ms step_avg:94.08ms +step:296/1695 train_time:27847ms step_avg:94.08ms +step:297/1695 train_time:27942ms step_avg:94.08ms +step:298/1695 train_time:28037ms step_avg:94.08ms +step:299/1695 train_time:28131ms step_avg:94.08ms +step:300/1695 train_time:28226ms step_avg:94.09ms +step:301/1695 train_time:28321ms step_avg:94.09ms +step:302/1695 train_time:28415ms step_avg:94.09ms +step:303/1695 train_time:28509ms step_avg:94.09ms +step:304/1695 train_time:28605ms step_avg:94.09ms +step:305/1695 train_time:28698ms step_avg:94.09ms +step:306/1695 train_time:28791ms step_avg:94.09ms +step:307/1695 train_time:28886ms step_avg:94.09ms +step:308/1695 train_time:28981ms step_avg:94.09ms +step:309/1695 train_time:29076ms step_avg:94.10ms +step:310/1695 train_time:29170ms step_avg:94.10ms +step:311/1695 train_time:29265ms step_avg:94.10ms +step:312/1695 train_time:29360ms step_avg:94.10ms +step:313/1695 train_time:29453ms step_avg:94.10ms +step:314/1695 train_time:29548ms step_avg:94.10ms +step:315/1695 train_time:29643ms step_avg:94.10ms +step:316/1695 train_time:29737ms step_avg:94.11ms +step:317/1695 train_time:29831ms step_avg:94.10ms +step:318/1695 train_time:29925ms step_avg:94.11ms +step:319/1695 train_time:30020ms step_avg:94.11ms +step:320/1695 train_time:30113ms step_avg:94.10ms +step:321/1695 train_time:30209ms step_avg:94.11ms +step:322/1695 train_time:30304ms step_avg:94.11ms +step:323/1695 train_time:30398ms step_avg:94.11ms +step:324/1695 train_time:30492ms step_avg:94.11ms +step:325/1695 train_time:30586ms step_avg:94.11ms +step:326/1695 train_time:30681ms step_avg:94.11ms +step:327/1695 train_time:30775ms step_avg:94.11ms +step:328/1695 train_time:30870ms step_avg:94.12ms +step:329/1695 train_time:30965ms step_avg:94.12ms +step:330/1695 train_time:31060ms step_avg:94.12ms +step:331/1695 train_time:31154ms step_avg:94.12ms +step:332/1695 train_time:31248ms step_avg:94.12ms +step:333/1695 train_time:31342ms step_avg:94.12ms +step:334/1695 train_time:31437ms step_avg:94.12ms +step:335/1695 train_time:31531ms step_avg:94.12ms +step:336/1695 train_time:31625ms step_avg:94.12ms +step:337/1695 train_time:31720ms step_avg:94.12ms +step:338/1695 train_time:31814ms step_avg:94.12ms +step:339/1695 train_time:31908ms step_avg:94.12ms +step:340/1695 train_time:32003ms step_avg:94.13ms +step:341/1695 train_time:32097ms step_avg:94.13ms +step:342/1695 train_time:32192ms step_avg:94.13ms +step:343/1695 train_time:32287ms step_avg:94.13ms +step:344/1695 train_time:32382ms step_avg:94.13ms +step:345/1695 train_time:32477ms step_avg:94.13ms +step:346/1695 train_time:32571ms step_avg:94.13ms +step:347/1695 train_time:32666ms step_avg:94.14ms +step:348/1695 train_time:32760ms step_avg:94.14ms +step:349/1695 train_time:32854ms step_avg:94.14ms +step:350/1695 train_time:32949ms step_avg:94.14ms +step:351/1695 train_time:33045ms step_avg:94.14ms +step:352/1695 train_time:33139ms step_avg:94.15ms +step:353/1695 train_time:33233ms step_avg:94.15ms +step:354/1695 train_time:33329ms step_avg:94.15ms +step:355/1695 train_time:33423ms step_avg:94.15ms +step:356/1695 train_time:33517ms step_avg:94.15ms +step:357/1695 
train_time:33611ms step_avg:94.15ms +step:358/1695 train_time:33706ms step_avg:94.15ms +step:359/1695 train_time:33802ms step_avg:94.16ms +step:360/1695 train_time:33897ms step_avg:94.16ms +step:361/1695 train_time:33991ms step_avg:94.16ms +step:362/1695 train_time:34085ms step_avg:94.16ms +step:363/1695 train_time:34181ms step_avg:94.16ms +step:364/1695 train_time:34275ms step_avg:94.16ms +step:365/1695 train_time:34370ms step_avg:94.16ms +step:366/1695 train_time:34465ms step_avg:94.17ms +step:367/1695 train_time:34559ms step_avg:94.17ms +step:368/1695 train_time:34653ms step_avg:94.16ms +step:369/1695 train_time:34747ms step_avg:94.17ms +step:370/1695 train_time:34842ms step_avg:94.17ms +step:371/1695 train_time:34936ms step_avg:94.17ms +step:372/1695 train_time:35030ms step_avg:94.17ms +step:373/1695 train_time:35126ms step_avg:94.17ms +step:374/1695 train_time:35222ms step_avg:94.18ms +step:375/1695 train_time:35315ms step_avg:94.17ms +step:375/1695 val_loss:3.8753 train_time:35407ms step_avg:94.42ms +step:376/1695 train_time:35436ms step_avg:94.24ms +step:377/1695 train_time:35517ms step_avg:94.21ms +step:378/1695 train_time:35616ms step_avg:94.22ms +step:379/1695 train_time:35712ms step_avg:94.23ms +step:380/1695 train_time:35808ms step_avg:94.23ms +step:381/1695 train_time:35905ms step_avg:94.24ms +step:382/1695 train_time:36000ms step_avg:94.24ms +step:383/1695 train_time:36095ms step_avg:94.24ms +step:384/1695 train_time:36191ms step_avg:94.25ms +step:385/1695 train_time:36286ms step_avg:94.25ms +step:386/1695 train_time:36382ms step_avg:94.25ms +step:387/1695 train_time:36479ms step_avg:94.26ms +step:388/1695 train_time:36577ms step_avg:94.27ms +step:389/1695 train_time:36674ms step_avg:94.28ms +step:390/1695 train_time:36770ms step_avg:94.28ms +step:391/1695 train_time:36866ms step_avg:94.29ms +step:392/1695 train_time:36961ms step_avg:94.29ms +step:393/1695 train_time:37057ms step_avg:94.29ms +step:394/1695 train_time:37153ms step_avg:94.30ms +step:395/1695 train_time:37248ms step_avg:94.30ms +step:396/1695 train_time:37344ms step_avg:94.30ms +step:397/1695 train_time:37441ms step_avg:94.31ms +step:398/1695 train_time:37537ms step_avg:94.32ms +step:399/1695 train_time:37634ms step_avg:94.32ms +step:400/1695 train_time:37731ms step_avg:94.33ms +step:401/1695 train_time:37827ms step_avg:94.33ms +step:402/1695 train_time:37924ms step_avg:94.34ms +step:403/1695 train_time:38021ms step_avg:94.34ms +step:404/1695 train_time:38117ms step_avg:94.35ms +step:405/1695 train_time:38214ms step_avg:94.35ms +step:406/1695 train_time:38309ms step_avg:94.36ms +step:407/1695 train_time:38405ms step_avg:94.36ms +step:408/1695 train_time:38501ms step_avg:94.37ms +step:409/1695 train_time:38597ms step_avg:94.37ms +step:410/1695 train_time:38693ms step_avg:94.37ms +step:411/1695 train_time:38789ms step_avg:94.38ms +step:412/1695 train_time:38886ms step_avg:94.38ms +step:413/1695 train_time:38984ms step_avg:94.39ms +step:414/1695 train_time:39081ms step_avg:94.40ms +step:415/1695 train_time:39177ms step_avg:94.40ms +step:416/1695 train_time:39273ms step_avg:94.41ms +step:417/1695 train_time:39369ms step_avg:94.41ms +step:418/1695 train_time:39465ms step_avg:94.41ms +step:419/1695 train_time:39562ms step_avg:94.42ms +step:420/1695 train_time:39659ms step_avg:94.43ms +step:421/1695 train_time:39755ms step_avg:94.43ms +step:422/1695 train_time:39851ms step_avg:94.43ms +step:423/1695 train_time:39948ms step_avg:94.44ms +step:424/1695 train_time:40045ms step_avg:94.44ms +step:425/1695 train_time:40141ms 
step_avg:94.45ms +step:426/1695 train_time:40237ms step_avg:94.45ms +step:427/1695 train_time:40333ms step_avg:94.46ms +step:428/1695 train_time:40428ms step_avg:94.46ms +step:429/1695 train_time:40525ms step_avg:94.46ms +step:430/1695 train_time:40622ms step_avg:94.47ms +step:431/1695 train_time:40718ms step_avg:94.47ms +step:432/1695 train_time:40814ms step_avg:94.48ms +step:433/1695 train_time:40910ms step_avg:94.48ms +step:434/1695 train_time:41006ms step_avg:94.48ms +step:435/1695 train_time:41103ms step_avg:94.49ms +step:436/1695 train_time:41200ms step_avg:94.50ms +step:437/1695 train_time:41297ms step_avg:94.50ms +step:438/1695 train_time:41392ms step_avg:94.50ms +step:439/1695 train_time:41488ms step_avg:94.51ms +step:440/1695 train_time:41585ms step_avg:94.51ms +step:441/1695 train_time:41682ms step_avg:94.52ms +step:442/1695 train_time:41778ms step_avg:94.52ms +step:443/1695 train_time:41874ms step_avg:94.52ms +step:444/1695 train_time:41970ms step_avg:94.53ms +step:445/1695 train_time:42066ms step_avg:94.53ms +step:446/1695 train_time:42163ms step_avg:94.54ms +step:447/1695 train_time:42260ms step_avg:94.54ms +step:448/1695 train_time:42356ms step_avg:94.55ms +step:449/1695 train_time:42454ms step_avg:94.55ms +step:450/1695 train_time:42549ms step_avg:94.55ms +step:451/1695 train_time:42646ms step_avg:94.56ms +step:452/1695 train_time:42742ms step_avg:94.56ms +step:453/1695 train_time:42838ms step_avg:94.57ms +step:454/1695 train_time:42935ms step_avg:94.57ms +step:455/1695 train_time:43031ms step_avg:94.57ms +step:456/1695 train_time:43126ms step_avg:94.58ms +step:457/1695 train_time:43223ms step_avg:94.58ms +step:458/1695 train_time:43320ms step_avg:94.59ms +step:459/1695 train_time:43417ms step_avg:94.59ms +step:460/1695 train_time:43513ms step_avg:94.59ms +step:461/1695 train_time:43609ms step_avg:94.60ms +step:462/1695 train_time:43705ms step_avg:94.60ms +step:463/1695 train_time:43802ms step_avg:94.60ms +step:464/1695 train_time:43898ms step_avg:94.61ms +step:465/1695 train_time:43994ms step_avg:94.61ms +step:466/1695 train_time:44090ms step_avg:94.61ms +step:467/1695 train_time:44186ms step_avg:94.62ms +step:468/1695 train_time:44283ms step_avg:94.62ms +step:469/1695 train_time:44381ms step_avg:94.63ms +step:470/1695 train_time:44477ms step_avg:94.63ms +step:471/1695 train_time:44573ms step_avg:94.64ms +step:472/1695 train_time:44669ms step_avg:94.64ms +step:473/1695 train_time:44765ms step_avg:94.64ms +step:474/1695 train_time:44861ms step_avg:94.64ms +step:475/1695 train_time:44958ms step_avg:94.65ms +step:476/1695 train_time:45055ms step_avg:94.65ms +step:477/1695 train_time:45150ms step_avg:94.65ms +step:478/1695 train_time:45246ms step_avg:94.66ms +step:479/1695 train_time:45343ms step_avg:94.66ms +step:480/1695 train_time:45439ms step_avg:94.67ms +step:481/1695 train_time:45536ms step_avg:94.67ms +step:482/1695 train_time:45632ms step_avg:94.67ms +step:483/1695 train_time:45728ms step_avg:94.67ms +step:484/1695 train_time:45823ms step_avg:94.68ms +step:485/1695 train_time:45920ms step_avg:94.68ms +step:486/1695 train_time:46016ms step_avg:94.68ms +step:487/1695 train_time:46112ms step_avg:94.69ms +step:488/1695 train_time:46208ms step_avg:94.69ms +step:489/1695 train_time:46304ms step_avg:94.69ms +step:490/1695 train_time:46401ms step_avg:94.70ms +step:491/1695 train_time:46498ms step_avg:94.70ms +step:492/1695 train_time:46593ms step_avg:94.70ms +step:493/1695 train_time:46689ms step_avg:94.70ms +step:494/1695 train_time:46785ms step_avg:94.71ms +step:495/1695 
train_time:46882ms step_avg:94.71ms +step:496/1695 train_time:46979ms step_avg:94.72ms +step:497/1695 train_time:47075ms step_avg:94.72ms +step:498/1695 train_time:47170ms step_avg:94.72ms +step:499/1695 train_time:47267ms step_avg:94.72ms +step:500/1695 train_time:47364ms step_avg:94.73ms +step:500/1695 val_loss:3.7308 train_time:47458ms step_avg:94.92ms +step:501/1695 train_time:47487ms step_avg:94.78ms +step:502/1695 train_time:47568ms step_avg:94.76ms +step:503/1695 train_time:47669ms step_avg:94.77ms +step:504/1695 train_time:47766ms step_avg:94.77ms +step:505/1695 train_time:47862ms step_avg:94.78ms +step:506/1695 train_time:47958ms step_avg:94.78ms +step:507/1695 train_time:48053ms step_avg:94.78ms +step:508/1695 train_time:48149ms step_avg:94.78ms +step:509/1695 train_time:48245ms step_avg:94.78ms +step:510/1695 train_time:48341ms step_avg:94.79ms +step:511/1695 train_time:48436ms step_avg:94.79ms +step:512/1695 train_time:48533ms step_avg:94.79ms +step:513/1695 train_time:48631ms step_avg:94.80ms +step:514/1695 train_time:48729ms step_avg:94.80ms +step:515/1695 train_time:48826ms step_avg:94.81ms +step:516/1695 train_time:48923ms step_avg:94.81ms +step:517/1695 train_time:49019ms step_avg:94.82ms +step:518/1695 train_time:49115ms step_avg:94.82ms +step:519/1695 train_time:49211ms step_avg:94.82ms +step:520/1695 train_time:49308ms step_avg:94.82ms +step:521/1695 train_time:49404ms step_avg:94.83ms +step:522/1695 train_time:49501ms step_avg:94.83ms +step:523/1695 train_time:49598ms step_avg:94.83ms +step:524/1695 train_time:49694ms step_avg:94.84ms +step:525/1695 train_time:49792ms step_avg:94.84ms +step:526/1695 train_time:49890ms step_avg:94.85ms +step:527/1695 train_time:49989ms step_avg:94.86ms +step:528/1695 train_time:50085ms step_avg:94.86ms +step:529/1695 train_time:50181ms step_avg:94.86ms +step:530/1695 train_time:50277ms step_avg:94.86ms +step:531/1695 train_time:50373ms step_avg:94.87ms +step:532/1695 train_time:50470ms step_avg:94.87ms +step:533/1695 train_time:50567ms step_avg:94.87ms +step:534/1695 train_time:50665ms step_avg:94.88ms +step:535/1695 train_time:50763ms step_avg:94.88ms +step:536/1695 train_time:50861ms step_avg:94.89ms +step:537/1695 train_time:50956ms step_avg:94.89ms +step:538/1695 train_time:51053ms step_avg:94.89ms +step:539/1695 train_time:51149ms step_avg:94.90ms +step:540/1695 train_time:51246ms step_avg:94.90ms +step:541/1695 train_time:51342ms step_avg:94.90ms +step:542/1695 train_time:51438ms step_avg:94.90ms +step:543/1695 train_time:51533ms step_avg:94.90ms +step:544/1695 train_time:51630ms step_avg:94.91ms +step:545/1695 train_time:51726ms step_avg:94.91ms +step:546/1695 train_time:51824ms step_avg:94.91ms +step:547/1695 train_time:51921ms step_avg:94.92ms +step:548/1695 train_time:52018ms step_avg:94.92ms +step:549/1695 train_time:52114ms step_avg:94.93ms +step:550/1695 train_time:52210ms step_avg:94.93ms +step:551/1695 train_time:52308ms step_avg:94.93ms +step:552/1695 train_time:52404ms step_avg:94.94ms +step:553/1695 train_time:52501ms step_avg:94.94ms +step:554/1695 train_time:52597ms step_avg:94.94ms +step:555/1695 train_time:52694ms step_avg:94.94ms +step:556/1695 train_time:52790ms step_avg:94.95ms +step:557/1695 train_time:52888ms step_avg:94.95ms +step:558/1695 train_time:52986ms step_avg:94.96ms +step:559/1695 train_time:53082ms step_avg:94.96ms +step:560/1695 train_time:53179ms step_avg:94.96ms +step:561/1695 train_time:53275ms step_avg:94.96ms +step:562/1695 train_time:53371ms step_avg:94.97ms +step:563/1695 train_time:53467ms 
step_avg:94.97ms +step:564/1695 train_time:53564ms step_avg:94.97ms +step:565/1695 train_time:53661ms step_avg:94.98ms +step:566/1695 train_time:53758ms step_avg:94.98ms +step:567/1695 train_time:53854ms step_avg:94.98ms +step:568/1695 train_time:53951ms step_avg:94.98ms +step:569/1695 train_time:54048ms step_avg:94.99ms +step:570/1695 train_time:54144ms step_avg:94.99ms +step:571/1695 train_time:54241ms step_avg:94.99ms +step:572/1695 train_time:54337ms step_avg:95.00ms +step:573/1695 train_time:54433ms step_avg:95.00ms +step:574/1695 train_time:54530ms step_avg:95.00ms +step:575/1695 train_time:54626ms step_avg:95.00ms +step:576/1695 train_time:54724ms step_avg:95.01ms +step:577/1695 train_time:54821ms step_avg:95.01ms +step:578/1695 train_time:54918ms step_avg:95.01ms +step:579/1695 train_time:55015ms step_avg:95.02ms +step:580/1695 train_time:55111ms step_avg:95.02ms +step:581/1695 train_time:55208ms step_avg:95.02ms +step:582/1695 train_time:55304ms step_avg:95.02ms +step:583/1695 train_time:55400ms step_avg:95.03ms +step:584/1695 train_time:55497ms step_avg:95.03ms +step:585/1695 train_time:55593ms step_avg:95.03ms +step:586/1695 train_time:55690ms step_avg:95.03ms +step:587/1695 train_time:55787ms step_avg:95.04ms +step:588/1695 train_time:55885ms step_avg:95.04ms +step:589/1695 train_time:55981ms step_avg:95.04ms +step:590/1695 train_time:56077ms step_avg:95.05ms +step:591/1695 train_time:56173ms step_avg:95.05ms +step:592/1695 train_time:56270ms step_avg:95.05ms +step:593/1695 train_time:56367ms step_avg:95.05ms +step:594/1695 train_time:56465ms step_avg:95.06ms +step:595/1695 train_time:56561ms step_avg:95.06ms +step:596/1695 train_time:56657ms step_avg:95.06ms +step:597/1695 train_time:56754ms step_avg:95.06ms +step:598/1695 train_time:56850ms step_avg:95.07ms +step:599/1695 train_time:56946ms step_avg:95.07ms +step:600/1695 train_time:57043ms step_avg:95.07ms +step:601/1695 train_time:57139ms step_avg:95.07ms +step:602/1695 train_time:57235ms step_avg:95.08ms +step:603/1695 train_time:57331ms step_avg:95.08ms +step:604/1695 train_time:57428ms step_avg:95.08ms +step:605/1695 train_time:57526ms step_avg:95.08ms +step:606/1695 train_time:57623ms step_avg:95.09ms +step:607/1695 train_time:57720ms step_avg:95.09ms +step:608/1695 train_time:57815ms step_avg:95.09ms +step:609/1695 train_time:57911ms step_avg:95.09ms +step:610/1695 train_time:58009ms step_avg:95.10ms +step:611/1695 train_time:58107ms step_avg:95.10ms +step:612/1695 train_time:58203ms step_avg:95.10ms +step:613/1695 train_time:58300ms step_avg:95.11ms +step:614/1695 train_time:58396ms step_avg:95.11ms +step:615/1695 train_time:58492ms step_avg:95.11ms +step:616/1695 train_time:58591ms step_avg:95.11ms +step:617/1695 train_time:58689ms step_avg:95.12ms +step:618/1695 train_time:58784ms step_avg:95.12ms +step:619/1695 train_time:58881ms step_avg:95.12ms +step:620/1695 train_time:58977ms step_avg:95.12ms +step:621/1695 train_time:59073ms step_avg:95.13ms +step:622/1695 train_time:59170ms step_avg:95.13ms +step:623/1695 train_time:59267ms step_avg:95.13ms +step:624/1695 train_time:59363ms step_avg:95.13ms +step:625/1695 train_time:59459ms step_avg:95.13ms +step:625/1695 val_loss:3.6477 train_time:59553ms step_avg:95.28ms +step:626/1695 train_time:59581ms step_avg:95.18ms +step:627/1695 train_time:59663ms step_avg:95.16ms +step:628/1695 train_time:59764ms step_avg:95.17ms +step:629/1695 train_time:60094ms step_avg:95.54ms +step:630/1695 train_time:60191ms step_avg:95.54ms +step:631/1695 train_time:60287ms step_avg:95.54ms 
+step:632/1695 train_time:60671ms step_avg:96.00ms +step:633/1695 train_time:60767ms step_avg:96.00ms +step:634/1695 train_time:60865ms step_avg:96.00ms +step:635/1695 train_time:61199ms step_avg:96.38ms +step:636/1695 train_time:61295ms step_avg:96.38ms +step:637/1695 train_time:61392ms step_avg:96.38ms +step:638/1695 train_time:61489ms step_avg:96.38ms +step:639/1695 train_time:61586ms step_avg:96.38ms +step:640/1695 train_time:61684ms step_avg:96.38ms +step:641/1695 train_time:61780ms step_avg:96.38ms +step:642/1695 train_time:61877ms step_avg:96.38ms +step:643/1695 train_time:61974ms step_avg:96.38ms +step:644/1695 train_time:62073ms step_avg:96.39ms +step:645/1695 train_time:62174ms step_avg:96.39ms +step:646/1695 train_time:62274ms step_avg:96.40ms +step:647/1695 train_time:62606ms step_avg:96.76ms +step:648/1695 train_time:62702ms step_avg:96.76ms +step:649/1695 train_time:62799ms step_avg:96.76ms +step:650/1695 train_time:62896ms step_avg:96.76ms +step:651/1695 train_time:62993ms step_avg:96.76ms +step:652/1695 train_time:63091ms step_avg:96.76ms +step:653/1695 train_time:63577ms step_avg:97.36ms +step:654/1695 train_time:63626ms step_avg:97.29ms +step:655/1695 train_time:63722ms step_avg:97.29ms +step:656/1695 train_time:63819ms step_avg:97.28ms +step:657/1695 train_time:63916ms step_avg:97.28ms +step:658/1695 train_time:64012ms step_avg:97.28ms +step:659/1695 train_time:64110ms step_avg:97.28ms +step:660/1695 train_time:64207ms step_avg:97.28ms +step:661/1695 train_time:64304ms step_avg:97.28ms +step:662/1695 train_time:64401ms step_avg:97.28ms +step:663/1695 train_time:64501ms step_avg:97.29ms +step:664/1695 train_time:64602ms step_avg:97.29ms +step:665/1695 train_time:64701ms step_avg:97.29ms +step:666/1695 train_time:64799ms step_avg:97.30ms +step:667/1695 train_time:64895ms step_avg:97.29ms +step:668/1695 train_time:64992ms step_avg:97.29ms +step:669/1695 train_time:65090ms step_avg:97.29ms +step:670/1695 train_time:65187ms step_avg:97.29ms +step:671/1695 train_time:65284ms step_avg:97.29ms +step:672/1695 train_time:65381ms step_avg:97.29ms +step:673/1695 train_time:65479ms step_avg:97.29ms +step:674/1695 train_time:65577ms step_avg:97.30ms +step:675/1695 train_time:65676ms step_avg:97.30ms +step:676/1695 train_time:65775ms step_avg:97.30ms +step:677/1695 train_time:65873ms step_avg:97.30ms +step:678/1695 train_time:65971ms step_avg:97.30ms +step:679/1695 train_time:66070ms step_avg:97.30ms +step:680/1695 train_time:66167ms step_avg:97.31ms +step:681/1695 train_time:66265ms step_avg:97.30ms +step:682/1695 train_time:66362ms step_avg:97.31ms +step:683/1695 train_time:66460ms step_avg:97.31ms +step:684/1695 train_time:66558ms step_avg:97.31ms +step:685/1695 train_time:66655ms step_avg:97.31ms +step:686/1695 train_time:66753ms step_avg:97.31ms +step:687/1695 train_time:66852ms step_avg:97.31ms +step:688/1695 train_time:66950ms step_avg:97.31ms +step:689/1695 train_time:67049ms step_avg:97.31ms +step:690/1695 train_time:67146ms step_avg:97.31ms +step:691/1695 train_time:67244ms step_avg:97.31ms +step:692/1695 train_time:67342ms step_avg:97.31ms +step:693/1695 train_time:67440ms step_avg:97.32ms +step:694/1695 train_time:67537ms step_avg:97.32ms +step:695/1695 train_time:67634ms step_avg:97.32ms +step:696/1695 train_time:67733ms step_avg:97.32ms +step:697/1695 train_time:67831ms step_avg:97.32ms +step:698/1695 train_time:67929ms step_avg:97.32ms +step:699/1695 train_time:68027ms step_avg:97.32ms +step:700/1695 train_time:68124ms step_avg:97.32ms +step:701/1695 train_time:68222ms 
step_avg:97.32ms +step:702/1695 train_time:68319ms step_avg:97.32ms +step:703/1695 train_time:68417ms step_avg:97.32ms +step:704/1695 train_time:68514ms step_avg:97.32ms +step:705/1695 train_time:68613ms step_avg:97.32ms +step:706/1695 train_time:68712ms step_avg:97.33ms +step:707/1695 train_time:68810ms step_avg:97.33ms +step:708/1695 train_time:68908ms step_avg:97.33ms +step:709/1695 train_time:69006ms step_avg:97.33ms +step:710/1695 train_time:69103ms step_avg:97.33ms +step:711/1695 train_time:69200ms step_avg:97.33ms +step:712/1695 train_time:69298ms step_avg:97.33ms +step:713/1695 train_time:69395ms step_avg:97.33ms +step:714/1695 train_time:69494ms step_avg:97.33ms +step:715/1695 train_time:69591ms step_avg:97.33ms +step:716/1695 train_time:69690ms step_avg:97.33ms +step:717/1695 train_time:69789ms step_avg:97.33ms +step:718/1695 train_time:69886ms step_avg:97.33ms +step:719/1695 train_time:69984ms step_avg:97.33ms +step:720/1695 train_time:70081ms step_avg:97.33ms +step:721/1695 train_time:70179ms step_avg:97.34ms +step:722/1695 train_time:70277ms step_avg:97.34ms +step:723/1695 train_time:70374ms step_avg:97.34ms +step:724/1695 train_time:70473ms step_avg:97.34ms +step:725/1695 train_time:70571ms step_avg:97.34ms +step:726/1695 train_time:70669ms step_avg:97.34ms +step:727/1695 train_time:70767ms step_avg:97.34ms +step:728/1695 train_time:70864ms step_avg:97.34ms +step:729/1695 train_time:70962ms step_avg:97.34ms +step:730/1695 train_time:71060ms step_avg:97.34ms +step:731/1695 train_time:71158ms step_avg:97.34ms +step:732/1695 train_time:71256ms step_avg:97.34ms +step:733/1695 train_time:71353ms step_avg:97.34ms +step:734/1695 train_time:71451ms step_avg:97.34ms +step:735/1695 train_time:71549ms step_avg:97.35ms +step:736/1695 train_time:71647ms step_avg:97.35ms +step:737/1695 train_time:71744ms step_avg:97.35ms +step:738/1695 train_time:71842ms step_avg:97.35ms +step:739/1695 train_time:71939ms step_avg:97.35ms +step:740/1695 train_time:72037ms step_avg:97.35ms +step:741/1695 train_time:72135ms step_avg:97.35ms +step:742/1695 train_time:72233ms step_avg:97.35ms +step:743/1695 train_time:72332ms step_avg:97.35ms +step:744/1695 train_time:72430ms step_avg:97.35ms +step:745/1695 train_time:72528ms step_avg:97.35ms +step:746/1695 train_time:72626ms step_avg:97.35ms +step:747/1695 train_time:72724ms step_avg:97.36ms +step:748/1695 train_time:72822ms step_avg:97.36ms +step:749/1695 train_time:72920ms step_avg:97.36ms +step:750/1695 train_time:73018ms step_avg:97.36ms +step:750/1695 val_loss:3.5859 train_time:73112ms step_avg:97.48ms +step:751/1695 train_time:73142ms step_avg:97.39ms +step:752/1695 train_time:73224ms step_avg:97.37ms +step:753/1695 train_time:73324ms step_avg:97.38ms +step:754/1695 train_time:73422ms step_avg:97.38ms +step:755/1695 train_time:73519ms step_avg:97.38ms +step:756/1695 train_time:73617ms step_avg:97.38ms +step:757/1695 train_time:73714ms step_avg:97.38ms +step:758/1695 train_time:73812ms step_avg:97.38ms +step:759/1695 train_time:73909ms step_avg:97.38ms +step:760/1695 train_time:74228ms step_avg:97.67ms +step:761/1695 train_time:74324ms step_avg:97.67ms +step:762/1695 train_time:74422ms step_avg:97.67ms +step:763/1695 train_time:74520ms step_avg:97.67ms +step:764/1695 train_time:74617ms step_avg:97.67ms +step:765/1695 train_time:74715ms step_avg:97.67ms +step:766/1695 train_time:74812ms step_avg:97.67ms +step:767/1695 train_time:74910ms step_avg:97.67ms +step:768/1695 train_time:75007ms step_avg:97.67ms +step:769/1695 train_time:75104ms step_avg:97.66ms 
+step:770/1695 train_time:75203ms step_avg:97.67ms +step:771/1695 train_time:75302ms step_avg:97.67ms +step:772/1695 train_time:75401ms step_avg:97.67ms +step:773/1695 train_time:75499ms step_avg:97.67ms +step:774/1695 train_time:75597ms step_avg:97.67ms +step:775/1695 train_time:75695ms step_avg:97.67ms +step:776/1695 train_time:75792ms step_avg:97.67ms +step:777/1695 train_time:75889ms step_avg:97.67ms +step:778/1695 train_time:75988ms step_avg:97.67ms +step:779/1695 train_time:76086ms step_avg:97.67ms +step:780/1695 train_time:76184ms step_avg:97.67ms +step:781/1695 train_time:76282ms step_avg:97.67ms +step:782/1695 train_time:76381ms step_avg:97.67ms +step:783/1695 train_time:76480ms step_avg:97.68ms +step:784/1695 train_time:76578ms step_avg:97.68ms +step:785/1695 train_time:76677ms step_avg:97.68ms +step:786/1695 train_time:76775ms step_avg:97.68ms +step:787/1695 train_time:76872ms step_avg:97.68ms +step:788/1695 train_time:76969ms step_avg:97.68ms +step:789/1695 train_time:77068ms step_avg:97.68ms +step:790/1695 train_time:77167ms step_avg:97.68ms +step:791/1695 train_time:77265ms step_avg:97.68ms +step:792/1695 train_time:77363ms step_avg:97.68ms +step:793/1695 train_time:77461ms step_avg:97.68ms +step:794/1695 train_time:77559ms step_avg:97.68ms +step:795/1695 train_time:77656ms step_avg:97.68ms +step:796/1695 train_time:77755ms step_avg:97.68ms +step:797/1695 train_time:78197ms step_avg:98.11ms +step:798/1695 train_time:78293ms step_avg:98.11ms +step:799/1695 train_time:78390ms step_avg:98.11ms +step:800/1695 train_time:78488ms step_avg:98.11ms +step:801/1695 train_time:78584ms step_avg:98.11ms +step:802/1695 train_time:78681ms step_avg:98.11ms +step:803/1695 train_time:78778ms step_avg:98.11ms +step:804/1695 train_time:78877ms step_avg:98.11ms +step:805/1695 train_time:78974ms step_avg:98.10ms +step:806/1695 train_time:79073ms step_avg:98.11ms +step:807/1695 train_time:79177ms step_avg:98.11ms +step:808/1695 train_time:79277ms step_avg:98.12ms +step:809/1695 train_time:79376ms step_avg:98.12ms +step:810/1695 train_time:79475ms step_avg:98.12ms +step:811/1695 train_time:79575ms step_avg:98.12ms +step:812/1695 train_time:79673ms step_avg:98.12ms +step:813/1695 train_time:79771ms step_avg:98.12ms +step:814/1695 train_time:79869ms step_avg:98.12ms +step:815/1695 train_time:79966ms step_avg:98.12ms +step:816/1695 train_time:80064ms step_avg:98.12ms +step:817/1695 train_time:80162ms step_avg:98.12ms +step:818/1695 train_time:80260ms step_avg:98.12ms +step:819/1695 train_time:80359ms step_avg:98.12ms +step:820/1695 train_time:80457ms step_avg:98.12ms +step:821/1695 train_time:80556ms step_avg:98.12ms +step:822/1695 train_time:80654ms step_avg:98.12ms +step:823/1695 train_time:80753ms step_avg:98.12ms +step:824/1695 train_time:80852ms step_avg:98.12ms +step:825/1695 train_time:80951ms step_avg:98.12ms +step:826/1695 train_time:81048ms step_avg:98.12ms +step:827/1695 train_time:81148ms step_avg:98.12ms +step:828/1695 train_time:81246ms step_avg:98.12ms +step:829/1695 train_time:81344ms step_avg:98.12ms +step:830/1695 train_time:81442ms step_avg:98.12ms +step:831/1695 train_time:81540ms step_avg:98.12ms +step:832/1695 train_time:81639ms step_avg:98.12ms +step:833/1695 train_time:81736ms step_avg:98.12ms +step:834/1695 train_time:81835ms step_avg:98.12ms +step:835/1695 train_time:81933ms step_avg:98.12ms +step:836/1695 train_time:82032ms step_avg:98.12ms +step:837/1695 train_time:82130ms step_avg:98.12ms +step:838/1695 train_time:82229ms step_avg:98.13ms +step:839/1695 train_time:82327ms 
step_avg:98.13ms +step:840/1695 train_time:82425ms step_avg:98.13ms +step:841/1695 train_time:82522ms step_avg:98.12ms +step:842/1695 train_time:82620ms step_avg:98.12ms +step:843/1695 train_time:82718ms step_avg:98.12ms +step:844/1695 train_time:82816ms step_avg:98.12ms +step:845/1695 train_time:82915ms step_avg:98.12ms +step:846/1695 train_time:83013ms step_avg:98.12ms +step:847/1695 train_time:83112ms step_avg:98.13ms +step:848/1695 train_time:83211ms step_avg:98.13ms +step:849/1695 train_time:83309ms step_avg:98.13ms +step:850/1695 train_time:83408ms step_avg:98.13ms +step:851/1695 train_time:83507ms step_avg:98.13ms +step:852/1695 train_time:83605ms step_avg:98.13ms +step:853/1695 train_time:83703ms step_avg:98.13ms +step:854/1695 train_time:83801ms step_avg:98.13ms +step:855/1695 train_time:83898ms step_avg:98.13ms +step:856/1695 train_time:83997ms step_avg:98.13ms +step:857/1695 train_time:84095ms step_avg:98.13ms +step:858/1695 train_time:84194ms step_avg:98.13ms +step:859/1695 train_time:84293ms step_avg:98.13ms +step:860/1695 train_time:84393ms step_avg:98.13ms +step:861/1695 train_time:84492ms step_avg:98.13ms +step:862/1695 train_time:84591ms step_avg:98.13ms +step:863/1695 train_time:84690ms step_avg:98.13ms +step:864/1695 train_time:84789ms step_avg:98.14ms +step:865/1695 train_time:84888ms step_avg:98.14ms +step:866/1695 train_time:84987ms step_avg:98.14ms +step:867/1695 train_time:85085ms step_avg:98.14ms +step:868/1695 train_time:85182ms step_avg:98.14ms +step:869/1695 train_time:85280ms step_avg:98.14ms +step:870/1695 train_time:85379ms step_avg:98.14ms +step:871/1695 train_time:85479ms step_avg:98.14ms +step:872/1695 train_time:85578ms step_avg:98.14ms +step:873/1695 train_time:85678ms step_avg:98.14ms +step:874/1695 train_time:85777ms step_avg:98.14ms +step:875/1695 train_time:85876ms step_avg:98.14ms +step:875/1695 val_loss:3.5367 train_time:85973ms step_avg:98.25ms +step:876/1695 train_time:86005ms step_avg:98.18ms +step:877/1695 train_time:86083ms step_avg:98.16ms +step:878/1695 train_time:86183ms step_avg:98.16ms +step:879/1695 train_time:86281ms step_avg:98.16ms +step:880/1695 train_time:86380ms step_avg:98.16ms +step:881/1695 train_time:86479ms step_avg:98.16ms +step:882/1695 train_time:86578ms step_avg:98.16ms +step:883/1695 train_time:86677ms step_avg:98.16ms +step:884/1695 train_time:86776ms step_avg:98.16ms +step:885/1695 train_time:86875ms step_avg:98.16ms +step:886/1695 train_time:86975ms step_avg:98.17ms +step:887/1695 train_time:87077ms step_avg:98.17ms +step:888/1695 train_time:87178ms step_avg:98.17ms +step:889/1695 train_time:87278ms step_avg:98.18ms +step:890/1695 train_time:87378ms step_avg:98.18ms +step:891/1695 train_time:87478ms step_avg:98.18ms +step:892/1695 train_time:87577ms step_avg:98.18ms +step:893/1695 train_time:87677ms step_avg:98.18ms +step:894/1695 train_time:87775ms step_avg:98.18ms +step:895/1695 train_time:87874ms step_avg:98.18ms +step:896/1695 train_time:87974ms step_avg:98.19ms +step:897/1695 train_time:88074ms step_avg:98.19ms +step:898/1695 train_time:88174ms step_avg:98.19ms +step:899/1695 train_time:88275ms step_avg:98.19ms +step:900/1695 train_time:88376ms step_avg:98.20ms +step:901/1695 train_time:88476ms step_avg:98.20ms +step:902/1695 train_time:88576ms step_avg:98.20ms +step:903/1695 train_time:88676ms step_avg:98.20ms +step:904/1695 train_time:88776ms step_avg:98.20ms +step:905/1695 train_time:88875ms step_avg:98.20ms +step:906/1695 train_time:88975ms step_avg:98.21ms +step:907/1695 train_time:89076ms step_avg:98.21ms 
+step:908/1695 train_time:89177ms step_avg:98.21ms +step:909/1695 train_time:89276ms step_avg:98.21ms +step:910/1695 train_time:89376ms step_avg:98.22ms +step:911/1695 train_time:89476ms step_avg:98.22ms +step:912/1695 train_time:89575ms step_avg:98.22ms +step:913/1695 train_time:89675ms step_avg:98.22ms +step:914/1695 train_time:89774ms step_avg:98.22ms +step:915/1695 train_time:89873ms step_avg:98.22ms +step:916/1695 train_time:89973ms step_avg:98.22ms +step:917/1695 train_time:90074ms step_avg:98.23ms +step:918/1695 train_time:90174ms step_avg:98.23ms +step:919/1695 train_time:90275ms step_avg:98.23ms +step:920/1695 train_time:90377ms step_avg:98.24ms +step:921/1695 train_time:90477ms step_avg:98.24ms +step:922/1695 train_time:90577ms step_avg:98.24ms +step:923/1695 train_time:90677ms step_avg:98.24ms +step:924/1695 train_time:90776ms step_avg:98.24ms +step:925/1695 train_time:90876ms step_avg:98.24ms +step:926/1695 train_time:90976ms step_avg:98.25ms +step:927/1695 train_time:91077ms step_avg:98.25ms +step:928/1695 train_time:91177ms step_avg:98.25ms +step:929/1695 train_time:91278ms step_avg:98.25ms +step:930/1695 train_time:91378ms step_avg:98.26ms +step:931/1695 train_time:91478ms step_avg:98.26ms +step:932/1695 train_time:91578ms step_avg:98.26ms +step:933/1695 train_time:91677ms step_avg:98.26ms +step:934/1695 train_time:91776ms step_avg:98.26ms +step:935/1695 train_time:91876ms step_avg:98.26ms +step:936/1695 train_time:91975ms step_avg:98.26ms +step:937/1695 train_time:92075ms step_avg:98.27ms +step:938/1695 train_time:92175ms step_avg:98.27ms +step:939/1695 train_time:92276ms step_avg:98.27ms +step:940/1695 train_time:92376ms step_avg:98.27ms +step:941/1695 train_time:92477ms step_avg:98.28ms +step:942/1695 train_time:92576ms step_avg:98.28ms +step:943/1695 train_time:92677ms step_avg:98.28ms +step:944/1695 train_time:92777ms step_avg:98.28ms +step:945/1695 train_time:92877ms step_avg:98.28ms +step:946/1695 train_time:92976ms step_avg:98.28ms +step:947/1695 train_time:93076ms step_avg:98.29ms +step:948/1695 train_time:93176ms step_avg:98.29ms +step:949/1695 train_time:93276ms step_avg:98.29ms +step:950/1695 train_time:93376ms step_avg:98.29ms +step:951/1695 train_time:93476ms step_avg:98.29ms +step:952/1695 train_time:93576ms step_avg:98.29ms +step:953/1695 train_time:93676ms step_avg:98.30ms +step:954/1695 train_time:93776ms step_avg:98.30ms +step:955/1695 train_time:93876ms step_avg:98.30ms +step:956/1695 train_time:93975ms step_avg:98.30ms +step:957/1695 train_time:94076ms step_avg:98.30ms +step:958/1695 train_time:94176ms step_avg:98.30ms +step:959/1695 train_time:94275ms step_avg:98.31ms +step:960/1695 train_time:94374ms step_avg:98.31ms +step:961/1695 train_time:94475ms step_avg:98.31ms +step:962/1695 train_time:94576ms step_avg:98.31ms +step:963/1695 train_time:94677ms step_avg:98.31ms +step:964/1695 train_time:94777ms step_avg:98.32ms +step:965/1695 train_time:94878ms step_avg:98.32ms +step:966/1695 train_time:94978ms step_avg:98.32ms +step:967/1695 train_time:95078ms step_avg:98.32ms +step:968/1695 train_time:95178ms step_avg:98.32ms +step:969/1695 train_time:95278ms step_avg:98.33ms +step:970/1695 train_time:95377ms step_avg:98.33ms +step:971/1695 train_time:95477ms step_avg:98.33ms +step:972/1695 train_time:95577ms step_avg:98.33ms +step:973/1695 train_time:95677ms step_avg:98.33ms +step:974/1695 train_time:95777ms step_avg:98.33ms +step:975/1695 train_time:95877ms step_avg:98.34ms +step:976/1695 train_time:95977ms step_avg:98.34ms +step:977/1695 train_time:96077ms 
step_avg:98.34ms +step:978/1695 train_time:96176ms step_avg:98.34ms +step:979/1695 train_time:96277ms step_avg:98.34ms +step:980/1695 train_time:96377ms step_avg:98.34ms +step:981/1695 train_time:96477ms step_avg:98.35ms +step:982/1695 train_time:96577ms step_avg:98.35ms +step:983/1695 train_time:96678ms step_avg:98.35ms +step:984/1695 train_time:96778ms step_avg:98.35ms +step:985/1695 train_time:96877ms step_avg:98.35ms +step:986/1695 train_time:96978ms step_avg:98.35ms +step:987/1695 train_time:97078ms step_avg:98.36ms +step:988/1695 train_time:97178ms step_avg:98.36ms +step:989/1695 train_time:97277ms step_avg:98.36ms +step:990/1695 train_time:97377ms step_avg:98.36ms +step:991/1695 train_time:97477ms step_avg:98.36ms +step:992/1695 train_time:97576ms step_avg:98.36ms +step:993/1695 train_time:97676ms step_avg:98.37ms +step:994/1695 train_time:97776ms step_avg:98.37ms +step:995/1695 train_time:97877ms step_avg:98.37ms +step:996/1695 train_time:97976ms step_avg:98.37ms +step:997/1695 train_time:98077ms step_avg:98.37ms +step:998/1695 train_time:98176ms step_avg:98.37ms +step:999/1695 train_time:98277ms step_avg:98.37ms +step:1000/1695 train_time:98376ms step_avg:98.38ms +step:1000/1695 val_loss:3.4912 train_time:98474ms step_avg:98.47ms +step:1001/1695 train_time:98503ms step_avg:98.40ms +step:1002/1695 train_time:98585ms step_avg:98.39ms +step:1003/1695 train_time:98687ms step_avg:98.39ms +step:1004/1695 train_time:98788ms step_avg:98.39ms +step:1005/1695 train_time:98887ms step_avg:98.40ms +step:1006/1695 train_time:98986ms step_avg:98.40ms +step:1007/1695 train_time:99085ms step_avg:98.40ms +step:1008/1695 train_time:99184ms step_avg:98.40ms +step:1009/1695 train_time:99283ms step_avg:98.40ms +step:1010/1695 train_time:99382ms step_avg:98.40ms +step:1011/1695 train_time:99483ms step_avg:98.40ms +step:1012/1695 train_time:99587ms step_avg:98.41ms +step:1013/1695 train_time:99689ms step_avg:98.41ms +step:1014/1695 train_time:99789ms step_avg:98.41ms +step:1015/1695 train_time:99888ms step_avg:98.41ms +step:1016/1695 train_time:99987ms step_avg:98.41ms +step:1017/1695 train_time:100087ms step_avg:98.41ms +step:1018/1695 train_time:100185ms step_avg:98.41ms +step:1019/1695 train_time:100284ms step_avg:98.41ms +step:1020/1695 train_time:100384ms step_avg:98.42ms +step:1021/1695 train_time:100485ms step_avg:98.42ms +step:1022/1695 train_time:100586ms step_avg:98.42ms +step:1023/1695 train_time:100687ms step_avg:98.42ms +step:1024/1695 train_time:100790ms step_avg:98.43ms +step:1025/1695 train_time:100891ms step_avg:98.43ms +step:1026/1695 train_time:100991ms step_avg:98.43ms +step:1027/1695 train_time:101090ms step_avg:98.43ms +step:1028/1695 train_time:101189ms step_avg:98.43ms +step:1029/1695 train_time:101290ms step_avg:98.44ms +step:1030/1695 train_time:101388ms step_avg:98.44ms +step:1031/1695 train_time:101488ms step_avg:98.44ms +step:1032/1695 train_time:101588ms step_avg:98.44ms +step:1033/1695 train_time:101688ms step_avg:98.44ms +step:1034/1695 train_time:101788ms step_avg:98.44ms +step:1035/1695 train_time:101888ms step_avg:98.44ms +step:1036/1695 train_time:101987ms step_avg:98.44ms +step:1037/1695 train_time:102088ms step_avg:98.45ms +step:1038/1695 train_time:102187ms step_avg:98.45ms +step:1039/1695 train_time:102286ms step_avg:98.45ms +step:1040/1695 train_time:102386ms step_avg:98.45ms +step:1041/1695 train_time:102487ms step_avg:98.45ms +step:1042/1695 train_time:102587ms step_avg:98.45ms +step:1043/1695 train_time:102689ms step_avg:98.46ms +step:1044/1695 
train_time:102789ms step_avg:98.46ms +step:1045/1695 train_time:102888ms step_avg:98.46ms +step:1046/1695 train_time:102989ms step_avg:98.46ms +step:1047/1695 train_time:103088ms step_avg:98.46ms +step:1048/1695 train_time:103188ms step_avg:98.46ms +step:1049/1695 train_time:103287ms step_avg:98.46ms +step:1050/1695 train_time:103386ms step_avg:98.46ms +step:1051/1695 train_time:103486ms step_avg:98.46ms +step:1052/1695 train_time:103587ms step_avg:98.47ms +step:1053/1695 train_time:103688ms step_avg:98.47ms +step:1054/1695 train_time:103788ms step_avg:98.47ms +step:1055/1695 train_time:103889ms step_avg:98.47ms +step:1056/1695 train_time:103989ms step_avg:98.47ms +step:1057/1695 train_time:104088ms step_avg:98.48ms +step:1058/1695 train_time:104188ms step_avg:98.48ms +step:1059/1695 train_time:104286ms step_avg:98.48ms +step:1060/1695 train_time:104386ms step_avg:98.48ms +step:1061/1695 train_time:104485ms step_avg:98.48ms +step:1062/1695 train_time:104585ms step_avg:98.48ms +step:1063/1695 train_time:104686ms step_avg:98.48ms +step:1064/1695 train_time:104787ms step_avg:98.48ms +step:1065/1695 train_time:104888ms step_avg:98.49ms +step:1066/1695 train_time:104988ms step_avg:98.49ms +step:1067/1695 train_time:105088ms step_avg:98.49ms +step:1068/1695 train_time:105188ms step_avg:98.49ms +step:1069/1695 train_time:105287ms step_avg:98.49ms +step:1070/1695 train_time:105388ms step_avg:98.49ms +step:1071/1695 train_time:105488ms step_avg:98.49ms +step:1072/1695 train_time:105588ms step_avg:98.50ms +step:1073/1695 train_time:105687ms step_avg:98.50ms +step:1074/1695 train_time:105787ms step_avg:98.50ms +step:1075/1695 train_time:105887ms step_avg:98.50ms +step:1076/1695 train_time:105987ms step_avg:98.50ms +step:1077/1695 train_time:106087ms step_avg:98.50ms +step:1078/1695 train_time:106188ms step_avg:98.50ms +step:1079/1695 train_time:106288ms step_avg:98.51ms +step:1080/1695 train_time:106387ms step_avg:98.51ms +step:1081/1695 train_time:106487ms step_avg:98.51ms +step:1082/1695 train_time:106587ms step_avg:98.51ms +step:1083/1695 train_time:106687ms step_avg:98.51ms +step:1084/1695 train_time:106787ms step_avg:98.51ms +step:1085/1695 train_time:106887ms step_avg:98.51ms +step:1086/1695 train_time:106988ms step_avg:98.52ms +step:1087/1695 train_time:107088ms step_avg:98.52ms +step:1088/1695 train_time:107188ms step_avg:98.52ms +step:1089/1695 train_time:107288ms step_avg:98.52ms +step:1090/1695 train_time:107388ms step_avg:98.52ms +step:1091/1695 train_time:107488ms step_avg:98.52ms +step:1092/1695 train_time:107588ms step_avg:98.52ms +step:1093/1695 train_time:107687ms step_avg:98.52ms +step:1094/1695 train_time:107787ms step_avg:98.53ms +step:1095/1695 train_time:107887ms step_avg:98.53ms +step:1096/1695 train_time:107987ms step_avg:98.53ms +step:1097/1695 train_time:108087ms step_avg:98.53ms +step:1098/1695 train_time:108187ms step_avg:98.53ms +step:1099/1695 train_time:108287ms step_avg:98.53ms +step:1100/1695 train_time:108387ms step_avg:98.53ms +step:1101/1695 train_time:108486ms step_avg:98.53ms +step:1102/1695 train_time:108587ms step_avg:98.54ms +step:1103/1695 train_time:108686ms step_avg:98.54ms +step:1104/1695 train_time:108786ms step_avg:98.54ms +step:1105/1695 train_time:108886ms step_avg:98.54ms +step:1106/1695 train_time:108986ms step_avg:98.54ms +step:1107/1695 train_time:109086ms step_avg:98.54ms +step:1108/1695 train_time:109187ms step_avg:98.54ms +step:1109/1695 train_time:109286ms step_avg:98.54ms +step:1110/1695 train_time:109387ms step_avg:98.55ms +step:1111/1695 
train_time:109487ms step_avg:98.55ms +step:1112/1695 train_time:109587ms step_avg:98.55ms +step:1113/1695 train_time:109687ms step_avg:98.55ms +step:1114/1695 train_time:109787ms step_avg:98.55ms +step:1115/1695 train_time:109887ms step_avg:98.55ms +step:1116/1695 train_time:109988ms step_avg:98.56ms +step:1117/1695 train_time:110089ms step_avg:98.56ms +step:1118/1695 train_time:110189ms step_avg:98.56ms +step:1119/1695 train_time:110288ms step_avg:98.56ms +step:1120/1695 train_time:110388ms step_avg:98.56ms +step:1121/1695 train_time:110488ms step_avg:98.56ms +step:1122/1695 train_time:110588ms step_avg:98.56ms +step:1123/1695 train_time:110688ms step_avg:98.56ms +step:1124/1695 train_time:110787ms step_avg:98.56ms +step:1125/1695 train_time:110886ms step_avg:98.57ms +step:1125/1695 val_loss:3.4413 train_time:110983ms step_avg:98.65ms +step:1126/1695 train_time:111013ms step_avg:98.59ms +step:1127/1695 train_time:111097ms step_avg:98.58ms +step:1128/1695 train_time:111197ms step_avg:98.58ms +step:1129/1695 train_time:111297ms step_avg:98.58ms +step:1130/1695 train_time:111397ms step_avg:98.58ms +step:1131/1695 train_time:111496ms step_avg:98.58ms +step:1132/1695 train_time:111596ms step_avg:98.58ms +step:1133/1695 train_time:111696ms step_avg:98.58ms +step:1134/1695 train_time:111795ms step_avg:98.58ms +step:1135/1695 train_time:111896ms step_avg:98.59ms +step:1136/1695 train_time:111998ms step_avg:98.59ms +step:1137/1695 train_time:112100ms step_avg:98.59ms +step:1138/1695 train_time:112202ms step_avg:98.60ms +step:1139/1695 train_time:112302ms step_avg:98.60ms +step:1140/1695 train_time:112401ms step_avg:98.60ms +step:1141/1695 train_time:112501ms step_avg:98.60ms +step:1142/1695 train_time:112602ms step_avg:98.60ms +step:1143/1695 train_time:112702ms step_avg:98.60ms +step:1144/1695 train_time:112803ms step_avg:98.60ms +step:1145/1695 train_time:112905ms step_avg:98.61ms +step:1146/1695 train_time:113007ms step_avg:98.61ms +step:1147/1695 train_time:113109ms step_avg:98.61ms +step:1148/1695 train_time:113210ms step_avg:98.62ms +step:1149/1695 train_time:113311ms step_avg:98.62ms +step:1150/1695 train_time:113413ms step_avg:98.62ms +step:1151/1695 train_time:113515ms step_avg:98.62ms +step:1152/1695 train_time:113616ms step_avg:98.62ms +step:1153/1695 train_time:113717ms step_avg:98.63ms +step:1154/1695 train_time:113819ms step_avg:98.63ms +step:1155/1695 train_time:113920ms step_avg:98.63ms +step:1156/1695 train_time:114020ms step_avg:98.63ms +step:1157/1695 train_time:114121ms step_avg:98.64ms +step:1158/1695 train_time:114220ms step_avg:98.64ms +step:1159/1695 train_time:114320ms step_avg:98.64ms +step:1160/1695 train_time:114420ms step_avg:98.64ms +step:1161/1695 train_time:114520ms step_avg:98.64ms +step:1162/1695 train_time:114620ms step_avg:98.64ms +step:1163/1695 train_time:114722ms step_avg:98.64ms +step:1164/1695 train_time:114822ms step_avg:98.64ms +step:1165/1695 train_time:114923ms step_avg:98.65ms +step:1166/1695 train_time:115023ms step_avg:98.65ms +step:1167/1695 train_time:115123ms step_avg:98.65ms +step:1168/1695 train_time:115224ms step_avg:98.65ms +step:1169/1695 train_time:115323ms step_avg:98.65ms +step:1170/1695 train_time:115425ms step_avg:98.65ms +step:1171/1695 train_time:115526ms step_avg:98.66ms +step:1172/1695 train_time:115629ms step_avg:98.66ms +step:1173/1695 train_time:115730ms step_avg:98.66ms +step:1174/1695 train_time:115831ms step_avg:98.66ms +step:1175/1695 train_time:115932ms step_avg:98.67ms +step:1176/1695 train_time:116034ms step_avg:98.67ms 
+step:1177/1695 train_time:116135ms step_avg:98.67ms +step:1178/1695 train_time:116236ms step_avg:98.67ms +step:1179/1695 train_time:116340ms step_avg:98.68ms +step:1180/1695 train_time:116439ms step_avg:98.68ms +step:1181/1695 train_time:116541ms step_avg:98.68ms +step:1182/1695 train_time:116641ms step_avg:98.68ms +step:1183/1695 train_time:116740ms step_avg:98.68ms +step:1184/1695 train_time:116843ms step_avg:98.69ms +step:1185/1695 train_time:116946ms step_avg:98.69ms +step:1186/1695 train_time:117047ms step_avg:98.69ms +step:1187/1695 train_time:117150ms step_avg:98.69ms +step:1188/1695 train_time:117252ms step_avg:98.70ms +step:1189/1695 train_time:117352ms step_avg:98.70ms +step:1190/1695 train_time:117453ms step_avg:98.70ms +step:1191/1695 train_time:117553ms step_avg:98.70ms +step:1192/1695 train_time:117655ms step_avg:98.70ms +step:1193/1695 train_time:117757ms step_avg:98.71ms +step:1194/1695 train_time:117858ms step_avg:98.71ms +step:1195/1695 train_time:117958ms step_avg:98.71ms +step:1196/1695 train_time:118059ms step_avg:98.71ms +step:1197/1695 train_time:118160ms step_avg:98.71ms +step:1198/1695 train_time:118260ms step_avg:98.71ms +step:1199/1695 train_time:118360ms step_avg:98.72ms +step:1200/1695 train_time:118460ms step_avg:98.72ms +step:1201/1695 train_time:118561ms step_avg:98.72ms +step:1202/1695 train_time:118662ms step_avg:98.72ms +step:1203/1695 train_time:118764ms step_avg:98.72ms +step:1204/1695 train_time:118865ms step_avg:98.72ms +step:1205/1695 train_time:118965ms step_avg:98.73ms +step:1206/1695 train_time:119066ms step_avg:98.73ms +step:1207/1695 train_time:119168ms step_avg:98.73ms +step:1208/1695 train_time:119269ms step_avg:98.73ms +step:1209/1695 train_time:119371ms step_avg:98.74ms +step:1210/1695 train_time:119473ms step_avg:98.74ms +step:1211/1695 train_time:119574ms step_avg:98.74ms +step:1212/1695 train_time:119675ms step_avg:98.74ms +step:1213/1695 train_time:119776ms step_avg:98.74ms +step:1214/1695 train_time:119877ms step_avg:98.75ms +step:1215/1695 train_time:119978ms step_avg:98.75ms +step:1216/1695 train_time:120079ms step_avg:98.75ms +step:1217/1695 train_time:120181ms step_avg:98.75ms +step:1218/1695 train_time:120281ms step_avg:98.75ms +step:1219/1695 train_time:120381ms step_avg:98.75ms +step:1220/1695 train_time:120482ms step_avg:98.76ms +step:1221/1695 train_time:120583ms step_avg:98.76ms +step:1222/1695 train_time:120685ms step_avg:98.76ms +step:1223/1695 train_time:120786ms step_avg:98.76ms +step:1224/1695 train_time:120887ms step_avg:98.76ms +step:1225/1695 train_time:120989ms step_avg:98.77ms +step:1226/1695 train_time:121090ms step_avg:98.77ms +step:1227/1695 train_time:121192ms step_avg:98.77ms +step:1228/1695 train_time:121293ms step_avg:98.77ms +step:1229/1695 train_time:121394ms step_avg:98.77ms +step:1230/1695 train_time:121496ms step_avg:98.78ms +step:1231/1695 train_time:121597ms step_avg:98.78ms +step:1232/1695 train_time:121698ms step_avg:98.78ms +step:1233/1695 train_time:121798ms step_avg:98.78ms +step:1234/1695 train_time:121901ms step_avg:98.79ms +step:1235/1695 train_time:122000ms step_avg:98.79ms +step:1236/1695 train_time:122101ms step_avg:98.79ms +step:1237/1695 train_time:122201ms step_avg:98.79ms +step:1238/1695 train_time:122302ms step_avg:98.79ms +step:1239/1695 train_time:122404ms step_avg:98.79ms +step:1240/1695 train_time:122506ms step_avg:98.80ms +step:1241/1695 train_time:122606ms step_avg:98.80ms +step:1242/1695 train_time:122708ms step_avg:98.80ms +step:1243/1695 train_time:122810ms step_avg:98.80ms 
+step:1244/1695 train_time:122912ms step_avg:98.80ms +step:1245/1695 train_time:123013ms step_avg:98.81ms +step:1246/1695 train_time:123115ms step_avg:98.81ms +step:1247/1695 train_time:123217ms step_avg:98.81ms +step:1248/1695 train_time:123319ms step_avg:98.81ms +step:1249/1695 train_time:123419ms step_avg:98.81ms +step:1250/1695 train_time:123519ms step_avg:98.82ms +step:1250/1695 val_loss:3.3953 train_time:123617ms step_avg:98.89ms +step:1251/1695 train_time:123646ms step_avg:98.84ms +step:1252/1695 train_time:123729ms step_avg:98.82ms +step:1253/1695 train_time:123830ms step_avg:98.83ms +step:1254/1695 train_time:123932ms step_avg:98.83ms +step:1255/1695 train_time:124033ms step_avg:98.83ms +step:1256/1695 train_time:124134ms step_avg:98.83ms +step:1257/1695 train_time:124234ms step_avg:98.83ms +step:1258/1695 train_time:124335ms step_avg:98.84ms +step:1259/1695 train_time:124435ms step_avg:98.84ms +step:1260/1695 train_time:124536ms step_avg:98.84ms +step:1261/1695 train_time:124638ms step_avg:98.84ms +step:1262/1695 train_time:124743ms step_avg:98.85ms +step:1263/1695 train_time:124844ms step_avg:98.85ms +step:1264/1695 train_time:124944ms step_avg:98.85ms +step:1265/1695 train_time:125045ms step_avg:98.85ms +step:1266/1695 train_time:125146ms step_avg:98.85ms +step:1267/1695 train_time:125246ms step_avg:98.85ms +step:1268/1695 train_time:125347ms step_avg:98.85ms +step:1269/1695 train_time:125447ms step_avg:98.86ms +step:1270/1695 train_time:125548ms step_avg:98.86ms +step:1271/1695 train_time:125649ms step_avg:98.86ms +step:1272/1695 train_time:125751ms step_avg:98.86ms +step:1273/1695 train_time:125852ms step_avg:98.86ms +step:1274/1695 train_time:125953ms step_avg:98.86ms +step:1275/1695 train_time:126055ms step_avg:98.87ms +step:1276/1695 train_time:126157ms step_avg:98.87ms +step:1277/1695 train_time:126259ms step_avg:98.87ms +step:1278/1695 train_time:126360ms step_avg:98.87ms +step:1279/1695 train_time:126461ms step_avg:98.88ms +step:1280/1695 train_time:126563ms step_avg:98.88ms +step:1281/1695 train_time:126665ms step_avg:98.88ms +step:1282/1695 train_time:126766ms step_avg:98.88ms +step:1283/1695 train_time:126867ms step_avg:98.88ms +step:1284/1695 train_time:126967ms step_avg:98.88ms +step:1285/1695 train_time:127067ms step_avg:98.88ms +step:1286/1695 train_time:127167ms step_avg:98.89ms +step:1287/1695 train_time:127267ms step_avg:98.89ms +step:1288/1695 train_time:127367ms step_avg:98.89ms +step:1289/1695 train_time:127468ms step_avg:98.89ms +step:1290/1695 train_time:127569ms step_avg:98.89ms +step:1291/1695 train_time:127670ms step_avg:98.89ms +step:1292/1695 train_time:127770ms step_avg:98.89ms +step:1293/1695 train_time:127871ms step_avg:98.89ms +step:1294/1695 train_time:127973ms step_avg:98.90ms +step:1295/1695 train_time:128074ms step_avg:98.90ms +step:1296/1695 train_time:128174ms step_avg:98.90ms +step:1297/1695 train_time:128276ms step_avg:98.90ms +step:1298/1695 train_time:128376ms step_avg:98.90ms +step:1299/1695 train_time:128478ms step_avg:98.90ms +step:1300/1695 train_time:128581ms step_avg:98.91ms +step:1301/1695 train_time:128682ms step_avg:98.91ms +step:1302/1695 train_time:128784ms step_avg:98.91ms +step:1303/1695 train_time:128885ms step_avg:98.91ms +step:1304/1695 train_time:128985ms step_avg:98.92ms +step:1305/1695 train_time:129086ms step_avg:98.92ms +step:1306/1695 train_time:129187ms step_avg:98.92ms +step:1307/1695 train_time:129286ms step_avg:98.92ms +step:1308/1695 train_time:129386ms step_avg:98.92ms +step:1309/1695 train_time:129486ms 
step_avg:98.92ms +step:1310/1695 train_time:129586ms step_avg:98.92ms +step:1311/1695 train_time:129688ms step_avg:98.92ms +step:1312/1695 train_time:129789ms step_avg:98.92ms +step:1313/1695 train_time:129889ms step_avg:98.93ms +step:1314/1695 train_time:129990ms step_avg:98.93ms +step:1315/1695 train_time:130091ms step_avg:98.93ms +step:1316/1695 train_time:130192ms step_avg:98.93ms +step:1317/1695 train_time:130293ms step_avg:98.93ms +step:1318/1695 train_time:130393ms step_avg:98.93ms +step:1319/1695 train_time:130494ms step_avg:98.93ms +step:1320/1695 train_time:130597ms step_avg:98.94ms +step:1321/1695 train_time:130698ms step_avg:98.94ms +step:1322/1695 train_time:130800ms step_avg:98.94ms +step:1323/1695 train_time:130902ms step_avg:98.94ms +step:1324/1695 train_time:131003ms step_avg:98.95ms +step:1325/1695 train_time:131104ms step_avg:98.95ms +step:1326/1695 train_time:131206ms step_avg:98.95ms +step:1327/1695 train_time:131307ms step_avg:98.95ms +step:1328/1695 train_time:131406ms step_avg:98.95ms +step:1329/1695 train_time:131507ms step_avg:98.95ms +step:1330/1695 train_time:131607ms step_avg:98.95ms +step:1331/1695 train_time:131708ms step_avg:98.95ms +step:1332/1695 train_time:131808ms step_avg:98.95ms +step:1333/1695 train_time:131910ms step_avg:98.96ms +step:1334/1695 train_time:132013ms step_avg:98.96ms +step:1335/1695 train_time:132114ms step_avg:98.96ms +step:1336/1695 train_time:132215ms step_avg:98.96ms +step:1337/1695 train_time:132318ms step_avg:98.97ms +step:1338/1695 train_time:132419ms step_avg:98.97ms +step:1339/1695 train_time:132520ms step_avg:98.97ms +step:1340/1695 train_time:132621ms step_avg:98.97ms +step:1341/1695 train_time:132723ms step_avg:98.97ms +step:1342/1695 train_time:132825ms step_avg:98.98ms +step:1343/1695 train_time:132926ms step_avg:98.98ms +step:1344/1695 train_time:133026ms step_avg:98.98ms +step:1345/1695 train_time:133127ms step_avg:98.98ms +step:1346/1695 train_time:133228ms step_avg:98.98ms +step:1347/1695 train_time:133328ms step_avg:98.98ms +step:1348/1695 train_time:133429ms step_avg:98.98ms +step:1349/1695 train_time:133529ms step_avg:98.98ms +step:1350/1695 train_time:133631ms step_avg:98.99ms +step:1351/1695 train_time:133732ms step_avg:98.99ms +step:1352/1695 train_time:133833ms step_avg:98.99ms +step:1353/1695 train_time:133935ms step_avg:98.99ms +step:1354/1695 train_time:134036ms step_avg:98.99ms +step:1355/1695 train_time:134139ms step_avg:99.00ms +step:1356/1695 train_time:134242ms step_avg:99.00ms +step:1357/1695 train_time:134343ms step_avg:99.00ms +step:1358/1695 train_time:134444ms step_avg:99.00ms +step:1359/1695 train_time:134545ms step_avg:99.00ms +step:1360/1695 train_time:134645ms step_avg:99.00ms +step:1361/1695 train_time:134747ms step_avg:99.01ms +step:1362/1695 train_time:134847ms step_avg:99.01ms +step:1363/1695 train_time:134948ms step_avg:99.01ms +step:1364/1695 train_time:135049ms step_avg:99.01ms +step:1365/1695 train_time:135151ms step_avg:99.01ms +step:1366/1695 train_time:135253ms step_avg:99.01ms +step:1367/1695 train_time:135354ms step_avg:99.02ms +step:1368/1695 train_time:135455ms step_avg:99.02ms +step:1369/1695 train_time:135556ms step_avg:99.02ms +step:1370/1695 train_time:135658ms step_avg:99.02ms +step:1371/1695 train_time:135760ms step_avg:99.02ms +step:1372/1695 train_time:135861ms step_avg:99.02ms +step:1373/1695 train_time:135963ms step_avg:99.03ms +step:1374/1695 train_time:136064ms step_avg:99.03ms +step:1375/1695 train_time:136166ms step_avg:99.03ms +step:1375/1695 val_loss:3.3558 
train_time:136264ms step_avg:99.10ms +step:1376/1695 train_time:136293ms step_avg:99.05ms +step:1377/1695 train_time:136379ms step_avg:99.04ms +step:1378/1695 train_time:136482ms step_avg:99.04ms +step:1379/1695 train_time:136584ms step_avg:99.05ms +step:1380/1695 train_time:136687ms step_avg:99.05ms +step:1381/1695 train_time:136787ms step_avg:99.05ms +step:1382/1695 train_time:136886ms step_avg:99.05ms +step:1383/1695 train_time:136986ms step_avg:99.05ms +step:1384/1695 train_time:137086ms step_avg:99.05ms +step:1385/1695 train_time:137188ms step_avg:99.05ms +step:1386/1695 train_time:137294ms step_avg:99.06ms +step:1387/1695 train_time:137395ms step_avg:99.06ms +step:1388/1695 train_time:137496ms step_avg:99.06ms +step:1389/1695 train_time:137600ms step_avg:99.06ms +step:1390/1695 train_time:137703ms step_avg:99.07ms +step:1391/1695 train_time:137806ms step_avg:99.07ms +step:1392/1695 train_time:137907ms step_avg:99.07ms +step:1393/1695 train_time:138009ms step_avg:99.07ms +step:1394/1695 train_time:138110ms step_avg:99.07ms +step:1395/1695 train_time:138211ms step_avg:99.08ms +step:1396/1695 train_time:138313ms step_avg:99.08ms +step:1397/1695 train_time:138416ms step_avg:99.08ms +step:1398/1695 train_time:138519ms step_avg:99.08ms +step:1399/1695 train_time:138621ms step_avg:99.09ms +step:1400/1695 train_time:138724ms step_avg:99.09ms +step:1401/1695 train_time:138826ms step_avg:99.09ms +step:1402/1695 train_time:138928ms step_avg:99.09ms +step:1403/1695 train_time:139030ms step_avg:99.09ms +step:1404/1695 train_time:139131ms step_avg:99.10ms +step:1405/1695 train_time:139233ms step_avg:99.10ms +step:1406/1695 train_time:139334ms step_avg:99.10ms +step:1407/1695 train_time:139436ms step_avg:99.10ms +step:1408/1695 train_time:139536ms step_avg:99.10ms +step:1409/1695 train_time:139640ms step_avg:99.11ms +step:1410/1695 train_time:139743ms step_avg:99.11ms +step:1411/1695 train_time:139846ms step_avg:99.11ms +step:1412/1695 train_time:139949ms step_avg:99.11ms +step:1413/1695 train_time:140050ms step_avg:99.12ms +step:1414/1695 train_time:140152ms step_avg:99.12ms +step:1415/1695 train_time:140253ms step_avg:99.12ms +step:1416/1695 train_time:140354ms step_avg:99.12ms +step:1417/1695 train_time:140454ms step_avg:99.12ms +step:1418/1695 train_time:140555ms step_avg:99.12ms +step:1419/1695 train_time:140656ms step_avg:99.12ms +step:1420/1695 train_time:140759ms step_avg:99.13ms +step:1421/1695 train_time:140862ms step_avg:99.13ms +step:1422/1695 train_time:140964ms step_avg:99.13ms +step:1423/1695 train_time:141067ms step_avg:99.13ms +step:1424/1695 train_time:141169ms step_avg:99.14ms +step:1425/1695 train_time:141271ms step_avg:99.14ms +step:1426/1695 train_time:141374ms step_avg:99.14ms +step:1427/1695 train_time:141476ms step_avg:99.14ms +step:1428/1695 train_time:141578ms step_avg:99.14ms +step:1429/1695 train_time:141679ms step_avg:99.15ms +step:1430/1695 train_time:141781ms step_avg:99.15ms +step:1431/1695 train_time:141883ms step_avg:99.15ms +step:1432/1695 train_time:141985ms step_avg:99.15ms +step:1433/1695 train_time:142089ms step_avg:99.15ms +step:1434/1695 train_time:142190ms step_avg:99.16ms +step:1435/1695 train_time:142293ms step_avg:99.16ms +step:1436/1695 train_time:142395ms step_avg:99.16ms +step:1437/1695 train_time:142497ms step_avg:99.16ms +step:1438/1695 train_time:142599ms step_avg:99.16ms +step:1439/1695 train_time:142701ms step_avg:99.17ms +step:1440/1695 train_time:142804ms step_avg:99.17ms +step:1441/1695 train_time:142907ms step_avg:99.17ms +step:1442/1695 
train_time:143009ms step_avg:99.17ms +step:1443/1695 train_time:143110ms step_avg:99.18ms +step:1444/1695 train_time:143212ms step_avg:99.18ms +step:1445/1695 train_time:143313ms step_avg:99.18ms +step:1446/1695 train_time:143414ms step_avg:99.18ms +step:1447/1695 train_time:143515ms step_avg:99.18ms +step:1448/1695 train_time:143618ms step_avg:99.18ms +step:1449/1695 train_time:143719ms step_avg:99.18ms +step:1450/1695 train_time:143820ms step_avg:99.19ms +step:1451/1695 train_time:143923ms step_avg:99.19ms +step:1452/1695 train_time:144026ms step_avg:99.19ms +step:1453/1695 train_time:144130ms step_avg:99.19ms +step:1454/1695 train_time:144232ms step_avg:99.20ms +step:1455/1695 train_time:144334ms step_avg:99.20ms +step:1456/1695 train_time:144435ms step_avg:99.20ms +step:1457/1695 train_time:144536ms step_avg:99.20ms +step:1458/1695 train_time:144638ms step_avg:99.20ms +step:1459/1695 train_time:144740ms step_avg:99.20ms +step:1460/1695 train_time:144841ms step_avg:99.21ms +step:1461/1695 train_time:144943ms step_avg:99.21ms +step:1462/1695 train_time:145046ms step_avg:99.21ms +step:1463/1695 train_time:145148ms step_avg:99.21ms +step:1464/1695 train_time:145250ms step_avg:99.21ms +step:1465/1695 train_time:145351ms step_avg:99.22ms +step:1466/1695 train_time:145452ms step_avg:99.22ms +step:1467/1695 train_time:145553ms step_avg:99.22ms +step:1468/1695 train_time:145656ms step_avg:99.22ms +step:1469/1695 train_time:145758ms step_avg:99.22ms +step:1470/1695 train_time:145859ms step_avg:99.22ms +step:1471/1695 train_time:145962ms step_avg:99.23ms +step:1472/1695 train_time:146064ms step_avg:99.23ms +step:1473/1695 train_time:146167ms step_avg:99.23ms +step:1474/1695 train_time:146268ms step_avg:99.23ms +step:1475/1695 train_time:146369ms step_avg:99.23ms +step:1476/1695 train_time:146471ms step_avg:99.24ms +step:1477/1695 train_time:146573ms step_avg:99.24ms +step:1478/1695 train_time:146674ms step_avg:99.24ms +step:1479/1695 train_time:146775ms step_avg:99.24ms +step:1480/1695 train_time:146877ms step_avg:99.24ms +step:1481/1695 train_time:146980ms step_avg:99.24ms +step:1482/1695 train_time:147082ms step_avg:99.25ms +step:1483/1695 train_time:147184ms step_avg:99.25ms +step:1484/1695 train_time:147288ms step_avg:99.25ms +step:1485/1695 train_time:147389ms step_avg:99.25ms +step:1486/1695 train_time:147491ms step_avg:99.25ms +step:1487/1695 train_time:147593ms step_avg:99.26ms +step:1488/1695 train_time:147695ms step_avg:99.26ms +step:1489/1695 train_time:147797ms step_avg:99.26ms +step:1490/1695 train_time:147899ms step_avg:99.26ms +step:1491/1695 train_time:148000ms step_avg:99.26ms +step:1492/1695 train_time:148102ms step_avg:99.26ms +step:1493/1695 train_time:148205ms step_avg:99.27ms +step:1494/1695 train_time:148307ms step_avg:99.27ms +step:1495/1695 train_time:148410ms step_avg:99.27ms +step:1496/1695 train_time:148512ms step_avg:99.27ms +step:1497/1695 train_time:148612ms step_avg:99.27ms +step:1498/1695 train_time:148714ms step_avg:99.27ms +step:1499/1695 train_time:148815ms step_avg:99.28ms +step:1500/1695 train_time:148916ms step_avg:99.28ms +step:1500/1695 val_loss:3.3212 train_time:149015ms step_avg:99.34ms +step:1501/1695 train_time:149044ms step_avg:99.30ms +step:1502/1695 train_time:149127ms step_avg:99.29ms +step:1503/1695 train_time:149230ms step_avg:99.29ms +step:1504/1695 train_time:149333ms step_avg:99.29ms +step:1505/1695 train_time:149434ms step_avg:99.29ms +step:1506/1695 train_time:149535ms step_avg:99.29ms +step:1507/1695 train_time:149637ms step_avg:99.29ms 
+step:1508/1695 train_time:149738ms step_avg:99.30ms +step:1509/1695 train_time:149841ms step_avg:99.30ms +step:1510/1695 train_time:149943ms step_avg:99.30ms +step:1511/1695 train_time:150046ms step_avg:99.30ms +step:1512/1695 train_time:150149ms step_avg:99.30ms +step:1513/1695 train_time:150252ms step_avg:99.31ms +step:1514/1695 train_time:150354ms step_avg:99.31ms +step:1515/1695 train_time:150459ms step_avg:99.31ms +step:1516/1695 train_time:150560ms step_avg:99.31ms +step:1517/1695 train_time:150661ms step_avg:99.31ms +step:1518/1695 train_time:150762ms step_avg:99.32ms +step:1519/1695 train_time:150866ms step_avg:99.32ms +step:1520/1695 train_time:150968ms step_avg:99.32ms +step:1521/1695 train_time:151069ms step_avg:99.32ms +step:1522/1695 train_time:151170ms step_avg:99.32ms +step:1523/1695 train_time:151272ms step_avg:99.33ms +step:1524/1695 train_time:151376ms step_avg:99.33ms +step:1525/1695 train_time:151479ms step_avg:99.33ms +step:1526/1695 train_time:151582ms step_avg:99.33ms +step:1527/1695 train_time:151684ms step_avg:99.33ms +step:1528/1695 train_time:151788ms step_avg:99.34ms +step:1529/1695 train_time:151890ms step_avg:99.34ms +step:1530/1695 train_time:151994ms step_avg:99.34ms +step:1531/1695 train_time:152095ms step_avg:99.34ms +step:1532/1695 train_time:152198ms step_avg:99.35ms +step:1533/1695 train_time:152301ms step_avg:99.35ms +step:1534/1695 train_time:152403ms step_avg:99.35ms +step:1535/1695 train_time:152504ms step_avg:99.35ms +step:1536/1695 train_time:152605ms step_avg:99.35ms +step:1537/1695 train_time:152709ms step_avg:99.35ms +step:1538/1695 train_time:152810ms step_avg:99.36ms +step:1539/1695 train_time:152911ms step_avg:99.36ms +step:1540/1695 train_time:153013ms step_avg:99.36ms +step:1541/1695 train_time:153118ms step_avg:99.36ms +step:1542/1695 train_time:153222ms step_avg:99.37ms +step:1543/1695 train_time:153324ms step_avg:99.37ms +step:1544/1695 train_time:153425ms step_avg:99.37ms +step:1545/1695 train_time:153526ms step_avg:99.37ms +step:1546/1695 train_time:153627ms step_avg:99.37ms +step:1547/1695 train_time:153729ms step_avg:99.37ms +step:1548/1695 train_time:153831ms step_avg:99.37ms +step:1549/1695 train_time:153934ms step_avg:99.38ms +step:1550/1695 train_time:154035ms step_avg:99.38ms +step:1551/1695 train_time:154137ms step_avg:99.38ms +step:1552/1695 train_time:154240ms step_avg:99.38ms +step:1553/1695 train_time:154343ms step_avg:99.38ms +step:1554/1695 train_time:154444ms step_avg:99.38ms +step:1555/1695 train_time:154545ms step_avg:99.39ms +step:1556/1695 train_time:154647ms step_avg:99.39ms +step:1557/1695 train_time:154750ms step_avg:99.39ms +step:1558/1695 train_time:154852ms step_avg:99.39ms +step:1559/1695 train_time:154954ms step_avg:99.39ms +step:1560/1695 train_time:155056ms step_avg:99.40ms +step:1561/1695 train_time:155159ms step_avg:99.40ms +step:1562/1695 train_time:155262ms step_avg:99.40ms +step:1563/1695 train_time:155366ms step_avg:99.40ms +step:1564/1695 train_time:155467ms step_avg:99.40ms +step:1565/1695 train_time:155568ms step_avg:99.40ms +step:1566/1695 train_time:155669ms step_avg:99.41ms +step:1567/1695 train_time:155771ms step_avg:99.41ms +step:1568/1695 train_time:155872ms step_avg:99.41ms +step:1569/1695 train_time:155973ms step_avg:99.41ms +step:1570/1695 train_time:156078ms step_avg:99.41ms +step:1571/1695 train_time:156179ms step_avg:99.41ms +step:1572/1695 train_time:156281ms step_avg:99.42ms +step:1573/1695 train_time:156383ms step_avg:99.42ms +step:1574/1695 train_time:156485ms step_avg:99.42ms 
+step:1575/1695 train_time:156585ms step_avg:99.42ms +step:1576/1695 train_time:156687ms step_avg:99.42ms +step:1577/1695 train_time:156790ms step_avg:99.42ms +step:1578/1695 train_time:156891ms step_avg:99.42ms +step:1579/1695 train_time:156993ms step_avg:99.43ms +step:1580/1695 train_time:157095ms step_avg:99.43ms +step:1581/1695 train_time:157198ms step_avg:99.43ms +step:1582/1695 train_time:157299ms step_avg:99.43ms +step:1583/1695 train_time:157402ms step_avg:99.43ms +step:1584/1695 train_time:157505ms step_avg:99.43ms +step:1585/1695 train_time:157606ms step_avg:99.44ms +step:1586/1695 train_time:157708ms step_avg:99.44ms +step:1587/1695 train_time:157809ms step_avg:99.44ms +step:1588/1695 train_time:157910ms step_avg:99.44ms +step:1589/1695 train_time:158012ms step_avg:99.44ms +step:1590/1695 train_time:158114ms step_avg:99.44ms +step:1591/1695 train_time:158216ms step_avg:99.44ms +step:1592/1695 train_time:158319ms step_avg:99.45ms +step:1593/1695 train_time:158421ms step_avg:99.45ms +step:1594/1695 train_time:158525ms step_avg:99.45ms +step:1595/1695 train_time:158626ms step_avg:99.45ms +step:1596/1695 train_time:158727ms step_avg:99.45ms +step:1597/1695 train_time:158829ms step_avg:99.45ms +step:1598/1695 train_time:158931ms step_avg:99.46ms +step:1599/1695 train_time:159032ms step_avg:99.46ms +step:1600/1695 train_time:159134ms step_avg:99.46ms +step:1601/1695 train_time:159238ms step_avg:99.46ms +step:1602/1695 train_time:159340ms step_avg:99.46ms +step:1603/1695 train_time:159441ms step_avg:99.46ms +step:1604/1695 train_time:159544ms step_avg:99.47ms +step:1605/1695 train_time:159647ms step_avg:99.47ms +step:1606/1695 train_time:159749ms step_avg:99.47ms +step:1607/1695 train_time:159850ms step_avg:99.47ms +step:1608/1695 train_time:159951ms step_avg:99.47ms +step:1609/1695 train_time:160052ms step_avg:99.47ms +step:1610/1695 train_time:160153ms step_avg:99.47ms +step:1611/1695 train_time:160256ms step_avg:99.48ms +step:1612/1695 train_time:160359ms step_avg:99.48ms +step:1613/1695 train_time:160460ms step_avg:99.48ms +step:1614/1695 train_time:160561ms step_avg:99.48ms +step:1615/1695 train_time:160664ms step_avg:99.48ms +step:1616/1695 train_time:160765ms step_avg:99.48ms +step:1617/1695 train_time:160867ms step_avg:99.48ms +step:1618/1695 train_time:160968ms step_avg:99.49ms +step:1619/1695 train_time:161070ms step_avg:99.49ms +step:1620/1695 train_time:161172ms step_avg:99.49ms +step:1621/1695 train_time:161274ms step_avg:99.49ms +step:1622/1695 train_time:161375ms step_avg:99.49ms +step:1623/1695 train_time:161478ms step_avg:99.49ms +step:1624/1695 train_time:161582ms step_avg:99.50ms +step:1625/1695 train_time:161686ms step_avg:99.50ms +step:1625/1695 val_loss:3.2922 train_time:161787ms step_avg:99.56ms +step:1626/1695 train_time:161816ms step_avg:99.52ms +step:1627/1695 train_time:161901ms step_avg:99.51ms +step:1628/1695 train_time:162003ms step_avg:99.51ms +step:1629/1695 train_time:162105ms step_avg:99.51ms +step:1630/1695 train_time:162207ms step_avg:99.51ms +step:1631/1695 train_time:162308ms step_avg:99.51ms +step:1632/1695 train_time:162410ms step_avg:99.52ms +step:1633/1695 train_time:162511ms step_avg:99.52ms +step:1634/1695 train_time:162614ms step_avg:99.52ms +step:1635/1695 train_time:162716ms step_avg:99.52ms +step:1636/1695 train_time:162819ms step_avg:99.52ms +step:1637/1695 train_time:162922ms step_avg:99.52ms +step:1638/1695 train_time:163025ms step_avg:99.53ms +step:1639/1695 train_time:163129ms step_avg:99.53ms +step:1640/1695 train_time:163231ms 
step_avg:99.53ms +step:1641/1695 train_time:163333ms step_avg:99.53ms +step:1642/1695 train_time:163435ms step_avg:99.53ms +step:1643/1695 train_time:163536ms step_avg:99.54ms +step:1644/1695 train_time:163638ms step_avg:99.54ms +step:1645/1695 train_time:163741ms step_avg:99.54ms +step:1646/1695 train_time:163844ms step_avg:99.54ms +step:1647/1695 train_time:163950ms step_avg:99.54ms +step:1648/1695 train_time:164054ms step_avg:99.55ms +step:1649/1695 train_time:164156ms step_avg:99.55ms +step:1650/1695 train_time:164258ms step_avg:99.55ms +step:1651/1695 train_time:164360ms step_avg:99.55ms +step:1652/1695 train_time:164462ms step_avg:99.55ms +step:1653/1695 train_time:164565ms step_avg:99.56ms +step:1654/1695 train_time:164668ms step_avg:99.56ms +step:1655/1695 train_time:164771ms step_avg:99.56ms +step:1656/1695 train_time:164874ms step_avg:99.56ms +step:1657/1695 train_time:164976ms step_avg:99.56ms +step:1658/1695 train_time:165078ms step_avg:99.56ms +step:1659/1695 train_time:165184ms step_avg:99.57ms +step:1660/1695 train_time:165287ms step_avg:99.57ms +step:1661/1695 train_time:165392ms step_avg:99.57ms +step:1662/1695 train_time:165496ms step_avg:99.58ms +step:1663/1695 train_time:165599ms step_avg:99.58ms +step:1664/1695 train_time:165700ms step_avg:99.58ms +step:1665/1695 train_time:165808ms step_avg:99.58ms +step:1666/1695 train_time:165911ms step_avg:99.59ms +step:1667/1695 train_time:166014ms step_avg:99.59ms +step:1668/1695 train_time:166119ms step_avg:99.59ms +step:1669/1695 train_time:166223ms step_avg:99.59ms +step:1670/1695 train_time:166326ms step_avg:99.60ms +step:1671/1695 train_time:166429ms step_avg:99.60ms +step:1672/1695 train_time:166533ms step_avg:99.60ms +step:1673/1695 train_time:166635ms step_avg:99.60ms +step:1674/1695 train_time:166738ms step_avg:99.60ms +step:1675/1695 train_time:166840ms step_avg:99.61ms +step:1676/1695 train_time:166945ms step_avg:99.61ms +step:1677/1695 train_time:167046ms step_avg:99.61ms +step:1678/1695 train_time:167152ms step_avg:99.61ms +step:1679/1695 train_time:167254ms step_avg:99.62ms +step:1680/1695 train_time:167356ms step_avg:99.62ms +step:1681/1695 train_time:167459ms step_avg:99.62ms +step:1682/1695 train_time:167564ms step_avg:99.62ms +step:1683/1695 train_time:167667ms step_avg:99.62ms +step:1684/1695 train_time:167770ms step_avg:99.63ms +step:1685/1695 train_time:167873ms step_avg:99.63ms +step:1686/1695 train_time:167975ms step_avg:99.63ms +step:1687/1695 train_time:168077ms step_avg:99.63ms +step:1688/1695 train_time:168179ms step_avg:99.63ms +step:1689/1695 train_time:168280ms step_avg:99.63ms +step:1690/1695 train_time:168382ms step_avg:99.63ms +step:1691/1695 train_time:168485ms step_avg:99.64ms +step:1692/1695 train_time:168588ms step_avg:99.64ms +step:1693/1695 train_time:168692ms step_avg:99.64ms +step:1694/1695 train_time:168796ms step_avg:99.64ms +step:1695/1695 train_time:168900ms step_avg:99.65ms +step:1695/1695 val_loss:3.2795 train_time:168999ms step_avg:99.70ms +peak memory allocated: 33860 MiB reserved: 49040 MiB diff --git a/records/082325_SparseAttnGate/53ecb4ef-77ed-4af6-b776-47cd4006614b.txt b/records/082325_SparseAttnGate/53ecb4ef-77ed-4af6-b776-47cd4006614b.txt new file mode 100644 index 000000000..01dbfe241 --- /dev/null +++ b/records/082325_SparseAttnGate/53ecb4ef-77ed-4af6-b776-47cd4006614b.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
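# (editor's annotation, not in the original record) both pointer grids advance one K-tile per iteration: + # rows of A and columns of A.T read the same storage, so each step moves BLOCK_SIZE_K elements along a_stride_c. +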
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
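+ + In pseudocode (editor's annotation mirroring step() below; not part of the original docstring), each 2D parameter p with gradient g is updated roughly as + buf = buf.lerp(g, 1 - momentum) # momentum buffer (EMA of grads) + g = g.lerp(buf, momentum) # Nesterov-style blend + p *= 1 - lr * weight_decay # decoupled weight decay + p -= lr * max(1, rows / cols)**0.5 * newton_schulz(g) # orthogonalized step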
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length, model dim + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=self.attn_scale).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x),
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
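+ # e.g. with num_layers=12 the scalars above pack 12 skip weights + 24 block lambdas + 24 SA lambdas = 60 entries; + # with the world_size=8 used in this run, pad = (-60) % 8 = 4 brings the total to 64, so DistAdam can + # reduce-scatter this parameter into equal 8-entry slices per rank. the lr_mul set here is read back by + # DistAdam at step time via getattr(p, "lr_mul", 1.0) (and wd_mul via getattr(p, "wd_mul", 1.0)).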
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
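+ # at this point val_loss is each rank's mean over val_steps batches, averaged across ranks by the + # all_reduce above; the clock was stopped before validation and restarts below, so train_time + # excludes eval overhead.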
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:28:09 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 31C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 308062 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 308063 C /usr/bin/python3 614MiB | +| 0 N/A N/A 308064 C /usr/bin/python3 614MiB | +| 0 N/A N/A 308065 C /usr/bin/python3 614MiB | +| 0 N/A N/A 308066 C /usr/bin/python3 614MiB | +| 0 N/A N/A 308067 C /usr/bin/python3 614MiB | +| 0 N/A N/A 308068 C /usr/bin/python3 614MiB | +| 0 N/A N/A 308069 C /usr/bin/python3 614MiB | +| 1 N/A N/A 308063 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 308064 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 308065 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 308066 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 308067 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 308068 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 308069 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:154ms step_avg:153.72ms +step:2/1695 train_time:179ms step_avg:89.42ms +step:3/1695 train_time:250ms step_avg:83.25ms +step:4/1695 train_time:342ms step_avg:85.38ms +step:5/1695 train_time:434ms step_avg:86.79ms +step:6/1695 train_time:526ms step_avg:87.73ms +step:7/1695 train_time:620ms step_avg:88.51ms +step:8/1695 
train_time:712ms step_avg:89.00ms +step:9/1695 train_time:805ms step_avg:89.40ms +step:10/1695 train_time:898ms step_avg:89.77ms +step:11/1695 train_time:990ms step_avg:90.05ms +step:12/1695 train_time:1084ms step_avg:90.36ms +step:13/1695 train_time:1180ms step_avg:90.78ms +step:14/1695 train_time:1274ms step_avg:91.03ms +step:15/1695 train_time:1368ms step_avg:91.19ms +step:16/1695 train_time:1462ms step_avg:91.35ms +step:17/1695 train_time:1555ms step_avg:91.48ms +step:18/1695 train_time:1648ms step_avg:91.58ms +step:19/1695 train_time:1742ms step_avg:91.67ms +step:20/1695 train_time:1835ms step_avg:91.74ms +step:21/1695 train_time:1927ms step_avg:91.77ms +step:22/1695 train_time:2021ms step_avg:91.85ms +step:23/1695 train_time:2115ms step_avg:91.97ms +step:24/1695 train_time:2211ms step_avg:92.11ms +step:25/1695 train_time:2305ms step_avg:92.20ms +step:26/1695 train_time:2399ms step_avg:92.26ms +step:27/1695 train_time:2493ms step_avg:92.32ms +step:28/1695 train_time:2586ms step_avg:92.36ms +step:29/1695 train_time:2680ms step_avg:92.40ms +step:30/1695 train_time:2773ms step_avg:92.44ms +step:31/1695 train_time:2866ms step_avg:92.46ms +step:32/1695 train_time:2961ms step_avg:92.52ms +step:33/1695 train_time:3054ms step_avg:92.55ms +step:34/1695 train_time:3149ms step_avg:92.61ms +step:35/1695 train_time:3242ms step_avg:92.63ms +step:36/1695 train_time:3336ms step_avg:92.67ms +step:37/1695 train_time:3430ms step_avg:92.71ms +step:38/1695 train_time:3523ms step_avg:92.71ms +step:39/1695 train_time:3617ms step_avg:92.73ms +step:40/1695 train_time:3710ms step_avg:92.74ms +step:41/1695 train_time:3802ms step_avg:92.74ms +step:42/1695 train_time:3896ms step_avg:92.76ms +step:43/1695 train_time:3989ms step_avg:92.77ms +step:44/1695 train_time:4083ms step_avg:92.80ms +step:45/1695 train_time:4177ms step_avg:92.81ms +step:46/1695 train_time:4270ms step_avg:92.83ms +step:47/1695 train_time:4364ms step_avg:92.85ms +step:48/1695 train_time:4459ms step_avg:92.89ms +step:49/1695 train_time:4553ms step_avg:92.92ms +step:50/1695 train_time:4646ms step_avg:92.93ms +step:51/1695 train_time:4740ms step_avg:92.95ms +step:52/1695 train_time:4834ms step_avg:92.96ms +step:53/1695 train_time:4927ms step_avg:92.97ms +step:54/1695 train_time:5021ms step_avg:92.99ms +step:55/1695 train_time:5116ms step_avg:93.01ms +step:56/1695 train_time:5209ms step_avg:93.02ms +step:57/1695 train_time:5303ms step_avg:93.03ms +step:58/1695 train_time:5397ms step_avg:93.05ms +step:59/1695 train_time:5491ms step_avg:93.07ms +step:60/1695 train_time:5585ms step_avg:93.08ms +step:61/1695 train_time:5678ms step_avg:93.08ms +step:62/1695 train_time:5771ms step_avg:93.09ms +step:63/1695 train_time:5865ms step_avg:93.09ms +step:64/1695 train_time:5959ms step_avg:93.10ms +step:65/1695 train_time:6052ms step_avg:93.10ms +step:66/1695 train_time:6145ms step_avg:93.11ms +step:67/1695 train_time:6239ms step_avg:93.11ms +step:68/1695 train_time:6333ms step_avg:93.13ms +step:69/1695 train_time:6426ms step_avg:93.13ms +step:70/1695 train_time:6520ms step_avg:93.14ms +step:71/1695 train_time:6614ms step_avg:93.16ms +step:72/1695 train_time:6709ms step_avg:93.18ms +step:73/1695 train_time:6803ms step_avg:93.19ms +step:74/1695 train_time:6896ms step_avg:93.19ms +step:75/1695 train_time:6989ms step_avg:93.18ms +step:76/1695 train_time:7083ms step_avg:93.20ms +step:77/1695 train_time:7177ms step_avg:93.21ms +step:78/1695 train_time:7271ms step_avg:93.22ms +step:79/1695 train_time:7364ms step_avg:93.22ms +step:80/1695 train_time:7458ms 
step_avg:93.22ms +step:81/1695 train_time:7551ms step_avg:93.22ms +step:82/1695 train_time:7644ms step_avg:93.22ms +step:83/1695 train_time:7739ms step_avg:93.24ms +step:84/1695 train_time:7832ms step_avg:93.24ms +step:85/1695 train_time:7925ms step_avg:93.23ms +step:86/1695 train_time:8019ms step_avg:93.24ms +step:87/1695 train_time:8112ms step_avg:93.24ms +step:88/1695 train_time:8205ms step_avg:93.24ms +step:89/1695 train_time:8299ms step_avg:93.25ms +step:90/1695 train_time:8393ms step_avg:93.26ms +step:91/1695 train_time:8486ms step_avg:93.25ms +step:92/1695 train_time:8580ms step_avg:93.26ms +step:93/1695 train_time:8674ms step_avg:93.26ms +step:94/1695 train_time:8767ms step_avg:93.27ms +step:95/1695 train_time:8861ms step_avg:93.27ms +step:96/1695 train_time:8954ms step_avg:93.27ms +step:97/1695 train_time:9047ms step_avg:93.27ms +step:98/1695 train_time:9141ms step_avg:93.28ms +step:99/1695 train_time:9235ms step_avg:93.28ms +step:100/1695 train_time:9329ms step_avg:93.29ms +step:101/1695 train_time:9423ms step_avg:93.29ms +step:102/1695 train_time:9516ms step_avg:93.29ms +step:103/1695 train_time:9609ms step_avg:93.29ms +step:104/1695 train_time:9703ms step_avg:93.29ms +step:105/1695 train_time:9797ms step_avg:93.30ms +step:106/1695 train_time:9890ms step_avg:93.30ms +step:107/1695 train_time:9983ms step_avg:93.30ms +step:108/1695 train_time:10077ms step_avg:93.31ms +step:109/1695 train_time:10171ms step_avg:93.31ms +step:110/1695 train_time:10264ms step_avg:93.31ms +step:111/1695 train_time:10357ms step_avg:93.31ms +step:112/1695 train_time:10451ms step_avg:93.31ms +step:113/1695 train_time:10545ms step_avg:93.31ms +step:114/1695 train_time:10639ms step_avg:93.32ms +step:115/1695 train_time:10732ms step_avg:93.32ms +step:116/1695 train_time:10826ms step_avg:93.32ms +step:117/1695 train_time:10919ms step_avg:93.32ms +step:118/1695 train_time:11012ms step_avg:93.32ms +step:119/1695 train_time:11105ms step_avg:93.32ms +step:120/1695 train_time:11199ms step_avg:93.33ms +step:121/1695 train_time:11293ms step_avg:93.33ms +step:122/1695 train_time:11387ms step_avg:93.33ms +step:123/1695 train_time:11480ms step_avg:93.33ms +step:124/1695 train_time:11573ms step_avg:93.33ms +step:125/1695 train_time:11667ms step_avg:93.33ms +step:125/1695 val_loss:4.6063 train_time:11759ms step_avg:94.07ms +step:126/1695 train_time:11785ms step_avg:93.53ms +step:127/1695 train_time:11864ms step_avg:93.41ms +step:128/1695 train_time:11963ms step_avg:93.46ms +step:129/1695 train_time:12057ms step_avg:93.46ms +step:130/1695 train_time:12150ms step_avg:93.46ms +step:131/1695 train_time:12243ms step_avg:93.46ms +step:132/1695 train_time:12337ms step_avg:93.46ms +step:133/1695 train_time:12430ms step_avg:93.46ms +step:134/1695 train_time:12524ms step_avg:93.46ms +step:135/1695 train_time:12617ms step_avg:93.46ms +step:136/1695 train_time:12711ms step_avg:93.46ms +step:137/1695 train_time:12806ms step_avg:93.48ms +step:138/1695 train_time:12903ms step_avg:93.50ms +step:139/1695 train_time:12999ms step_avg:93.52ms +step:140/1695 train_time:13094ms step_avg:93.53ms +step:141/1695 train_time:13187ms step_avg:93.53ms +step:142/1695 train_time:13282ms step_avg:93.53ms +step:143/1695 train_time:13376ms step_avg:93.54ms +step:144/1695 train_time:13469ms step_avg:93.53ms +step:145/1695 train_time:13563ms step_avg:93.54ms +step:146/1695 train_time:13656ms step_avg:93.53ms +step:147/1695 train_time:13750ms step_avg:93.54ms +step:148/1695 train_time:13845ms step_avg:93.55ms +step:149/1695 train_time:13940ms 
step_avg:93.56ms +step:150/1695 train_time:14035ms step_avg:93.57ms +step:151/1695 train_time:14129ms step_avg:93.57ms +step:152/1695 train_time:14223ms step_avg:93.57ms +step:153/1695 train_time:14318ms step_avg:93.58ms +step:154/1695 train_time:14411ms step_avg:93.58ms +step:155/1695 train_time:14504ms step_avg:93.58ms +step:156/1695 train_time:14598ms step_avg:93.58ms +step:157/1695 train_time:14692ms step_avg:93.58ms +step:158/1695 train_time:14785ms step_avg:93.58ms +step:159/1695 train_time:14879ms step_avg:93.58ms +step:160/1695 train_time:14974ms step_avg:93.59ms +step:161/1695 train_time:15068ms step_avg:93.59ms +step:162/1695 train_time:15162ms step_avg:93.59ms +step:163/1695 train_time:15256ms step_avg:93.59ms +step:164/1695 train_time:15349ms step_avg:93.59ms +step:165/1695 train_time:15443ms step_avg:93.60ms +step:166/1695 train_time:15537ms step_avg:93.59ms +step:167/1695 train_time:15630ms step_avg:93.59ms +step:168/1695 train_time:15724ms step_avg:93.59ms +step:169/1695 train_time:15818ms step_avg:93.60ms +step:170/1695 train_time:15912ms step_avg:93.60ms +step:171/1695 train_time:16006ms step_avg:93.60ms +step:172/1695 train_time:16101ms step_avg:93.61ms +step:173/1695 train_time:16197ms step_avg:93.62ms +step:174/1695 train_time:16290ms step_avg:93.62ms +step:175/1695 train_time:16385ms step_avg:93.63ms +step:176/1695 train_time:16479ms step_avg:93.63ms +step:177/1695 train_time:16572ms step_avg:93.63ms +step:178/1695 train_time:16666ms step_avg:93.63ms +step:179/1695 train_time:16759ms step_avg:93.63ms +step:180/1695 train_time:16854ms step_avg:93.63ms +step:181/1695 train_time:16948ms step_avg:93.63ms +step:182/1695 train_time:17041ms step_avg:93.63ms +step:183/1695 train_time:17137ms step_avg:93.64ms +step:184/1695 train_time:17230ms step_avg:93.64ms +step:185/1695 train_time:17325ms step_avg:93.65ms +step:186/1695 train_time:17420ms step_avg:93.65ms +step:187/1695 train_time:17514ms step_avg:93.66ms +step:188/1695 train_time:17607ms step_avg:93.66ms +step:189/1695 train_time:17701ms step_avg:93.66ms +step:190/1695 train_time:17795ms step_avg:93.66ms +step:191/1695 train_time:17889ms step_avg:93.66ms +step:192/1695 train_time:17983ms step_avg:93.66ms +step:193/1695 train_time:18077ms step_avg:93.66ms +step:194/1695 train_time:18171ms step_avg:93.66ms +step:195/1695 train_time:18266ms step_avg:93.67ms +step:196/1695 train_time:18361ms step_avg:93.68ms +step:197/1695 train_time:18455ms step_avg:93.68ms +step:198/1695 train_time:18549ms step_avg:93.68ms +step:199/1695 train_time:18642ms step_avg:93.68ms +step:200/1695 train_time:18737ms step_avg:93.68ms +step:201/1695 train_time:18830ms step_avg:93.68ms +step:202/1695 train_time:18924ms step_avg:93.68ms +step:203/1695 train_time:19019ms step_avg:93.69ms +step:204/1695 train_time:19114ms step_avg:93.70ms +step:205/1695 train_time:19207ms step_avg:93.69ms +step:206/1695 train_time:19302ms step_avg:93.70ms +step:207/1695 train_time:19397ms step_avg:93.70ms +step:208/1695 train_time:19490ms step_avg:93.70ms +step:209/1695 train_time:19585ms step_avg:93.71ms +step:210/1695 train_time:19680ms step_avg:93.71ms +step:211/1695 train_time:19773ms step_avg:93.71ms +step:212/1695 train_time:19867ms step_avg:93.71ms +step:213/1695 train_time:19961ms step_avg:93.71ms +step:214/1695 train_time:20055ms step_avg:93.72ms +step:215/1695 train_time:20148ms step_avg:93.71ms +step:216/1695 train_time:20242ms step_avg:93.71ms +step:217/1695 train_time:20335ms step_avg:93.71ms +step:218/1695 train_time:20429ms step_avg:93.71ms +step:219/1695 
train_time:20523ms step_avg:93.71ms +step:220/1695 train_time:20618ms step_avg:93.72ms +step:221/1695 train_time:20711ms step_avg:93.72ms +step:222/1695 train_time:20805ms step_avg:93.72ms +step:223/1695 train_time:20900ms step_avg:93.72ms +step:224/1695 train_time:20993ms step_avg:93.72ms +step:225/1695 train_time:21087ms step_avg:93.72ms +step:226/1695 train_time:21181ms step_avg:93.72ms +step:227/1695 train_time:21275ms step_avg:93.72ms +step:228/1695 train_time:21368ms step_avg:93.72ms +step:229/1695 train_time:21463ms step_avg:93.72ms +step:230/1695 train_time:21558ms step_avg:93.73ms +step:231/1695 train_time:21651ms step_avg:93.73ms +step:232/1695 train_time:21745ms step_avg:93.73ms +step:233/1695 train_time:21839ms step_avg:93.73ms +step:234/1695 train_time:21932ms step_avg:93.73ms +step:235/1695 train_time:22026ms step_avg:93.73ms +step:236/1695 train_time:22121ms step_avg:93.73ms +step:237/1695 train_time:22216ms step_avg:93.74ms +step:238/1695 train_time:22309ms step_avg:93.74ms +step:239/1695 train_time:22404ms step_avg:93.74ms +step:240/1695 train_time:22498ms step_avg:93.74ms +step:241/1695 train_time:22592ms step_avg:93.74ms +step:242/1695 train_time:22685ms step_avg:93.74ms +step:243/1695 train_time:22780ms step_avg:93.74ms +step:244/1695 train_time:22873ms step_avg:93.74ms +step:245/1695 train_time:22967ms step_avg:93.74ms +step:246/1695 train_time:23061ms step_avg:93.74ms +step:247/1695 train_time:23154ms step_avg:93.74ms +step:248/1695 train_time:23248ms step_avg:93.74ms +step:249/1695 train_time:23342ms step_avg:93.74ms +step:250/1695 train_time:23436ms step_avg:93.74ms +step:250/1695 val_loss:4.0653 train_time:23527ms step_avg:94.11ms +step:251/1695 train_time:23554ms step_avg:93.84ms +step:252/1695 train_time:23632ms step_avg:93.78ms +step:253/1695 train_time:23730ms step_avg:93.79ms +step:254/1695 train_time:23824ms step_avg:93.80ms +step:255/1695 train_time:23918ms step_avg:93.80ms +step:256/1695 train_time:24012ms step_avg:93.80ms +step:257/1695 train_time:24106ms step_avg:93.80ms +step:258/1695 train_time:24200ms step_avg:93.80ms +step:259/1695 train_time:24293ms step_avg:93.80ms +step:260/1695 train_time:24387ms step_avg:93.79ms +step:261/1695 train_time:24482ms step_avg:93.80ms +step:262/1695 train_time:24579ms step_avg:93.81ms +step:263/1695 train_time:24673ms step_avg:93.81ms +step:264/1695 train_time:24768ms step_avg:93.82ms +step:265/1695 train_time:24863ms step_avg:93.82ms +step:266/1695 train_time:24957ms step_avg:93.82ms +step:267/1695 train_time:25051ms step_avg:93.82ms +step:268/1695 train_time:25145ms step_avg:93.82ms +step:269/1695 train_time:25239ms step_avg:93.83ms +step:270/1695 train_time:25334ms step_avg:93.83ms +step:271/1695 train_time:25427ms step_avg:93.83ms +step:272/1695 train_time:25523ms step_avg:93.83ms +step:273/1695 train_time:25619ms step_avg:93.84ms +step:274/1695 train_time:25714ms step_avg:93.85ms +step:275/1695 train_time:25809ms step_avg:93.85ms +step:276/1695 train_time:25903ms step_avg:93.85ms +step:277/1695 train_time:25997ms step_avg:93.85ms +step:278/1695 train_time:26091ms step_avg:93.85ms +step:279/1695 train_time:26186ms step_avg:93.86ms +step:280/1695 train_time:26281ms step_avg:93.86ms +step:281/1695 train_time:26375ms step_avg:93.86ms +step:282/1695 train_time:26469ms step_avg:93.86ms +step:283/1695 train_time:26563ms step_avg:93.86ms +step:284/1695 train_time:26658ms step_avg:93.87ms +step:285/1695 train_time:26753ms step_avg:93.87ms +step:286/1695 train_time:26847ms step_avg:93.87ms +step:287/1695 train_time:26941ms 
step_avg:93.87ms +step:288/1695 train_time:27037ms step_avg:93.88ms +step:289/1695 train_time:27131ms step_avg:93.88ms +step:290/1695 train_time:27225ms step_avg:93.88ms +step:291/1695 train_time:27319ms step_avg:93.88ms +step:292/1695 train_time:27414ms step_avg:93.88ms +step:293/1695 train_time:27507ms step_avg:93.88ms +step:294/1695 train_time:27602ms step_avg:93.88ms +step:295/1695 train_time:27697ms step_avg:93.89ms +step:296/1695 train_time:27792ms step_avg:93.89ms +step:297/1695 train_time:27887ms step_avg:93.90ms +step:298/1695 train_time:27982ms step_avg:93.90ms +step:299/1695 train_time:28077ms step_avg:93.90ms +step:300/1695 train_time:28171ms step_avg:93.90ms +step:301/1695 train_time:28265ms step_avg:93.90ms +step:302/1695 train_time:28360ms step_avg:93.91ms +step:303/1695 train_time:28454ms step_avg:93.91ms +step:304/1695 train_time:28548ms step_avg:93.91ms +step:305/1695 train_time:28642ms step_avg:93.91ms +step:306/1695 train_time:28736ms step_avg:93.91ms +step:307/1695 train_time:28830ms step_avg:93.91ms +step:308/1695 train_time:28924ms step_avg:93.91ms +step:309/1695 train_time:29018ms step_avg:93.91ms +step:310/1695 train_time:29113ms step_avg:93.91ms +step:311/1695 train_time:29206ms step_avg:93.91ms +step:312/1695 train_time:29302ms step_avg:93.92ms +step:313/1695 train_time:29398ms step_avg:93.92ms +step:314/1695 train_time:29492ms step_avg:93.92ms +step:315/1695 train_time:29585ms step_avg:93.92ms +step:316/1695 train_time:29681ms step_avg:93.93ms +step:317/1695 train_time:29775ms step_avg:93.93ms +step:318/1695 train_time:29869ms step_avg:93.93ms +step:319/1695 train_time:29963ms step_avg:93.93ms +step:320/1695 train_time:30058ms step_avg:93.93ms +step:321/1695 train_time:30153ms step_avg:93.93ms +step:322/1695 train_time:30247ms step_avg:93.93ms +step:323/1695 train_time:30341ms step_avg:93.94ms +step:324/1695 train_time:30436ms step_avg:93.94ms +step:325/1695 train_time:30531ms step_avg:93.94ms +step:326/1695 train_time:30625ms step_avg:93.94ms +step:327/1695 train_time:30721ms step_avg:93.95ms +step:328/1695 train_time:30815ms step_avg:93.95ms +step:329/1695 train_time:30909ms step_avg:93.95ms +step:330/1695 train_time:31003ms step_avg:93.95ms +step:331/1695 train_time:31098ms step_avg:93.95ms +step:332/1695 train_time:31193ms step_avg:93.95ms +step:333/1695 train_time:31286ms step_avg:93.95ms +step:334/1695 train_time:31381ms step_avg:93.95ms +step:335/1695 train_time:31476ms step_avg:93.96ms +step:336/1695 train_time:31569ms step_avg:93.96ms +step:337/1695 train_time:31664ms step_avg:93.96ms +step:338/1695 train_time:31758ms step_avg:93.96ms +step:339/1695 train_time:31853ms step_avg:93.96ms +step:340/1695 train_time:31946ms step_avg:93.96ms +step:341/1695 train_time:32041ms step_avg:93.96ms +step:342/1695 train_time:32136ms step_avg:93.96ms +step:343/1695 train_time:32229ms step_avg:93.96ms +step:344/1695 train_time:32323ms step_avg:93.96ms +step:345/1695 train_time:32418ms step_avg:93.97ms +step:346/1695 train_time:32512ms step_avg:93.97ms +step:347/1695 train_time:32606ms step_avg:93.97ms +step:348/1695 train_time:32701ms step_avg:93.97ms +step:349/1695 train_time:32796ms step_avg:93.97ms +step:350/1695 train_time:32889ms step_avg:93.97ms +step:351/1695 train_time:32985ms step_avg:93.97ms +step:352/1695 train_time:33079ms step_avg:93.97ms +step:353/1695 train_time:33174ms step_avg:93.98ms +step:354/1695 train_time:33268ms step_avg:93.98ms +step:355/1695 train_time:33362ms step_avg:93.98ms +step:356/1695 train_time:33457ms step_avg:93.98ms +step:357/1695 
train_time:33551ms step_avg:93.98ms +step:358/1695 train_time:33645ms step_avg:93.98ms +step:359/1695 train_time:33740ms step_avg:93.98ms +step:360/1695 train_time:33836ms step_avg:93.99ms +step:361/1695 train_time:33930ms step_avg:93.99ms +step:362/1695 train_time:34024ms step_avg:93.99ms +step:363/1695 train_time:34119ms step_avg:93.99ms +step:364/1695 train_time:34213ms step_avg:93.99ms +step:365/1695 train_time:34306ms step_avg:93.99ms +step:366/1695 train_time:34402ms step_avg:93.99ms +step:367/1695 train_time:34497ms step_avg:94.00ms +step:368/1695 train_time:34591ms step_avg:94.00ms +step:369/1695 train_time:34685ms step_avg:94.00ms +step:370/1695 train_time:34780ms step_avg:94.00ms +step:371/1695 train_time:34875ms step_avg:94.00ms +step:372/1695 train_time:34969ms step_avg:94.00ms +step:373/1695 train_time:35063ms step_avg:94.00ms +step:374/1695 train_time:35157ms step_avg:94.00ms +step:375/1695 train_time:35251ms step_avg:94.00ms +step:375/1695 val_loss:3.8671 train_time:35344ms step_avg:94.25ms +step:376/1695 train_time:35370ms step_avg:94.07ms +step:377/1695 train_time:35449ms step_avg:94.03ms +step:378/1695 train_time:35549ms step_avg:94.05ms +step:379/1695 train_time:35645ms step_avg:94.05ms +step:380/1695 train_time:35740ms step_avg:94.05ms +step:381/1695 train_time:35836ms step_avg:94.06ms +step:382/1695 train_time:35931ms step_avg:94.06ms +step:383/1695 train_time:36026ms step_avg:94.06ms +step:384/1695 train_time:36122ms step_avg:94.07ms +step:385/1695 train_time:36218ms step_avg:94.07ms +step:386/1695 train_time:36313ms step_avg:94.07ms +step:387/1695 train_time:36409ms step_avg:94.08ms +step:388/1695 train_time:36506ms step_avg:94.09ms +step:389/1695 train_time:36604ms step_avg:94.10ms +step:390/1695 train_time:36700ms step_avg:94.10ms +step:391/1695 train_time:36796ms step_avg:94.11ms +step:392/1695 train_time:36893ms step_avg:94.11ms +step:393/1695 train_time:36989ms step_avg:94.12ms +step:394/1695 train_time:37084ms step_avg:94.12ms +step:395/1695 train_time:37179ms step_avg:94.12ms +step:396/1695 train_time:37275ms step_avg:94.13ms +step:397/1695 train_time:37371ms step_avg:94.13ms +step:398/1695 train_time:37468ms step_avg:94.14ms +step:399/1695 train_time:37564ms step_avg:94.15ms +step:400/1695 train_time:37660ms step_avg:94.15ms +step:401/1695 train_time:37757ms step_avg:94.16ms +step:402/1695 train_time:37853ms step_avg:94.16ms +step:403/1695 train_time:37950ms step_avg:94.17ms +step:404/1695 train_time:38045ms step_avg:94.17ms +step:405/1695 train_time:38141ms step_avg:94.18ms +step:406/1695 train_time:38237ms step_avg:94.18ms +step:407/1695 train_time:38333ms step_avg:94.18ms +step:408/1695 train_time:38429ms step_avg:94.19ms +step:409/1695 train_time:38525ms step_avg:94.19ms +step:410/1695 train_time:38621ms step_avg:94.20ms +step:411/1695 train_time:38717ms step_avg:94.20ms +step:412/1695 train_time:38813ms step_avg:94.21ms +step:413/1695 train_time:38909ms step_avg:94.21ms +step:414/1695 train_time:39005ms step_avg:94.21ms +step:415/1695 train_time:39101ms step_avg:94.22ms +step:416/1695 train_time:39197ms step_avg:94.22ms +step:417/1695 train_time:39292ms step_avg:94.23ms +step:418/1695 train_time:39388ms step_avg:94.23ms +step:419/1695 train_time:39484ms step_avg:94.23ms +step:420/1695 train_time:39580ms step_avg:94.24ms +step:421/1695 train_time:39676ms step_avg:94.24ms +step:422/1695 train_time:39772ms step_avg:94.25ms +step:423/1695 train_time:39869ms step_avg:94.25ms +step:424/1695 train_time:39965ms step_avg:94.26ms +step:425/1695 train_time:40061ms 
step_avg:94.26ms +step:426/1695 train_time:40157ms step_avg:94.27ms +step:427/1695 train_time:40253ms step_avg:94.27ms +step:428/1695 train_time:40349ms step_avg:94.27ms +step:429/1695 train_time:40446ms step_avg:94.28ms +step:430/1695 train_time:40541ms step_avg:94.28ms +step:431/1695 train_time:40637ms step_avg:94.29ms +step:432/1695 train_time:40733ms step_avg:94.29ms +step:433/1695 train_time:40829ms step_avg:94.29ms +step:434/1695 train_time:40926ms step_avg:94.30ms +step:435/1695 train_time:41022ms step_avg:94.30ms +step:436/1695 train_time:41118ms step_avg:94.31ms +step:437/1695 train_time:41214ms step_avg:94.31ms +step:438/1695 train_time:41312ms step_avg:94.32ms +step:439/1695 train_time:41408ms step_avg:94.32ms +step:440/1695 train_time:41504ms step_avg:94.33ms +step:441/1695 train_time:41599ms step_avg:94.33ms +step:442/1695 train_time:41695ms step_avg:94.33ms +step:443/1695 train_time:41792ms step_avg:94.34ms +step:444/1695 train_time:41888ms step_avg:94.34ms +step:445/1695 train_time:41986ms step_avg:94.35ms +step:446/1695 train_time:42082ms step_avg:94.35ms +step:447/1695 train_time:42178ms step_avg:94.36ms +step:448/1695 train_time:42274ms step_avg:94.36ms +step:449/1695 train_time:42371ms step_avg:94.37ms +step:450/1695 train_time:42467ms step_avg:94.37ms +step:451/1695 train_time:42562ms step_avg:94.37ms +step:452/1695 train_time:42658ms step_avg:94.38ms +step:453/1695 train_time:42755ms step_avg:94.38ms +step:454/1695 train_time:42851ms step_avg:94.39ms +step:455/1695 train_time:42948ms step_avg:94.39ms +step:456/1695 train_time:43044ms step_avg:94.39ms +step:457/1695 train_time:43139ms step_avg:94.40ms +step:458/1695 train_time:43236ms step_avg:94.40ms +step:459/1695 train_time:43332ms step_avg:94.41ms +step:460/1695 train_time:43429ms step_avg:94.41ms +step:461/1695 train_time:43526ms step_avg:94.42ms +step:462/1695 train_time:43621ms step_avg:94.42ms +step:463/1695 train_time:43717ms step_avg:94.42ms +step:464/1695 train_time:43812ms step_avg:94.42ms +step:465/1695 train_time:43909ms step_avg:94.43ms +step:466/1695 train_time:44005ms step_avg:94.43ms +step:467/1695 train_time:44100ms step_avg:94.43ms +step:468/1695 train_time:44197ms step_avg:94.44ms +step:469/1695 train_time:44293ms step_avg:94.44ms +step:470/1695 train_time:44389ms step_avg:94.45ms +step:471/1695 train_time:44486ms step_avg:94.45ms +step:472/1695 train_time:44582ms step_avg:94.45ms +step:473/1695 train_time:44677ms step_avg:94.46ms +step:474/1695 train_time:44773ms step_avg:94.46ms +step:475/1695 train_time:44870ms step_avg:94.46ms +step:476/1695 train_time:44966ms step_avg:94.47ms +step:477/1695 train_time:45061ms step_avg:94.47ms +step:478/1695 train_time:45157ms step_avg:94.47ms +step:479/1695 train_time:45253ms step_avg:94.47ms +step:480/1695 train_time:45350ms step_avg:94.48ms +step:481/1695 train_time:45446ms step_avg:94.48ms +step:482/1695 train_time:45542ms step_avg:94.49ms +step:483/1695 train_time:45638ms step_avg:94.49ms +step:484/1695 train_time:45735ms step_avg:94.49ms +step:485/1695 train_time:45830ms step_avg:94.49ms +step:486/1695 train_time:45927ms step_avg:94.50ms +step:487/1695 train_time:46023ms step_avg:94.50ms +step:488/1695 train_time:46119ms step_avg:94.51ms +step:489/1695 train_time:46216ms step_avg:94.51ms +step:490/1695 train_time:46312ms step_avg:94.51ms +step:491/1695 train_time:46408ms step_avg:94.52ms +step:492/1695 train_time:46503ms step_avg:94.52ms +step:493/1695 train_time:46598ms step_avg:94.52ms +step:494/1695 train_time:46694ms step_avg:94.52ms +step:495/1695 
train_time:46790ms step_avg:94.53ms +step:496/1695 train_time:46886ms step_avg:94.53ms +step:497/1695 train_time:46982ms step_avg:94.53ms +step:498/1695 train_time:47078ms step_avg:94.53ms +step:499/1695 train_time:47175ms step_avg:94.54ms +step:500/1695 train_time:47271ms step_avg:94.54ms +step:500/1695 val_loss:3.7271 train_time:47366ms step_avg:94.73ms +step:501/1695 train_time:47393ms step_avg:94.60ms +step:502/1695 train_time:47474ms step_avg:94.57ms +step:503/1695 train_time:47572ms step_avg:94.58ms +step:504/1695 train_time:47669ms step_avg:94.58ms +step:505/1695 train_time:47764ms step_avg:94.58ms +step:506/1695 train_time:47860ms step_avg:94.58ms +step:507/1695 train_time:47955ms step_avg:94.59ms +step:508/1695 train_time:48051ms step_avg:94.59ms +step:509/1695 train_time:48146ms step_avg:94.59ms +step:510/1695 train_time:48242ms step_avg:94.59ms +step:511/1695 train_time:48337ms step_avg:94.59ms +step:512/1695 train_time:48434ms step_avg:94.60ms +step:513/1695 train_time:48532ms step_avg:94.60ms +step:514/1695 train_time:48629ms step_avg:94.61ms +step:515/1695 train_time:48725ms step_avg:94.61ms +step:516/1695 train_time:48821ms step_avg:94.61ms +step:517/1695 train_time:48917ms step_avg:94.62ms +step:518/1695 train_time:49014ms step_avg:94.62ms +step:519/1695 train_time:49110ms step_avg:94.62ms +step:520/1695 train_time:49205ms step_avg:94.63ms +step:521/1695 train_time:49301ms step_avg:94.63ms +step:522/1695 train_time:49398ms step_avg:94.63ms +step:523/1695 train_time:49495ms step_avg:94.64ms +step:524/1695 train_time:49593ms step_avg:94.64ms +step:525/1695 train_time:49690ms step_avg:94.65ms +step:526/1695 train_time:49788ms step_avg:94.65ms +step:527/1695 train_time:49883ms step_avg:94.66ms +step:528/1695 train_time:49979ms step_avg:94.66ms +step:529/1695 train_time:50075ms step_avg:94.66ms +step:530/1695 train_time:50171ms step_avg:94.66ms +step:531/1695 train_time:50268ms step_avg:94.67ms +step:532/1695 train_time:50363ms step_avg:94.67ms +step:533/1695 train_time:50459ms step_avg:94.67ms +step:534/1695 train_time:50557ms step_avg:94.68ms +step:535/1695 train_time:50654ms step_avg:94.68ms +step:536/1695 train_time:50752ms step_avg:94.69ms +step:537/1695 train_time:50848ms step_avg:94.69ms +step:538/1695 train_time:50944ms step_avg:94.69ms +step:539/1695 train_time:51040ms step_avg:94.69ms +step:540/1695 train_time:51136ms step_avg:94.70ms +step:541/1695 train_time:51233ms step_avg:94.70ms +step:542/1695 train_time:51329ms step_avg:94.70ms +step:543/1695 train_time:51425ms step_avg:94.71ms +step:544/1695 train_time:51521ms step_avg:94.71ms +step:545/1695 train_time:51619ms step_avg:94.71ms +step:546/1695 train_time:51717ms step_avg:94.72ms +step:547/1695 train_time:51815ms step_avg:94.73ms +step:548/1695 train_time:51912ms step_avg:94.73ms +step:549/1695 train_time:52009ms step_avg:94.73ms +step:550/1695 train_time:52105ms step_avg:94.74ms +step:551/1695 train_time:52201ms step_avg:94.74ms +step:552/1695 train_time:52298ms step_avg:94.74ms +step:553/1695 train_time:52394ms step_avg:94.74ms +step:554/1695 train_time:52491ms step_avg:94.75ms +step:555/1695 train_time:52587ms step_avg:94.75ms +step:556/1695 train_time:52683ms step_avg:94.75ms +step:557/1695 train_time:52780ms step_avg:94.76ms +step:558/1695 train_time:52877ms step_avg:94.76ms +step:559/1695 train_time:52974ms step_avg:94.77ms +step:560/1695 train_time:53070ms step_avg:94.77ms +step:561/1695 train_time:53166ms step_avg:94.77ms +step:562/1695 train_time:53262ms step_avg:94.77ms +step:563/1695 train_time:53358ms 
step_avg:94.78ms +step:564/1695 train_time:53455ms step_avg:94.78ms +step:565/1695 train_time:53551ms step_avg:94.78ms +step:566/1695 train_time:53647ms step_avg:94.78ms +step:567/1695 train_time:53743ms step_avg:94.79ms +step:568/1695 train_time:54068ms step_avg:95.19ms +step:569/1695 train_time:54162ms step_avg:95.19ms +step:570/1695 train_time:54258ms step_avg:95.19ms +step:571/1695 train_time:54354ms step_avg:95.19ms +step:572/1695 train_time:54448ms step_avg:95.19ms +step:573/1695 train_time:54544ms step_avg:95.19ms +step:574/1695 train_time:54640ms step_avg:95.19ms +step:575/1695 train_time:54735ms step_avg:95.19ms +step:576/1695 train_time:54831ms step_avg:95.19ms +step:577/1695 train_time:54927ms step_avg:95.19ms +step:578/1695 train_time:55025ms step_avg:95.20ms +step:579/1695 train_time:55123ms step_avg:95.20ms +step:580/1695 train_time:55220ms step_avg:95.21ms +step:581/1695 train_time:55317ms step_avg:95.21ms +step:582/1695 train_time:55413ms step_avg:95.21ms +step:583/1695 train_time:55508ms step_avg:95.21ms +step:584/1695 train_time:55604ms step_avg:95.21ms +step:585/1695 train_time:55699ms step_avg:95.21ms +step:586/1695 train_time:55796ms step_avg:95.21ms +step:587/1695 train_time:55891ms step_avg:95.22ms +step:588/1695 train_time:55989ms step_avg:95.22ms +step:589/1695 train_time:56085ms step_avg:95.22ms +step:590/1695 train_time:56181ms step_avg:95.22ms +step:591/1695 train_time:56279ms step_avg:95.23ms +step:592/1695 train_time:56376ms step_avg:95.23ms +step:593/1695 train_time:56473ms step_avg:95.23ms +step:594/1695 train_time:56569ms step_avg:95.23ms +step:595/1695 train_time:56664ms step_avg:95.23ms +step:596/1695 train_time:56760ms step_avg:95.23ms +step:597/1695 train_time:56856ms step_avg:95.24ms +step:598/1695 train_time:56953ms step_avg:95.24ms +step:599/1695 train_time:57050ms step_avg:95.24ms +step:600/1695 train_time:57147ms step_avg:95.24ms +step:601/1695 train_time:57244ms step_avg:95.25ms +step:602/1695 train_time:57340ms step_avg:95.25ms +step:603/1695 train_time:57437ms step_avg:95.25ms +step:604/1695 train_time:57533ms step_avg:95.25ms +step:605/1695 train_time:57630ms step_avg:95.26ms +step:606/1695 train_time:57725ms step_avg:95.26ms +step:607/1695 train_time:57821ms step_avg:95.26ms +step:608/1695 train_time:57918ms step_avg:95.26ms +step:609/1695 train_time:58015ms step_avg:95.26ms +step:610/1695 train_time:58112ms step_avg:95.27ms +step:611/1695 train_time:58209ms step_avg:95.27ms +step:612/1695 train_time:58305ms step_avg:95.27ms +step:613/1695 train_time:58401ms step_avg:95.27ms +step:614/1695 train_time:58498ms step_avg:95.27ms +step:615/1695 train_time:58594ms step_avg:95.28ms +step:616/1695 train_time:58690ms step_avg:95.28ms +step:617/1695 train_time:58786ms step_avg:95.28ms +step:618/1695 train_time:58882ms step_avg:95.28ms +step:619/1695 train_time:58979ms step_avg:95.28ms +step:620/1695 train_time:59076ms step_avg:95.28ms +step:621/1695 train_time:59173ms step_avg:95.29ms +step:622/1695 train_time:59270ms step_avg:95.29ms +step:623/1695 train_time:59366ms step_avg:95.29ms +step:624/1695 train_time:59462ms step_avg:95.29ms +step:625/1695 train_time:59558ms step_avg:95.29ms +step:625/1695 val_loss:3.6433 train_time:59653ms step_avg:95.44ms +step:626/1695 train_time:59679ms step_avg:95.33ms +step:627/1695 train_time:59760ms step_avg:95.31ms +step:628/1695 train_time:59861ms step_avg:95.32ms +step:629/1695 train_time:60187ms step_avg:95.69ms +step:630/1695 train_time:60284ms step_avg:95.69ms +step:631/1695 train_time:60380ms step_avg:95.69ms 
+step:632/1695 train_time:60477ms step_avg:95.69ms +step:633/1695 train_time:60574ms step_avg:95.69ms +step:634/1695 train_time:60672ms step_avg:95.70ms +step:635/1695 train_time:61019ms step_avg:96.09ms +step:636/1695 train_time:61114ms step_avg:96.09ms +step:637/1695 train_time:61211ms step_avg:96.09ms +step:638/1695 train_time:61308ms step_avg:96.09ms +step:639/1695 train_time:61405ms step_avg:96.10ms +step:640/1695 train_time:61502ms step_avg:96.10ms +step:641/1695 train_time:61599ms step_avg:96.10ms +step:642/1695 train_time:61696ms step_avg:96.10ms +step:643/1695 train_time:61794ms step_avg:96.10ms +step:644/1695 train_time:61891ms step_avg:96.10ms +step:645/1695 train_time:61990ms step_avg:96.11ms +step:646/1695 train_time:62090ms step_avg:96.11ms +step:647/1695 train_time:62188ms step_avg:96.12ms +step:648/1695 train_time:62285ms step_avg:96.12ms +step:649/1695 train_time:62383ms step_avg:96.12ms +step:650/1695 train_time:62479ms step_avg:96.12ms +step:651/1695 train_time:62577ms step_avg:96.12ms +step:652/1695 train_time:62674ms step_avg:96.13ms +step:653/1695 train_time:62772ms step_avg:96.13ms +step:654/1695 train_time:62870ms step_avg:96.13ms +step:655/1695 train_time:62969ms step_avg:96.14ms +step:656/1695 train_time:63068ms step_avg:96.14ms +step:657/1695 train_time:63166ms step_avg:96.14ms +step:658/1695 train_time:63264ms step_avg:96.15ms +step:659/1695 train_time:63361ms step_avg:96.15ms +step:660/1695 train_time:63458ms step_avg:96.15ms +step:661/1695 train_time:63556ms step_avg:96.15ms +step:662/1695 train_time:63654ms step_avg:96.15ms +step:663/1695 train_time:63752ms step_avg:96.16ms +step:664/1695 train_time:63850ms step_avg:96.16ms +step:665/1695 train_time:63948ms step_avg:96.16ms +step:666/1695 train_time:64046ms step_avg:96.17ms +step:667/1695 train_time:64143ms step_avg:96.17ms +step:668/1695 train_time:64241ms step_avg:96.17ms +step:669/1695 train_time:64339ms step_avg:96.17ms +step:670/1695 train_time:64438ms step_avg:96.18ms +step:671/1695 train_time:64536ms step_avg:96.18ms +step:672/1695 train_time:64634ms step_avg:96.18ms +step:673/1695 train_time:64732ms step_avg:96.18ms +step:674/1695 train_time:64830ms step_avg:96.19ms +step:675/1695 train_time:64928ms step_avg:96.19ms +step:676/1695 train_time:65025ms step_avg:96.19ms +step:677/1695 train_time:65123ms step_avg:96.19ms +step:678/1695 train_time:65222ms step_avg:96.20ms +step:679/1695 train_time:65320ms step_avg:96.20ms +step:680/1695 train_time:65418ms step_avg:96.20ms +step:681/1695 train_time:65516ms step_avg:96.21ms +step:682/1695 train_time:65614ms step_avg:96.21ms +step:683/1695 train_time:65712ms step_avg:96.21ms +step:684/1695 train_time:65810ms step_avg:96.21ms +step:685/1695 train_time:65908ms step_avg:96.22ms +step:686/1695 train_time:66007ms step_avg:96.22ms +step:687/1695 train_time:66105ms step_avg:96.22ms +step:688/1695 train_time:66203ms step_avg:96.23ms +step:689/1695 train_time:66301ms step_avg:96.23ms +step:690/1695 train_time:66398ms step_avg:96.23ms +step:691/1695 train_time:66496ms step_avg:96.23ms +step:692/1695 train_time:66594ms step_avg:96.23ms +step:693/1695 train_time:66692ms step_avg:96.24ms +step:694/1695 train_time:66789ms step_avg:96.24ms +step:695/1695 train_time:66887ms step_avg:96.24ms +step:696/1695 train_time:66984ms step_avg:96.24ms +step:697/1695 train_time:67082ms step_avg:96.24ms +step:698/1695 train_time:67180ms step_avg:96.25ms +step:699/1695 train_time:67279ms step_avg:96.25ms +step:700/1695 train_time:67377ms step_avg:96.25ms +step:701/1695 train_time:67476ms 
step_avg:96.26ms +step:702/1695 train_time:67574ms step_avg:96.26ms +step:703/1695 train_time:67672ms step_avg:96.26ms +step:704/1695 train_time:67769ms step_avg:96.26ms +step:705/1695 train_time:67867ms step_avg:96.27ms +step:706/1695 train_time:67965ms step_avg:96.27ms +step:707/1695 train_time:68063ms step_avg:96.27ms +step:708/1695 train_time:68160ms step_avg:96.27ms +step:709/1695 train_time:68258ms step_avg:96.27ms +step:710/1695 train_time:68356ms step_avg:96.28ms +step:711/1695 train_time:68453ms step_avg:96.28ms +step:712/1695 train_time:68552ms step_avg:96.28ms +step:713/1695 train_time:68650ms step_avg:96.28ms +step:714/1695 train_time:68747ms step_avg:96.28ms +step:715/1695 train_time:68844ms step_avg:96.29ms +step:716/1695 train_time:68942ms step_avg:96.29ms +step:717/1695 train_time:69041ms step_avg:96.29ms +step:718/1695 train_time:69138ms step_avg:96.29ms +step:719/1695 train_time:69236ms step_avg:96.30ms +step:720/1695 train_time:69335ms step_avg:96.30ms +step:721/1695 train_time:69434ms step_avg:96.30ms +step:722/1695 train_time:69533ms step_avg:96.31ms +step:723/1695 train_time:69630ms step_avg:96.31ms +step:724/1695 train_time:69728ms step_avg:96.31ms +step:725/1695 train_time:69826ms step_avg:96.31ms +step:726/1695 train_time:70167ms step_avg:96.65ms +step:727/1695 train_time:70262ms step_avg:96.65ms +step:728/1695 train_time:70360ms step_avg:96.65ms +step:729/1695 train_time:70457ms step_avg:96.65ms +step:730/1695 train_time:70555ms step_avg:96.65ms +step:731/1695 train_time:70653ms step_avg:96.65ms +step:732/1695 train_time:70750ms step_avg:96.65ms +step:733/1695 train_time:70847ms step_avg:96.65ms +step:734/1695 train_time:70943ms step_avg:96.65ms +step:735/1695 train_time:71040ms step_avg:96.65ms +step:736/1695 train_time:71142ms step_avg:96.66ms +step:737/1695 train_time:71241ms step_avg:96.66ms +step:738/1695 train_time:71338ms step_avg:96.66ms +step:739/1695 train_time:71437ms step_avg:96.67ms +step:740/1695 train_time:71535ms step_avg:96.67ms +step:741/1695 train_time:71633ms step_avg:96.67ms +step:742/1695 train_time:71731ms step_avg:96.67ms +step:743/1695 train_time:71829ms step_avg:96.67ms +step:744/1695 train_time:71926ms step_avg:96.67ms +step:745/1695 train_time:72023ms step_avg:96.68ms +step:746/1695 train_time:72122ms step_avg:96.68ms +step:747/1695 train_time:72221ms step_avg:96.68ms +step:748/1695 train_time:72319ms step_avg:96.68ms +step:749/1695 train_time:72417ms step_avg:96.69ms +step:750/1695 train_time:72516ms step_avg:96.69ms +step:750/1695 val_loss:3.5813 train_time:72612ms step_avg:96.82ms +step:751/1695 train_time:72638ms step_avg:96.72ms +step:752/1695 train_time:72722ms step_avg:96.71ms +step:753/1695 train_time:72825ms step_avg:96.71ms +step:754/1695 train_time:72924ms step_avg:96.72ms +step:755/1695 train_time:73022ms step_avg:96.72ms +step:756/1695 train_time:73119ms step_avg:96.72ms +step:757/1695 train_time:73216ms step_avg:96.72ms +step:758/1695 train_time:73313ms step_avg:96.72ms +step:759/1695 train_time:73410ms step_avg:96.72ms +step:760/1695 train_time:73508ms step_avg:96.72ms +step:761/1695 train_time:73605ms step_avg:96.72ms +step:762/1695 train_time:73705ms step_avg:96.73ms +step:763/1695 train_time:73806ms step_avg:96.73ms +step:764/1695 train_time:73907ms step_avg:96.74ms +step:765/1695 train_time:74008ms step_avg:96.74ms +step:766/1695 train_time:74106ms step_avg:96.74ms +step:767/1695 train_time:74205ms step_avg:96.75ms +step:768/1695 train_time:74304ms step_avg:96.75ms +step:769/1695 train_time:74402ms step_avg:96.75ms 
+step:770/1695 train_time:74499ms step_avg:96.75ms +step:771/1695 train_time:74597ms step_avg:96.75ms +step:772/1695 train_time:74695ms step_avg:96.76ms +step:773/1695 train_time:74794ms step_avg:96.76ms +step:774/1695 train_time:74892ms step_avg:96.76ms +step:775/1695 train_time:74990ms step_avg:96.76ms +step:776/1695 train_time:75089ms step_avg:96.76ms +step:777/1695 train_time:75187ms step_avg:96.77ms +step:778/1695 train_time:75285ms step_avg:96.77ms +step:779/1695 train_time:75675ms step_avg:97.14ms +step:780/1695 train_time:75771ms step_avg:97.14ms +step:781/1695 train_time:75868ms step_avg:97.14ms +step:782/1695 train_time:75966ms step_avg:97.14ms +step:783/1695 train_time:76064ms step_avg:97.14ms +step:784/1695 train_time:76161ms step_avg:97.14ms +step:785/1695 train_time:76258ms step_avg:97.14ms +step:786/1695 train_time:76355ms step_avg:97.14ms +step:787/1695 train_time:76451ms step_avg:97.14ms +step:788/1695 train_time:76548ms step_avg:97.14ms +step:789/1695 train_time:76951ms step_avg:97.53ms +step:790/1695 train_time:77001ms step_avg:97.47ms +step:791/1695 train_time:77124ms step_avg:97.50ms +step:792/1695 train_time:77221ms step_avg:97.50ms +step:793/1695 train_time:77318ms step_avg:97.50ms +step:794/1695 train_time:77415ms step_avg:97.50ms +step:795/1695 train_time:77512ms step_avg:97.50ms +step:796/1695 train_time:77609ms step_avg:97.50ms +step:797/1695 train_time:77707ms step_avg:97.50ms +step:798/1695 train_time:77804ms step_avg:97.50ms +step:799/1695 train_time:77902ms step_avg:97.50ms +step:800/1695 train_time:78002ms step_avg:97.50ms +step:801/1695 train_time:78102ms step_avg:97.51ms +step:802/1695 train_time:78201ms step_avg:97.51ms +step:803/1695 train_time:78299ms step_avg:97.51ms +step:804/1695 train_time:78397ms step_avg:97.51ms +step:805/1695 train_time:78495ms step_avg:97.51ms +step:806/1695 train_time:78592ms step_avg:97.51ms +step:807/1695 train_time:78690ms step_avg:97.51ms +step:808/1695 train_time:78788ms step_avg:97.51ms +step:809/1695 train_time:78887ms step_avg:97.51ms +step:810/1695 train_time:78986ms step_avg:97.51ms +step:811/1695 train_time:79086ms step_avg:97.52ms +step:812/1695 train_time:79185ms step_avg:97.52ms +step:813/1695 train_time:79285ms step_avg:97.52ms +step:814/1695 train_time:79383ms step_avg:97.52ms +step:815/1695 train_time:79483ms step_avg:97.52ms +step:816/1695 train_time:79581ms step_avg:97.53ms +step:817/1695 train_time:79680ms step_avg:97.53ms +step:818/1695 train_time:79778ms step_avg:97.53ms +step:819/1695 train_time:79876ms step_avg:97.53ms +step:820/1695 train_time:79974ms step_avg:97.53ms +step:821/1695 train_time:80071ms step_avg:97.53ms +step:822/1695 train_time:80169ms step_avg:97.53ms +step:823/1695 train_time:80267ms step_avg:97.53ms +step:824/1695 train_time:80365ms step_avg:97.53ms +step:825/1695 train_time:80463ms step_avg:97.53ms +step:826/1695 train_time:80561ms step_avg:97.53ms +step:827/1695 train_time:80659ms step_avg:97.53ms +step:828/1695 train_time:80757ms step_avg:97.53ms +step:829/1695 train_time:80855ms step_avg:97.53ms +step:830/1695 train_time:80953ms step_avg:97.53ms +step:831/1695 train_time:81051ms step_avg:97.53ms +step:832/1695 train_time:81150ms step_avg:97.54ms +step:833/1695 train_time:81248ms step_avg:97.54ms +step:834/1695 train_time:81346ms step_avg:97.54ms +step:835/1695 train_time:81445ms step_avg:97.54ms +step:836/1695 train_time:81543ms step_avg:97.54ms +step:837/1695 train_time:81642ms step_avg:97.54ms +step:838/1695 train_time:81741ms step_avg:97.54ms +step:839/1695 train_time:81839ms 
step_avg:97.54ms +step:840/1695 train_time:81937ms step_avg:97.54ms +step:841/1695 train_time:82036ms step_avg:97.55ms +step:842/1695 train_time:82134ms step_avg:97.55ms +step:843/1695 train_time:82233ms step_avg:97.55ms +step:844/1695 train_time:82330ms step_avg:97.55ms +step:845/1695 train_time:82428ms step_avg:97.55ms +step:846/1695 train_time:82526ms step_avg:97.55ms +step:847/1695 train_time:82624ms step_avg:97.55ms +step:848/1695 train_time:82723ms step_avg:97.55ms +step:849/1695 train_time:82821ms step_avg:97.55ms +step:850/1695 train_time:82920ms step_avg:97.55ms +step:851/1695 train_time:83018ms step_avg:97.55ms +step:852/1695 train_time:83117ms step_avg:97.55ms +step:853/1695 train_time:83216ms step_avg:97.56ms +step:854/1695 train_time:83315ms step_avg:97.56ms +step:855/1695 train_time:83412ms step_avg:97.56ms +step:856/1695 train_time:83509ms step_avg:97.56ms +step:857/1695 train_time:83607ms step_avg:97.56ms +step:858/1695 train_time:83705ms step_avg:97.56ms +step:859/1695 train_time:83803ms step_avg:97.56ms +step:860/1695 train_time:83901ms step_avg:97.56ms +step:861/1695 train_time:84000ms step_avg:97.56ms +step:862/1695 train_time:84099ms step_avg:97.56ms +step:863/1695 train_time:84197ms step_avg:97.56ms +step:864/1695 train_time:84571ms step_avg:97.88ms +step:865/1695 train_time:84667ms step_avg:97.88ms +step:866/1695 train_time:84764ms step_avg:97.88ms +step:867/1695 train_time:84862ms step_avg:97.88ms +step:868/1695 train_time:84959ms step_avg:97.88ms +step:869/1695 train_time:85056ms step_avg:97.88ms +step:870/1695 train_time:85152ms step_avg:97.88ms +step:871/1695 train_time:85250ms step_avg:97.88ms +step:872/1695 train_time:85346ms step_avg:97.87ms +step:873/1695 train_time:85448ms step_avg:97.88ms +step:874/1695 train_time:85549ms step_avg:97.88ms +step:875/1695 train_time:85648ms step_avg:97.88ms +step:875/1695 val_loss:3.5344 train_time:85745ms step_avg:97.99ms +step:876/1695 train_time:85771ms step_avg:97.91ms +step:877/1695 train_time:85852ms step_avg:97.89ms +step:878/1695 train_time:85957ms step_avg:97.90ms +step:879/1695 train_time:86055ms step_avg:97.90ms +step:880/1695 train_time:86153ms step_avg:97.90ms +step:881/1695 train_time:86252ms step_avg:97.90ms +step:882/1695 train_time:86352ms step_avg:97.90ms +step:883/1695 train_time:86452ms step_avg:97.91ms +step:884/1695 train_time:86551ms step_avg:97.91ms +step:885/1695 train_time:86650ms step_avg:97.91ms +step:886/1695 train_time:86750ms step_avg:97.91ms +step:887/1695 train_time:86851ms step_avg:97.92ms +step:888/1695 train_time:86954ms step_avg:97.92ms +step:889/1695 train_time:87054ms step_avg:97.92ms +step:890/1695 train_time:87154ms step_avg:97.93ms +step:891/1695 train_time:87253ms step_avg:97.93ms +step:892/1695 train_time:87351ms step_avg:97.93ms +step:893/1695 train_time:87451ms step_avg:97.93ms +step:894/1695 train_time:87550ms step_avg:97.93ms +step:895/1695 train_time:87649ms step_avg:97.93ms +step:896/1695 train_time:87748ms step_avg:97.93ms +step:897/1695 train_time:87849ms step_avg:97.94ms +step:898/1695 train_time:87949ms step_avg:97.94ms +step:899/1695 train_time:88050ms step_avg:97.94ms +step:900/1695 train_time:88150ms step_avg:97.94ms +step:901/1695 train_time:88250ms step_avg:97.95ms +step:902/1695 train_time:88349ms step_avg:97.95ms +step:903/1695 train_time:88449ms step_avg:97.95ms +step:904/1695 train_time:88548ms step_avg:97.95ms +step:905/1695 train_time:88647ms step_avg:97.95ms +step:906/1695 train_time:88746ms step_avg:97.95ms +step:907/1695 train_time:88845ms step_avg:97.96ms 
+step:908/1695 train_time:88945ms step_avg:97.96ms +step:909/1695 train_time:89045ms step_avg:97.96ms +step:910/1695 train_time:89146ms step_avg:97.96ms +step:911/1695 train_time:89245ms step_avg:97.96ms +step:912/1695 train_time:89345ms step_avg:97.97ms +step:913/1695 train_time:89444ms step_avg:97.97ms +step:914/1695 train_time:89543ms step_avg:97.97ms +step:915/1695 train_time:89642ms step_avg:97.97ms +step:916/1695 train_time:89740ms step_avg:97.97ms +step:917/1695 train_time:89839ms step_avg:97.97ms +step:918/1695 train_time:89937ms step_avg:97.97ms +step:919/1695 train_time:90037ms step_avg:97.97ms +step:920/1695 train_time:90136ms step_avg:97.97ms +step:921/1695 train_time:90235ms step_avg:97.98ms +step:922/1695 train_time:90335ms step_avg:97.98ms +step:923/1695 train_time:90434ms step_avg:97.98ms +step:924/1695 train_time:90533ms step_avg:97.98ms +step:925/1695 train_time:90633ms step_avg:97.98ms +step:926/1695 train_time:90733ms step_avg:97.98ms +step:927/1695 train_time:90833ms step_avg:97.99ms +step:928/1695 train_time:90933ms step_avg:97.99ms +step:929/1695 train_time:91033ms step_avg:97.99ms +step:930/1695 train_time:91133ms step_avg:97.99ms +step:931/1695 train_time:91233ms step_avg:97.99ms +step:932/1695 train_time:91332ms step_avg:98.00ms +step:933/1695 train_time:91432ms step_avg:98.00ms +step:934/1695 train_time:91532ms step_avg:98.00ms +step:935/1695 train_time:91631ms step_avg:98.00ms +step:936/1695 train_time:91731ms step_avg:98.00ms +step:937/1695 train_time:91831ms step_avg:98.01ms +step:938/1695 train_time:91932ms step_avg:98.01ms +step:939/1695 train_time:92032ms step_avg:98.01ms +step:940/1695 train_time:92131ms step_avg:98.01ms +step:941/1695 train_time:92231ms step_avg:98.01ms +step:942/1695 train_time:92331ms step_avg:98.02ms +step:943/1695 train_time:92431ms step_avg:98.02ms +step:944/1695 train_time:92531ms step_avg:98.02ms +step:945/1695 train_time:92632ms step_avg:98.02ms +step:946/1695 train_time:92731ms step_avg:98.02ms +step:947/1695 train_time:92830ms step_avg:98.03ms +step:948/1695 train_time:92929ms step_avg:98.03ms +step:949/1695 train_time:93029ms step_avg:98.03ms +step:950/1695 train_time:93129ms step_avg:98.03ms +step:951/1695 train_time:93230ms step_avg:98.03ms +step:952/1695 train_time:93329ms step_avg:98.03ms +step:953/1695 train_time:93429ms step_avg:98.04ms +step:954/1695 train_time:93529ms step_avg:98.04ms +step:955/1695 train_time:93628ms step_avg:98.04ms +step:956/1695 train_time:93728ms step_avg:98.04ms +step:957/1695 train_time:93827ms step_avg:98.04ms +step:958/1695 train_time:93928ms step_avg:98.05ms +step:959/1695 train_time:94028ms step_avg:98.05ms +step:960/1695 train_time:94128ms step_avg:98.05ms +step:961/1695 train_time:94227ms step_avg:98.05ms +step:962/1695 train_time:94326ms step_avg:98.05ms +step:963/1695 train_time:94426ms step_avg:98.05ms +step:964/1695 train_time:94526ms step_avg:98.06ms +step:965/1695 train_time:94626ms step_avg:98.06ms +step:966/1695 train_time:94726ms step_avg:98.06ms +step:967/1695 train_time:94825ms step_avg:98.06ms +step:968/1695 train_time:94925ms step_avg:98.06ms +step:969/1695 train_time:95026ms step_avg:98.07ms +step:970/1695 train_time:95126ms step_avg:98.07ms +step:971/1695 train_time:95225ms step_avg:98.07ms +step:972/1695 train_time:95324ms step_avg:98.07ms +step:973/1695 train_time:95425ms step_avg:98.07ms +step:974/1695 train_time:95524ms step_avg:98.07ms +step:975/1695 train_time:95623ms step_avg:98.07ms +step:976/1695 train_time:95722ms step_avg:98.08ms +step:977/1695 train_time:95820ms 
step_avg:98.08ms +step:978/1695 train_time:95919ms step_avg:98.08ms +step:979/1695 train_time:96018ms step_avg:98.08ms +step:980/1695 train_time:96117ms step_avg:98.08ms +step:981/1695 train_time:96217ms step_avg:98.08ms +step:982/1695 train_time:96316ms step_avg:98.08ms +step:983/1695 train_time:96416ms step_avg:98.08ms +step:984/1695 train_time:96516ms step_avg:98.09ms +step:985/1695 train_time:96616ms step_avg:98.09ms +step:986/1695 train_time:96716ms step_avg:98.09ms +step:987/1695 train_time:96815ms step_avg:98.09ms +step:988/1695 train_time:96914ms step_avg:98.09ms +step:989/1695 train_time:97014ms step_avg:98.09ms +step:990/1695 train_time:97114ms step_avg:98.09ms +step:991/1695 train_time:97213ms step_avg:98.10ms +step:992/1695 train_time:97313ms step_avg:98.10ms +step:993/1695 train_time:97413ms step_avg:98.10ms +step:994/1695 train_time:97512ms step_avg:98.10ms +step:995/1695 train_time:97612ms step_avg:98.10ms +step:996/1695 train_time:97712ms step_avg:98.10ms +step:997/1695 train_time:97812ms step_avg:98.11ms +step:998/1695 train_time:97912ms step_avg:98.11ms +step:999/1695 train_time:98012ms step_avg:98.11ms +step:1000/1695 train_time:98111ms step_avg:98.11ms +step:1000/1695 val_loss:3.4900 train_time:98209ms step_avg:98.21ms +step:1001/1695 train_time:98235ms step_avg:98.14ms +step:1002/1695 train_time:98321ms step_avg:98.12ms +step:1003/1695 train_time:98422ms step_avg:98.13ms +step:1004/1695 train_time:98523ms step_avg:98.13ms +step:1005/1695 train_time:98622ms step_avg:98.13ms +step:1006/1695 train_time:98721ms step_avg:98.13ms +step:1007/1695 train_time:98820ms step_avg:98.13ms +step:1008/1695 train_time:98919ms step_avg:98.13ms +step:1009/1695 train_time:99018ms step_avg:98.13ms +step:1010/1695 train_time:99116ms step_avg:98.13ms +step:1011/1695 train_time:99219ms step_avg:98.14ms +step:1012/1695 train_time:99323ms step_avg:98.15ms +step:1013/1695 train_time:99424ms step_avg:98.15ms +step:1014/1695 train_time:99524ms step_avg:98.15ms +step:1015/1695 train_time:99624ms step_avg:98.15ms +step:1016/1695 train_time:99722ms step_avg:98.15ms +step:1017/1695 train_time:99822ms step_avg:98.15ms +step:1018/1695 train_time:99920ms step_avg:98.15ms +step:1019/1695 train_time:100020ms step_avg:98.15ms +step:1020/1695 train_time:100119ms step_avg:98.16ms +step:1021/1695 train_time:100220ms step_avg:98.16ms +step:1022/1695 train_time:100321ms step_avg:98.16ms +step:1023/1695 train_time:100423ms step_avg:98.16ms +step:1024/1695 train_time:100524ms step_avg:98.17ms +step:1025/1695 train_time:100624ms step_avg:98.17ms +step:1026/1695 train_time:100724ms step_avg:98.17ms +step:1027/1695 train_time:100823ms step_avg:98.17ms +step:1028/1695 train_time:100922ms step_avg:98.17ms +step:1029/1695 train_time:101023ms step_avg:98.18ms +step:1030/1695 train_time:101122ms step_avg:98.18ms +step:1031/1695 train_time:101223ms step_avg:98.18ms +step:1032/1695 train_time:101322ms step_avg:98.18ms +step:1033/1695 train_time:101423ms step_avg:98.18ms +step:1034/1695 train_time:101522ms step_avg:98.18ms +step:1035/1695 train_time:101622ms step_avg:98.19ms +step:1036/1695 train_time:101722ms step_avg:98.19ms +step:1037/1695 train_time:101823ms step_avg:98.19ms +step:1038/1695 train_time:101922ms step_avg:98.19ms +step:1039/1695 train_time:102021ms step_avg:98.19ms +step:1040/1695 train_time:102121ms step_avg:98.19ms +step:1041/1695 train_time:102221ms step_avg:98.19ms +step:1042/1695 train_time:102321ms step_avg:98.20ms +step:1043/1695 train_time:102421ms step_avg:98.20ms +step:1044/1695 
train_time:102521ms step_avg:98.20ms +step:1045/1695 train_time:102621ms step_avg:98.20ms +step:1046/1695 train_time:102722ms step_avg:98.20ms +step:1047/1695 train_time:102821ms step_avg:98.21ms +step:1048/1695 train_time:102920ms step_avg:98.21ms +step:1049/1695 train_time:103019ms step_avg:98.21ms +step:1050/1695 train_time:103119ms step_avg:98.21ms +step:1051/1695 train_time:103219ms step_avg:98.21ms +step:1052/1695 train_time:103320ms step_avg:98.21ms +step:1053/1695 train_time:103420ms step_avg:98.22ms +step:1054/1695 train_time:103521ms step_avg:98.22ms +step:1055/1695 train_time:103621ms step_avg:98.22ms +step:1056/1695 train_time:103721ms step_avg:98.22ms +step:1057/1695 train_time:103821ms step_avg:98.22ms +step:1058/1695 train_time:103921ms step_avg:98.22ms +step:1059/1695 train_time:104021ms step_avg:98.23ms +step:1060/1695 train_time:104120ms step_avg:98.23ms +step:1061/1695 train_time:104220ms step_avg:98.23ms +step:1062/1695 train_time:104320ms step_avg:98.23ms +step:1063/1695 train_time:104420ms step_avg:98.23ms +step:1064/1695 train_time:104521ms step_avg:98.23ms +step:1065/1695 train_time:104622ms step_avg:98.24ms +step:1066/1695 train_time:104722ms step_avg:98.24ms +step:1067/1695 train_time:104822ms step_avg:98.24ms +step:1068/1695 train_time:104921ms step_avg:98.24ms +step:1069/1695 train_time:105021ms step_avg:98.24ms +step:1070/1695 train_time:105121ms step_avg:98.24ms +step:1071/1695 train_time:105221ms step_avg:98.25ms +step:1072/1695 train_time:105322ms step_avg:98.25ms +step:1073/1695 train_time:105421ms step_avg:98.25ms +step:1074/1695 train_time:105521ms step_avg:98.25ms +step:1075/1695 train_time:105621ms step_avg:98.25ms +step:1076/1695 train_time:105721ms step_avg:98.25ms +step:1077/1695 train_time:105822ms step_avg:98.26ms +step:1078/1695 train_time:105922ms step_avg:98.26ms +step:1079/1695 train_time:106021ms step_avg:98.26ms +step:1080/1695 train_time:106121ms step_avg:98.26ms +step:1081/1695 train_time:106221ms step_avg:98.26ms +step:1082/1695 train_time:106322ms step_avg:98.26ms +step:1083/1695 train_time:106422ms step_avg:98.27ms +step:1084/1695 train_time:106522ms step_avg:98.27ms +step:1085/1695 train_time:106621ms step_avg:98.27ms +step:1086/1695 train_time:106722ms step_avg:98.27ms +step:1087/1695 train_time:106821ms step_avg:98.27ms +step:1088/1695 train_time:106921ms step_avg:98.27ms +step:1089/1695 train_time:107021ms step_avg:98.27ms +step:1090/1695 train_time:107121ms step_avg:98.28ms +step:1091/1695 train_time:107221ms step_avg:98.28ms +step:1092/1695 train_time:107321ms step_avg:98.28ms +step:1093/1695 train_time:107421ms step_avg:98.28ms +step:1094/1695 train_time:107522ms step_avg:98.28ms +step:1095/1695 train_time:107621ms step_avg:98.28ms +step:1096/1695 train_time:107722ms step_avg:98.29ms +step:1097/1695 train_time:107822ms step_avg:98.29ms +step:1098/1695 train_time:107921ms step_avg:98.29ms +step:1099/1695 train_time:108021ms step_avg:98.29ms +step:1100/1695 train_time:108121ms step_avg:98.29ms +step:1101/1695 train_time:108221ms step_avg:98.29ms +step:1102/1695 train_time:108321ms step_avg:98.29ms +step:1103/1695 train_time:108420ms step_avg:98.30ms +step:1104/1695 train_time:108520ms step_avg:98.30ms +step:1105/1695 train_time:108621ms step_avg:98.30ms +step:1106/1695 train_time:108720ms step_avg:98.30ms +step:1107/1695 train_time:108821ms step_avg:98.30ms +step:1108/1695 train_time:108921ms step_avg:98.30ms +step:1109/1695 train_time:109021ms step_avg:98.31ms +step:1110/1695 train_time:109121ms step_avg:98.31ms +step:1111/1695 
train_time:109221ms step_avg:98.31ms +step:1112/1695 train_time:109321ms step_avg:98.31ms +step:1113/1695 train_time:109421ms step_avg:98.31ms +step:1114/1695 train_time:109520ms step_avg:98.31ms +step:1115/1695 train_time:109620ms step_avg:98.31ms +step:1116/1695 train_time:109721ms step_avg:98.32ms +step:1117/1695 train_time:109821ms step_avg:98.32ms +step:1118/1695 train_time:109921ms step_avg:98.32ms +step:1119/1695 train_time:110021ms step_avg:98.32ms +step:1120/1695 train_time:110121ms step_avg:98.32ms +step:1121/1695 train_time:110222ms step_avg:98.32ms +step:1122/1695 train_time:110323ms step_avg:98.33ms +step:1123/1695 train_time:110423ms step_avg:98.33ms +step:1124/1695 train_time:110522ms step_avg:98.33ms +step:1125/1695 train_time:110622ms step_avg:98.33ms +step:1125/1695 val_loss:3.4397 train_time:110720ms step_avg:98.42ms +step:1126/1695 train_time:110745ms step_avg:98.35ms +step:1127/1695 train_time:110830ms step_avg:98.34ms +step:1128/1695 train_time:110933ms step_avg:98.34ms +step:1129/1695 train_time:111032ms step_avg:98.35ms +step:1130/1695 train_time:111131ms step_avg:98.35ms +step:1131/1695 train_time:111229ms step_avg:98.35ms +step:1132/1695 train_time:111328ms step_avg:98.35ms +step:1133/1695 train_time:111428ms step_avg:98.35ms +step:1134/1695 train_time:111527ms step_avg:98.35ms +step:1135/1695 train_time:111626ms step_avg:98.35ms +step:1136/1695 train_time:111727ms step_avg:98.35ms +step:1137/1695 train_time:111830ms step_avg:98.36ms +step:1138/1695 train_time:111930ms step_avg:98.36ms +step:1139/1695 train_time:112030ms step_avg:98.36ms +step:1140/1695 train_time:112130ms step_avg:98.36ms +step:1141/1695 train_time:112230ms step_avg:98.36ms +step:1142/1695 train_time:112329ms step_avg:98.36ms +step:1143/1695 train_time:112429ms step_avg:98.36ms +step:1144/1695 train_time:112529ms step_avg:98.36ms +step:1145/1695 train_time:112629ms step_avg:98.37ms +step:1146/1695 train_time:112730ms step_avg:98.37ms +step:1147/1695 train_time:112831ms step_avg:98.37ms +step:1148/1695 train_time:112931ms step_avg:98.37ms +step:1149/1695 train_time:113032ms step_avg:98.37ms +step:1150/1695 train_time:113132ms step_avg:98.38ms +step:1151/1695 train_time:113231ms step_avg:98.38ms +step:1152/1695 train_time:113332ms step_avg:98.38ms +step:1153/1695 train_time:113433ms step_avg:98.38ms +step:1154/1695 train_time:113533ms step_avg:98.38ms +step:1155/1695 train_time:113634ms step_avg:98.38ms +step:1156/1695 train_time:113736ms step_avg:98.39ms +step:1157/1695 train_time:113838ms step_avg:98.39ms +step:1158/1695 train_time:113938ms step_avg:98.39ms +step:1159/1695 train_time:114039ms step_avg:98.39ms +step:1160/1695 train_time:114141ms step_avg:98.40ms +step:1161/1695 train_time:114242ms step_avg:98.40ms +step:1162/1695 train_time:114343ms step_avg:98.40ms +step:1163/1695 train_time:114446ms step_avg:98.41ms +step:1164/1695 train_time:114547ms step_avg:98.41ms +step:1165/1695 train_time:114647ms step_avg:98.41ms +step:1166/1695 train_time:114747ms step_avg:98.41ms +step:1167/1695 train_time:114848ms step_avg:98.41ms +step:1168/1695 train_time:114948ms step_avg:98.41ms +step:1169/1695 train_time:115048ms step_avg:98.42ms +step:1170/1695 train_time:115149ms step_avg:98.42ms +step:1171/1695 train_time:115248ms step_avg:98.42ms +step:1172/1695 train_time:115350ms step_avg:98.42ms +step:1173/1695 train_time:115451ms step_avg:98.42ms +step:1174/1695 train_time:115552ms step_avg:98.43ms +step:1175/1695 train_time:115653ms step_avg:98.43ms +step:1176/1695 train_time:115754ms step_avg:98.43ms 
+step:1177/1695 train_time:115855ms step_avg:98.43ms +step:1178/1695 train_time:115957ms step_avg:98.44ms +step:1179/1695 train_time:116059ms step_avg:98.44ms +step:1180/1695 train_time:116160ms step_avg:98.44ms +step:1181/1695 train_time:116261ms step_avg:98.44ms +step:1182/1695 train_time:116364ms step_avg:98.45ms +step:1183/1695 train_time:116465ms step_avg:98.45ms +step:1184/1695 train_time:116566ms step_avg:98.45ms +step:1185/1695 train_time:116667ms step_avg:98.45ms +step:1186/1695 train_time:116768ms step_avg:98.46ms +step:1187/1695 train_time:116869ms step_avg:98.46ms +step:1188/1695 train_time:116969ms step_avg:98.46ms +step:1189/1695 train_time:117068ms step_avg:98.46ms +step:1190/1695 train_time:117168ms step_avg:98.46ms +step:1191/1695 train_time:117268ms step_avg:98.46ms +step:1192/1695 train_time:117368ms step_avg:98.46ms +step:1193/1695 train_time:117468ms step_avg:98.46ms +step:1194/1695 train_time:117569ms step_avg:98.47ms +step:1195/1695 train_time:117668ms step_avg:98.47ms +step:1196/1695 train_time:117768ms step_avg:98.47ms +step:1197/1695 train_time:117869ms step_avg:98.47ms +step:1198/1695 train_time:117968ms step_avg:98.47ms +step:1199/1695 train_time:118069ms step_avg:98.47ms +step:1200/1695 train_time:118168ms step_avg:98.47ms +step:1201/1695 train_time:118268ms step_avg:98.47ms +step:1202/1695 train_time:118370ms step_avg:98.48ms +step:1203/1695 train_time:118470ms step_avg:98.48ms +step:1204/1695 train_time:118571ms step_avg:98.48ms +step:1205/1695 train_time:118671ms step_avg:98.48ms +step:1206/1695 train_time:118771ms step_avg:98.48ms +step:1207/1695 train_time:118872ms step_avg:98.49ms +step:1208/1695 train_time:118972ms step_avg:98.49ms +step:1209/1695 train_time:119071ms step_avg:98.49ms +step:1210/1695 train_time:119171ms step_avg:98.49ms +step:1211/1695 train_time:119273ms step_avg:98.49ms +step:1212/1695 train_time:119373ms step_avg:98.49ms +step:1213/1695 train_time:119476ms step_avg:98.50ms +step:1214/1695 train_time:119576ms step_avg:98.50ms +step:1215/1695 train_time:119676ms step_avg:98.50ms +step:1216/1695 train_time:119778ms step_avg:98.50ms +step:1217/1695 train_time:119880ms step_avg:98.50ms +step:1218/1695 train_time:119982ms step_avg:98.51ms +step:1219/1695 train_time:120084ms step_avg:98.51ms +step:1220/1695 train_time:120185ms step_avg:98.51ms +step:1221/1695 train_time:120285ms step_avg:98.51ms +step:1222/1695 train_time:120386ms step_avg:98.52ms +step:1223/1695 train_time:120487ms step_avg:98.52ms +step:1224/1695 train_time:120587ms step_avg:98.52ms +step:1225/1695 train_time:120688ms step_avg:98.52ms +step:1226/1695 train_time:120787ms step_avg:98.52ms +step:1227/1695 train_time:120888ms step_avg:98.52ms +step:1228/1695 train_time:120989ms step_avg:98.52ms +step:1229/1695 train_time:121088ms step_avg:98.53ms +step:1230/1695 train_time:121187ms step_avg:98.53ms +step:1231/1695 train_time:121287ms step_avg:98.53ms +step:1232/1695 train_time:121388ms step_avg:98.53ms +step:1233/1695 train_time:121488ms step_avg:98.53ms +step:1234/1695 train_time:121589ms step_avg:98.53ms +step:1235/1695 train_time:121689ms step_avg:98.53ms +step:1236/1695 train_time:121789ms step_avg:98.53ms +step:1237/1695 train_time:121889ms step_avg:98.54ms +step:1238/1695 train_time:121989ms step_avg:98.54ms +step:1239/1695 train_time:122089ms step_avg:98.54ms +step:1240/1695 train_time:122189ms step_avg:98.54ms +step:1241/1695 train_time:122290ms step_avg:98.54ms +step:1242/1695 train_time:122390ms step_avg:98.54ms +step:1243/1695 train_time:122490ms step_avg:98.54ms 
+step:1244/1695 train_time:122589ms step_avg:98.54ms +step:1245/1695 train_time:122689ms step_avg:98.55ms +step:1246/1695 train_time:122790ms step_avg:98.55ms +step:1247/1695 train_time:122889ms step_avg:98.55ms +step:1248/1695 train_time:122990ms step_avg:98.55ms +step:1249/1695 train_time:123090ms step_avg:98.55ms +step:1250/1695 train_time:123190ms step_avg:98.55ms +step:1250/1695 val_loss:3.3940 train_time:123288ms step_avg:98.63ms +step:1251/1695 train_time:123313ms step_avg:98.57ms +step:1252/1695 train_time:123398ms step_avg:98.56ms +step:1253/1695 train_time:123502ms step_avg:98.56ms +step:1254/1695 train_time:123603ms step_avg:98.57ms +step:1255/1695 train_time:123704ms step_avg:98.57ms +step:1256/1695 train_time:123804ms step_avg:98.57ms +step:1257/1695 train_time:123903ms step_avg:98.57ms +step:1258/1695 train_time:124004ms step_avg:98.57ms +step:1259/1695 train_time:124103ms step_avg:98.57ms +step:1260/1695 train_time:124204ms step_avg:98.57ms +step:1261/1695 train_time:124307ms step_avg:98.58ms +step:1262/1695 train_time:124411ms step_avg:98.58ms +step:1263/1695 train_time:124513ms step_avg:98.59ms +step:1264/1695 train_time:124613ms step_avg:98.59ms +step:1265/1695 train_time:124713ms step_avg:98.59ms +step:1266/1695 train_time:124812ms step_avg:98.59ms +step:1267/1695 train_time:124912ms step_avg:98.59ms +step:1268/1695 train_time:125012ms step_avg:98.59ms +step:1269/1695 train_time:125113ms step_avg:98.59ms +step:1270/1695 train_time:125213ms step_avg:98.59ms +step:1271/1695 train_time:125315ms step_avg:98.60ms +step:1272/1695 train_time:125416ms step_avg:98.60ms +step:1273/1695 train_time:125517ms step_avg:98.60ms +step:1274/1695 train_time:125617ms step_avg:98.60ms +step:1275/1695 train_time:125719ms step_avg:98.60ms +step:1276/1695 train_time:125821ms step_avg:98.61ms +step:1277/1695 train_time:125922ms step_avg:98.61ms +step:1278/1695 train_time:126024ms step_avg:98.61ms +step:1279/1695 train_time:126126ms step_avg:98.61ms +step:1280/1695 train_time:126227ms step_avg:98.61ms +step:1281/1695 train_time:126328ms step_avg:98.62ms +step:1282/1695 train_time:126429ms step_avg:98.62ms +step:1283/1695 train_time:126529ms step_avg:98.62ms +step:1284/1695 train_time:126630ms step_avg:98.62ms +step:1285/1695 train_time:126731ms step_avg:98.62ms +step:1286/1695 train_time:126832ms step_avg:98.62ms +step:1287/1695 train_time:126932ms step_avg:98.63ms +step:1288/1695 train_time:127031ms step_avg:98.63ms +step:1289/1695 train_time:127132ms step_avg:98.63ms +step:1290/1695 train_time:127232ms step_avg:98.63ms +step:1291/1695 train_time:127333ms step_avg:98.63ms +step:1292/1695 train_time:127433ms step_avg:98.63ms +step:1293/1695 train_time:127534ms step_avg:98.63ms +step:1294/1695 train_time:127635ms step_avg:98.64ms +step:1295/1695 train_time:127736ms step_avg:98.64ms +step:1296/1695 train_time:127837ms step_avg:98.64ms +step:1297/1695 train_time:127938ms step_avg:98.64ms +step:1298/1695 train_time:128039ms step_avg:98.64ms +step:1299/1695 train_time:128141ms step_avg:98.65ms +step:1300/1695 train_time:128242ms step_avg:98.65ms +step:1301/1695 train_time:128344ms step_avg:98.65ms +step:1302/1695 train_time:128446ms step_avg:98.65ms +step:1303/1695 train_time:128548ms step_avg:98.66ms +step:1304/1695 train_time:128649ms step_avg:98.66ms +step:1305/1695 train_time:128750ms step_avg:98.66ms +step:1306/1695 train_time:128849ms step_avg:98.66ms +step:1307/1695 train_time:128950ms step_avg:98.66ms +step:1308/1695 train_time:129051ms step_avg:98.66ms +step:1309/1695 train_time:129152ms 
step_avg:98.66ms +step:1310/1695 train_time:129253ms step_avg:98.67ms +step:1311/1695 train_time:129354ms step_avg:98.67ms +step:1312/1695 train_time:129455ms step_avg:98.67ms +step:1313/1695 train_time:129556ms step_avg:98.67ms +step:1314/1695 train_time:129657ms step_avg:98.67ms +step:1315/1695 train_time:129758ms step_avg:98.68ms +step:1316/1695 train_time:129859ms step_avg:98.68ms +step:1317/1695 train_time:129959ms step_avg:98.68ms +step:1318/1695 train_time:130060ms step_avg:98.68ms +step:1319/1695 train_time:130162ms step_avg:98.68ms +step:1320/1695 train_time:130265ms step_avg:98.69ms +step:1321/1695 train_time:130366ms step_avg:98.69ms +step:1322/1695 train_time:130468ms step_avg:98.69ms +step:1323/1695 train_time:130568ms step_avg:98.69ms +step:1324/1695 train_time:130669ms step_avg:98.69ms +step:1325/1695 train_time:130770ms step_avg:98.69ms +step:1326/1695 train_time:130870ms step_avg:98.70ms +step:1327/1695 train_time:130971ms step_avg:98.70ms +step:1328/1695 train_time:131071ms step_avg:98.70ms +step:1329/1695 train_time:131172ms step_avg:98.70ms +step:1330/1695 train_time:131272ms step_avg:98.70ms +step:1331/1695 train_time:131372ms step_avg:98.70ms +step:1332/1695 train_time:131472ms step_avg:98.70ms +step:1333/1695 train_time:131572ms step_avg:98.70ms +step:1334/1695 train_time:131673ms step_avg:98.71ms +step:1335/1695 train_time:131773ms step_avg:98.71ms +step:1336/1695 train_time:131874ms step_avg:98.71ms +step:1337/1695 train_time:131975ms step_avg:98.71ms +step:1338/1695 train_time:132075ms step_avg:98.71ms +step:1339/1695 train_time:132177ms step_avg:98.71ms +step:1340/1695 train_time:132277ms step_avg:98.71ms +step:1341/1695 train_time:132378ms step_avg:98.72ms +step:1342/1695 train_time:132478ms step_avg:98.72ms +step:1343/1695 train_time:132580ms step_avg:98.72ms +step:1344/1695 train_time:132680ms step_avg:98.72ms +step:1345/1695 train_time:132782ms step_avg:98.72ms +step:1346/1695 train_time:132883ms step_avg:98.72ms +step:1347/1695 train_time:132985ms step_avg:98.73ms +step:1348/1695 train_time:133087ms step_avg:98.73ms +step:1349/1695 train_time:133187ms step_avg:98.73ms +step:1350/1695 train_time:133289ms step_avg:98.73ms +step:1351/1695 train_time:133389ms step_avg:98.73ms +step:1352/1695 train_time:133489ms step_avg:98.73ms +step:1353/1695 train_time:133590ms step_avg:98.74ms +step:1354/1695 train_time:133691ms step_avg:98.74ms +step:1355/1695 train_time:133791ms step_avg:98.74ms +step:1356/1695 train_time:133891ms step_avg:98.74ms +step:1357/1695 train_time:133991ms step_avg:98.74ms +step:1358/1695 train_time:134091ms step_avg:98.74ms +step:1359/1695 train_time:134191ms step_avg:98.74ms +step:1360/1695 train_time:134292ms step_avg:98.74ms +step:1361/1695 train_time:134392ms step_avg:98.75ms +step:1362/1695 train_time:134492ms step_avg:98.75ms +step:1363/1695 train_time:134593ms step_avg:98.75ms +step:1364/1695 train_time:134693ms step_avg:98.75ms +step:1365/1695 train_time:134794ms step_avg:98.75ms +step:1366/1695 train_time:134895ms step_avg:98.75ms +step:1367/1695 train_time:134995ms step_avg:98.75ms +step:1368/1695 train_time:135096ms step_avg:98.75ms +step:1369/1695 train_time:135197ms step_avg:98.76ms +step:1370/1695 train_time:135299ms step_avg:98.76ms +step:1371/1695 train_time:135399ms step_avg:98.76ms +step:1372/1695 train_time:135499ms step_avg:98.76ms +step:1373/1695 train_time:135600ms step_avg:98.76ms +step:1374/1695 train_time:135701ms step_avg:98.76ms +step:1375/1695 train_time:135804ms step_avg:98.77ms +step:1375/1695 val_loss:3.3538 
train_time:135904ms step_avg:98.84ms +step:1376/1695 train_time:135930ms step_avg:98.79ms +step:1377/1695 train_time:136016ms step_avg:98.78ms +step:1378/1695 train_time:136117ms step_avg:98.78ms +step:1379/1695 train_time:136217ms step_avg:98.78ms +step:1380/1695 train_time:136318ms step_avg:98.78ms +step:1381/1695 train_time:136418ms step_avg:98.78ms +step:1382/1695 train_time:136517ms step_avg:98.78ms +step:1383/1695 train_time:136617ms step_avg:98.78ms +step:1384/1695 train_time:136718ms step_avg:98.78ms +step:1385/1695 train_time:136821ms step_avg:98.79ms +step:1386/1695 train_time:136926ms step_avg:98.79ms +step:1387/1695 train_time:137029ms step_avg:98.79ms +step:1388/1695 train_time:137131ms step_avg:98.80ms +step:1389/1695 train_time:137233ms step_avg:98.80ms +step:1390/1695 train_time:137334ms step_avg:98.80ms +step:1391/1695 train_time:137435ms step_avg:98.80ms +step:1392/1695 train_time:137536ms step_avg:98.80ms +step:1393/1695 train_time:137638ms step_avg:98.81ms +step:1394/1695 train_time:137739ms step_avg:98.81ms +step:1395/1695 train_time:137840ms step_avg:98.81ms +step:1396/1695 train_time:137944ms step_avg:98.81ms +step:1397/1695 train_time:138048ms step_avg:98.82ms +step:1398/1695 train_time:138150ms step_avg:98.82ms +step:1399/1695 train_time:138252ms step_avg:98.82ms +step:1400/1695 train_time:138354ms step_avg:98.82ms +step:1401/1695 train_time:138454ms step_avg:98.83ms +step:1402/1695 train_time:138555ms step_avg:98.83ms +step:1403/1695 train_time:138656ms step_avg:98.83ms +step:1404/1695 train_time:138758ms step_avg:98.83ms +step:1405/1695 train_time:138858ms step_avg:98.83ms +step:1406/1695 train_time:138962ms step_avg:98.84ms +step:1407/1695 train_time:139064ms step_avg:98.84ms +step:1408/1695 train_time:139166ms step_avg:98.84ms +step:1409/1695 train_time:139272ms step_avg:98.84ms +step:1410/1695 train_time:139374ms step_avg:98.85ms +step:1411/1695 train_time:139474ms step_avg:98.85ms +step:1412/1695 train_time:139577ms step_avg:98.85ms +step:1413/1695 train_time:139678ms step_avg:98.85ms +step:1414/1695 train_time:139779ms step_avg:98.85ms +step:1415/1695 train_time:139881ms step_avg:98.86ms +step:1416/1695 train_time:139982ms step_avg:98.86ms +step:1417/1695 train_time:140083ms step_avg:98.86ms +step:1418/1695 train_time:140185ms step_avg:98.86ms +step:1419/1695 train_time:140290ms step_avg:98.87ms +step:1420/1695 train_time:140392ms step_avg:98.87ms +step:1421/1695 train_time:140494ms step_avg:98.87ms +step:1422/1695 train_time:140595ms step_avg:98.87ms +step:1423/1695 train_time:140696ms step_avg:98.87ms +step:1424/1695 train_time:140797ms step_avg:98.87ms +step:1425/1695 train_time:140901ms step_avg:98.88ms +step:1426/1695 train_time:141002ms step_avg:98.88ms +step:1427/1695 train_time:141103ms step_avg:98.88ms +step:1428/1695 train_time:141205ms step_avg:98.88ms +step:1429/1695 train_time:141309ms step_avg:98.89ms +step:1430/1695 train_time:141411ms step_avg:98.89ms +step:1431/1695 train_time:141512ms step_avg:98.89ms +step:1432/1695 train_time:141614ms step_avg:98.89ms +step:1433/1695 train_time:141716ms step_avg:98.89ms +step:1434/1695 train_time:141817ms step_avg:98.90ms +step:1435/1695 train_time:141919ms step_avg:98.90ms +step:1436/1695 train_time:142021ms step_avg:98.90ms +step:1437/1695 train_time:142123ms step_avg:98.90ms +step:1438/1695 train_time:142225ms step_avg:98.90ms +step:1439/1695 train_time:142329ms step_avg:98.91ms +step:1440/1695 train_time:142432ms step_avg:98.91ms +step:1441/1695 train_time:142535ms step_avg:98.91ms +step:1442/1695 
train_time:142636ms step_avg:98.92ms +step:1443/1695 train_time:142737ms step_avg:98.92ms +step:1444/1695 train_time:142838ms step_avg:98.92ms +step:1445/1695 train_time:142938ms step_avg:98.92ms +step:1446/1695 train_time:143040ms step_avg:98.92ms +step:1447/1695 train_time:143142ms step_avg:98.92ms +step:1448/1695 train_time:143245ms step_avg:98.93ms +step:1449/1695 train_time:143346ms step_avg:98.93ms +step:1450/1695 train_time:143449ms step_avg:98.93ms +step:1451/1695 train_time:143552ms step_avg:98.93ms +step:1452/1695 train_time:143653ms step_avg:98.93ms +step:1453/1695 train_time:143756ms step_avg:98.94ms +step:1454/1695 train_time:143859ms step_avg:98.94ms +step:1455/1695 train_time:143960ms step_avg:98.94ms +step:1456/1695 train_time:144061ms step_avg:98.94ms +step:1457/1695 train_time:144163ms step_avg:98.95ms +step:1458/1695 train_time:144266ms step_avg:98.95ms +step:1459/1695 train_time:144368ms step_avg:98.95ms +step:1460/1695 train_time:144470ms step_avg:98.95ms +step:1461/1695 train_time:144572ms step_avg:98.95ms +step:1462/1695 train_time:144673ms step_avg:98.96ms +step:1463/1695 train_time:144774ms step_avg:98.96ms +step:1464/1695 train_time:144876ms step_avg:98.96ms +step:1465/1695 train_time:144977ms step_avg:98.96ms +step:1466/1695 train_time:145077ms step_avg:98.96ms +step:1467/1695 train_time:145179ms step_avg:98.96ms +step:1468/1695 train_time:145282ms step_avg:98.97ms +step:1469/1695 train_time:145386ms step_avg:98.97ms +step:1470/1695 train_time:145487ms step_avg:98.97ms +step:1471/1695 train_time:145590ms step_avg:98.97ms +step:1472/1695 train_time:145691ms step_avg:98.97ms +step:1473/1695 train_time:145792ms step_avg:98.98ms +step:1474/1695 train_time:145895ms step_avg:98.98ms +step:1475/1695 train_time:145995ms step_avg:98.98ms +step:1476/1695 train_time:146097ms step_avg:98.98ms +step:1477/1695 train_time:146198ms step_avg:98.98ms +step:1478/1695 train_time:146300ms step_avg:98.99ms +step:1479/1695 train_time:146402ms step_avg:98.99ms +step:1480/1695 train_time:146504ms step_avg:98.99ms +step:1481/1695 train_time:146606ms step_avg:98.99ms +step:1482/1695 train_time:146710ms step_avg:98.99ms +step:1483/1695 train_time:146812ms step_avg:99.00ms +step:1484/1695 train_time:146916ms step_avg:99.00ms +step:1485/1695 train_time:147017ms step_avg:99.00ms +step:1486/1695 train_time:147118ms step_avg:99.00ms +step:1487/1695 train_time:147219ms step_avg:99.00ms +step:1488/1695 train_time:147321ms step_avg:99.01ms +step:1489/1695 train_time:147424ms step_avg:99.01ms +step:1490/1695 train_time:147526ms step_avg:99.01ms +step:1491/1695 train_time:147628ms step_avg:99.01ms +step:1492/1695 train_time:147730ms step_avg:99.01ms +step:1493/1695 train_time:147832ms step_avg:99.02ms +step:1494/1695 train_time:147933ms step_avg:99.02ms +step:1495/1695 train_time:148034ms step_avg:99.02ms +step:1496/1695 train_time:148135ms step_avg:99.02ms +step:1497/1695 train_time:148235ms step_avg:99.02ms +step:1498/1695 train_time:148336ms step_avg:99.02ms +step:1499/1695 train_time:148440ms step_avg:99.03ms +step:1500/1695 train_time:148542ms step_avg:99.03ms +step:1500/1695 val_loss:3.3196 train_time:148642ms step_avg:99.09ms +step:1501/1695 train_time:148668ms step_avg:99.05ms +step:1502/1695 train_time:148756ms step_avg:99.04ms +step:1503/1695 train_time:148858ms step_avg:99.04ms +step:1504/1695 train_time:148960ms step_avg:99.04ms +step:1505/1695 train_time:149061ms step_avg:99.04ms +step:1506/1695 train_time:149163ms step_avg:99.05ms +step:1507/1695 train_time:149264ms step_avg:99.05ms 
+step:1508/1695 train_time:149364ms step_avg:99.05ms +step:1509/1695 train_time:149467ms step_avg:99.05ms +step:1510/1695 train_time:149569ms step_avg:99.05ms +step:1511/1695 train_time:149672ms step_avg:99.05ms +step:1512/1695 train_time:149775ms step_avg:99.06ms +step:1513/1695 train_time:149878ms step_avg:99.06ms +step:1514/1695 train_time:149980ms step_avg:99.06ms +step:1515/1695 train_time:150086ms step_avg:99.07ms +step:1516/1695 train_time:150188ms step_avg:99.07ms +step:1517/1695 train_time:150289ms step_avg:99.07ms +step:1518/1695 train_time:150391ms step_avg:99.07ms +step:1519/1695 train_time:150494ms step_avg:99.07ms +step:1520/1695 train_time:150595ms step_avg:99.08ms +step:1521/1695 train_time:150696ms step_avg:99.08ms +step:1522/1695 train_time:150798ms step_avg:99.08ms +step:1523/1695 train_time:150899ms step_avg:99.08ms +step:1524/1695 train_time:151003ms step_avg:99.08ms +step:1525/1695 train_time:151106ms step_avg:99.09ms +step:1526/1695 train_time:151209ms step_avg:99.09ms +step:1527/1695 train_time:151311ms step_avg:99.09ms +step:1528/1695 train_time:151416ms step_avg:99.09ms +step:1529/1695 train_time:151518ms step_avg:99.10ms +step:1530/1695 train_time:151621ms step_avg:99.10ms +step:1531/1695 train_time:151723ms step_avg:99.10ms +step:1532/1695 train_time:151825ms step_avg:99.10ms +step:1533/1695 train_time:151925ms step_avg:99.10ms +step:1534/1695 train_time:152027ms step_avg:99.11ms +step:1535/1695 train_time:152129ms step_avg:99.11ms +step:1536/1695 train_time:152232ms step_avg:99.11ms +step:1537/1695 train_time:152333ms step_avg:99.11ms +step:1538/1695 train_time:152435ms step_avg:99.11ms +step:1539/1695 train_time:152536ms step_avg:99.11ms +step:1540/1695 train_time:152638ms step_avg:99.12ms +step:1541/1695 train_time:152741ms step_avg:99.12ms +step:1542/1695 train_time:152844ms step_avg:99.12ms +step:1543/1695 train_time:152947ms step_avg:99.12ms +step:1544/1695 train_time:153049ms step_avg:99.12ms +step:1545/1695 train_time:153151ms step_avg:99.13ms +step:1546/1695 train_time:153253ms step_avg:99.13ms +step:1547/1695 train_time:153356ms step_avg:99.13ms +step:1548/1695 train_time:153458ms step_avg:99.13ms +step:1549/1695 train_time:153560ms step_avg:99.14ms +step:1550/1695 train_time:153661ms step_avg:99.14ms +step:1551/1695 train_time:153763ms step_avg:99.14ms +step:1552/1695 train_time:153865ms step_avg:99.14ms +step:1553/1695 train_time:153968ms step_avg:99.14ms +step:1554/1695 train_time:154069ms step_avg:99.14ms +step:1555/1695 train_time:154172ms step_avg:99.15ms +step:1556/1695 train_time:154275ms step_avg:99.15ms +step:1557/1695 train_time:154378ms step_avg:99.15ms +step:1558/1695 train_time:154482ms step_avg:99.15ms +step:1559/1695 train_time:154584ms step_avg:99.16ms +step:1560/1695 train_time:154686ms step_avg:99.16ms +step:1561/1695 train_time:154787ms step_avg:99.16ms +step:1562/1695 train_time:154890ms step_avg:99.16ms +step:1563/1695 train_time:154994ms step_avg:99.16ms +step:1564/1695 train_time:155095ms step_avg:99.17ms +step:1565/1695 train_time:155196ms step_avg:99.17ms +step:1566/1695 train_time:155298ms step_avg:99.17ms +step:1567/1695 train_time:155398ms step_avg:99.17ms +step:1568/1695 train_time:155499ms step_avg:99.17ms +step:1569/1695 train_time:155600ms step_avg:99.17ms +step:1570/1695 train_time:155703ms step_avg:99.17ms +step:1571/1695 train_time:155805ms step_avg:99.18ms +step:1572/1695 train_time:155905ms step_avg:99.18ms +step:1573/1695 train_time:156008ms step_avg:99.18ms +step:1574/1695 train_time:156109ms step_avg:99.18ms 
+step:1575/1695 train_time:156211ms step_avg:99.18ms +step:1576/1695 train_time:156315ms step_avg:99.18ms +step:1577/1695 train_time:156418ms step_avg:99.19ms +step:1578/1695 train_time:156520ms step_avg:99.19ms +step:1579/1695 train_time:156621ms step_avg:99.19ms +step:1580/1695 train_time:156723ms step_avg:99.19ms +step:1581/1695 train_time:156826ms step_avg:99.19ms +step:1582/1695 train_time:156927ms step_avg:99.20ms +step:1583/1695 train_time:157030ms step_avg:99.20ms +step:1584/1695 train_time:157133ms step_avg:99.20ms +step:1585/1695 train_time:157235ms step_avg:99.20ms +step:1586/1695 train_time:157338ms step_avg:99.20ms +step:1587/1695 train_time:157439ms step_avg:99.21ms +step:1588/1695 train_time:157539ms step_avg:99.21ms +step:1589/1695 train_time:157640ms step_avg:99.21ms +step:1590/1695 train_time:157742ms step_avg:99.21ms +step:1591/1695 train_time:157844ms step_avg:99.21ms +step:1592/1695 train_time:157946ms step_avg:99.21ms +step:1593/1695 train_time:158047ms step_avg:99.21ms +step:1594/1695 train_time:158151ms step_avg:99.22ms +step:1595/1695 train_time:158254ms step_avg:99.22ms +step:1596/1695 train_time:158355ms step_avg:99.22ms +step:1597/1695 train_time:158457ms step_avg:99.22ms +step:1598/1695 train_time:158560ms step_avg:99.22ms +step:1599/1695 train_time:158660ms step_avg:99.22ms +step:1600/1695 train_time:158762ms step_avg:99.23ms +step:1601/1695 train_time:158864ms step_avg:99.23ms +step:1602/1695 train_time:158965ms step_avg:99.23ms +step:1603/1695 train_time:159067ms step_avg:99.23ms +step:1604/1695 train_time:159170ms step_avg:99.23ms +step:1605/1695 train_time:159273ms step_avg:99.24ms +step:1606/1695 train_time:159376ms step_avg:99.24ms +step:1607/1695 train_time:159477ms step_avg:99.24ms +step:1608/1695 train_time:159578ms step_avg:99.24ms +step:1609/1695 train_time:159679ms step_avg:99.24ms +step:1610/1695 train_time:159781ms step_avg:99.24ms +step:1611/1695 train_time:159884ms step_avg:99.24ms +step:1612/1695 train_time:159985ms step_avg:99.25ms +step:1613/1695 train_time:160087ms step_avg:99.25ms +step:1614/1695 train_time:160188ms step_avg:99.25ms +step:1615/1695 train_time:160291ms step_avg:99.25ms +step:1616/1695 train_time:160392ms step_avg:99.25ms +step:1617/1695 train_time:160495ms step_avg:99.25ms +step:1618/1695 train_time:160597ms step_avg:99.26ms +step:1619/1695 train_time:160698ms step_avg:99.26ms +step:1620/1695 train_time:160801ms step_avg:99.26ms +step:1621/1695 train_time:160902ms step_avg:99.26ms +step:1622/1695 train_time:161003ms step_avg:99.26ms +step:1623/1695 train_time:161105ms step_avg:99.26ms +step:1624/1695 train_time:161208ms step_avg:99.27ms +step:1625/1695 train_time:161311ms step_avg:99.27ms +step:1625/1695 val_loss:3.2905 train_time:161412ms step_avg:99.33ms +step:1626/1695 train_time:161438ms step_avg:99.29ms +step:1627/1695 train_time:161526ms step_avg:99.28ms +step:1628/1695 train_time:161629ms step_avg:99.28ms +step:1629/1695 train_time:161733ms step_avg:99.28ms +step:1630/1695 train_time:161835ms step_avg:99.29ms +step:1631/1695 train_time:161936ms step_avg:99.29ms +step:1632/1695 train_time:162038ms step_avg:99.29ms +step:1633/1695 train_time:162138ms step_avg:99.29ms +step:1634/1695 train_time:162241ms step_avg:99.29ms +step:1635/1695 train_time:162342ms step_avg:99.29ms +step:1636/1695 train_time:162445ms step_avg:99.29ms +step:1637/1695 train_time:162548ms step_avg:99.30ms +step:1638/1695 train_time:162652ms step_avg:99.30ms +step:1639/1695 train_time:162756ms step_avg:99.30ms +step:1640/1695 train_time:162859ms 
step_avg:99.30ms +step:1641/1695 train_time:162963ms step_avg:99.31ms +step:1642/1695 train_time:163064ms step_avg:99.31ms +step:1643/1695 train_time:163168ms step_avg:99.31ms +step:1644/1695 train_time:163269ms step_avg:99.31ms +step:1645/1695 train_time:163373ms step_avg:99.31ms +step:1646/1695 train_time:163475ms step_avg:99.32ms +step:1647/1695 train_time:163579ms step_avg:99.32ms +step:1648/1695 train_time:163682ms step_avg:99.32ms +step:1649/1695 train_time:163785ms step_avg:99.32ms +step:1650/1695 train_time:163889ms step_avg:99.33ms +step:1651/1695 train_time:163992ms step_avg:99.33ms +step:1652/1695 train_time:164096ms step_avg:99.33ms +step:1653/1695 train_time:164199ms step_avg:99.33ms +step:1654/1695 train_time:164300ms step_avg:99.33ms +step:1655/1695 train_time:164402ms step_avg:99.34ms +step:1656/1695 train_time:164505ms step_avg:99.34ms +step:1657/1695 train_time:164607ms step_avg:99.34ms +step:1658/1695 train_time:164710ms step_avg:99.34ms +step:1659/1695 train_time:164816ms step_avg:99.35ms +step:1660/1695 train_time:164918ms step_avg:99.35ms +step:1661/1695 train_time:165022ms step_avg:99.35ms +step:1662/1695 train_time:165126ms step_avg:99.35ms +step:1663/1695 train_time:165230ms step_avg:99.36ms +step:1664/1695 train_time:165334ms step_avg:99.36ms +step:1665/1695 train_time:165439ms step_avg:99.36ms +step:1666/1695 train_time:165542ms step_avg:99.36ms +step:1667/1695 train_time:165643ms step_avg:99.37ms +step:1668/1695 train_time:165747ms step_avg:99.37ms +step:1669/1695 train_time:165852ms step_avg:99.37ms +step:1670/1695 train_time:165954ms step_avg:99.37ms +step:1671/1695 train_time:166057ms step_avg:99.38ms +step:1672/1695 train_time:166160ms step_avg:99.38ms +step:1673/1695 train_time:166262ms step_avg:99.38ms +step:1674/1695 train_time:166365ms step_avg:99.38ms +step:1675/1695 train_time:166467ms step_avg:99.38ms +step:1676/1695 train_time:166572ms step_avg:99.39ms +step:1677/1695 train_time:166675ms step_avg:99.39ms +step:1678/1695 train_time:166778ms step_avg:99.39ms +step:1679/1695 train_time:166881ms step_avg:99.39ms +step:1680/1695 train_time:166983ms step_avg:99.39ms +step:1681/1695 train_time:167086ms step_avg:99.40ms +step:1682/1695 train_time:167192ms step_avg:99.40ms +step:1683/1695 train_time:167295ms step_avg:99.40ms +step:1684/1695 train_time:167397ms step_avg:99.40ms +step:1685/1695 train_time:167500ms step_avg:99.41ms +step:1686/1695 train_time:167602ms step_avg:99.41ms +step:1687/1695 train_time:167703ms step_avg:99.41ms +step:1688/1695 train_time:167805ms step_avg:99.41ms +step:1689/1695 train_time:167907ms step_avg:99.41ms +step:1690/1695 train_time:168010ms step_avg:99.41ms +step:1691/1695 train_time:168114ms step_avg:99.42ms +step:1692/1695 train_time:168216ms step_avg:99.42ms +step:1693/1695 train_time:168320ms step_avg:99.42ms +step:1694/1695 train_time:168425ms step_avg:99.42ms +step:1695/1695 train_time:168528ms step_avg:99.43ms +step:1695/1695 val_loss:3.2774 train_time:168627ms step_avg:99.48ms +peak memory allocated: 34004 MiB reserved: 49680 MiB diff --git a/records/082325_SparseAttnGate/6701af06-6c40-4553-bb04-f501fdd56284.txt b/records/082325_SparseAttnGate/6701af06-6c40-4553-bb04-f501fdd56284.txt new file mode 100644 index 000000000..28ee46440 --- /dev/null +++ b/records/082325_SparseAttnGate/6701af06-6c40-4553-bb04-f501fdd56284.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
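# advance both tile pointers one K-block: at_ptrs walks A.T, whose K axis is A's column axis, so it steps by the same a_stride_c +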
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
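+
+    A minimal usage sketch (illustrative only: the ndim-based parameter split below
+    is an assumption, not this file's exact grouping, and step() requires an
+    initialized torch.distributed process group, since it calls dist.get_rank()):
+
+        matrix_params = [p for p in model.parameters() if p.ndim >= 2]
+        other_params = [p for p in model.parameters() if p.ndim < 2]
+        optimizers = [Muon(matrix_params, lr=0.02, momentum=0.95),
+                      DistAdam(other_params, lr=1e-3)]
+        for opt in optimizers:
+            opt.step()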
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=self.attn_scale).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x),
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
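+        # lr_mul / wd_mul are ad-hoc per-parameter attributes: Muon and DistAdam
+        # both read them via getattr(p, "lr_mul", 1.0) / getattr(p, "wd_mul", 1.0)
+        # to scale that parameter's effective learning rate and weight decay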
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
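+        # after the all_reduce, val_loss is the mean over every rank's val batches,
+        # so the value printed by rank 0 below is the global validation loss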
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:24:11 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 32C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 37C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 124W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 31C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 38C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 305810 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 305811 C /usr/bin/python3 614MiB | +| 0 N/A N/A 305812 C /usr/bin/python3 614MiB | +| 0 N/A N/A 305813 C /usr/bin/python3 614MiB | +| 0 N/A N/A 305814 C /usr/bin/python3 614MiB | +| 0 N/A N/A 305815 C /usr/bin/python3 614MiB | +| 0 N/A N/A 305816 C /usr/bin/python3 614MiB | +| 0 N/A N/A 305817 C /usr/bin/python3 614MiB | +| 1 N/A N/A 305811 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 305812 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 305813 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 305814 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 305815 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 305816 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 305817 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:154ms step_avg:153.57ms +step:2/1695 train_time:178ms step_avg:89.05ms +step:3/1695 train_time:249ms step_avg:83.13ms +step:4/1695 train_time:342ms step_avg:85.39ms +step:5/1695 train_time:434ms step_avg:86.82ms +step:6/1695 train_time:527ms step_avg:87.75ms +step:7/1695 train_time:620ms step_avg:88.51ms +step:8/1695 
train_time:713ms step_avg:89.12ms +step:9/1695 train_time:806ms step_avg:89.55ms +step:10/1695 train_time:899ms step_avg:89.86ms +step:11/1695 train_time:991ms step_avg:90.12ms +step:12/1695 train_time:1085ms step_avg:90.45ms +step:13/1695 train_time:1181ms step_avg:90.81ms +step:14/1695 train_time:1276ms step_avg:91.15ms +step:15/1695 train_time:1370ms step_avg:91.32ms +step:16/1695 train_time:1463ms step_avg:91.47ms +step:17/1695 train_time:1557ms step_avg:91.58ms +step:18/1695 train_time:1650ms step_avg:91.68ms +step:19/1695 train_time:1744ms step_avg:91.82ms +step:20/1695 train_time:1838ms step_avg:91.90ms +step:21/1695 train_time:1931ms step_avg:91.96ms +step:22/1695 train_time:2025ms step_avg:92.05ms +step:23/1695 train_time:2119ms step_avg:92.12ms +step:24/1695 train_time:2213ms step_avg:92.19ms +step:25/1695 train_time:2307ms step_avg:92.28ms +step:26/1695 train_time:2401ms step_avg:92.36ms +step:27/1695 train_time:2495ms step_avg:92.42ms +step:28/1695 train_time:2590ms step_avg:92.51ms +step:29/1695 train_time:2684ms step_avg:92.55ms +step:30/1695 train_time:2778ms step_avg:92.59ms +step:31/1695 train_time:2871ms step_avg:92.62ms +step:32/1695 train_time:2965ms step_avg:92.66ms +step:33/1695 train_time:3059ms step_avg:92.70ms +step:34/1695 train_time:3153ms step_avg:92.73ms +step:35/1695 train_time:3247ms step_avg:92.77ms +step:36/1695 train_time:3341ms step_avg:92.82ms +step:37/1695 train_time:3436ms step_avg:92.86ms +step:38/1695 train_time:3530ms step_avg:92.89ms +step:39/1695 train_time:3624ms step_avg:92.92ms +step:40/1695 train_time:3718ms step_avg:92.96ms +step:41/1695 train_time:3813ms step_avg:93.00ms +step:42/1695 train_time:3906ms step_avg:93.01ms +step:43/1695 train_time:4000ms step_avg:93.01ms +step:44/1695 train_time:4093ms step_avg:93.03ms +step:45/1695 train_time:4187ms step_avg:93.04ms +step:46/1695 train_time:4281ms step_avg:93.07ms +step:47/1695 train_time:4375ms step_avg:93.08ms +step:48/1695 train_time:4468ms step_avg:93.09ms +step:49/1695 train_time:4562ms step_avg:93.11ms +step:50/1695 train_time:4656ms step_avg:93.12ms +step:51/1695 train_time:4750ms step_avg:93.13ms +step:52/1695 train_time:4844ms step_avg:93.15ms +step:53/1695 train_time:4938ms step_avg:93.16ms +step:54/1695 train_time:5031ms step_avg:93.16ms +step:55/1695 train_time:5124ms step_avg:93.17ms +step:56/1695 train_time:5218ms step_avg:93.19ms +step:57/1695 train_time:5311ms step_avg:93.18ms +step:58/1695 train_time:5405ms step_avg:93.19ms +step:59/1695 train_time:5499ms step_avg:93.20ms +step:60/1695 train_time:5592ms step_avg:93.20ms +step:61/1695 train_time:5686ms step_avg:93.22ms +step:62/1695 train_time:5780ms step_avg:93.23ms +step:63/1695 train_time:5874ms step_avg:93.24ms +step:64/1695 train_time:5967ms step_avg:93.24ms +step:65/1695 train_time:6061ms step_avg:93.25ms +step:66/1695 train_time:6155ms step_avg:93.26ms +step:67/1695 train_time:6248ms step_avg:93.26ms +step:68/1695 train_time:6342ms step_avg:93.27ms +step:69/1695 train_time:6436ms step_avg:93.27ms +step:70/1695 train_time:6529ms step_avg:93.27ms +step:71/1695 train_time:6623ms step_avg:93.28ms +step:72/1695 train_time:6717ms step_avg:93.30ms +step:73/1695 train_time:6811ms step_avg:93.30ms +step:74/1695 train_time:6905ms step_avg:93.31ms +step:75/1695 train_time:7000ms step_avg:93.33ms +step:76/1695 train_time:7093ms step_avg:93.32ms +step:77/1695 train_time:7186ms step_avg:93.32ms +step:78/1695 train_time:7279ms step_avg:93.32ms +step:79/1695 train_time:7373ms step_avg:93.32ms +step:80/1695 train_time:7466ms 
step_avg:93.32ms +step:81/1695 train_time:7560ms step_avg:93.33ms +step:82/1695 train_time:7654ms step_avg:93.35ms +step:83/1695 train_time:7748ms step_avg:93.35ms +step:84/1695 train_time:7842ms step_avg:93.36ms +step:85/1695 train_time:7936ms step_avg:93.36ms +step:86/1695 train_time:8029ms step_avg:93.36ms +step:87/1695 train_time:8123ms step_avg:93.37ms +step:88/1695 train_time:8218ms step_avg:93.39ms +step:89/1695 train_time:8312ms step_avg:93.39ms +step:90/1695 train_time:8405ms step_avg:93.39ms +step:91/1695 train_time:8498ms step_avg:93.39ms +step:92/1695 train_time:8591ms step_avg:93.39ms +step:93/1695 train_time:8685ms step_avg:93.39ms +step:94/1695 train_time:8779ms step_avg:93.39ms +step:95/1695 train_time:8872ms step_avg:93.39ms +step:96/1695 train_time:8966ms step_avg:93.39ms +step:97/1695 train_time:9060ms step_avg:93.40ms +step:98/1695 train_time:9155ms step_avg:93.42ms +step:99/1695 train_time:9247ms step_avg:93.40ms +step:100/1695 train_time:9341ms step_avg:93.41ms +step:101/1695 train_time:9434ms step_avg:93.41ms +step:102/1695 train_time:9528ms step_avg:93.41ms +step:103/1695 train_time:9622ms step_avg:93.42ms +step:104/1695 train_time:9716ms step_avg:93.42ms +step:105/1695 train_time:9809ms step_avg:93.42ms +step:106/1695 train_time:9903ms step_avg:93.42ms +step:107/1695 train_time:9997ms step_avg:93.43ms +step:108/1695 train_time:10091ms step_avg:93.43ms +step:109/1695 train_time:10184ms step_avg:93.43ms +step:110/1695 train_time:10277ms step_avg:93.43ms +step:111/1695 train_time:10370ms step_avg:93.43ms +step:112/1695 train_time:10464ms step_avg:93.43ms +step:113/1695 train_time:10558ms step_avg:93.43ms +step:114/1695 train_time:10651ms step_avg:93.43ms +step:115/1695 train_time:10745ms step_avg:93.44ms +step:116/1695 train_time:10840ms step_avg:93.45ms +step:117/1695 train_time:10933ms step_avg:93.44ms +step:118/1695 train_time:11027ms step_avg:93.45ms +step:119/1695 train_time:11121ms step_avg:93.45ms +step:120/1695 train_time:11214ms step_avg:93.45ms +step:121/1695 train_time:11307ms step_avg:93.45ms +step:122/1695 train_time:11401ms step_avg:93.45ms +step:123/1695 train_time:11495ms step_avg:93.45ms +step:124/1695 train_time:11588ms step_avg:93.45ms +step:125/1695 train_time:11682ms step_avg:93.45ms +step:125/1695 val_loss:4.5907 train_time:11774ms step_avg:94.19ms +step:126/1695 train_time:11800ms step_avg:93.65ms +step:127/1695 train_time:11878ms step_avg:93.52ms +step:128/1695 train_time:11980ms step_avg:93.60ms +step:129/1695 train_time:12076ms step_avg:93.61ms +step:130/1695 train_time:12171ms step_avg:93.62ms +step:131/1695 train_time:12264ms step_avg:93.62ms +step:132/1695 train_time:12357ms step_avg:93.61ms +step:133/1695 train_time:12451ms step_avg:93.62ms +step:134/1695 train_time:12544ms step_avg:93.61ms +step:135/1695 train_time:12638ms step_avg:93.61ms +step:136/1695 train_time:12732ms step_avg:93.62ms +step:137/1695 train_time:12828ms step_avg:93.63ms +step:138/1695 train_time:12924ms step_avg:93.65ms +step:139/1695 train_time:13018ms step_avg:93.65ms +step:140/1695 train_time:13114ms step_avg:93.67ms +step:141/1695 train_time:13209ms step_avg:93.68ms +step:142/1695 train_time:13302ms step_avg:93.68ms +step:143/1695 train_time:13396ms step_avg:93.68ms +step:144/1695 train_time:13490ms step_avg:93.68ms +step:145/1695 train_time:13583ms step_avg:93.68ms +step:146/1695 train_time:13676ms step_avg:93.67ms +step:147/1695 train_time:13771ms step_avg:93.68ms +step:148/1695 train_time:13866ms step_avg:93.69ms +step:149/1695 train_time:13960ms 
step_avg:93.69ms +step:150/1695 train_time:14055ms step_avg:93.70ms +step:151/1695 train_time:14151ms step_avg:93.71ms +step:152/1695 train_time:14245ms step_avg:93.72ms +step:153/1695 train_time:14339ms step_avg:93.72ms +step:154/1695 train_time:14433ms step_avg:93.72ms +step:155/1695 train_time:14527ms step_avg:93.72ms +step:156/1695 train_time:14620ms step_avg:93.72ms +step:157/1695 train_time:14714ms step_avg:93.72ms +step:158/1695 train_time:14809ms step_avg:93.73ms +step:159/1695 train_time:14903ms step_avg:93.73ms +step:160/1695 train_time:14997ms step_avg:93.73ms +step:161/1695 train_time:15092ms step_avg:93.74ms +step:162/1695 train_time:15187ms step_avg:93.74ms +step:163/1695 train_time:15280ms step_avg:93.74ms +step:164/1695 train_time:15374ms step_avg:93.74ms +step:165/1695 train_time:15468ms step_avg:93.75ms +step:166/1695 train_time:15563ms step_avg:93.75ms +step:167/1695 train_time:15656ms step_avg:93.75ms +step:168/1695 train_time:15750ms step_avg:93.75ms +step:169/1695 train_time:15845ms step_avg:93.75ms +step:170/1695 train_time:15939ms step_avg:93.76ms +step:171/1695 train_time:16033ms step_avg:93.76ms +step:172/1695 train_time:16128ms step_avg:93.77ms +step:173/1695 train_time:16222ms step_avg:93.77ms +step:174/1695 train_time:16316ms step_avg:93.77ms +step:175/1695 train_time:16410ms step_avg:93.77ms +step:176/1695 train_time:16505ms step_avg:93.78ms +step:177/1695 train_time:16599ms step_avg:93.78ms +step:178/1695 train_time:16693ms step_avg:93.78ms +step:179/1695 train_time:16787ms step_avg:93.78ms +step:180/1695 train_time:16882ms step_avg:93.79ms +step:181/1695 train_time:16976ms step_avg:93.79ms +step:182/1695 train_time:17070ms step_avg:93.79ms +step:183/1695 train_time:17164ms step_avg:93.79ms +step:184/1695 train_time:17258ms step_avg:93.79ms +step:185/1695 train_time:17353ms step_avg:93.80ms +step:186/1695 train_time:17448ms step_avg:93.81ms +step:187/1695 train_time:17542ms step_avg:93.81ms +step:188/1695 train_time:17636ms step_avg:93.81ms +step:189/1695 train_time:17730ms step_avg:93.81ms +step:190/1695 train_time:17824ms step_avg:93.81ms +step:191/1695 train_time:17918ms step_avg:93.81ms +step:192/1695 train_time:18012ms step_avg:93.81ms +step:193/1695 train_time:18107ms step_avg:93.82ms +step:194/1695 train_time:18200ms step_avg:93.82ms +step:195/1695 train_time:18294ms step_avg:93.82ms +step:196/1695 train_time:18388ms step_avg:93.82ms +step:197/1695 train_time:18482ms step_avg:93.82ms +step:198/1695 train_time:18576ms step_avg:93.82ms +step:199/1695 train_time:18670ms step_avg:93.82ms +step:200/1695 train_time:18764ms step_avg:93.82ms +step:201/1695 train_time:18857ms step_avg:93.82ms +step:202/1695 train_time:18951ms step_avg:93.82ms +step:203/1695 train_time:19046ms step_avg:93.82ms +step:204/1695 train_time:19140ms step_avg:93.82ms +step:205/1695 train_time:19234ms step_avg:93.82ms +step:206/1695 train_time:19328ms step_avg:93.83ms +step:207/1695 train_time:19422ms step_avg:93.82ms +step:208/1695 train_time:19516ms step_avg:93.83ms +step:209/1695 train_time:19610ms step_avg:93.83ms +step:210/1695 train_time:19704ms step_avg:93.83ms +step:211/1695 train_time:19798ms step_avg:93.83ms +step:212/1695 train_time:19893ms step_avg:93.83ms +step:213/1695 train_time:19987ms step_avg:93.83ms +step:214/1695 train_time:20080ms step_avg:93.83ms +step:215/1695 train_time:20174ms step_avg:93.83ms +step:216/1695 train_time:20269ms step_avg:93.84ms +step:217/1695 train_time:20365ms step_avg:93.85ms +step:218/1695 train_time:20459ms step_avg:93.85ms +step:219/1695 
train_time:20553ms step_avg:93.85ms +step:220/1695 train_time:20649ms step_avg:93.86ms +step:221/1695 train_time:20741ms step_avg:93.85ms +step:222/1695 train_time:20836ms step_avg:93.85ms +step:223/1695 train_time:20930ms step_avg:93.86ms +step:224/1695 train_time:21024ms step_avg:93.86ms +step:225/1695 train_time:21118ms step_avg:93.86ms +step:226/1695 train_time:21213ms step_avg:93.86ms +step:227/1695 train_time:21308ms step_avg:93.87ms +step:228/1695 train_time:21402ms step_avg:93.87ms +step:229/1695 train_time:21496ms step_avg:93.87ms +step:230/1695 train_time:21590ms step_avg:93.87ms +step:231/1695 train_time:21684ms step_avg:93.87ms +step:232/1695 train_time:21778ms step_avg:93.87ms +step:233/1695 train_time:21872ms step_avg:93.87ms +step:234/1695 train_time:21966ms step_avg:93.87ms +step:235/1695 train_time:22060ms step_avg:93.87ms +step:236/1695 train_time:22154ms step_avg:93.87ms +step:237/1695 train_time:22250ms step_avg:93.88ms +step:238/1695 train_time:22343ms step_avg:93.88ms +step:239/1695 train_time:22437ms step_avg:93.88ms +step:240/1695 train_time:22533ms step_avg:93.89ms +step:241/1695 train_time:22628ms step_avg:93.89ms +step:242/1695 train_time:22722ms step_avg:93.89ms +step:243/1695 train_time:22816ms step_avg:93.89ms +step:244/1695 train_time:22910ms step_avg:93.89ms +step:245/1695 train_time:23003ms step_avg:93.89ms +step:246/1695 train_time:23098ms step_avg:93.89ms +step:247/1695 train_time:23192ms step_avg:93.89ms +step:248/1695 train_time:23286ms step_avg:93.89ms +step:249/1695 train_time:23380ms step_avg:93.89ms +step:250/1695 train_time:23474ms step_avg:93.90ms +step:250/1695 val_loss:4.0689 train_time:23568ms step_avg:94.27ms +step:251/1695 train_time:23593ms step_avg:94.00ms +step:252/1695 train_time:23674ms step_avg:93.94ms +step:253/1695 train_time:23772ms step_avg:93.96ms +step:254/1695 train_time:23867ms step_avg:93.97ms +step:255/1695 train_time:23962ms step_avg:93.97ms +step:256/1695 train_time:24056ms step_avg:93.97ms +step:257/1695 train_time:24150ms step_avg:93.97ms +step:258/1695 train_time:24243ms step_avg:93.97ms +step:259/1695 train_time:24337ms step_avg:93.96ms +step:260/1695 train_time:24431ms step_avg:93.97ms +step:261/1695 train_time:24525ms step_avg:93.97ms +step:262/1695 train_time:24620ms step_avg:93.97ms +step:263/1695 train_time:24716ms step_avg:93.98ms +step:264/1695 train_time:24812ms step_avg:93.99ms +step:265/1695 train_time:24907ms step_avg:93.99ms +step:266/1695 train_time:25002ms step_avg:93.99ms +step:267/1695 train_time:25095ms step_avg:93.99ms +step:268/1695 train_time:25190ms step_avg:93.99ms +step:269/1695 train_time:25284ms step_avg:93.99ms +step:270/1695 train_time:25378ms step_avg:93.99ms +step:271/1695 train_time:25471ms step_avg:93.99ms +step:272/1695 train_time:25566ms step_avg:93.99ms +step:273/1695 train_time:25660ms step_avg:93.99ms +step:274/1695 train_time:25756ms step_avg:94.00ms +step:275/1695 train_time:25851ms step_avg:94.01ms +step:276/1695 train_time:25947ms step_avg:94.01ms +step:277/1695 train_time:26041ms step_avg:94.01ms +step:278/1695 train_time:26135ms step_avg:94.01ms +step:279/1695 train_time:26230ms step_avg:94.02ms +step:280/1695 train_time:26325ms step_avg:94.02ms +step:281/1695 train_time:26419ms step_avg:94.02ms +step:282/1695 train_time:26513ms step_avg:94.02ms +step:283/1695 train_time:26609ms step_avg:94.02ms +step:284/1695 train_time:26703ms step_avg:94.03ms +step:285/1695 train_time:26798ms step_avg:94.03ms +step:286/1695 train_time:26893ms step_avg:94.03ms +step:287/1695 train_time:26989ms 
step_avg:94.04ms +step:288/1695 train_time:27083ms step_avg:94.04ms +step:289/1695 train_time:27177ms step_avg:94.04ms +step:290/1695 train_time:27272ms step_avg:94.04ms +step:291/1695 train_time:27368ms step_avg:94.05ms +step:292/1695 train_time:27462ms step_avg:94.05ms +step:293/1695 train_time:27556ms step_avg:94.05ms +step:294/1695 train_time:27650ms step_avg:94.05ms +step:295/1695 train_time:27745ms step_avg:94.05ms +step:296/1695 train_time:27839ms step_avg:94.05ms +step:297/1695 train_time:27934ms step_avg:94.05ms +step:298/1695 train_time:28028ms step_avg:94.05ms +step:299/1695 train_time:28123ms step_avg:94.06ms +step:300/1695 train_time:28217ms step_avg:94.06ms +step:301/1695 train_time:28312ms step_avg:94.06ms +step:302/1695 train_time:28407ms step_avg:94.06ms +step:303/1695 train_time:28500ms step_avg:94.06ms +step:304/1695 train_time:28595ms step_avg:94.06ms +step:305/1695 train_time:28691ms step_avg:94.07ms +step:306/1695 train_time:28785ms step_avg:94.07ms +step:307/1695 train_time:28879ms step_avg:94.07ms +step:308/1695 train_time:28973ms step_avg:94.07ms +step:309/1695 train_time:29068ms step_avg:94.07ms +step:310/1695 train_time:29163ms step_avg:94.07ms +step:311/1695 train_time:29257ms step_avg:94.07ms +step:312/1695 train_time:29352ms step_avg:94.08ms +step:313/1695 train_time:29447ms step_avg:94.08ms +step:314/1695 train_time:29540ms step_avg:94.08ms +step:315/1695 train_time:29635ms step_avg:94.08ms +step:316/1695 train_time:29729ms step_avg:94.08ms +step:317/1695 train_time:29824ms step_avg:94.08ms +step:318/1695 train_time:29918ms step_avg:94.08ms +step:319/1695 train_time:30012ms step_avg:94.08ms +step:320/1695 train_time:30107ms step_avg:94.09ms +step:321/1695 train_time:30201ms step_avg:94.09ms +step:322/1695 train_time:30296ms step_avg:94.09ms +step:323/1695 train_time:30390ms step_avg:94.09ms +step:324/1695 train_time:30485ms step_avg:94.09ms +step:325/1695 train_time:30578ms step_avg:94.09ms +step:326/1695 train_time:30674ms step_avg:94.09ms +step:327/1695 train_time:30769ms step_avg:94.09ms +step:328/1695 train_time:30863ms step_avg:94.09ms +step:329/1695 train_time:30957ms step_avg:94.09ms +step:330/1695 train_time:31052ms step_avg:94.10ms +step:331/1695 train_time:31146ms step_avg:94.10ms +step:332/1695 train_time:31240ms step_avg:94.10ms +step:333/1695 train_time:31335ms step_avg:94.10ms +step:334/1695 train_time:31430ms step_avg:94.10ms +step:335/1695 train_time:31524ms step_avg:94.10ms +step:336/1695 train_time:31618ms step_avg:94.10ms +step:337/1695 train_time:31713ms step_avg:94.11ms +step:338/1695 train_time:31808ms step_avg:94.11ms +step:339/1695 train_time:31902ms step_avg:94.11ms +step:340/1695 train_time:31996ms step_avg:94.11ms +step:341/1695 train_time:32092ms step_avg:94.11ms +step:342/1695 train_time:32187ms step_avg:94.12ms +step:343/1695 train_time:32281ms step_avg:94.11ms +step:344/1695 train_time:32375ms step_avg:94.11ms +step:345/1695 train_time:32470ms step_avg:94.12ms +step:346/1695 train_time:32564ms step_avg:94.12ms +step:347/1695 train_time:32658ms step_avg:94.11ms +step:348/1695 train_time:32753ms step_avg:94.12ms +step:349/1695 train_time:32848ms step_avg:94.12ms +step:350/1695 train_time:32942ms step_avg:94.12ms +step:351/1695 train_time:33036ms step_avg:94.12ms +step:352/1695 train_time:33132ms step_avg:94.12ms +step:353/1695 train_time:33226ms step_avg:94.13ms +step:354/1695 train_time:33320ms step_avg:94.12ms +step:355/1695 train_time:33415ms step_avg:94.13ms +step:356/1695 train_time:33509ms step_avg:94.13ms +step:357/1695 
train_time:33604ms step_avg:94.13ms +step:358/1695 train_time:33697ms step_avg:94.13ms +step:359/1695 train_time:33792ms step_avg:94.13ms +step:360/1695 train_time:33887ms step_avg:94.13ms +step:361/1695 train_time:33981ms step_avg:94.13ms +step:362/1695 train_time:34075ms step_avg:94.13ms +step:363/1695 train_time:34171ms step_avg:94.14ms +step:364/1695 train_time:34265ms step_avg:94.14ms +step:365/1695 train_time:34359ms step_avg:94.13ms +step:366/1695 train_time:34454ms step_avg:94.14ms +step:367/1695 train_time:34548ms step_avg:94.14ms +step:368/1695 train_time:34642ms step_avg:94.14ms +step:369/1695 train_time:34736ms step_avg:94.14ms +step:370/1695 train_time:34831ms step_avg:94.14ms +step:371/1695 train_time:34925ms step_avg:94.14ms +step:372/1695 train_time:35019ms step_avg:94.14ms +step:373/1695 train_time:35113ms step_avg:94.14ms +step:374/1695 train_time:35209ms step_avg:94.14ms +step:375/1695 train_time:35302ms step_avg:94.14ms +step:375/1695 val_loss:3.8750 train_time:35394ms step_avg:94.39ms +step:376/1695 train_time:35420ms step_avg:94.20ms +step:377/1695 train_time:35500ms step_avg:94.16ms +step:378/1695 train_time:35598ms step_avg:94.18ms +step:379/1695 train_time:35696ms step_avg:94.18ms +step:380/1695 train_time:35791ms step_avg:94.19ms +step:381/1695 train_time:35888ms step_avg:94.19ms +step:382/1695 train_time:35983ms step_avg:94.20ms +step:383/1695 train_time:36079ms step_avg:94.20ms +step:384/1695 train_time:36174ms step_avg:94.20ms +step:385/1695 train_time:36269ms step_avg:94.21ms +step:386/1695 train_time:36365ms step_avg:94.21ms +step:387/1695 train_time:36462ms step_avg:94.22ms +step:388/1695 train_time:36559ms step_avg:94.22ms +step:389/1695 train_time:36656ms step_avg:94.23ms +step:390/1695 train_time:36753ms step_avg:94.24ms +step:391/1695 train_time:36850ms step_avg:94.25ms +step:392/1695 train_time:36946ms step_avg:94.25ms +step:393/1695 train_time:37041ms step_avg:94.25ms +step:394/1695 train_time:37138ms step_avg:94.26ms +step:395/1695 train_time:37233ms step_avg:94.26ms +step:396/1695 train_time:37329ms step_avg:94.27ms +step:397/1695 train_time:37426ms step_avg:94.27ms +step:398/1695 train_time:37523ms step_avg:94.28ms +step:399/1695 train_time:37619ms step_avg:94.28ms +step:400/1695 train_time:37714ms step_avg:94.29ms +step:401/1695 train_time:37812ms step_avg:94.29ms +step:402/1695 train_time:37908ms step_avg:94.30ms +step:403/1695 train_time:38005ms step_avg:94.31ms +step:404/1695 train_time:38100ms step_avg:94.31ms +step:405/1695 train_time:38196ms step_avg:94.31ms +step:406/1695 train_time:38292ms step_avg:94.32ms +step:407/1695 train_time:38389ms step_avg:94.32ms +step:408/1695 train_time:38486ms step_avg:94.33ms +step:409/1695 train_time:38582ms step_avg:94.33ms +step:410/1695 train_time:38677ms step_avg:94.33ms +step:411/1695 train_time:38774ms step_avg:94.34ms +step:412/1695 train_time:38871ms step_avg:94.35ms +step:413/1695 train_time:38967ms step_avg:94.35ms +step:414/1695 train_time:39063ms step_avg:94.36ms +step:415/1695 train_time:39159ms step_avg:94.36ms +step:416/1695 train_time:39255ms step_avg:94.36ms +step:417/1695 train_time:39351ms step_avg:94.37ms +step:418/1695 train_time:39448ms step_avg:94.37ms +step:419/1695 train_time:39545ms step_avg:94.38ms +step:420/1695 train_time:39641ms step_avg:94.38ms +step:421/1695 train_time:39737ms step_avg:94.39ms +step:422/1695 train_time:39833ms step_avg:94.39ms +step:423/1695 train_time:39930ms step_avg:94.40ms +step:424/1695 train_time:40027ms step_avg:94.40ms +step:425/1695 train_time:40123ms 
step_avg:94.41ms +step:426/1695 train_time:40219ms step_avg:94.41ms +step:427/1695 train_time:40314ms step_avg:94.41ms +step:428/1695 train_time:40410ms step_avg:94.42ms +step:429/1695 train_time:40506ms step_avg:94.42ms +step:430/1695 train_time:40602ms step_avg:94.42ms +step:431/1695 train_time:40698ms step_avg:94.43ms +step:432/1695 train_time:40794ms step_avg:94.43ms +step:433/1695 train_time:40891ms step_avg:94.44ms +step:434/1695 train_time:40988ms step_avg:94.44ms +step:435/1695 train_time:41085ms step_avg:94.45ms +step:436/1695 train_time:41181ms step_avg:94.45ms +step:437/1695 train_time:41277ms step_avg:94.46ms +step:438/1695 train_time:41373ms step_avg:94.46ms +step:439/1695 train_time:41469ms step_avg:94.46ms +step:440/1695 train_time:41566ms step_avg:94.47ms +step:441/1695 train_time:41662ms step_avg:94.47ms +step:442/1695 train_time:41757ms step_avg:94.47ms +step:443/1695 train_time:41854ms step_avg:94.48ms +step:444/1695 train_time:41951ms step_avg:94.48ms +step:445/1695 train_time:42047ms step_avg:94.49ms +step:446/1695 train_time:42144ms step_avg:94.49ms +step:447/1695 train_time:42240ms step_avg:94.50ms +step:448/1695 train_time:42336ms step_avg:94.50ms +step:449/1695 train_time:42432ms step_avg:94.50ms +step:450/1695 train_time:42529ms step_avg:94.51ms +step:451/1695 train_time:42626ms step_avg:94.51ms +step:452/1695 train_time:42722ms step_avg:94.52ms +step:453/1695 train_time:42818ms step_avg:94.52ms +step:454/1695 train_time:42914ms step_avg:94.52ms +step:455/1695 train_time:43010ms step_avg:94.53ms +step:456/1695 train_time:43107ms step_avg:94.53ms +step:457/1695 train_time:43203ms step_avg:94.54ms +step:458/1695 train_time:43299ms step_avg:94.54ms +step:459/1695 train_time:43395ms step_avg:94.54ms +step:460/1695 train_time:43492ms step_avg:94.55ms +step:461/1695 train_time:43589ms step_avg:94.55ms +step:462/1695 train_time:43685ms step_avg:94.56ms +step:463/1695 train_time:43782ms step_avg:94.56ms +step:464/1695 train_time:43878ms step_avg:94.56ms +step:465/1695 train_time:43974ms step_avg:94.57ms +step:466/1695 train_time:44070ms step_avg:94.57ms +step:467/1695 train_time:44167ms step_avg:94.58ms +step:468/1695 train_time:44263ms step_avg:94.58ms +step:469/1695 train_time:44359ms step_avg:94.58ms +step:470/1695 train_time:44455ms step_avg:94.59ms +step:471/1695 train_time:44552ms step_avg:94.59ms +step:472/1695 train_time:44650ms step_avg:94.60ms +step:473/1695 train_time:44746ms step_avg:94.60ms +step:474/1695 train_time:44842ms step_avg:94.60ms +step:475/1695 train_time:44938ms step_avg:94.61ms +step:476/1695 train_time:45035ms step_avg:94.61ms +step:477/1695 train_time:45131ms step_avg:94.61ms +step:478/1695 train_time:45228ms step_avg:94.62ms +step:479/1695 train_time:45324ms step_avg:94.62ms +step:480/1695 train_time:45420ms step_avg:94.63ms +step:481/1695 train_time:45516ms step_avg:94.63ms +step:482/1695 train_time:45613ms step_avg:94.63ms +step:483/1695 train_time:45709ms step_avg:94.64ms +step:484/1695 train_time:45805ms step_avg:94.64ms +step:485/1695 train_time:45901ms step_avg:94.64ms +step:486/1695 train_time:45997ms step_avg:94.64ms +step:487/1695 train_time:46093ms step_avg:94.65ms +step:488/1695 train_time:46190ms step_avg:94.65ms +step:489/1695 train_time:46286ms step_avg:94.66ms +step:490/1695 train_time:46383ms step_avg:94.66ms +step:491/1695 train_time:46478ms step_avg:94.66ms +step:492/1695 train_time:46574ms step_avg:94.66ms +step:493/1695 train_time:46671ms step_avg:94.67ms +step:494/1695 train_time:46767ms step_avg:94.67ms +step:495/1695 
train_time:46863ms step_avg:94.67ms +step:496/1695 train_time:46959ms step_avg:94.68ms +step:497/1695 train_time:47055ms step_avg:94.68ms +step:498/1695 train_time:47151ms step_avg:94.68ms +step:499/1695 train_time:47248ms step_avg:94.68ms +step:500/1695 train_time:47344ms step_avg:94.69ms +step:500/1695 val_loss:3.7291 train_time:47440ms step_avg:94.88ms +step:501/1695 train_time:47465ms step_avg:94.74ms +step:502/1695 train_time:47549ms step_avg:94.72ms +step:503/1695 train_time:47648ms step_avg:94.73ms +step:504/1695 train_time:47745ms step_avg:94.73ms +step:505/1695 train_time:47840ms step_avg:94.73ms +step:506/1695 train_time:47937ms step_avg:94.74ms +step:507/1695 train_time:48033ms step_avg:94.74ms +step:508/1695 train_time:48128ms step_avg:94.74ms +step:509/1695 train_time:48224ms step_avg:94.74ms +step:510/1695 train_time:48319ms step_avg:94.74ms +step:511/1695 train_time:48416ms step_avg:94.75ms +step:512/1695 train_time:48515ms step_avg:94.76ms +step:513/1695 train_time:48614ms step_avg:94.76ms +step:514/1695 train_time:48712ms step_avg:94.77ms +step:515/1695 train_time:48811ms step_avg:94.78ms +step:516/1695 train_time:48906ms step_avg:94.78ms +step:517/1695 train_time:49001ms step_avg:94.78ms +step:518/1695 train_time:49097ms step_avg:94.78ms +step:519/1695 train_time:49194ms step_avg:94.79ms +step:520/1695 train_time:49290ms step_avg:94.79ms +step:521/1695 train_time:49386ms step_avg:94.79ms +step:522/1695 train_time:49483ms step_avg:94.79ms +step:523/1695 train_time:49580ms step_avg:94.80ms +step:524/1695 train_time:49678ms step_avg:94.81ms +step:525/1695 train_time:49776ms step_avg:94.81ms +step:526/1695 train_time:49874ms step_avg:94.82ms +step:527/1695 train_time:49971ms step_avg:94.82ms +step:528/1695 train_time:50068ms step_avg:94.83ms +step:529/1695 train_time:50163ms step_avg:94.83ms +step:530/1695 train_time:50259ms step_avg:94.83ms +step:531/1695 train_time:50356ms step_avg:94.83ms +step:532/1695 train_time:50454ms step_avg:94.84ms +step:533/1695 train_time:50551ms step_avg:94.84ms +step:534/1695 train_time:50650ms step_avg:94.85ms +step:535/1695 train_time:50748ms step_avg:94.86ms +step:536/1695 train_time:50844ms step_avg:94.86ms +step:537/1695 train_time:50940ms step_avg:94.86ms +step:538/1695 train_time:51037ms step_avg:94.86ms +step:539/1695 train_time:51134ms step_avg:94.87ms +step:540/1695 train_time:51230ms step_avg:94.87ms +step:541/1695 train_time:51326ms step_avg:94.87ms +step:542/1695 train_time:51422ms step_avg:94.87ms +step:543/1695 train_time:51518ms step_avg:94.88ms +step:544/1695 train_time:51616ms step_avg:94.88ms +step:545/1695 train_time:51714ms step_avg:94.89ms +step:546/1695 train_time:51811ms step_avg:94.89ms +step:547/1695 train_time:51909ms step_avg:94.90ms +step:548/1695 train_time:52006ms step_avg:94.90ms +step:549/1695 train_time:52102ms step_avg:94.90ms +step:550/1695 train_time:52198ms step_avg:94.91ms +step:551/1695 train_time:52296ms step_avg:94.91ms +step:552/1695 train_time:52393ms step_avg:94.92ms +step:553/1695 train_time:52489ms step_avg:94.92ms +step:554/1695 train_time:52585ms step_avg:94.92ms +step:555/1695 train_time:52682ms step_avg:94.92ms +step:556/1695 train_time:52778ms step_avg:94.93ms +step:557/1695 train_time:52876ms step_avg:94.93ms +step:558/1695 train_time:52974ms step_avg:94.94ms +step:559/1695 train_time:53072ms step_avg:94.94ms +step:560/1695 train_time:53169ms step_avg:94.94ms +step:561/1695 train_time:53265ms step_avg:94.95ms +step:562/1695 train_time:53361ms step_avg:94.95ms +step:563/1695 train_time:53458ms 
step_avg:94.95ms +step:564/1695 train_time:53555ms step_avg:94.96ms +step:565/1695 train_time:53652ms step_avg:94.96ms +step:566/1695 train_time:53749ms step_avg:94.96ms +step:567/1695 train_time:53845ms step_avg:94.96ms +step:568/1695 train_time:53941ms step_avg:94.97ms +step:569/1695 train_time:54038ms step_avg:94.97ms +step:570/1695 train_time:54135ms step_avg:94.97ms +step:571/1695 train_time:54232ms step_avg:94.98ms +step:572/1695 train_time:54329ms step_avg:94.98ms +step:573/1695 train_time:54426ms step_avg:94.98ms +step:574/1695 train_time:54522ms step_avg:94.99ms +step:575/1695 train_time:54618ms step_avg:94.99ms +step:576/1695 train_time:54715ms step_avg:94.99ms +step:577/1695 train_time:54813ms step_avg:95.00ms +step:578/1695 train_time:54910ms step_avg:95.00ms +step:579/1695 train_time:55006ms step_avg:95.00ms +step:580/1695 train_time:55103ms step_avg:95.00ms +step:581/1695 train_time:55199ms step_avg:95.01ms +step:582/1695 train_time:55296ms step_avg:95.01ms +step:583/1695 train_time:55394ms step_avg:95.01ms +step:584/1695 train_time:55491ms step_avg:95.02ms +step:585/1695 train_time:55588ms step_avg:95.02ms +step:586/1695 train_time:55684ms step_avg:95.02ms +step:587/1695 train_time:55780ms step_avg:95.02ms +step:588/1695 train_time:55877ms step_avg:95.03ms +step:589/1695 train_time:55973ms step_avg:95.03ms +step:590/1695 train_time:56071ms step_avg:95.04ms +step:591/1695 train_time:56168ms step_avg:95.04ms +step:592/1695 train_time:56264ms step_avg:95.04ms +step:593/1695 train_time:56361ms step_avg:95.04ms +step:594/1695 train_time:56457ms step_avg:95.05ms +step:595/1695 train_time:56556ms step_avg:95.05ms +step:596/1695 train_time:56653ms step_avg:95.06ms +step:597/1695 train_time:56750ms step_avg:95.06ms +step:598/1695 train_time:56847ms step_avg:95.06ms +step:599/1695 train_time:56943ms step_avg:95.06ms +step:600/1695 train_time:57039ms step_avg:95.07ms +step:601/1695 train_time:57136ms step_avg:95.07ms +step:602/1695 train_time:57234ms step_avg:95.07ms +step:603/1695 train_time:57331ms step_avg:95.08ms +step:604/1695 train_time:57427ms step_avg:95.08ms +step:605/1695 train_time:57524ms step_avg:95.08ms +step:606/1695 train_time:57620ms step_avg:95.08ms +step:607/1695 train_time:57717ms step_avg:95.09ms +step:608/1695 train_time:57814ms step_avg:95.09ms +step:609/1695 train_time:57911ms step_avg:95.09ms +step:610/1695 train_time:58006ms step_avg:95.09ms +step:611/1695 train_time:58102ms step_avg:95.09ms +step:612/1695 train_time:58198ms step_avg:95.10ms +step:613/1695 train_time:58296ms step_avg:95.10ms +step:614/1695 train_time:58393ms step_avg:95.10ms +step:615/1695 train_time:58490ms step_avg:95.11ms +step:616/1695 train_time:58587ms step_avg:95.11ms +step:617/1695 train_time:58682ms step_avg:95.11ms +step:618/1695 train_time:58779ms step_avg:95.11ms +step:619/1695 train_time:58876ms step_avg:95.11ms +step:620/1695 train_time:58973ms step_avg:95.12ms +step:621/1695 train_time:59070ms step_avg:95.12ms +step:622/1695 train_time:59167ms step_avg:95.12ms +step:623/1695 train_time:59263ms step_avg:95.13ms +step:624/1695 train_time:59360ms step_avg:95.13ms +step:625/1695 train_time:59457ms step_avg:95.13ms +step:625/1695 val_loss:3.6467 train_time:59552ms step_avg:95.28ms +step:626/1695 train_time:59578ms step_avg:95.17ms +step:627/1695 train_time:59664ms step_avg:95.16ms +step:628/1695 train_time:59765ms step_avg:95.17ms +step:629/1695 train_time:59862ms step_avg:95.17ms +step:630/1695 train_time:59960ms step_avg:95.17ms +step:631/1695 train_time:60058ms step_avg:95.18ms 
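Note: the step_avg field in these logs is simply cumulative train_time divided by the step index, which is why it creeps upward as later steps get slightly slower. A minimal sketch recomputing it from one entry; the regex and the sample line are illustrative, not part of the recorded script:

import re
line = "step:631/1695 train_time:60058ms step_avg:95.18ms"
m = re.match(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms", line)
step, train_ms, avg = int(m.group(1)), int(m.group(2)), float(m.group(3))
assert abs(train_ms / step - avg) < 0.01  # 60058 / 631 ~= 95.18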
+step:632/1695 train_time:60155ms step_avg:95.18ms +step:633/1695 train_time:60252ms step_avg:95.19ms +step:634/1695 train_time:60349ms step_avg:95.19ms +step:635/1695 train_time:60685ms step_avg:95.57ms +step:636/1695 train_time:60780ms step_avg:95.57ms +step:637/1695 train_time:60877ms step_avg:95.57ms +step:638/1695 train_time:60975ms step_avg:95.57ms +step:639/1695 train_time:61072ms step_avg:95.57ms +step:640/1695 train_time:61169ms step_avg:95.58ms +step:641/1695 train_time:61528ms step_avg:95.99ms +step:642/1695 train_time:61622ms step_avg:95.98ms +step:643/1695 train_time:61720ms step_avg:95.99ms +step:644/1695 train_time:61817ms step_avg:95.99ms +step:645/1695 train_time:61914ms step_avg:95.99ms +step:646/1695 train_time:62011ms step_avg:95.99ms +step:647/1695 train_time:62109ms step_avg:95.99ms +step:648/1695 train_time:62206ms step_avg:96.00ms +step:649/1695 train_time:62303ms step_avg:96.00ms +step:650/1695 train_time:62400ms step_avg:96.00ms +step:651/1695 train_time:62503ms step_avg:96.01ms +step:652/1695 train_time:62882ms step_avg:96.44ms +step:653/1695 train_time:62932ms step_avg:96.37ms +step:654/1695 train_time:63029ms step_avg:96.37ms +step:655/1695 train_time:63126ms step_avg:96.38ms +step:656/1695 train_time:63224ms step_avg:96.38ms +step:657/1695 train_time:63320ms step_avg:96.38ms +step:658/1695 train_time:63418ms step_avg:96.38ms +step:659/1695 train_time:63515ms step_avg:96.38ms +step:660/1695 train_time:63612ms step_avg:96.38ms +step:661/1695 train_time:63709ms step_avg:96.38ms +step:662/1695 train_time:63808ms step_avg:96.39ms +step:663/1695 train_time:63908ms step_avg:96.39ms +step:664/1695 train_time:64006ms step_avg:96.39ms +step:665/1695 train_time:64104ms step_avg:96.40ms +step:666/1695 train_time:64202ms step_avg:96.40ms +step:667/1695 train_time:64299ms step_avg:96.40ms +step:668/1695 train_time:64398ms step_avg:96.40ms +step:669/1695 train_time:64495ms step_avg:96.41ms +step:670/1695 train_time:64593ms step_avg:96.41ms +step:671/1695 train_time:64690ms step_avg:96.41ms +step:672/1695 train_time:64788ms step_avg:96.41ms +step:673/1695 train_time:64886ms step_avg:96.41ms +step:674/1695 train_time:64985ms step_avg:96.42ms +step:675/1695 train_time:65083ms step_avg:96.42ms +step:676/1695 train_time:65181ms step_avg:96.42ms +step:677/1695 train_time:65280ms step_avg:96.42ms +step:678/1695 train_time:65377ms step_avg:96.43ms +step:679/1695 train_time:65475ms step_avg:96.43ms +step:680/1695 train_time:65572ms step_avg:96.43ms +step:681/1695 train_time:65670ms step_avg:96.43ms +step:682/1695 train_time:65768ms step_avg:96.43ms +step:683/1695 train_time:65866ms step_avg:96.44ms +step:684/1695 train_time:65964ms step_avg:96.44ms +step:685/1695 train_time:66063ms step_avg:96.44ms +step:686/1695 train_time:66161ms step_avg:96.45ms +step:687/1695 train_time:66260ms step_avg:96.45ms +step:688/1695 train_time:66359ms step_avg:96.45ms +step:689/1695 train_time:66456ms step_avg:96.45ms +step:690/1695 train_time:66554ms step_avg:96.46ms +step:691/1695 train_time:66652ms step_avg:96.46ms +step:692/1695 train_time:66751ms step_avg:96.46ms +step:693/1695 train_time:66849ms step_avg:96.46ms +step:694/1695 train_time:66947ms step_avg:96.47ms +step:695/1695 train_time:67045ms step_avg:96.47ms +step:696/1695 train_time:67142ms step_avg:96.47ms +step:697/1695 train_time:67240ms step_avg:96.47ms +step:698/1695 train_time:67338ms step_avg:96.47ms +step:699/1695 train_time:67436ms step_avg:96.47ms +step:700/1695 train_time:67534ms step_avg:96.48ms +step:701/1695 train_time:67632ms 
step_avg:96.48ms +step:702/1695 train_time:67729ms step_avg:96.48ms +step:703/1695 train_time:67827ms step_avg:96.48ms +step:704/1695 train_time:67925ms step_avg:96.48ms +step:705/1695 train_time:68023ms step_avg:96.49ms +step:706/1695 train_time:68121ms step_avg:96.49ms +step:707/1695 train_time:68219ms step_avg:96.49ms +step:708/1695 train_time:68318ms step_avg:96.49ms +step:709/1695 train_time:68415ms step_avg:96.50ms +step:710/1695 train_time:68513ms step_avg:96.50ms +step:711/1695 train_time:68611ms step_avg:96.50ms +step:712/1695 train_time:68940ms step_avg:96.83ms +step:713/1695 train_time:69036ms step_avg:96.82ms +step:714/1695 train_time:69133ms step_avg:96.83ms +step:715/1695 train_time:69230ms step_avg:96.82ms +step:716/1695 train_time:69327ms step_avg:96.82ms +step:717/1695 train_time:69424ms step_avg:96.83ms +step:718/1695 train_time:69521ms step_avg:96.83ms +step:719/1695 train_time:69921ms step_avg:97.25ms +step:720/1695 train_time:70016ms step_avg:97.25ms +step:721/1695 train_time:70113ms step_avg:97.24ms +step:722/1695 train_time:70210ms step_avg:97.24ms +step:723/1695 train_time:70307ms step_avg:97.24ms +step:724/1695 train_time:70404ms step_avg:97.24ms +step:725/1695 train_time:70501ms step_avg:97.24ms +step:726/1695 train_time:70599ms step_avg:97.24ms +step:727/1695 train_time:70696ms step_avg:97.24ms +step:728/1695 train_time:70793ms step_avg:97.24ms +step:729/1695 train_time:70895ms step_avg:97.25ms +step:730/1695 train_time:70994ms step_avg:97.25ms +step:731/1695 train_time:71092ms step_avg:97.25ms +step:732/1695 train_time:71189ms step_avg:97.25ms +step:733/1695 train_time:71286ms step_avg:97.25ms +step:734/1695 train_time:71384ms step_avg:97.25ms +step:735/1695 train_time:71481ms step_avg:97.25ms +step:736/1695 train_time:71579ms step_avg:97.25ms +step:737/1695 train_time:71676ms step_avg:97.25ms +step:738/1695 train_time:71774ms step_avg:97.25ms +step:739/1695 train_time:71872ms step_avg:97.26ms +step:740/1695 train_time:71971ms step_avg:97.26ms +step:741/1695 train_time:72069ms step_avg:97.26ms +step:742/1695 train_time:72167ms step_avg:97.26ms +step:743/1695 train_time:72264ms step_avg:97.26ms +step:744/1695 train_time:72362ms step_avg:97.26ms +step:745/1695 train_time:72459ms step_avg:97.26ms +step:746/1695 train_time:72557ms step_avg:97.26ms +step:747/1695 train_time:72654ms step_avg:97.26ms +step:748/1695 train_time:72752ms step_avg:97.26ms +step:749/1695 train_time:72849ms step_avg:97.26ms +step:750/1695 train_time:72947ms step_avg:97.26ms +step:750/1695 val_loss:3.5846 train_time:73043ms step_avg:97.39ms +step:751/1695 train_time:73069ms step_avg:97.30ms +step:752/1695 train_time:73158ms step_avg:97.28ms +step:753/1695 train_time:73258ms step_avg:97.29ms +step:754/1695 train_time:73356ms step_avg:97.29ms +step:755/1695 train_time:73453ms step_avg:97.29ms +step:756/1695 train_time:73550ms step_avg:97.29ms +step:757/1695 train_time:73648ms step_avg:97.29ms +step:758/1695 train_time:73746ms step_avg:97.29ms +step:759/1695 train_time:73843ms step_avg:97.29ms +step:760/1695 train_time:73940ms step_avg:97.29ms +step:761/1695 train_time:74038ms step_avg:97.29ms +step:762/1695 train_time:74136ms step_avg:97.29ms +step:763/1695 train_time:74236ms step_avg:97.29ms +step:764/1695 train_time:74334ms step_avg:97.30ms +step:765/1695 train_time:74432ms step_avg:97.30ms +step:766/1695 train_time:74530ms step_avg:97.30ms +step:767/1695 train_time:74628ms step_avg:97.30ms +step:768/1695 train_time:74725ms step_avg:97.30ms +step:769/1695 train_time:74823ms step_avg:97.30ms 
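Note: consecutive train_time deltas also expose the occasional slow step scattered through this stretch (roughly 330-400ms at steps 635, 641, 652, 712 and 719, against a ~95-97ms baseline). A sketch flagging them from (step, cumulative ms) pairs parsed as above; the 200ms threshold is an arbitrary choice, not anything the script defines:

def slow_steps(times, threshold_ms=200):
    # times: list of (step, cumulative train_time in ms)
    flagged = []
    for (s0, t0), (s1, t1) in zip(times, times[1:]):
        if s1 == s0 + 1 and (t1 - t0) > threshold_ms:
            flagged.append((s1, t1 - t0))
    return flagged
# on this log: (635, 336), (641, 359), (652, 379), (712, 329), (719, 400), ...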
+step:770/1695 train_time:74921ms step_avg:97.30ms +step:771/1695 train_time:75019ms step_avg:97.30ms +step:772/1695 train_time:75117ms step_avg:97.30ms +step:773/1695 train_time:75215ms step_avg:97.30ms +step:774/1695 train_time:75313ms step_avg:97.30ms +step:775/1695 train_time:75412ms step_avg:97.31ms +step:776/1695 train_time:75509ms step_avg:97.31ms +step:777/1695 train_time:75607ms step_avg:97.31ms +step:778/1695 train_time:75931ms step_avg:97.60ms +step:779/1695 train_time:76028ms step_avg:97.60ms +step:780/1695 train_time:76126ms step_avg:97.60ms +step:781/1695 train_time:76223ms step_avg:97.60ms +step:782/1695 train_time:76321ms step_avg:97.60ms +step:783/1695 train_time:76418ms step_avg:97.60ms +step:784/1695 train_time:76515ms step_avg:97.60ms +step:785/1695 train_time:76841ms step_avg:97.89ms +step:786/1695 train_time:76937ms step_avg:97.88ms +step:787/1695 train_time:77034ms step_avg:97.88ms +step:788/1695 train_time:77131ms step_avg:97.88ms +step:789/1695 train_time:77228ms step_avg:97.88ms +step:790/1695 train_time:77326ms step_avg:97.88ms +step:791/1695 train_time:77423ms step_avg:97.88ms +step:792/1695 train_time:77521ms step_avg:97.88ms +step:793/1695 train_time:77617ms step_avg:97.88ms +step:794/1695 train_time:77716ms step_avg:97.88ms +step:795/1695 train_time:77814ms step_avg:97.88ms +step:796/1695 train_time:77913ms step_avg:97.88ms +step:797/1695 train_time:78012ms step_avg:97.88ms +step:798/1695 train_time:78110ms step_avg:97.88ms +step:799/1695 train_time:78208ms step_avg:97.88ms +step:800/1695 train_time:78306ms step_avg:97.88ms +step:801/1695 train_time:78404ms step_avg:97.88ms +step:802/1695 train_time:78501ms step_avg:97.88ms +step:803/1695 train_time:78599ms step_avg:97.88ms +step:804/1695 train_time:78697ms step_avg:97.88ms +step:805/1695 train_time:78795ms step_avg:97.88ms +step:806/1695 train_time:78893ms step_avg:97.88ms +step:807/1695 train_time:78991ms step_avg:97.88ms +step:808/1695 train_time:79090ms step_avg:97.88ms +step:809/1695 train_time:79189ms step_avg:97.88ms +step:810/1695 train_time:79287ms step_avg:97.88ms +step:811/1695 train_time:79386ms step_avg:97.89ms +step:812/1695 train_time:79483ms step_avg:97.89ms +step:813/1695 train_time:79581ms step_avg:97.89ms +step:814/1695 train_time:79679ms step_avg:97.89ms +step:815/1695 train_time:79778ms step_avg:97.89ms +step:816/1695 train_time:79876ms step_avg:97.89ms +step:817/1695 train_time:79974ms step_avg:97.89ms +step:818/1695 train_time:80071ms step_avg:97.89ms +step:819/1695 train_time:80170ms step_avg:97.89ms +step:820/1695 train_time:80268ms step_avg:97.89ms +step:821/1695 train_time:80367ms step_avg:97.89ms +step:822/1695 train_time:80465ms step_avg:97.89ms +step:823/1695 train_time:80563ms step_avg:97.89ms +step:824/1695 train_time:80663ms step_avg:97.89ms +step:825/1695 train_time:80762ms step_avg:97.89ms +step:826/1695 train_time:80861ms step_avg:97.89ms +step:827/1695 train_time:80959ms step_avg:97.89ms +step:828/1695 train_time:81058ms step_avg:97.90ms +step:829/1695 train_time:81156ms step_avg:97.90ms +step:830/1695 train_time:81253ms step_avg:97.90ms +step:831/1695 train_time:81351ms step_avg:97.90ms +step:832/1695 train_time:81449ms step_avg:97.90ms +step:833/1695 train_time:81547ms step_avg:97.90ms +step:834/1695 train_time:81647ms step_avg:97.90ms +step:835/1695 train_time:81746ms step_avg:97.90ms +step:836/1695 train_time:81846ms step_avg:97.90ms +step:837/1695 train_time:81945ms step_avg:97.90ms +step:838/1695 train_time:82044ms step_avg:97.90ms +step:839/1695 train_time:82143ms 
step_avg:97.91ms +step:840/1695 train_time:82242ms step_avg:97.91ms +step:841/1695 train_time:82340ms step_avg:97.91ms +step:842/1695 train_time:82439ms step_avg:97.91ms +step:843/1695 train_time:82536ms step_avg:97.91ms +step:844/1695 train_time:82634ms step_avg:97.91ms +step:845/1695 train_time:82732ms step_avg:97.91ms +step:846/1695 train_time:82830ms step_avg:97.91ms +step:847/1695 train_time:82928ms step_avg:97.91ms +step:848/1695 train_time:83028ms step_avg:97.91ms +step:849/1695 train_time:83127ms step_avg:97.91ms +step:850/1695 train_time:83225ms step_avg:97.91ms +step:851/1695 train_time:83324ms step_avg:97.91ms +step:852/1695 train_time:83424ms step_avg:97.92ms +step:853/1695 train_time:83523ms step_avg:97.92ms +step:854/1695 train_time:83620ms step_avg:97.92ms +step:855/1695 train_time:83719ms step_avg:97.92ms +step:856/1695 train_time:83817ms step_avg:97.92ms +step:857/1695 train_time:83916ms step_avg:97.92ms +step:858/1695 train_time:84015ms step_avg:97.92ms +step:859/1695 train_time:84113ms step_avg:97.92ms +step:860/1695 train_time:84211ms step_avg:97.92ms +step:861/1695 train_time:84309ms step_avg:97.92ms +step:862/1695 train_time:84661ms step_avg:98.21ms +step:863/1695 train_time:84758ms step_avg:98.21ms +step:864/1695 train_time:84855ms step_avg:98.21ms +step:865/1695 train_time:84952ms step_avg:98.21ms +step:866/1695 train_time:85050ms step_avg:98.21ms +step:867/1695 train_time:85147ms step_avg:98.21ms +step:868/1695 train_time:85245ms step_avg:98.21ms +step:869/1695 train_time:85343ms step_avg:98.21ms +step:870/1695 train_time:85441ms step_avg:98.21ms +step:871/1695 train_time:85540ms step_avg:98.21ms +step:872/1695 train_time:85641ms step_avg:98.21ms +step:873/1695 train_time:85740ms step_avg:98.21ms +step:874/1695 train_time:85839ms step_avg:98.21ms +step:875/1695 train_time:85937ms step_avg:98.21ms +step:875/1695 val_loss:3.5373 train_time:86033ms step_avg:98.32ms +step:876/1695 train_time:86058ms step_avg:98.24ms +step:877/1695 train_time:86144ms step_avg:98.23ms +step:878/1695 train_time:86248ms step_avg:98.23ms +step:879/1695 train_time:86346ms step_avg:98.23ms +step:880/1695 train_time:86443ms step_avg:98.23ms +step:881/1695 train_time:86542ms step_avg:98.23ms +step:882/1695 train_time:86641ms step_avg:98.23ms +step:883/1695 train_time:86740ms step_avg:98.23ms +step:884/1695 train_time:86840ms step_avg:98.24ms +step:885/1695 train_time:86939ms step_avg:98.24ms +step:886/1695 train_time:87038ms step_avg:98.24ms +step:887/1695 train_time:87139ms step_avg:98.24ms +step:888/1695 train_time:87241ms step_avg:98.24ms +step:889/1695 train_time:87342ms step_avg:98.25ms +step:890/1695 train_time:87442ms step_avg:98.25ms +step:891/1695 train_time:87541ms step_avg:98.25ms +step:892/1695 train_time:87641ms step_avg:98.25ms +step:893/1695 train_time:87740ms step_avg:98.25ms +step:894/1695 train_time:87839ms step_avg:98.25ms +step:895/1695 train_time:87938ms step_avg:98.25ms +step:896/1695 train_time:88038ms step_avg:98.26ms +step:897/1695 train_time:88137ms step_avg:98.26ms +step:898/1695 train_time:88238ms step_avg:98.26ms +step:899/1695 train_time:88340ms step_avg:98.26ms +step:900/1695 train_time:88441ms step_avg:98.27ms +step:901/1695 train_time:88540ms step_avg:98.27ms +step:902/1695 train_time:88640ms step_avg:98.27ms +step:903/1695 train_time:88740ms step_avg:98.27ms +step:904/1695 train_time:88839ms step_avg:98.27ms +step:905/1695 train_time:88939ms step_avg:98.27ms +step:906/1695 train_time:89039ms step_avg:98.28ms +step:907/1695 train_time:89139ms step_avg:98.28ms 
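Note: the val_loss lines are the milestones worth extracting from a record in this format. A minimal sketch; the filename is illustrative, and the resulting list shown is abridged:

import re
milestones = []
with open("record.txt") as f:  # any record file in this format
    for ln in f:
        m = re.search(r"step:(\d+)/\d+ val_loss:([\d.]+)", ln)
        if m:
            milestones.append((int(m.group(1)), float(m.group(2))))
# here: ..., (375, 3.875), (500, 3.7291), ..., (1625, 3.2926), (1695, 3.2796)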
+step:908/1695 train_time:89240ms step_avg:98.28ms +step:909/1695 train_time:89340ms step_avg:98.28ms +step:910/1695 train_time:89440ms step_avg:98.29ms +step:911/1695 train_time:89540ms step_avg:98.29ms +step:912/1695 train_time:89640ms step_avg:98.29ms +step:913/1695 train_time:89739ms step_avg:98.29ms +step:914/1695 train_time:89839ms step_avg:98.29ms +step:915/1695 train_time:89938ms step_avg:98.29ms +step:916/1695 train_time:90038ms step_avg:98.29ms +step:917/1695 train_time:90138ms step_avg:98.30ms +step:918/1695 train_time:90239ms step_avg:98.30ms +step:919/1695 train_time:90340ms step_avg:98.30ms +step:920/1695 train_time:90441ms step_avg:98.31ms +step:921/1695 train_time:90541ms step_avg:98.31ms +step:922/1695 train_time:90641ms step_avg:98.31ms +step:923/1695 train_time:90741ms step_avg:98.31ms +step:924/1695 train_time:90841ms step_avg:98.31ms +step:925/1695 train_time:90940ms step_avg:98.31ms +step:926/1695 train_time:91039ms step_avg:98.31ms +step:927/1695 train_time:91139ms step_avg:98.32ms +step:928/1695 train_time:91238ms step_avg:98.32ms +step:929/1695 train_time:91339ms step_avg:98.32ms +step:930/1695 train_time:91441ms step_avg:98.32ms +step:931/1695 train_time:91541ms step_avg:98.33ms +step:932/1695 train_time:91641ms step_avg:98.33ms +step:933/1695 train_time:91741ms step_avg:98.33ms +step:934/1695 train_time:91840ms step_avg:98.33ms +step:935/1695 train_time:91939ms step_avg:98.33ms +step:936/1695 train_time:92039ms step_avg:98.33ms +step:937/1695 train_time:92140ms step_avg:98.33ms +step:938/1695 train_time:92240ms step_avg:98.34ms +step:939/1695 train_time:92340ms step_avg:98.34ms +step:940/1695 train_time:92440ms step_avg:98.34ms +step:941/1695 train_time:92541ms step_avg:98.34ms +step:942/1695 train_time:92640ms step_avg:98.34ms +step:943/1695 train_time:92741ms step_avg:98.35ms +step:944/1695 train_time:92840ms step_avg:98.35ms +step:945/1695 train_time:92941ms step_avg:98.35ms +step:946/1695 train_time:93040ms step_avg:98.35ms +step:947/1695 train_time:93139ms step_avg:98.35ms +step:948/1695 train_time:93239ms step_avg:98.35ms +step:949/1695 train_time:93339ms step_avg:98.36ms +step:950/1695 train_time:93440ms step_avg:98.36ms +step:951/1695 train_time:93540ms step_avg:98.36ms +step:952/1695 train_time:93641ms step_avg:98.36ms +step:953/1695 train_time:93740ms step_avg:98.36ms +step:954/1695 train_time:93840ms step_avg:98.36ms +step:955/1695 train_time:93940ms step_avg:98.37ms +step:956/1695 train_time:94040ms step_avg:98.37ms +step:957/1695 train_time:94140ms step_avg:98.37ms +step:958/1695 train_time:94239ms step_avg:98.37ms +step:959/1695 train_time:94339ms step_avg:98.37ms +step:960/1695 train_time:94440ms step_avg:98.37ms +step:961/1695 train_time:94540ms step_avg:98.38ms +step:962/1695 train_time:94640ms step_avg:98.38ms +step:963/1695 train_time:94740ms step_avg:98.38ms +step:964/1695 train_time:94841ms step_avg:98.38ms +step:965/1695 train_time:94941ms step_avg:98.38ms +step:966/1695 train_time:95040ms step_avg:98.39ms +step:967/1695 train_time:95140ms step_avg:98.39ms +step:968/1695 train_time:95240ms step_avg:98.39ms +step:969/1695 train_time:95340ms step_avg:98.39ms +step:970/1695 train_time:95439ms step_avg:98.39ms +step:971/1695 train_time:95540ms step_avg:98.39ms +step:972/1695 train_time:95640ms step_avg:98.40ms +step:973/1695 train_time:95741ms step_avg:98.40ms +step:974/1695 train_time:95840ms step_avg:98.40ms +step:975/1695 train_time:95940ms step_avg:98.40ms +step:976/1695 train_time:96040ms step_avg:98.40ms +step:977/1695 train_time:96140ms 
step_avg:98.40ms +step:978/1695 train_time:96239ms step_avg:98.40ms +step:979/1695 train_time:96339ms step_avg:98.41ms +step:980/1695 train_time:96440ms step_avg:98.41ms +step:981/1695 train_time:96539ms step_avg:98.41ms +step:982/1695 train_time:96640ms step_avg:98.41ms +step:983/1695 train_time:96741ms step_avg:98.41ms +step:984/1695 train_time:96841ms step_avg:98.42ms +step:985/1695 train_time:96941ms step_avg:98.42ms +step:986/1695 train_time:97041ms step_avg:98.42ms +step:987/1695 train_time:97142ms step_avg:98.42ms +step:988/1695 train_time:97241ms step_avg:98.42ms +step:989/1695 train_time:97340ms step_avg:98.42ms +step:990/1695 train_time:97440ms step_avg:98.42ms +step:991/1695 train_time:97540ms step_avg:98.43ms +step:992/1695 train_time:97640ms step_avg:98.43ms +step:993/1695 train_time:97740ms step_avg:98.43ms +step:994/1695 train_time:97840ms step_avg:98.43ms +step:995/1695 train_time:97940ms step_avg:98.43ms +step:996/1695 train_time:98040ms step_avg:98.43ms +step:997/1695 train_time:98140ms step_avg:98.44ms +step:998/1695 train_time:98239ms step_avg:98.44ms +step:999/1695 train_time:98339ms step_avg:98.44ms +step:1000/1695 train_time:98438ms step_avg:98.44ms +step:1000/1695 val_loss:3.4932 train_time:98537ms step_avg:98.54ms +step:1001/1695 train_time:98563ms step_avg:98.46ms +step:1002/1695 train_time:98647ms step_avg:98.45ms +step:1003/1695 train_time:98748ms step_avg:98.45ms +step:1004/1695 train_time:98847ms step_avg:98.45ms +step:1005/1695 train_time:98946ms step_avg:98.45ms +step:1006/1695 train_time:99045ms step_avg:98.45ms +step:1007/1695 train_time:99143ms step_avg:98.45ms +step:1008/1695 train_time:99242ms step_avg:98.45ms +step:1009/1695 train_time:99340ms step_avg:98.45ms +step:1010/1695 train_time:99439ms step_avg:98.45ms +step:1011/1695 train_time:99539ms step_avg:98.46ms +step:1012/1695 train_time:99640ms step_avg:98.46ms +step:1013/1695 train_time:99741ms step_avg:98.46ms +step:1014/1695 train_time:99842ms step_avg:98.46ms +step:1015/1695 train_time:99942ms step_avg:98.46ms +step:1016/1695 train_time:100041ms step_avg:98.47ms +step:1017/1695 train_time:100142ms step_avg:98.47ms +step:1018/1695 train_time:100241ms step_avg:98.47ms +step:1019/1695 train_time:100339ms step_avg:98.47ms +step:1020/1695 train_time:100439ms step_avg:98.47ms +step:1021/1695 train_time:100539ms step_avg:98.47ms +step:1022/1695 train_time:100639ms step_avg:98.47ms +step:1023/1695 train_time:100740ms step_avg:98.48ms +step:1024/1695 train_time:100843ms step_avg:98.48ms +step:1025/1695 train_time:100943ms step_avg:98.48ms +step:1026/1695 train_time:101043ms step_avg:98.48ms +step:1027/1695 train_time:101142ms step_avg:98.48ms +step:1028/1695 train_time:101241ms step_avg:98.48ms +step:1029/1695 train_time:101342ms step_avg:98.49ms +step:1030/1695 train_time:101440ms step_avg:98.49ms +step:1031/1695 train_time:101540ms step_avg:98.49ms +step:1032/1695 train_time:101640ms step_avg:98.49ms +step:1033/1695 train_time:101740ms step_avg:98.49ms +step:1034/1695 train_time:101840ms step_avg:98.49ms +step:1035/1695 train_time:101941ms step_avg:98.49ms +step:1036/1695 train_time:102041ms step_avg:98.50ms +step:1037/1695 train_time:102142ms step_avg:98.50ms +step:1038/1695 train_time:102241ms step_avg:98.50ms +step:1039/1695 train_time:102340ms step_avg:98.50ms +step:1040/1695 train_time:102439ms step_avg:98.50ms +step:1041/1695 train_time:102539ms step_avg:98.50ms +step:1042/1695 train_time:102639ms step_avg:98.50ms +step:1043/1695 train_time:102739ms step_avg:98.50ms +step:1044/1695 
train_time:102839ms step_avg:98.51ms +step:1045/1695 train_time:102940ms step_avg:98.51ms +step:1046/1695 train_time:103041ms step_avg:98.51ms +step:1047/1695 train_time:103141ms step_avg:98.51ms +step:1048/1695 train_time:103241ms step_avg:98.51ms +step:1049/1695 train_time:103340ms step_avg:98.51ms +step:1050/1695 train_time:103440ms step_avg:98.51ms +step:1051/1695 train_time:103541ms step_avg:98.52ms +step:1052/1695 train_time:103640ms step_avg:98.52ms +step:1053/1695 train_time:103740ms step_avg:98.52ms +step:1054/1695 train_time:103840ms step_avg:98.52ms +step:1055/1695 train_time:103941ms step_avg:98.52ms +step:1056/1695 train_time:104041ms step_avg:98.52ms +step:1057/1695 train_time:104141ms step_avg:98.53ms +step:1058/1695 train_time:104241ms step_avg:98.53ms +step:1059/1695 train_time:104339ms step_avg:98.53ms +step:1060/1695 train_time:104439ms step_avg:98.53ms +step:1061/1695 train_time:104538ms step_avg:98.53ms +step:1062/1695 train_time:104639ms step_avg:98.53ms +step:1063/1695 train_time:104739ms step_avg:98.53ms +step:1064/1695 train_time:104839ms step_avg:98.53ms +step:1065/1695 train_time:104940ms step_avg:98.54ms +step:1066/1695 train_time:105040ms step_avg:98.54ms +step:1067/1695 train_time:105140ms step_avg:98.54ms +step:1068/1695 train_time:105240ms step_avg:98.54ms +step:1069/1695 train_time:105340ms step_avg:98.54ms +step:1070/1695 train_time:105440ms step_avg:98.54ms +step:1071/1695 train_time:105539ms step_avg:98.54ms +step:1072/1695 train_time:105639ms step_avg:98.54ms +step:1073/1695 train_time:105739ms step_avg:98.54ms +step:1074/1695 train_time:105838ms step_avg:98.55ms +step:1075/1695 train_time:105939ms step_avg:98.55ms +step:1076/1695 train_time:106038ms step_avg:98.55ms +step:1077/1695 train_time:106140ms step_avg:98.55ms +step:1078/1695 train_time:106239ms step_avg:98.55ms +step:1079/1695 train_time:106339ms step_avg:98.55ms +step:1080/1695 train_time:106439ms step_avg:98.55ms +step:1081/1695 train_time:106538ms step_avg:98.56ms +step:1082/1695 train_time:106639ms step_avg:98.56ms +step:1083/1695 train_time:106739ms step_avg:98.56ms +step:1084/1695 train_time:106839ms step_avg:98.56ms +step:1085/1695 train_time:106939ms step_avg:98.56ms +step:1086/1695 train_time:107040ms step_avg:98.56ms +step:1087/1695 train_time:107140ms step_avg:98.56ms +step:1088/1695 train_time:107240ms step_avg:98.57ms +step:1089/1695 train_time:107339ms step_avg:98.57ms +step:1090/1695 train_time:107439ms step_avg:98.57ms +step:1091/1695 train_time:107539ms step_avg:98.57ms +step:1092/1695 train_time:107639ms step_avg:98.57ms +step:1093/1695 train_time:107739ms step_avg:98.57ms +step:1094/1695 train_time:107839ms step_avg:98.57ms +step:1095/1695 train_time:107939ms step_avg:98.57ms +step:1096/1695 train_time:108040ms step_avg:98.58ms +step:1097/1695 train_time:108140ms step_avg:98.58ms +step:1098/1695 train_time:108240ms step_avg:98.58ms +step:1099/1695 train_time:108339ms step_avg:98.58ms +step:1100/1695 train_time:108439ms step_avg:98.58ms +step:1101/1695 train_time:108538ms step_avg:98.58ms +step:1102/1695 train_time:108639ms step_avg:98.58ms +step:1103/1695 train_time:108739ms step_avg:98.58ms +step:1104/1695 train_time:108839ms step_avg:98.59ms +step:1105/1695 train_time:108938ms step_avg:98.59ms +step:1106/1695 train_time:109040ms step_avg:98.59ms +step:1107/1695 train_time:109140ms step_avg:98.59ms +step:1108/1695 train_time:109240ms step_avg:98.59ms +step:1109/1695 train_time:109340ms step_avg:98.59ms +step:1110/1695 train_time:109439ms step_avg:98.59ms +step:1111/1695 
train_time:109539ms step_avg:98.60ms +step:1112/1695 train_time:109640ms step_avg:98.60ms +step:1113/1695 train_time:109740ms step_avg:98.60ms +step:1114/1695 train_time:109840ms step_avg:98.60ms +step:1115/1695 train_time:109940ms step_avg:98.60ms +step:1116/1695 train_time:110040ms step_avg:98.60ms +step:1117/1695 train_time:110141ms step_avg:98.60ms +step:1118/1695 train_time:110240ms step_avg:98.60ms +step:1119/1695 train_time:110340ms step_avg:98.61ms +step:1120/1695 train_time:110440ms step_avg:98.61ms +step:1121/1695 train_time:110541ms step_avg:98.61ms +step:1122/1695 train_time:110640ms step_avg:98.61ms +step:1123/1695 train_time:110740ms step_avg:98.61ms +step:1124/1695 train_time:110839ms step_avg:98.61ms +step:1125/1695 train_time:110940ms step_avg:98.61ms +step:1125/1695 val_loss:3.4399 train_time:111037ms step_avg:98.70ms +step:1126/1695 train_time:111063ms step_avg:98.64ms +step:1127/1695 train_time:111148ms step_avg:98.62ms +step:1128/1695 train_time:111250ms step_avg:98.63ms +step:1129/1695 train_time:111351ms step_avg:98.63ms +step:1130/1695 train_time:111451ms step_avg:98.63ms +step:1131/1695 train_time:111550ms step_avg:98.63ms +step:1132/1695 train_time:111650ms step_avg:98.63ms +step:1133/1695 train_time:111750ms step_avg:98.63ms +step:1134/1695 train_time:111850ms step_avg:98.63ms +step:1135/1695 train_time:111949ms step_avg:98.63ms +step:1136/1695 train_time:112052ms step_avg:98.64ms +step:1137/1695 train_time:112157ms step_avg:98.64ms +step:1138/1695 train_time:112261ms step_avg:98.65ms +step:1139/1695 train_time:112360ms step_avg:98.65ms +step:1140/1695 train_time:112461ms step_avg:98.65ms +step:1141/1695 train_time:112560ms step_avg:98.65ms +step:1142/1695 train_time:112660ms step_avg:98.65ms +step:1143/1695 train_time:112759ms step_avg:98.65ms +step:1144/1695 train_time:112859ms step_avg:98.65ms +step:1145/1695 train_time:112961ms step_avg:98.66ms +step:1146/1695 train_time:113061ms step_avg:98.66ms +step:1147/1695 train_time:113162ms step_avg:98.66ms +step:1148/1695 train_time:113263ms step_avg:98.66ms +step:1149/1695 train_time:113363ms step_avg:98.66ms +step:1150/1695 train_time:113463ms step_avg:98.66ms +step:1151/1695 train_time:113563ms step_avg:98.67ms +step:1152/1695 train_time:113664ms step_avg:98.67ms +step:1153/1695 train_time:113765ms step_avg:98.67ms +step:1154/1695 train_time:113866ms step_avg:98.67ms +step:1155/1695 train_time:113966ms step_avg:98.67ms +step:1156/1695 train_time:114067ms step_avg:98.67ms +step:1157/1695 train_time:114169ms step_avg:98.68ms +step:1158/1695 train_time:114269ms step_avg:98.68ms +step:1159/1695 train_time:114370ms step_avg:98.68ms +step:1160/1695 train_time:114473ms step_avg:98.68ms +step:1161/1695 train_time:114576ms step_avg:98.69ms +step:1162/1695 train_time:114677ms step_avg:98.69ms +step:1163/1695 train_time:114780ms step_avg:98.69ms +step:1164/1695 train_time:114881ms step_avg:98.69ms +step:1165/1695 train_time:114981ms step_avg:98.70ms +step:1166/1695 train_time:115082ms step_avg:98.70ms +step:1167/1695 train_time:115182ms step_avg:98.70ms +step:1168/1695 train_time:115282ms step_avg:98.70ms +step:1169/1695 train_time:115383ms step_avg:98.70ms +step:1170/1695 train_time:115483ms step_avg:98.70ms +step:1171/1695 train_time:115583ms step_avg:98.70ms +step:1172/1695 train_time:115688ms step_avg:98.71ms +step:1173/1695 train_time:115789ms step_avg:98.71ms +step:1174/1695 train_time:115890ms step_avg:98.71ms +step:1175/1695 train_time:115991ms step_avg:98.72ms +step:1176/1695 train_time:116092ms step_avg:98.72ms 
+step:1177/1695 train_time:116193ms step_avg:98.72ms +step:1178/1695 train_time:116295ms step_avg:98.72ms +step:1179/1695 train_time:116399ms step_avg:98.73ms +step:1180/1695 train_time:116500ms step_avg:98.73ms +step:1181/1695 train_time:116600ms step_avg:98.73ms +step:1182/1695 train_time:116701ms step_avg:98.73ms +step:1183/1695 train_time:116801ms step_avg:98.73ms +step:1184/1695 train_time:116902ms step_avg:98.74ms +step:1185/1695 train_time:117003ms step_avg:98.74ms +step:1186/1695 train_time:117103ms step_avg:98.74ms +step:1187/1695 train_time:117203ms step_avg:98.74ms +step:1188/1695 train_time:117304ms step_avg:98.74ms +step:1189/1695 train_time:117405ms step_avg:98.74ms +step:1190/1695 train_time:117506ms step_avg:98.74ms +step:1191/1695 train_time:117608ms step_avg:98.75ms +step:1192/1695 train_time:117709ms step_avg:98.75ms +step:1193/1695 train_time:117809ms step_avg:98.75ms +step:1194/1695 train_time:117910ms step_avg:98.75ms +step:1195/1695 train_time:118011ms step_avg:98.75ms +step:1196/1695 train_time:118112ms step_avg:98.76ms +step:1197/1695 train_time:118215ms step_avg:98.76ms +step:1198/1695 train_time:118317ms step_avg:98.76ms +step:1199/1695 train_time:118418ms step_avg:98.76ms +step:1200/1695 train_time:118519ms step_avg:98.77ms +step:1201/1695 train_time:118619ms step_avg:98.77ms +step:1202/1695 train_time:118721ms step_avg:98.77ms +step:1203/1695 train_time:118822ms step_avg:98.77ms +step:1204/1695 train_time:118923ms step_avg:98.77ms +step:1205/1695 train_time:119022ms step_avg:98.77ms +step:1206/1695 train_time:119122ms step_avg:98.77ms +step:1207/1695 train_time:119222ms step_avg:98.78ms +step:1208/1695 train_time:119322ms step_avg:98.78ms +step:1209/1695 train_time:119422ms step_avg:98.78ms +step:1210/1695 train_time:119522ms step_avg:98.78ms +step:1211/1695 train_time:119623ms step_avg:98.78ms +step:1212/1695 train_time:119723ms step_avg:98.78ms +step:1213/1695 train_time:119825ms step_avg:98.78ms +step:1214/1695 train_time:119925ms step_avg:98.79ms +step:1215/1695 train_time:120026ms step_avg:98.79ms +step:1216/1695 train_time:120128ms step_avg:98.79ms +step:1217/1695 train_time:120229ms step_avg:98.79ms +step:1218/1695 train_time:120330ms step_avg:98.79ms +step:1219/1695 train_time:120431ms step_avg:98.79ms +step:1220/1695 train_time:120533ms step_avg:98.80ms +step:1221/1695 train_time:120636ms step_avg:98.80ms +step:1222/1695 train_time:120738ms step_avg:98.80ms +step:1223/1695 train_time:120839ms step_avg:98.81ms +step:1224/1695 train_time:120939ms step_avg:98.81ms +step:1225/1695 train_time:121040ms step_avg:98.81ms +step:1226/1695 train_time:121140ms step_avg:98.81ms +step:1227/1695 train_time:121241ms step_avg:98.81ms +step:1228/1695 train_time:121341ms step_avg:98.81ms +step:1229/1695 train_time:121440ms step_avg:98.81ms +step:1230/1695 train_time:121540ms step_avg:98.81ms +step:1231/1695 train_time:121641ms step_avg:98.81ms +step:1232/1695 train_time:121741ms step_avg:98.82ms +step:1233/1695 train_time:121843ms step_avg:98.82ms +step:1234/1695 train_time:121944ms step_avg:98.82ms +step:1235/1695 train_time:122043ms step_avg:98.82ms +step:1236/1695 train_time:122143ms step_avg:98.82ms +step:1237/1695 train_time:122243ms step_avg:98.82ms +step:1238/1695 train_time:122343ms step_avg:98.82ms +step:1239/1695 train_time:122443ms step_avg:98.82ms +step:1240/1695 train_time:122544ms step_avg:98.83ms +step:1241/1695 train_time:122645ms step_avg:98.83ms +step:1242/1695 train_time:122746ms step_avg:98.83ms +step:1243/1695 train_time:122849ms step_avg:98.83ms 
+step:1244/1695 train_time:122949ms step_avg:98.83ms +step:1245/1695 train_time:123050ms step_avg:98.84ms +step:1246/1695 train_time:123151ms step_avg:98.84ms +step:1247/1695 train_time:123252ms step_avg:98.84ms +step:1248/1695 train_time:123354ms step_avg:98.84ms +step:1249/1695 train_time:123455ms step_avg:98.84ms +step:1250/1695 train_time:123556ms step_avg:98.84ms +step:1250/1695 val_loss:3.3958 train_time:123656ms step_avg:98.92ms +step:1251/1695 train_time:123681ms step_avg:98.87ms +step:1252/1695 train_time:123768ms step_avg:98.86ms +step:1253/1695 train_time:123872ms step_avg:98.86ms +step:1254/1695 train_time:123972ms step_avg:98.86ms +step:1255/1695 train_time:124073ms step_avg:98.86ms +step:1256/1695 train_time:124173ms step_avg:98.86ms +step:1257/1695 train_time:124272ms step_avg:98.86ms +step:1258/1695 train_time:124372ms step_avg:98.86ms +step:1259/1695 train_time:124471ms step_avg:98.87ms +step:1260/1695 train_time:124571ms step_avg:98.87ms +step:1261/1695 train_time:124671ms step_avg:98.87ms +step:1262/1695 train_time:124773ms step_avg:98.87ms +step:1263/1695 train_time:124874ms step_avg:98.87ms +step:1264/1695 train_time:124974ms step_avg:98.87ms +step:1265/1695 train_time:125075ms step_avg:98.87ms +step:1266/1695 train_time:125175ms step_avg:98.87ms +step:1267/1695 train_time:125275ms step_avg:98.88ms +step:1268/1695 train_time:125375ms step_avg:98.88ms +step:1269/1695 train_time:125476ms step_avg:98.88ms +step:1270/1695 train_time:125577ms step_avg:98.88ms +step:1271/1695 train_time:125679ms step_avg:98.88ms +step:1272/1695 train_time:125780ms step_avg:98.88ms +step:1273/1695 train_time:125882ms step_avg:98.89ms +step:1274/1695 train_time:125983ms step_avg:98.89ms +step:1275/1695 train_time:126084ms step_avg:98.89ms +step:1276/1695 train_time:126188ms step_avg:98.89ms +step:1277/1695 train_time:126289ms step_avg:98.90ms +step:1278/1695 train_time:126390ms step_avg:98.90ms +step:1279/1695 train_time:126491ms step_avg:98.90ms +step:1280/1695 train_time:126591ms step_avg:98.90ms +step:1281/1695 train_time:126691ms step_avg:98.90ms +step:1282/1695 train_time:126791ms step_avg:98.90ms +step:1283/1695 train_time:126892ms step_avg:98.90ms +step:1284/1695 train_time:126992ms step_avg:98.90ms +step:1285/1695 train_time:127092ms step_avg:98.90ms +step:1286/1695 train_time:127192ms step_avg:98.91ms +step:1287/1695 train_time:127293ms step_avg:98.91ms +step:1288/1695 train_time:127393ms step_avg:98.91ms +step:1289/1695 train_time:127493ms step_avg:98.91ms +step:1290/1695 train_time:127593ms step_avg:98.91ms +step:1291/1695 train_time:127693ms step_avg:98.91ms +step:1292/1695 train_time:127794ms step_avg:98.91ms +step:1293/1695 train_time:127894ms step_avg:98.91ms +step:1294/1695 train_time:127995ms step_avg:98.91ms +step:1295/1695 train_time:128096ms step_avg:98.92ms +step:1296/1695 train_time:128196ms step_avg:98.92ms +step:1297/1695 train_time:128297ms step_avg:98.92ms +step:1298/1695 train_time:128397ms step_avg:98.92ms +step:1299/1695 train_time:128498ms step_avg:98.92ms +step:1300/1695 train_time:128598ms step_avg:98.92ms +step:1301/1695 train_time:128699ms step_avg:98.92ms +step:1302/1695 train_time:128802ms step_avg:98.93ms +step:1303/1695 train_time:128903ms step_avg:98.93ms +step:1304/1695 train_time:129005ms step_avg:98.93ms +step:1305/1695 train_time:129107ms step_avg:98.93ms +step:1306/1695 train_time:129208ms step_avg:98.93ms +step:1307/1695 train_time:129309ms step_avg:98.94ms +step:1308/1695 train_time:129410ms step_avg:98.94ms +step:1309/1695 train_time:129511ms 
step_avg:98.94ms +step:1310/1695 train_time:129613ms step_avg:98.94ms +step:1311/1695 train_time:129714ms step_avg:98.94ms +step:1312/1695 train_time:129814ms step_avg:98.94ms +step:1313/1695 train_time:129915ms step_avg:98.94ms +step:1314/1695 train_time:130015ms step_avg:98.95ms +step:1315/1695 train_time:130115ms step_avg:98.95ms +step:1316/1695 train_time:130217ms step_avg:98.95ms +step:1317/1695 train_time:130318ms step_avg:98.95ms +step:1318/1695 train_time:130418ms step_avg:98.95ms +step:1319/1695 train_time:130519ms step_avg:98.95ms +step:1320/1695 train_time:130621ms step_avg:98.96ms +step:1321/1695 train_time:130723ms step_avg:98.96ms +step:1322/1695 train_time:130824ms step_avg:98.96ms +step:1323/1695 train_time:130925ms step_avg:98.96ms +step:1324/1695 train_time:131027ms step_avg:98.96ms +step:1325/1695 train_time:131129ms step_avg:98.97ms +step:1326/1695 train_time:131232ms step_avg:98.97ms +step:1327/1695 train_time:131333ms step_avg:98.97ms +step:1328/1695 train_time:131433ms step_avg:98.97ms +step:1329/1695 train_time:131533ms step_avg:98.97ms +step:1330/1695 train_time:131634ms step_avg:98.97ms +step:1331/1695 train_time:131734ms step_avg:98.97ms +step:1332/1695 train_time:131834ms step_avg:98.97ms +step:1333/1695 train_time:131934ms step_avg:98.98ms +step:1334/1695 train_time:132037ms step_avg:98.98ms +step:1335/1695 train_time:132139ms step_avg:98.98ms +step:1336/1695 train_time:132241ms step_avg:98.98ms +step:1337/1695 train_time:132342ms step_avg:98.98ms +step:1338/1695 train_time:132443ms step_avg:98.99ms +step:1339/1695 train_time:132544ms step_avg:98.99ms +step:1340/1695 train_time:132645ms step_avg:98.99ms +step:1341/1695 train_time:132746ms step_avg:98.99ms +step:1342/1695 train_time:132848ms step_avg:98.99ms +step:1343/1695 train_time:132950ms step_avg:98.99ms +step:1344/1695 train_time:133050ms step_avg:99.00ms +step:1345/1695 train_time:133151ms step_avg:99.00ms +step:1346/1695 train_time:133252ms step_avg:99.00ms +step:1347/1695 train_time:133353ms step_avg:99.00ms +step:1348/1695 train_time:133452ms step_avg:99.00ms +step:1349/1695 train_time:133552ms step_avg:99.00ms +step:1350/1695 train_time:133653ms step_avg:99.00ms +step:1351/1695 train_time:133753ms step_avg:99.00ms +step:1352/1695 train_time:133853ms step_avg:99.00ms +step:1353/1695 train_time:133953ms step_avg:99.00ms +step:1354/1695 train_time:134054ms step_avg:99.01ms +step:1355/1695 train_time:134155ms step_avg:99.01ms +step:1356/1695 train_time:134255ms step_avg:99.01ms +step:1357/1695 train_time:134356ms step_avg:99.01ms +step:1358/1695 train_time:134457ms step_avg:99.01ms +step:1359/1695 train_time:134556ms step_avg:99.01ms +step:1360/1695 train_time:134657ms step_avg:99.01ms +step:1361/1695 train_time:134758ms step_avg:99.01ms +step:1362/1695 train_time:134860ms step_avg:99.02ms +step:1363/1695 train_time:134960ms step_avg:99.02ms +step:1364/1695 train_time:135062ms step_avg:99.02ms +step:1365/1695 train_time:135163ms step_avg:99.02ms +step:1366/1695 train_time:135265ms step_avg:99.02ms +step:1367/1695 train_time:135368ms step_avg:99.03ms +step:1368/1695 train_time:135470ms step_avg:99.03ms +step:1369/1695 train_time:135570ms step_avg:99.03ms +step:1370/1695 train_time:135670ms step_avg:99.03ms +step:1371/1695 train_time:135771ms step_avg:99.03ms +step:1372/1695 train_time:135872ms step_avg:99.03ms +step:1373/1695 train_time:135974ms step_avg:99.03ms +step:1374/1695 train_time:136074ms step_avg:99.03ms +step:1375/1695 train_time:136175ms step_avg:99.04ms +step:1375/1695 val_loss:3.3561 
train_time:136274ms step_avg:99.11ms +step:1376/1695 train_time:136299ms step_avg:99.05ms +step:1377/1695 train_time:136390ms step_avg:99.05ms +step:1378/1695 train_time:136492ms step_avg:99.05ms +step:1379/1695 train_time:136594ms step_avg:99.05ms +step:1380/1695 train_time:136696ms step_avg:99.06ms +step:1381/1695 train_time:136796ms step_avg:99.06ms +step:1382/1695 train_time:136897ms step_avg:99.06ms +step:1383/1695 train_time:136996ms step_avg:99.06ms +step:1384/1695 train_time:137097ms step_avg:99.06ms +step:1385/1695 train_time:137198ms step_avg:99.06ms +step:1386/1695 train_time:137302ms step_avg:99.06ms +step:1387/1695 train_time:137405ms step_avg:99.07ms +step:1388/1695 train_time:137506ms step_avg:99.07ms +step:1389/1695 train_time:137608ms step_avg:99.07ms +step:1390/1695 train_time:137709ms step_avg:99.07ms +step:1391/1695 train_time:137810ms step_avg:99.07ms +step:1392/1695 train_time:137911ms step_avg:99.07ms +step:1393/1695 train_time:138013ms step_avg:99.08ms +step:1394/1695 train_time:138114ms step_avg:99.08ms +step:1395/1695 train_time:138218ms step_avg:99.08ms +step:1396/1695 train_time:138320ms step_avg:99.08ms +step:1397/1695 train_time:138424ms step_avg:99.09ms +step:1398/1695 train_time:138526ms step_avg:99.09ms +step:1399/1695 train_time:138629ms step_avg:99.09ms +step:1400/1695 train_time:138731ms step_avg:99.09ms +step:1401/1695 train_time:138831ms step_avg:99.09ms +step:1402/1695 train_time:138933ms step_avg:99.10ms +step:1403/1695 train_time:139036ms step_avg:99.10ms +step:1404/1695 train_time:139138ms step_avg:99.10ms +step:1405/1695 train_time:139240ms step_avg:99.10ms +step:1406/1695 train_time:139342ms step_avg:99.11ms +step:1407/1695 train_time:139444ms step_avg:99.11ms +step:1408/1695 train_time:139545ms step_avg:99.11ms +step:1409/1695 train_time:139649ms step_avg:99.11ms +step:1410/1695 train_time:139749ms step_avg:99.11ms +step:1411/1695 train_time:139850ms step_avg:99.11ms +step:1412/1695 train_time:139953ms step_avg:99.12ms +step:1413/1695 train_time:140054ms step_avg:99.12ms +step:1414/1695 train_time:140157ms step_avg:99.12ms +step:1415/1695 train_time:140260ms step_avg:99.12ms +step:1416/1695 train_time:140360ms step_avg:99.12ms +step:1417/1695 train_time:140461ms step_avg:99.13ms +step:1418/1695 train_time:140563ms step_avg:99.13ms +step:1419/1695 train_time:140665ms step_avg:99.13ms +step:1420/1695 train_time:140766ms step_avg:99.13ms +step:1421/1695 train_time:140867ms step_avg:99.13ms +step:1422/1695 train_time:140968ms step_avg:99.13ms +step:1423/1695 train_time:141069ms step_avg:99.14ms +step:1424/1695 train_time:141172ms step_avg:99.14ms +step:1425/1695 train_time:141275ms step_avg:99.14ms +step:1426/1695 train_time:141379ms step_avg:99.14ms +step:1427/1695 train_time:141481ms step_avg:99.15ms +step:1428/1695 train_time:141582ms step_avg:99.15ms +step:1429/1695 train_time:141684ms step_avg:99.15ms +step:1430/1695 train_time:141785ms step_avg:99.15ms +step:1431/1695 train_time:141886ms step_avg:99.15ms +step:1432/1695 train_time:141986ms step_avg:99.15ms +step:1433/1695 train_time:142089ms step_avg:99.16ms +step:1434/1695 train_time:142192ms step_avg:99.16ms +step:1435/1695 train_time:142294ms step_avg:99.16ms +step:1436/1695 train_time:142399ms step_avg:99.16ms +step:1437/1695 train_time:142501ms step_avg:99.17ms +step:1438/1695 train_time:142602ms step_avg:99.17ms +step:1439/1695 train_time:142705ms step_avg:99.17ms +step:1440/1695 train_time:142807ms step_avg:99.17ms +step:1441/1695 train_time:142910ms step_avg:99.17ms +step:1442/1695 
train_time:143011ms step_avg:99.18ms +step:1443/1695 train_time:143112ms step_avg:99.18ms +step:1444/1695 train_time:143214ms step_avg:99.18ms +step:1445/1695 train_time:143315ms step_avg:99.18ms +step:1446/1695 train_time:143418ms step_avg:99.18ms +step:1447/1695 train_time:143520ms step_avg:99.18ms +step:1448/1695 train_time:143624ms step_avg:99.19ms +step:1449/1695 train_time:143725ms step_avg:99.19ms +step:1450/1695 train_time:143827ms step_avg:99.19ms +step:1451/1695 train_time:143928ms step_avg:99.19ms +step:1452/1695 train_time:144030ms step_avg:99.19ms +step:1453/1695 train_time:144132ms step_avg:99.20ms +step:1454/1695 train_time:144234ms step_avg:99.20ms +step:1455/1695 train_time:144337ms step_avg:99.20ms +step:1456/1695 train_time:144439ms step_avg:99.20ms +step:1457/1695 train_time:144541ms step_avg:99.20ms +step:1458/1695 train_time:144644ms step_avg:99.21ms +step:1459/1695 train_time:144746ms step_avg:99.21ms +step:1460/1695 train_time:144846ms step_avg:99.21ms +step:1461/1695 train_time:144949ms step_avg:99.21ms +step:1462/1695 train_time:145050ms step_avg:99.21ms +step:1463/1695 train_time:145152ms step_avg:99.21ms +step:1464/1695 train_time:145253ms step_avg:99.22ms +step:1465/1695 train_time:145355ms step_avg:99.22ms +step:1466/1695 train_time:145458ms step_avg:99.22ms +step:1467/1695 train_time:145560ms step_avg:99.22ms +step:1468/1695 train_time:145663ms step_avg:99.23ms +step:1469/1695 train_time:145765ms step_avg:99.23ms +step:1470/1695 train_time:145865ms step_avg:99.23ms +step:1471/1695 train_time:145967ms step_avg:99.23ms +step:1472/1695 train_time:146069ms step_avg:99.23ms +step:1473/1695 train_time:146170ms step_avg:99.23ms +step:1474/1695 train_time:146273ms step_avg:99.24ms +step:1475/1695 train_time:146376ms step_avg:99.24ms +step:1476/1695 train_time:146478ms step_avg:99.24ms +step:1477/1695 train_time:146580ms step_avg:99.24ms +step:1478/1695 train_time:146684ms step_avg:99.24ms +step:1479/1695 train_time:146785ms step_avg:99.25ms +step:1480/1695 train_time:146887ms step_avg:99.25ms +step:1481/1695 train_time:146989ms step_avg:99.25ms +step:1482/1695 train_time:147091ms step_avg:99.25ms +step:1483/1695 train_time:147193ms step_avg:99.25ms +step:1484/1695 train_time:147296ms step_avg:99.26ms +step:1485/1695 train_time:147398ms step_avg:99.26ms +step:1486/1695 train_time:147500ms step_avg:99.26ms +step:1487/1695 train_time:147601ms step_avg:99.26ms +step:1488/1695 train_time:147704ms step_avg:99.26ms +step:1489/1695 train_time:147806ms step_avg:99.27ms +step:1490/1695 train_time:147908ms step_avg:99.27ms +step:1491/1695 train_time:148009ms step_avg:99.27ms +step:1492/1695 train_time:148110ms step_avg:99.27ms +step:1493/1695 train_time:148213ms step_avg:99.27ms +step:1494/1695 train_time:148314ms step_avg:99.27ms +step:1495/1695 train_time:148417ms step_avg:99.28ms +step:1496/1695 train_time:148519ms step_avg:99.28ms +step:1497/1695 train_time:148621ms step_avg:99.28ms +step:1498/1695 train_time:148723ms step_avg:99.28ms +step:1499/1695 train_time:148824ms step_avg:99.28ms +step:1500/1695 train_time:148926ms step_avg:99.28ms +step:1500/1695 val_loss:3.3213 train_time:149024ms step_avg:99.35ms +step:1501/1695 train_time:149049ms step_avg:99.30ms +step:1502/1695 train_time:149137ms step_avg:99.29ms +step:1503/1695 train_time:149237ms step_avg:99.29ms +step:1504/1695 train_time:149338ms step_avg:99.29ms +step:1505/1695 train_time:149438ms step_avg:99.29ms +step:1506/1695 train_time:149539ms step_avg:99.30ms +step:1507/1695 train_time:149640ms step_avg:99.30ms 
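Note: for scale, a back-of-envelope throughput figure. This record does not state the device count or sequence length in the portion above; assuming the same train_seq_len = 48*1024 tokens per device per step as the script of the next record below, and an 8-device run (both assumptions, not confirmed by this log):

tokens_per_step = 8 * 48 * 1024           # assumed: world size * per-device seq len
print(f"{tokens_per_step / 0.0993:.2e}")  # ~99.3ms/step here -> ~3.96e6 tokens/s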
+step:1508/1695 train_time:149740ms step_avg:99.30ms +step:1509/1695 train_time:149842ms step_avg:99.30ms +step:1510/1695 train_time:149943ms step_avg:99.30ms +step:1511/1695 train_time:150048ms step_avg:99.30ms +step:1512/1695 train_time:150152ms step_avg:99.31ms +step:1513/1695 train_time:150253ms step_avg:99.31ms +step:1514/1695 train_time:150355ms step_avg:99.31ms +step:1515/1695 train_time:150460ms step_avg:99.31ms +step:1516/1695 train_time:150562ms step_avg:99.32ms +step:1517/1695 train_time:150662ms step_avg:99.32ms +step:1518/1695 train_time:150764ms step_avg:99.32ms +step:1519/1695 train_time:150868ms step_avg:99.32ms +step:1520/1695 train_time:150969ms step_avg:99.32ms +step:1521/1695 train_time:151071ms step_avg:99.32ms +step:1522/1695 train_time:151173ms step_avg:99.33ms +step:1523/1695 train_time:151275ms step_avg:99.33ms +step:1524/1695 train_time:151379ms step_avg:99.33ms +step:1525/1695 train_time:151483ms step_avg:99.33ms +step:1526/1695 train_time:151586ms step_avg:99.34ms +step:1527/1695 train_time:151687ms step_avg:99.34ms +step:1528/1695 train_time:151794ms step_avg:99.34ms +step:1529/1695 train_time:151895ms step_avg:99.34ms +step:1530/1695 train_time:151997ms step_avg:99.34ms +step:1531/1695 train_time:152098ms step_avg:99.35ms +step:1532/1695 train_time:152200ms step_avg:99.35ms +step:1533/1695 train_time:152303ms step_avg:99.35ms +step:1534/1695 train_time:152406ms step_avg:99.35ms +step:1535/1695 train_time:152508ms step_avg:99.35ms +step:1536/1695 train_time:152610ms step_avg:99.36ms +step:1537/1695 train_time:152712ms step_avg:99.36ms +step:1538/1695 train_time:152814ms step_avg:99.36ms +step:1539/1695 train_time:152915ms step_avg:99.36ms +step:1540/1695 train_time:153017ms step_avg:99.36ms +step:1541/1695 train_time:153120ms step_avg:99.36ms +step:1542/1695 train_time:153224ms step_avg:99.37ms +step:1543/1695 train_time:153326ms step_avg:99.37ms +step:1544/1695 train_time:153428ms step_avg:99.37ms +step:1545/1695 train_time:153530ms step_avg:99.37ms +step:1546/1695 train_time:153633ms step_avg:99.37ms +step:1547/1695 train_time:153736ms step_avg:99.38ms +step:1548/1695 train_time:153837ms step_avg:99.38ms +step:1549/1695 train_time:153939ms step_avg:99.38ms +step:1550/1695 train_time:154040ms step_avg:99.38ms +step:1551/1695 train_time:154143ms step_avg:99.38ms +step:1552/1695 train_time:154244ms step_avg:99.38ms +step:1553/1695 train_time:154347ms step_avg:99.39ms +step:1554/1695 train_time:154449ms step_avg:99.39ms +step:1555/1695 train_time:154551ms step_avg:99.39ms +step:1556/1695 train_time:154654ms step_avg:99.39ms +step:1557/1695 train_time:154757ms step_avg:99.39ms +step:1558/1695 train_time:154859ms step_avg:99.40ms +step:1559/1695 train_time:154962ms step_avg:99.40ms +step:1560/1695 train_time:155063ms step_avg:99.40ms +step:1561/1695 train_time:155165ms step_avg:99.40ms +step:1562/1695 train_time:155268ms step_avg:99.40ms +step:1563/1695 train_time:155372ms step_avg:99.41ms +step:1564/1695 train_time:155473ms step_avg:99.41ms +step:1565/1695 train_time:155574ms step_avg:99.41ms +step:1566/1695 train_time:155675ms step_avg:99.41ms +step:1567/1695 train_time:155776ms step_avg:99.41ms +step:1568/1695 train_time:155876ms step_avg:99.41ms +step:1569/1695 train_time:155977ms step_avg:99.41ms +step:1570/1695 train_time:156080ms step_avg:99.41ms +step:1571/1695 train_time:156181ms step_avg:99.42ms +step:1572/1695 train_time:156282ms step_avg:99.42ms +step:1573/1695 train_time:156384ms step_avg:99.42ms +step:1574/1695 train_time:156486ms step_avg:99.42ms 
+step:1575/1695 train_time:156588ms step_avg:99.42ms +step:1576/1695 train_time:156691ms step_avg:99.42ms +step:1577/1695 train_time:156794ms step_avg:99.43ms +step:1578/1695 train_time:156895ms step_avg:99.43ms +step:1579/1695 train_time:156997ms step_avg:99.43ms +step:1580/1695 train_time:157100ms step_avg:99.43ms +step:1581/1695 train_time:157202ms step_avg:99.43ms +step:1582/1695 train_time:157303ms step_avg:99.43ms +step:1583/1695 train_time:157406ms step_avg:99.44ms +step:1584/1695 train_time:157509ms step_avg:99.44ms +step:1585/1695 train_time:157610ms step_avg:99.44ms +step:1586/1695 train_time:157713ms step_avg:99.44ms +step:1587/1695 train_time:157814ms step_avg:99.44ms +step:1588/1695 train_time:157916ms step_avg:99.44ms +step:1589/1695 train_time:158017ms step_avg:99.44ms +step:1590/1695 train_time:158118ms step_avg:99.45ms +step:1591/1695 train_time:158220ms step_avg:99.45ms +step:1592/1695 train_time:158322ms step_avg:99.45ms +step:1593/1695 train_time:158423ms step_avg:99.45ms +step:1594/1695 train_time:158527ms step_avg:99.45ms +step:1595/1695 train_time:158629ms step_avg:99.45ms +step:1596/1695 train_time:158731ms step_avg:99.46ms +step:1597/1695 train_time:158833ms step_avg:99.46ms +step:1598/1695 train_time:158936ms step_avg:99.46ms +step:1599/1695 train_time:159037ms step_avg:99.46ms +step:1600/1695 train_time:159139ms step_avg:99.46ms +step:1601/1695 train_time:159241ms step_avg:99.46ms +step:1602/1695 train_time:159343ms step_avg:99.46ms +step:1603/1695 train_time:159444ms step_avg:99.47ms +step:1604/1695 train_time:159545ms step_avg:99.47ms +step:1605/1695 train_time:159647ms step_avg:99.47ms +step:1606/1695 train_time:159750ms step_avg:99.47ms +step:1607/1695 train_time:159852ms step_avg:99.47ms +step:1608/1695 train_time:159955ms step_avg:99.47ms +step:1609/1695 train_time:160056ms step_avg:99.48ms +step:1610/1695 train_time:160158ms step_avg:99.48ms +step:1611/1695 train_time:160260ms step_avg:99.48ms +step:1612/1695 train_time:160362ms step_avg:99.48ms +step:1613/1695 train_time:160463ms step_avg:99.48ms +step:1614/1695 train_time:160564ms step_avg:99.48ms +step:1615/1695 train_time:160667ms step_avg:99.48ms +step:1616/1695 train_time:160767ms step_avg:99.48ms +step:1617/1695 train_time:160871ms step_avg:99.49ms +step:1618/1695 train_time:160974ms step_avg:99.49ms +step:1619/1695 train_time:161076ms step_avg:99.49ms +step:1620/1695 train_time:161178ms step_avg:99.49ms +step:1621/1695 train_time:161280ms step_avg:99.49ms +step:1622/1695 train_time:161382ms step_avg:99.50ms +step:1623/1695 train_time:161483ms step_avg:99.50ms +step:1624/1695 train_time:161584ms step_avg:99.50ms +step:1625/1695 train_time:161687ms step_avg:99.50ms +step:1625/1695 val_loss:3.2926 train_time:161788ms step_avg:99.56ms +step:1626/1695 train_time:161814ms step_avg:99.52ms +step:1627/1695 train_time:161900ms step_avg:99.51ms +step:1628/1695 train_time:162004ms step_avg:99.51ms +step:1629/1695 train_time:162107ms step_avg:99.51ms +step:1630/1695 train_time:162209ms step_avg:99.51ms +step:1631/1695 train_time:162311ms step_avg:99.52ms +step:1632/1695 train_time:162412ms step_avg:99.52ms +step:1633/1695 train_time:162512ms step_avg:99.52ms +step:1634/1695 train_time:162615ms step_avg:99.52ms +step:1635/1695 train_time:162717ms step_avg:99.52ms +step:1636/1695 train_time:162820ms step_avg:99.52ms +step:1637/1695 train_time:162924ms step_avg:99.53ms +step:1638/1695 train_time:163027ms step_avg:99.53ms +step:1639/1695 train_time:163130ms step_avg:99.53ms +step:1640/1695 train_time:163232ms 
step_avg:99.53ms +step:1641/1695 train_time:163335ms step_avg:99.53ms +step:1642/1695 train_time:163437ms step_avg:99.54ms +step:1643/1695 train_time:163538ms step_avg:99.54ms +step:1644/1695 train_time:163640ms step_avg:99.54ms +step:1645/1695 train_time:163744ms step_avg:99.54ms +step:1646/1695 train_time:163847ms step_avg:99.54ms +step:1647/1695 train_time:163952ms step_avg:99.55ms +step:1648/1695 train_time:164055ms step_avg:99.55ms +step:1649/1695 train_time:164158ms step_avg:99.55ms +step:1650/1695 train_time:164262ms step_avg:99.55ms +step:1651/1695 train_time:164364ms step_avg:99.55ms +step:1652/1695 train_time:164468ms step_avg:99.56ms +step:1653/1695 train_time:164572ms step_avg:99.56ms +step:1654/1695 train_time:164673ms step_avg:99.56ms +step:1655/1695 train_time:164776ms step_avg:99.56ms +step:1656/1695 train_time:164879ms step_avg:99.56ms +step:1657/1695 train_time:164981ms step_avg:99.57ms +step:1658/1695 train_time:165084ms step_avg:99.57ms +step:1659/1695 train_time:165190ms step_avg:99.57ms +step:1660/1695 train_time:165293ms step_avg:99.57ms +step:1661/1695 train_time:165396ms step_avg:99.58ms +step:1662/1695 train_time:165501ms step_avg:99.58ms +step:1663/1695 train_time:165603ms step_avg:99.58ms +step:1664/1695 train_time:165707ms step_avg:99.58ms +step:1665/1695 train_time:165812ms step_avg:99.59ms +step:1666/1695 train_time:165915ms step_avg:99.59ms +step:1667/1695 train_time:166016ms step_avg:99.59ms +step:1668/1695 train_time:166121ms step_avg:99.59ms +step:1669/1695 train_time:166227ms step_avg:99.60ms +step:1670/1695 train_time:166329ms step_avg:99.60ms +step:1671/1695 train_time:166432ms step_avg:99.60ms +step:1672/1695 train_time:166536ms step_avg:99.60ms +step:1673/1695 train_time:166637ms step_avg:99.60ms +step:1674/1695 train_time:166739ms step_avg:99.60ms +step:1675/1695 train_time:166841ms step_avg:99.61ms +step:1676/1695 train_time:166946ms step_avg:99.61ms +step:1677/1695 train_time:167049ms step_avg:99.61ms +step:1678/1695 train_time:167153ms step_avg:99.61ms +step:1679/1695 train_time:167256ms step_avg:99.62ms +step:1680/1695 train_time:167357ms step_avg:99.62ms +step:1681/1695 train_time:167460ms step_avg:99.62ms +step:1682/1695 train_time:167567ms step_avg:99.62ms +step:1683/1695 train_time:167670ms step_avg:99.63ms +step:1684/1695 train_time:167773ms step_avg:99.63ms +step:1685/1695 train_time:167876ms step_avg:99.63ms +step:1686/1695 train_time:167978ms step_avg:99.63ms +step:1687/1695 train_time:168080ms step_avg:99.63ms +step:1688/1695 train_time:168183ms step_avg:99.63ms +step:1689/1695 train_time:168284ms step_avg:99.64ms +step:1690/1695 train_time:168388ms step_avg:99.64ms +step:1691/1695 train_time:168491ms step_avg:99.64ms +step:1692/1695 train_time:168593ms step_avg:99.64ms +step:1693/1695 train_time:168696ms step_avg:99.64ms +step:1694/1695 train_time:168800ms step_avg:99.65ms +step:1695/1695 train_time:168903ms step_avg:99.65ms +step:1695/1695 val_loss:3.2796 train_time:169003ms step_avg:99.71ms +peak memory allocated: 34004 MiB reserved: 49720 MiB diff --git a/records/082325_SparseAttnGate/6df384bb-9c24-46b3-826b-f7c07168c27a.txt b/records/082325_SparseAttnGate/6df384bb-9c24-46b3-826b-f7c07168c27a.txt new file mode 100644 index 000000000..a2ba6ed9d --- /dev/null +++ b/records/082325_SparseAttnGate/6df384bb-9c24-46b3-826b-f7c07168c27a.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
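# the A.T tile also steps along K by A's column stride, since its K axis indexes columns of A +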
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
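+
+    For reference, each of the 5 Newton-Schulz steps computed by newton_schulz_triton above is the quintic iteration
+        A = X @ X.T
+        B = b * A + c * A @ A
+        X = a * X + B @ X
+    with (a, b, c) = (3.4445, -4.7750, 2.0315), applied after scaling the update so its spectral norm is at most 1.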
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
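+        # these per-parameter multipliers are read at step time by DistAdam and Muon via getattr(p, "lr_mul", 1.0)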
+        self.lm_head.weight.lr_mul = 1.0
+        self.scalars.lr_mul = 5.0
+
+    def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor):
+        BLOCK_SIZE = 128
+        docs = (input_seq == 50256).cumsum(0)
+        # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256])
+        # docs = increments.cumsum(0)
+
+        def document_causal(b, h, q_idx, kv_idx):
+            causal_mask = q_idx >= kv_idx
+            document_mask = docs[q_idx] == docs[kv_idx]
+            return causal_mask & document_mask
+
+        def dense_to_ordered(dense_blockmask: Tensor):
+            num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32)
+            indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32)
+            return num_blocks[None, None].contiguous(), indices[None, None].contiguous()
+
+        # manual block mask creation by @YouJiacheng
+        assert len(input_seq) % BLOCK_SIZE == 0
+        NUM_BLOCKS = len(input_seq) // BLOCK_SIZE
+        block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda")
+        causal_blockmask_any = block_idx[:, None] >= block_idx
+        causal_blockmask_all = block_idx[:, None] > block_idx
+        docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous()
+        docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous()
+        document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low)
+        document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low)
+        blockmask_any = causal_blockmask_any & document_blockmask_any
+        blockmask_all = causal_blockmask_all & document_blockmask_all
+        partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all)
+        full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all)
+        def build_bm(window_size_blocks: Tensor) -> BlockMask:
+            return BlockMask.from_kv_blocks(
+                torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)),
+                partial_kv_indices,
+                torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1),
+                full_kv_indices,
+                BLOCK_SIZE=BLOCK_SIZE,
+                mask_mod=document_causal,
+            )
+        # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper
+        return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2)
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor):
+        assert input_seq.ndim == 1
+
+        ve = [value_embed(input_seq) for value_embed in self.value_embeds]
+        # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks)
+        block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(block_masks) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap
+def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int):
+    boundary_mask = tokens[pos : pos + token_window] == 50256
+    boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos
+    start = boundary_positions[0].item()
+    starts = []
+    for i in range(1, len(boundary_positions)):
+        end = boundary_positions[i].item()
+        if end - start >= seq_len:
+            starts.append(start) # append start once end pos is confirmed
+            if len(starts) == dist.get_world_size():
+                return starts, end - pos
+            start = end
+    assert False # increase token_window if necessary
+
+def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool):
+    rank = dist.get_rank()
+    world_size = dist.get_world_size()
+    batch_size = seq_len * world_size
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
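# after the AVG all-reduce, every rank holds the same mean validation loss, so the logged value is global +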
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:04:05 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 27C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 29C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 30C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 26C P0 112W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 27C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 29C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 29C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 30C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 293658 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 293659 C /usr/bin/python3 614MiB | +| 0 N/A N/A 293660 C /usr/bin/python3 614MiB | +| 0 N/A N/A 293661 C /usr/bin/python3 614MiB | +| 0 N/A N/A 293662 C /usr/bin/python3 614MiB | +| 0 N/A N/A 293663 C /usr/bin/python3 614MiB | +| 0 N/A N/A 293664 C /usr/bin/python3 614MiB | +| 0 N/A N/A 293665 C /usr/bin/python3 614MiB | +| 1 N/A N/A 293659 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 293660 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 293661 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 293662 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 293663 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 293664 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 293665 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1695 train_time:148ms step_avg:148.13ms +step:2/1695 train_time:174ms step_avg:86.81ms +step:3/1695 train_time:245ms step_avg:81.60ms +step:4/1695 train_time:336ms step_avg:84.10ms +step:5/1695 train_time:429ms step_avg:85.81ms +step:6/1695 train_time:522ms step_avg:86.96ms +step:7/1695 train_time:615ms step_avg:87.92ms +step:8/1695 
train_time:708ms step_avg:88.55ms +step:9/1695 train_time:801ms step_avg:89.03ms +step:10/1695 train_time:894ms step_avg:89.41ms +step:11/1695 train_time:988ms step_avg:89.80ms +step:12/1695 train_time:1081ms step_avg:90.12ms +step:13/1695 train_time:1177ms step_avg:90.54ms +step:14/1695 train_time:1271ms step_avg:90.81ms +step:15/1695 train_time:1365ms step_avg:91.01ms +step:16/1695 train_time:1459ms step_avg:91.18ms +step:17/1695 train_time:1552ms step_avg:91.28ms +step:18/1695 train_time:1645ms step_avg:91.37ms +step:19/1695 train_time:1738ms step_avg:91.46ms +step:20/1695 train_time:1831ms step_avg:91.54ms +step:21/1695 train_time:1924ms step_avg:91.63ms +step:22/1695 train_time:2017ms step_avg:91.70ms +step:23/1695 train_time:2111ms step_avg:91.80ms +step:24/1695 train_time:2207ms step_avg:91.94ms +step:25/1695 train_time:2301ms step_avg:92.06ms +step:26/1695 train_time:2396ms step_avg:92.15ms +step:27/1695 train_time:2489ms step_avg:92.20ms +step:28/1695 train_time:2583ms step_avg:92.26ms +step:29/1695 train_time:2677ms step_avg:92.32ms +step:30/1695 train_time:2770ms step_avg:92.33ms +step:31/1695 train_time:2863ms step_avg:92.36ms +step:32/1695 train_time:2956ms step_avg:92.38ms +step:33/1695 train_time:3050ms step_avg:92.42ms +step:34/1695 train_time:3144ms step_avg:92.47ms +step:35/1695 train_time:3238ms step_avg:92.53ms +step:36/1695 train_time:3332ms step_avg:92.56ms +step:37/1695 train_time:3427ms step_avg:92.61ms +step:38/1695 train_time:3520ms step_avg:92.64ms +step:39/1695 train_time:3613ms step_avg:92.65ms +step:40/1695 train_time:3706ms step_avg:92.66ms +step:41/1695 train_time:3800ms step_avg:92.67ms +step:42/1695 train_time:3893ms step_avg:92.69ms +step:43/1695 train_time:3986ms step_avg:92.69ms +step:44/1695 train_time:4080ms step_avg:92.72ms +step:45/1695 train_time:4173ms step_avg:92.72ms +step:46/1695 train_time:4268ms step_avg:92.77ms +step:47/1695 train_time:4361ms step_avg:92.79ms +step:48/1695 train_time:4455ms step_avg:92.81ms +step:49/1695 train_time:4549ms step_avg:92.83ms +step:50/1695 train_time:4643ms step_avg:92.86ms +step:51/1695 train_time:4737ms step_avg:92.89ms +step:52/1695 train_time:4830ms step_avg:92.89ms +step:53/1695 train_time:4924ms step_avg:92.91ms +step:54/1695 train_time:5017ms step_avg:92.91ms +step:55/1695 train_time:5110ms step_avg:92.91ms +step:56/1695 train_time:5204ms step_avg:92.93ms +step:57/1695 train_time:5297ms step_avg:92.93ms +step:58/1695 train_time:5390ms step_avg:92.94ms +step:59/1695 train_time:5485ms step_avg:92.96ms +step:60/1695 train_time:5578ms step_avg:92.97ms +step:61/1695 train_time:5672ms step_avg:92.98ms +step:62/1695 train_time:5766ms step_avg:93.00ms +step:63/1695 train_time:5859ms step_avg:93.00ms +step:64/1695 train_time:5952ms step_avg:93.01ms +step:65/1695 train_time:6046ms step_avg:93.02ms +step:66/1695 train_time:6140ms step_avg:93.04ms +step:67/1695 train_time:6234ms step_avg:93.04ms +step:68/1695 train_time:6328ms step_avg:93.05ms +step:69/1695 train_time:6421ms step_avg:93.06ms +step:70/1695 train_time:6514ms step_avg:93.06ms +step:71/1695 train_time:6608ms step_avg:93.07ms +step:72/1695 train_time:6702ms step_avg:93.09ms +step:73/1695 train_time:6797ms step_avg:93.11ms +step:74/1695 train_time:6889ms step_avg:93.10ms +step:75/1695 train_time:6982ms step_avg:93.10ms +step:76/1695 train_time:7077ms step_avg:93.11ms +step:77/1695 train_time:7170ms step_avg:93.11ms +step:78/1695 train_time:7264ms step_avg:93.12ms +step:79/1695 train_time:7356ms step_avg:93.12ms +step:80/1695 train_time:7450ms 
step_avg:93.12ms
+step:81/1695 train_time:7543ms step_avg:93.13ms
+step:82/1695 train_time:7637ms step_avg:93.13ms
+step:83/1695 train_time:7730ms step_avg:93.14ms
+step:84/1695 train_time:7824ms step_avg:93.15ms
+step:85/1695 train_time:7918ms step_avg:93.15ms
+step:86/1695 train_time:8011ms step_avg:93.15ms
+step:87/1695 train_time:8104ms step_avg:93.15ms
+step:88/1695 train_time:8198ms step_avg:93.16ms
+step:89/1695 train_time:8291ms step_avg:93.16ms
+step:90/1695 train_time:8385ms step_avg:93.17ms
+step:91/1695 train_time:8479ms step_avg:93.17ms
+step:92/1695 train_time:8571ms step_avg:93.17ms
+step:93/1695 train_time:8665ms step_avg:93.17ms
+step:94/1695 train_time:8758ms step_avg:93.17ms
+step:95/1695 train_time:8852ms step_avg:93.17ms
+step:96/1695 train_time:8946ms step_avg:93.18ms
+step:97/1695 train_time:9039ms step_avg:93.19ms
+step:98/1695 train_time:9133ms step_avg:93.19ms
+step:99/1695 train_time:9227ms step_avg:93.20ms
+step:100/1695 train_time:9321ms step_avg:93.21ms
+step:101/1695 train_time:9414ms step_avg:93.21ms
+step:102/1695 train_time:9507ms step_avg:93.21ms
+step:103/1695 train_time:9602ms step_avg:93.22ms
+step:104/1695 train_time:9694ms step_avg:93.21ms
+step:105/1695 train_time:9788ms step_avg:93.22ms
+step:106/1695 train_time:9882ms step_avg:93.23ms
+step:107/1695 train_time:9976ms step_avg:93.23ms
+step:108/1695 train_time:10070ms step_avg:93.24ms
+step:109/1695 train_time:10164ms step_avg:93.25ms
+step:110/1695 train_time:10258ms step_avg:93.26ms
+step:111/1695 train_time:10351ms step_avg:93.25ms
+step:112/1695 train_time:10445ms step_avg:93.26ms
+step:113/1695 train_time:10538ms step_avg:93.26ms
+step:114/1695 train_time:10631ms step_avg:93.26ms
+step:115/1695 train_time:10724ms step_avg:93.26ms
+step:116/1695 train_time:10818ms step_avg:93.26ms
+step:117/1695 train_time:10912ms step_avg:93.26ms
+step:118/1695 train_time:11005ms step_avg:93.27ms
+step:119/1695 train_time:11098ms step_avg:93.26ms
+step:120/1695 train_time:11191ms step_avg:93.26ms
+step:121/1695 train_time:11285ms step_avg:93.27ms
+step:122/1695 train_time:11379ms step_avg:93.27ms
+step:123/1695 train_time:11472ms step_avg:93.27ms
+step:124/1695 train_time:11566ms step_avg:93.27ms
+step:125/1695 train_time:11659ms step_avg:93.27ms
+step:125/1695 val_loss:4.6123 train_time:11750ms step_avg:94.00ms
+step:126/1695 train_time:11777ms step_avg:93.47ms
+step:127/1695 train_time:11853ms step_avg:93.33ms
+step:128/1695 train_time:11958ms step_avg:93.42ms
+step:129/1695 train_time:12053ms step_avg:93.43ms
+step:130/1695 train_time:12147ms step_avg:93.44ms
+step:131/1695 train_time:12240ms step_avg:93.43ms
+step:132/1695 train_time:12333ms step_avg:93.43ms
+step:133/1695 train_time:12426ms step_avg:93.43ms
+step:134/1695 train_time:12519ms step_avg:93.42ms
+step:135/1695 train_time:12612ms step_avg:93.42ms
+step:136/1695 train_time:12705ms step_avg:93.42ms
+step:137/1695 train_time:12799ms step_avg:93.42ms
+step:138/1695 train_time:12894ms step_avg:93.44ms
+step:139/1695 train_time:12990ms step_avg:93.45ms
+step:140/1695 train_time:13085ms step_avg:93.46ms
+step:141/1695 train_time:13179ms step_avg:93.47ms
+step:142/1695 train_time:13273ms step_avg:93.47ms
+step:143/1695 train_time:13367ms step_avg:93.47ms
+step:144/1695 train_time:13460ms step_avg:93.47ms
+step:145/1695 train_time:13554ms step_avg:93.48ms
+step:146/1695 train_time:13647ms step_avg:93.47ms
+step:147/1695 train_time:13741ms step_avg:93.48ms
+step:148/1695 train_time:13835ms step_avg:93.48ms
+step:149/1695 train_time:13929ms step_avg:93.48ms
+step:150/1695 train_time:14023ms step_avg:93.49ms
+step:151/1695 train_time:14118ms step_avg:93.50ms
+step:152/1695 train_time:14212ms step_avg:93.50ms
+step:153/1695 train_time:14306ms step_avg:93.50ms
+step:154/1695 train_time:14400ms step_avg:93.51ms
+step:155/1695 train_time:14494ms step_avg:93.51ms
+step:156/1695 train_time:14587ms step_avg:93.51ms
+step:157/1695 train_time:14681ms step_avg:93.51ms
+step:158/1695 train_time:14775ms step_avg:93.51ms
+step:159/1695 train_time:14869ms step_avg:93.52ms
+step:160/1695 train_time:14963ms step_avg:93.52ms
+step:161/1695 train_time:15057ms step_avg:93.52ms
+step:162/1695 train_time:15151ms step_avg:93.53ms
+step:163/1695 train_time:15246ms step_avg:93.53ms
+step:164/1695 train_time:15340ms step_avg:93.53ms
+step:165/1695 train_time:15434ms step_avg:93.54ms
+step:166/1695 train_time:15528ms step_avg:93.54ms
+step:167/1695 train_time:15622ms step_avg:93.54ms
+step:168/1695 train_time:15716ms step_avg:93.55ms
+step:169/1695 train_time:15809ms step_avg:93.55ms
+step:170/1695 train_time:15904ms step_avg:93.55ms
+step:171/1695 train_time:15998ms step_avg:93.55ms
+step:172/1695 train_time:16093ms step_avg:93.56ms
+step:173/1695 train_time:16186ms step_avg:93.56ms
+step:174/1695 train_time:16279ms step_avg:93.56ms
+step:175/1695 train_time:16373ms step_avg:93.56ms
+step:176/1695 train_time:16467ms step_avg:93.56ms
+step:177/1695 train_time:16561ms step_avg:93.56ms
+step:178/1695 train_time:16655ms step_avg:93.57ms
+step:179/1695 train_time:16749ms step_avg:93.57ms
+step:180/1695 train_time:16842ms step_avg:93.57ms
+step:181/1695 train_time:16937ms step_avg:93.57ms
+step:182/1695 train_time:17030ms step_avg:93.57ms
+step:183/1695 train_time:17124ms step_avg:93.58ms
+step:184/1695 train_time:17219ms step_avg:93.58ms
+step:185/1695 train_time:17313ms step_avg:93.58ms
+step:186/1695 train_time:17407ms step_avg:93.58ms
+step:187/1695 train_time:17500ms step_avg:93.58ms
+step:188/1695 train_time:17594ms step_avg:93.58ms
+step:189/1695 train_time:17688ms step_avg:93.59ms
+step:190/1695 train_time:17781ms step_avg:93.59ms
+step:191/1695 train_time:17876ms step_avg:93.59ms
+step:192/1695 train_time:17971ms step_avg:93.60ms
+step:193/1695 train_time:18065ms step_avg:93.60ms
+step:194/1695 train_time:18159ms step_avg:93.60ms
+step:195/1695 train_time:18252ms step_avg:93.60ms
+step:196/1695 train_time:18347ms step_avg:93.61ms
+step:197/1695 train_time:18442ms step_avg:93.61ms
+step:198/1695 train_time:18536ms step_avg:93.62ms
+step:199/1695 train_time:18630ms step_avg:93.62ms
+step:200/1695 train_time:18723ms step_avg:93.62ms
+step:201/1695 train_time:18818ms step_avg:93.62ms
+step:202/1695 train_time:18912ms step_avg:93.62ms
+step:203/1695 train_time:19005ms step_avg:93.62ms
+step:204/1695 train_time:19099ms step_avg:93.62ms
+step:205/1695 train_time:19193ms step_avg:93.63ms
+step:206/1695 train_time:19288ms step_avg:93.63ms
+step:207/1695 train_time:19382ms step_avg:93.63ms
+step:208/1695 train_time:19476ms step_avg:93.63ms
+step:209/1695 train_time:19570ms step_avg:93.63ms
+step:210/1695 train_time:19664ms step_avg:93.64ms
+step:211/1695 train_time:19758ms step_avg:93.64ms
+step:212/1695 train_time:19851ms step_avg:93.64ms
+step:213/1695 train_time:19945ms step_avg:93.64ms
+step:214/1695 train_time:20040ms step_avg:93.64ms
+step:215/1695 train_time:20134ms step_avg:93.65ms
+step:216/1695 train_time:20227ms step_avg:93.65ms
+step:217/1695 train_time:20321ms step_avg:93.65ms
+step:218/1695 train_time:20415ms step_avg:93.65ms
+step:219/1695 train_time:20509ms step_avg:93.65ms
+step:220/1695 train_time:20603ms step_avg:93.65ms
+step:221/1695 train_time:20697ms step_avg:93.65ms
+step:222/1695 train_time:20791ms step_avg:93.65ms
+step:223/1695 train_time:20884ms step_avg:93.65ms
+step:224/1695 train_time:20979ms step_avg:93.66ms
+step:225/1695 train_time:21073ms step_avg:93.66ms
+step:226/1695 train_time:21167ms step_avg:93.66ms
+step:227/1695 train_time:21261ms step_avg:93.66ms
+step:228/1695 train_time:21355ms step_avg:93.66ms
+step:229/1695 train_time:21449ms step_avg:93.67ms
+step:230/1695 train_time:21543ms step_avg:93.67ms
+step:231/1695 train_time:21637ms step_avg:93.67ms
+step:232/1695 train_time:21731ms step_avg:93.67ms
+step:233/1695 train_time:21824ms step_avg:93.67ms
+step:234/1695 train_time:21918ms step_avg:93.67ms
+step:235/1695 train_time:22012ms step_avg:93.67ms
+step:236/1695 train_time:22105ms step_avg:93.67ms
+step:237/1695 train_time:22199ms step_avg:93.67ms
+step:238/1695 train_time:22293ms step_avg:93.67ms
+step:239/1695 train_time:22387ms step_avg:93.67ms
+step:240/1695 train_time:22482ms step_avg:93.67ms
+step:241/1695 train_time:22575ms step_avg:93.67ms
+step:242/1695 train_time:22669ms step_avg:93.67ms
+step:243/1695 train_time:22763ms step_avg:93.68ms
+step:244/1695 train_time:22857ms step_avg:93.68ms
+step:245/1695 train_time:22951ms step_avg:93.68ms
+step:246/1695 train_time:23046ms step_avg:93.68ms
+step:247/1695 train_time:23138ms step_avg:93.68ms
+step:248/1695 train_time:23232ms step_avg:93.68ms
+step:249/1695 train_time:23326ms step_avg:93.68ms
+step:250/1695 train_time:23419ms step_avg:93.68ms
+step:250/1695 val_loss:4.0789 train_time:23511ms step_avg:94.04ms
+step:251/1695 train_time:23539ms step_avg:93.78ms
+step:252/1695 train_time:23617ms step_avg:93.72ms
+step:253/1695 train_time:23717ms step_avg:93.74ms
+step:254/1695 train_time:23813ms step_avg:93.75ms
+step:255/1695 train_time:23907ms step_avg:93.75ms
+step:256/1695 train_time:24000ms step_avg:93.75ms
+step:257/1695 train_time:24094ms step_avg:93.75ms
+step:258/1695 train_time:24188ms step_avg:93.75ms
+step:259/1695 train_time:24282ms step_avg:93.75ms
+step:260/1695 train_time:24376ms step_avg:93.75ms
+step:261/1695 train_time:24469ms step_avg:93.75ms
+step:262/1695 train_time:24564ms step_avg:93.75ms
+step:263/1695 train_time:24660ms step_avg:93.76ms
+step:264/1695 train_time:24756ms step_avg:93.77ms
+step:265/1695 train_time:24852ms step_avg:93.78ms
+step:266/1695 train_time:24946ms step_avg:93.78ms
+step:267/1695 train_time:25040ms step_avg:93.78ms
+step:268/1695 train_time:25135ms step_avg:93.79ms
+step:269/1695 train_time:25229ms step_avg:93.79ms
+step:270/1695 train_time:25323ms step_avg:93.79ms
+step:271/1695 train_time:25417ms step_avg:93.79ms
+step:272/1695 train_time:25510ms step_avg:93.79ms
+step:273/1695 train_time:25605ms step_avg:93.79ms
+step:274/1695 train_time:25700ms step_avg:93.80ms
+step:275/1695 train_time:25796ms step_avg:93.80ms
+step:276/1695 train_time:25890ms step_avg:93.80ms
+step:277/1695 train_time:25984ms step_avg:93.81ms
+step:278/1695 train_time:26079ms step_avg:93.81ms
+step:279/1695 train_time:26173ms step_avg:93.81ms
+step:280/1695 train_time:26267ms step_avg:93.81ms
+step:281/1695 train_time:26361ms step_avg:93.81ms
+step:282/1695 train_time:26454ms step_avg:93.81ms
+step:283/1695 train_time:26548ms step_avg:93.81ms
+step:284/1695 train_time:26643ms step_avg:93.81ms
+step:285/1695 train_time:26740ms step_avg:93.82ms
+step:286/1695 train_time:26835ms step_avg:93.83ms
+step:287/1695 train_time:26930ms step_avg:93.83ms
+step:288/1695 train_time:27024ms step_avg:93.83ms
+step:289/1695 train_time:27119ms step_avg:93.84ms
+step:290/1695 train_time:27214ms step_avg:93.84ms
+step:291/1695 train_time:27307ms step_avg:93.84ms
+step:292/1695 train_time:27401ms step_avg:93.84ms
+step:293/1695 train_time:27496ms step_avg:93.84ms
+step:294/1695 train_time:27590ms step_avg:93.84ms
+step:295/1695 train_time:27685ms step_avg:93.85ms
+step:296/1695 train_time:27779ms step_avg:93.85ms
+step:297/1695 train_time:27875ms step_avg:93.85ms
+step:298/1695 train_time:27969ms step_avg:93.86ms
+step:299/1695 train_time:28064ms step_avg:93.86ms
+step:300/1695 train_time:28158ms step_avg:93.86ms
+step:301/1695 train_time:28253ms step_avg:93.86ms
+step:302/1695 train_time:28347ms step_avg:93.86ms
+step:303/1695 train_time:28441ms step_avg:93.86ms
+step:304/1695 train_time:28535ms step_avg:93.87ms
+step:305/1695 train_time:28630ms step_avg:93.87ms
+step:306/1695 train_time:28724ms step_avg:93.87ms
+step:307/1695 train_time:28819ms step_avg:93.87ms
+step:308/1695 train_time:28915ms step_avg:93.88ms
+step:309/1695 train_time:29010ms step_avg:93.88ms
+step:310/1695 train_time:29104ms step_avg:93.88ms
+step:311/1695 train_time:29199ms step_avg:93.89ms
+step:312/1695 train_time:29294ms step_avg:93.89ms
+step:313/1695 train_time:29388ms step_avg:93.89ms
+step:314/1695 train_time:29482ms step_avg:93.89ms
+step:315/1695 train_time:29576ms step_avg:93.89ms
+step:316/1695 train_time:29670ms step_avg:93.89ms
+step:317/1695 train_time:29765ms step_avg:93.89ms
+step:318/1695 train_time:29860ms step_avg:93.90ms
+step:319/1695 train_time:29955ms step_avg:93.90ms
+step:320/1695 train_time:30049ms step_avg:93.90ms
+step:321/1695 train_time:30144ms step_avg:93.91ms
+step:322/1695 train_time:30239ms step_avg:93.91ms
+step:323/1695 train_time:30333ms step_avg:93.91ms
+step:324/1695 train_time:30427ms step_avg:93.91ms
+step:325/1695 train_time:30522ms step_avg:93.91ms
+step:326/1695 train_time:30617ms step_avg:93.92ms
+step:327/1695 train_time:30712ms step_avg:93.92ms
+step:328/1695 train_time:30806ms step_avg:93.92ms
+step:329/1695 train_time:30901ms step_avg:93.92ms
+step:330/1695 train_time:30996ms step_avg:93.93ms
+step:331/1695 train_time:31091ms step_avg:93.93ms
+step:332/1695 train_time:31186ms step_avg:93.93ms
+step:333/1695 train_time:31280ms step_avg:93.93ms
+step:334/1695 train_time:31374ms step_avg:93.93ms
+step:335/1695 train_time:31468ms step_avg:93.93ms
+step:336/1695 train_time:31562ms step_avg:93.94ms
+step:337/1695 train_time:31657ms step_avg:93.94ms
+step:338/1695 train_time:31752ms step_avg:93.94ms
+step:339/1695 train_time:31846ms step_avg:93.94ms
+step:340/1695 train_time:31940ms step_avg:93.94ms
+step:341/1695 train_time:32035ms step_avg:93.94ms
+step:342/1695 train_time:32129ms step_avg:93.94ms
+step:343/1695 train_time:32224ms step_avg:93.95ms
+step:344/1695 train_time:32318ms step_avg:93.95ms
+step:345/1695 train_time:32413ms step_avg:93.95ms
+step:346/1695 train_time:32506ms step_avg:93.95ms
+step:347/1695 train_time:32602ms step_avg:93.95ms
+step:348/1695 train_time:32697ms step_avg:93.96ms
+step:349/1695 train_time:32791ms step_avg:93.96ms
+step:350/1695 train_time:32886ms step_avg:93.96ms
+step:351/1695 train_time:32980ms step_avg:93.96ms
+step:352/1695 train_time:33075ms step_avg:93.96ms
+step:353/1695 train_time:33169ms step_avg:93.96ms
+step:354/1695 train_time:33264ms step_avg:93.97ms
+step:355/1695 train_time:33359ms step_avg:93.97ms
+step:356/1695 train_time:33453ms step_avg:93.97ms
+step:357/1695 train_time:33547ms step_avg:93.97ms
+step:358/1695 train_time:33642ms step_avg:93.97ms
+step:359/1695 train_time:33737ms step_avg:93.97ms
+step:360/1695 train_time:33831ms step_avg:93.98ms
+step:361/1695 train_time:33925ms step_avg:93.98ms
+step:362/1695 train_time:34020ms step_avg:93.98ms
+step:363/1695 train_time:34114ms step_avg:93.98ms
+step:364/1695 train_time:34208ms step_avg:93.98ms
+step:365/1695 train_time:34303ms step_avg:93.98ms
+step:366/1695 train_time:34398ms step_avg:93.98ms
+step:367/1695 train_time:34492ms step_avg:93.98ms
+step:368/1695 train_time:34586ms step_avg:93.98ms
+step:369/1695 train_time:34680ms step_avg:93.98ms
+step:370/1695 train_time:34774ms step_avg:93.98ms
+step:371/1695 train_time:34868ms step_avg:93.98ms
+step:372/1695 train_time:34963ms step_avg:93.99ms
+step:373/1695 train_time:35058ms step_avg:93.99ms
+step:374/1695 train_time:35153ms step_avg:93.99ms
+step:375/1695 train_time:35248ms step_avg:93.99ms
+step:375/1695 val_loss:3.8806 train_time:35339ms step_avg:94.24ms
+step:376/1695 train_time:35365ms step_avg:94.06ms
+step:377/1695 train_time:35445ms step_avg:94.02ms
+step:378/1695 train_time:35545ms step_avg:94.04ms
+step:379/1695 train_time:35644ms step_avg:94.05ms
+step:380/1695 train_time:35740ms step_avg:94.05ms
+step:381/1695 train_time:35836ms step_avg:94.06ms
+step:382/1695 train_time:35931ms step_avg:94.06ms
+step:383/1695 train_time:36026ms step_avg:94.06ms
+step:384/1695 train_time:36122ms step_avg:94.07ms
+step:385/1695 train_time:36219ms step_avg:94.07ms
+step:386/1695 train_time:36314ms step_avg:94.08ms
+step:387/1695 train_time:36410ms step_avg:94.08ms
+step:388/1695 train_time:36508ms step_avg:94.09ms
+step:389/1695 train_time:36605ms step_avg:94.10ms
+step:390/1695 train_time:36702ms step_avg:94.11ms
+step:391/1695 train_time:36798ms step_avg:94.11ms
+step:392/1695 train_time:36893ms step_avg:94.12ms
+step:393/1695 train_time:36989ms step_avg:94.12ms
+step:394/1695 train_time:37085ms step_avg:94.12ms
+step:395/1695 train_time:37181ms step_avg:94.13ms
+step:396/1695 train_time:37276ms step_avg:94.13ms
+step:397/1695 train_time:37373ms step_avg:94.14ms
+step:398/1695 train_time:37469ms step_avg:94.14ms
+step:399/1695 train_time:37565ms step_avg:94.15ms
+step:400/1695 train_time:37662ms step_avg:94.15ms
+step:401/1695 train_time:37758ms step_avg:94.16ms
+step:402/1695 train_time:37855ms step_avg:94.17ms
+step:403/1695 train_time:37952ms step_avg:94.17ms
+step:404/1695 train_time:38047ms step_avg:94.18ms
+step:405/1695 train_time:38142ms step_avg:94.18ms
+step:406/1695 train_time:38238ms step_avg:94.18ms
+step:407/1695 train_time:38334ms step_avg:94.19ms
+step:408/1695 train_time:38430ms step_avg:94.19ms
+step:409/1695 train_time:38525ms step_avg:94.19ms
+step:410/1695 train_time:38622ms step_avg:94.20ms
+step:411/1695 train_time:38719ms step_avg:94.21ms
+step:412/1695 train_time:38815ms step_avg:94.21ms
+step:413/1695 train_time:38911ms step_avg:94.22ms
+step:414/1695 train_time:39006ms step_avg:94.22ms
+step:415/1695 train_time:39103ms step_avg:94.22ms
+step:416/1695 train_time:39198ms step_avg:94.23ms
+step:417/1695 train_time:39294ms step_avg:94.23ms
+step:418/1695 train_time:39389ms step_avg:94.23ms
+step:419/1695 train_time:39485ms step_avg:94.24ms
+step:420/1695 train_time:39582ms step_avg:94.24ms
+step:421/1695 train_time:39677ms step_avg:94.25ms
+step:422/1695 train_time:39774ms step_avg:94.25ms
+step:423/1695 train_time:39870ms step_avg:94.26ms
+step:424/1695 train_time:39966ms step_avg:94.26ms
+step:425/1695 train_time:40062ms step_avg:94.26ms
+step:426/1695 train_time:40158ms step_avg:94.27ms
+step:427/1695 train_time:40255ms step_avg:94.27ms
+step:428/1695 train_time:40351ms step_avg:94.28ms
+step:429/1695 train_time:40446ms step_avg:94.28ms
+step:430/1695 train_time:40543ms step_avg:94.29ms
+step:431/1695 train_time:40638ms step_avg:94.29ms
+step:432/1695 train_time:40734ms step_avg:94.29ms
+step:433/1695 train_time:40830ms step_avg:94.30ms
+step:434/1695 train_time:40926ms step_avg:94.30ms
+step:435/1695 train_time:41022ms step_avg:94.30ms
+step:436/1695 train_time:41118ms step_avg:94.31ms
+step:437/1695 train_time:41214ms step_avg:94.31ms
+step:438/1695 train_time:41310ms step_avg:94.32ms
+step:439/1695 train_time:41406ms step_avg:94.32ms
+step:440/1695 train_time:41502ms step_avg:94.32ms
+step:441/1695 train_time:41598ms step_avg:94.33ms
+step:442/1695 train_time:41695ms step_avg:94.33ms
+step:443/1695 train_time:41791ms step_avg:94.34ms
+step:444/1695 train_time:41887ms step_avg:94.34ms
+step:445/1695 train_time:41983ms step_avg:94.34ms
+step:446/1695 train_time:42079ms step_avg:94.35ms
+step:447/1695 train_time:42175ms step_avg:94.35ms
+step:448/1695 train_time:42271ms step_avg:94.35ms
+step:449/1695 train_time:42367ms step_avg:94.36ms
+step:450/1695 train_time:42463ms step_avg:94.36ms
+step:451/1695 train_time:42560ms step_avg:94.37ms
+step:452/1695 train_time:42657ms step_avg:94.37ms
+step:453/1695 train_time:42754ms step_avg:94.38ms
+step:454/1695 train_time:42850ms step_avg:94.38ms
+step:455/1695 train_time:42946ms step_avg:94.39ms
+step:456/1695 train_time:43042ms step_avg:94.39ms
+step:457/1695 train_time:43138ms step_avg:94.39ms
+step:458/1695 train_time:43234ms step_avg:94.40ms
+step:459/1695 train_time:43332ms step_avg:94.40ms
+step:460/1695 train_time:43428ms step_avg:94.41ms
+step:461/1695 train_time:43523ms step_avg:94.41ms
+step:462/1695 train_time:43620ms step_avg:94.42ms
+step:463/1695 train_time:43717ms step_avg:94.42ms
+step:464/1695 train_time:43813ms step_avg:94.42ms
+step:465/1695 train_time:43909ms step_avg:94.43ms
+step:466/1695 train_time:44004ms step_avg:94.43ms
+step:467/1695 train_time:44101ms step_avg:94.43ms
+step:468/1695 train_time:44197ms step_avg:94.44ms
+step:469/1695 train_time:44294ms step_avg:94.44ms
+step:470/1695 train_time:44390ms step_avg:94.45ms
+step:471/1695 train_time:44486ms step_avg:94.45ms
+step:472/1695 train_time:44582ms step_avg:94.45ms
+step:473/1695 train_time:44678ms step_avg:94.46ms
+step:474/1695 train_time:44774ms step_avg:94.46ms
+step:475/1695 train_time:44870ms step_avg:94.46ms
+step:476/1695 train_time:44965ms step_avg:94.47ms
+step:477/1695 train_time:45062ms step_avg:94.47ms
+step:478/1695 train_time:45158ms step_avg:94.47ms
+step:479/1695 train_time:45255ms step_avg:94.48ms
+step:480/1695 train_time:45351ms step_avg:94.48ms
+step:481/1695 train_time:45446ms step_avg:94.48ms
+step:482/1695 train_time:45542ms step_avg:94.49ms
+step:483/1695 train_time:45639ms step_avg:94.49ms
+step:484/1695 train_time:45735ms step_avg:94.49ms
+step:485/1695 train_time:45831ms step_avg:94.50ms
+step:486/1695 train_time:45927ms step_avg:94.50ms
+step:487/1695 train_time:46023ms step_avg:94.50ms
+step:488/1695 train_time:46119ms step_avg:94.51ms
+step:489/1695 train_time:46216ms step_avg:94.51ms
+step:490/1695 train_time:46312ms step_avg:94.51ms
+step:491/1695 train_time:46408ms step_avg:94.52ms
+step:492/1695 train_time:46504ms step_avg:94.52ms
+step:493/1695 train_time:46600ms step_avg:94.52ms
+step:494/1695 train_time:46696ms step_avg:94.53ms
+step:495/1695 train_time:46793ms step_avg:94.53ms
+step:496/1695 train_time:46889ms step_avg:94.53ms
+step:497/1695 train_time:46985ms step_avg:94.54ms
+step:498/1695 train_time:47081ms step_avg:94.54ms
+step:499/1695 train_time:47178ms step_avg:94.54ms
+step:500/1695 train_time:47273ms step_avg:94.55ms
+step:500/1695 val_loss:3.7364 train_time:47368ms step_avg:94.74ms
+step:501/1695 train_time:47395ms step_avg:94.60ms
+step:502/1695 train_time:47475ms step_avg:94.57ms
+step:503/1695 train_time:47577ms step_avg:94.59ms
+step:504/1695 train_time:47674ms step_avg:94.59ms
+step:505/1695 train_time:47770ms step_avg:94.59ms
+step:506/1695 train_time:47866ms step_avg:94.60ms
+step:507/1695 train_time:47962ms step_avg:94.60ms
+step:508/1695 train_time:48057ms step_avg:94.60ms
+step:509/1695 train_time:48153ms step_avg:94.60ms
+step:510/1695 train_time:48248ms step_avg:94.60ms
+step:511/1695 train_time:48345ms step_avg:94.61ms
+step:512/1695 train_time:48442ms step_avg:94.61ms
+step:513/1695 train_time:48541ms step_avg:94.62ms
+step:514/1695 train_time:48639ms step_avg:94.63ms
+step:515/1695 train_time:48736ms step_avg:94.63ms
+step:516/1695 train_time:48832ms step_avg:94.64ms
+step:517/1695 train_time:48930ms step_avg:94.64ms
+step:518/1695 train_time:49025ms step_avg:94.64ms
+step:519/1695 train_time:49121ms step_avg:94.64ms
+step:520/1695 train_time:49217ms step_avg:94.65ms
+step:521/1695 train_time:49313ms step_avg:94.65ms
+step:522/1695 train_time:49409ms step_avg:94.65ms
+step:523/1695 train_time:49506ms step_avg:94.66ms
+step:524/1695 train_time:49604ms step_avg:94.66ms
+step:525/1695 train_time:49702ms step_avg:94.67ms
+step:526/1695 train_time:49799ms step_avg:94.68ms
+step:527/1695 train_time:49897ms step_avg:94.68ms
+step:528/1695 train_time:49994ms step_avg:94.69ms
+step:529/1695 train_time:50091ms step_avg:94.69ms
+step:530/1695 train_time:50186ms step_avg:94.69ms
+step:531/1695 train_time:50282ms step_avg:94.69ms
+step:532/1695 train_time:50379ms step_avg:94.70ms
+step:533/1695 train_time:50475ms step_avg:94.70ms
+step:534/1695 train_time:50573ms step_avg:94.71ms
+step:535/1695 train_time:50671ms step_avg:94.71ms
+step:536/1695 train_time:50767ms step_avg:94.72ms
+step:537/1695 train_time:50863ms step_avg:94.72ms
+step:538/1695 train_time:50960ms step_avg:94.72ms
+step:539/1695 train_time:51058ms step_avg:94.73ms
+step:540/1695 train_time:51155ms step_avg:94.73ms
+step:541/1695 train_time:51253ms step_avg:94.74ms
+step:542/1695 train_time:51350ms step_avg:94.74ms
+step:543/1695 train_time:51446ms step_avg:94.74ms
+step:544/1695 train_time:51542ms step_avg:94.75ms
+step:545/1695 train_time:51638ms step_avg:94.75ms
+step:546/1695 train_time:51736ms step_avg:94.75ms
+step:547/1695 train_time:51833ms step_avg:94.76ms
+step:548/1695 train_time:51929ms step_avg:94.76ms
+step:549/1695 train_time:52025ms step_avg:94.76ms
+step:550/1695 train_time:52122ms step_avg:94.77ms
+step:551/1695 train_time:52219ms step_avg:94.77ms
+step:552/1695 train_time:52317ms step_avg:94.78ms
+step:553/1695 train_time:52415ms step_avg:94.78ms
+step:554/1695 train_time:52512ms step_avg:94.79ms
+step:555/1695 train_time:52609ms step_avg:94.79ms
+step:556/1695 train_time:52706ms step_avg:94.79ms
+step:557/1695 train_time:52802ms step_avg:94.80ms
+step:558/1695 train_time:52899ms step_avg:94.80ms
+step:559/1695 train_time:52995ms step_avg:94.80ms
+step:560/1695 train_time:53092ms step_avg:94.81ms
+step:561/1695 train_time:53189ms step_avg:94.81ms
+step:562/1695 train_time:53286ms step_avg:94.81ms
+step:563/1695 train_time:53382ms step_avg:94.82ms
+step:564/1695 train_time:53480ms step_avg:94.82ms
+step:565/1695 train_time:53577ms step_avg:94.83ms
+step:566/1695 train_time:53675ms step_avg:94.83ms
+step:567/1695 train_time:53773ms step_avg:94.84ms
+step:568/1695 train_time:53870ms step_avg:94.84ms
+step:569/1695 train_time:53966ms step_avg:94.84ms
+step:570/1695 train_time:54062ms step_avg:94.85ms
+step:571/1695 train_time:54159ms step_avg:94.85ms
+step:572/1695 train_time:54256ms step_avg:94.85ms
+step:573/1695 train_time:54353ms step_avg:94.86ms
+step:574/1695 train_time:54450ms step_avg:94.86ms
+step:575/1695 train_time:54547ms step_avg:94.86ms
+step:576/1695 train_time:54642ms step_avg:94.87ms
+step:577/1695 train_time:54739ms step_avg:94.87ms
+step:578/1695 train_time:54837ms step_avg:94.87ms
+step:579/1695 train_time:54935ms step_avg:94.88ms
+step:580/1695 train_time:55033ms step_avg:94.88ms
+step:581/1695 train_time:55130ms step_avg:94.89ms
+step:582/1695 train_time:55227ms step_avg:94.89ms
+step:583/1695 train_time:55323ms step_avg:94.89ms
+step:584/1695 train_time:55420ms step_avg:94.90ms
+step:585/1695 train_time:55518ms step_avg:94.90ms
+step:586/1695 train_time:55616ms step_avg:94.91ms
+step:587/1695 train_time:55713ms step_avg:94.91ms
+step:588/1695 train_time:55810ms step_avg:94.91ms
+step:589/1695 train_time:55906ms step_avg:94.92ms
+step:590/1695 train_time:56002ms step_avg:94.92ms
+step:591/1695 train_time:56099ms step_avg:94.92ms
+step:592/1695 train_time:56196ms step_avg:94.93ms
+step:593/1695 train_time:56294ms step_avg:94.93ms
+step:594/1695 train_time:56391ms step_avg:94.93ms
+step:595/1695 train_time:56487ms step_avg:94.94ms
+step:596/1695 train_time:56583ms step_avg:94.94ms
+step:597/1695 train_time:56680ms step_avg:94.94ms
+step:598/1695 train_time:56776ms step_avg:94.94ms
+step:599/1695 train_time:56873ms step_avg:94.95ms
+step:600/1695 train_time:56970ms step_avg:94.95ms
+step:601/1695 train_time:57066ms step_avg:94.95ms
+step:602/1695 train_time:57162ms step_avg:94.95ms
+step:603/1695 train_time:57258ms step_avg:94.96ms
+step:604/1695 train_time:57355ms step_avg:94.96ms
+step:605/1695 train_time:57452ms step_avg:94.96ms
+step:606/1695 train_time:57548ms step_avg:94.96ms
+step:607/1695 train_time:57644ms step_avg:94.97ms
+step:608/1695 train_time:57740ms step_avg:94.97ms
+step:609/1695 train_time:57835ms step_avg:94.97ms
+step:610/1695 train_time:57932ms step_avg:94.97ms
+step:611/1695 train_time:58029ms step_avg:94.97ms
+step:612/1695 train_time:58125ms step_avg:94.97ms
+step:613/1695 train_time:58222ms step_avg:94.98ms
+step:614/1695 train_time:58319ms step_avg:94.98ms
+step:615/1695 train_time:58416ms step_avg:94.99ms
+step:616/1695 train_time:58512ms step_avg:94.99ms
+step:617/1695 train_time:58609ms step_avg:94.99ms
+step:618/1695 train_time:58705ms step_avg:94.99ms
+step:619/1695 train_time:58801ms step_avg:94.99ms
+step:620/1695 train_time:58898ms step_avg:95.00ms
+step:621/1695 train_time:58995ms step_avg:95.00ms
+step:622/1695 train_time:59092ms step_avg:95.00ms
+step:623/1695 train_time:59189ms step_avg:95.01ms
+step:624/1695 train_time:59287ms step_avg:95.01ms
+step:625/1695 train_time:59383ms step_avg:95.01ms
+step:625/1695 val_loss:3.6465 train_time:59477ms step_avg:95.16ms
+step:626/1695 train_time:59504ms step_avg:95.05ms
+step:627/1695 train_time:59583ms step_avg:95.03ms
+step:628/1695 train_time:59681ms step_avg:95.03ms
+step:629/1695 train_time:59778ms step_avg:95.04ms
+step:630/1695 train_time:59876ms step_avg:95.04ms
+step:631/1695 train_time:59974ms step_avg:95.05ms
+step:632/1695 train_time:60071ms step_avg:95.05ms
+step:633/1695 train_time:60167ms step_avg:95.05ms
+step:634/1695 train_time:60264ms step_avg:95.05ms
+step:635/1695 train_time:60361ms step_avg:95.06ms
+step:636/1695 train_time:60459ms step_avg:95.06ms
+step:637/1695 train_time:60558ms step_avg:95.07ms
+step:638/1695 train_time:60657ms step_avg:95.07ms
+step:639/1695 train_time:60757ms step_avg:95.08ms
+step:640/1695 train_time:60856ms step_avg:95.09ms
+step:641/1695 train_time:60955ms step_avg:95.09ms
+step:642/1695 train_time:61052ms step_avg:95.10ms
+step:643/1695 train_time:61150ms step_avg:95.10ms
+step:644/1695 train_time:61247ms step_avg:95.10ms
+step:645/1695 train_time:61345ms step_avg:95.11ms
+step:646/1695 train_time:61443ms step_avg:95.11ms
+step:647/1695 train_time:61541ms step_avg:95.12ms
+step:648/1695 train_time:61639ms step_avg:95.12ms
+step:649/1695 train_time:61737ms step_avg:95.13ms
+step:650/1695 train_time:61835ms step_avg:95.13ms
+step:651/1695 train_time:61934ms step_avg:95.14ms
+step:652/1695 train_time:62033ms step_avg:95.14ms
+step:653/1695 train_time:62131ms step_avg:95.15ms
+step:654/1695 train_time:62229ms step_avg:95.15ms
+step:655/1695 train_time:62327ms step_avg:95.16ms
+step:656/1695 train_time:62425ms step_avg:95.16ms
+step:657/1695 train_time:62522ms step_avg:95.16ms
+step:658/1695 train_time:62620ms step_avg:95.17ms
+step:659/1695 train_time:62719ms step_avg:95.17ms
+step:660/1695 train_time:62817ms step_avg:95.18ms
+step:661/1695 train_time:62915ms step_avg:95.18ms
+step:662/1695 train_time:63014ms step_avg:95.19ms
+step:663/1695 train_time:63112ms step_avg:95.19ms
+step:664/1695 train_time:63209ms step_avg:95.19ms
+step:665/1695 train_time:63309ms step_avg:95.20ms
+step:666/1695 train_time:63407ms step_avg:95.21ms
+step:667/1695 train_time:63505ms step_avg:95.21ms
+step:668/1695 train_time:63605ms step_avg:95.22ms
+step:669/1695 train_time:63705ms step_avg:95.22ms
+step:670/1695 train_time:63803ms step_avg:95.23ms
+step:671/1695 train_time:63901ms step_avg:95.23ms
+step:672/1695 train_time:64000ms step_avg:95.24ms
+step:673/1695 train_time:64098ms step_avg:95.24ms
+step:674/1695 train_time:64195ms step_avg:95.25ms
+step:675/1695 train_time:64294ms step_avg:95.25ms
+step:676/1695 train_time:64391ms step_avg:95.25ms
+step:677/1695 train_time:64490ms step_avg:95.26ms
+step:678/1695 train_time:64590ms step_avg:95.26ms
+step:679/1695 train_time:64690ms step_avg:95.27ms
+step:680/1695 train_time:64789ms step_avg:95.28ms
+step:681/1695 train_time:64888ms step_avg:95.28ms
+step:682/1695 train_time:64987ms step_avg:95.29ms
+step:683/1695 train_time:65086ms step_avg:95.29ms
+step:684/1695 train_time:65184ms step_avg:95.30ms
+step:685/1695 train_time:65282ms step_avg:95.30ms
+step:686/1695 train_time:65380ms step_avg:95.31ms
+step:687/1695 train_time:65477ms step_avg:95.31ms
+step:688/1695 train_time:65575ms step_avg:95.31ms
+step:689/1695 train_time:65672ms step_avg:95.32ms
+step:690/1695 train_time:65770ms step_avg:95.32ms
+step:691/1695 train_time:65869ms step_avg:95.32ms
+step:692/1695 train_time:65967ms step_avg:95.33ms
+step:693/1695 train_time:66067ms step_avg:95.33ms
+step:694/1695 train_time:66166ms step_avg:95.34ms
+step:695/1695 train_time:66264ms step_avg:95.34ms
+step:696/1695 train_time:66362ms step_avg:95.35ms
+step:697/1695 train_time:66460ms step_avg:95.35ms
+step:698/1695 train_time:66558ms step_avg:95.35ms
+step:699/1695 train_time:66655ms step_avg:95.36ms
+step:700/1695 train_time:66753ms step_avg:95.36ms
+step:701/1695 train_time:66851ms step_avg:95.36ms
+step:702/1695 train_time:66949ms step_avg:95.37ms
+step:703/1695 train_time:67047ms step_avg:95.37ms
+step:704/1695 train_time:67146ms step_avg:95.38ms
+step:705/1695 train_time:67245ms step_avg:95.38ms
+step:706/1695 train_time:67343ms step_avg:95.39ms
+step:707/1695 train_time:67441ms step_avg:95.39ms
+step:708/1695 train_time:67539ms step_avg:95.39ms
+step:709/1695 train_time:67636ms step_avg:95.40ms
+step:710/1695 train_time:67734ms step_avg:95.40ms
+step:711/1695 train_time:67831ms step_avg:95.40ms
+step:712/1695 train_time:67930ms step_avg:95.41ms
+step:713/1695 train_time:68027ms step_avg:95.41ms
+step:714/1695 train_time:68126ms step_avg:95.41ms
+step:715/1695 train_time:68225ms step_avg:95.42ms
+step:716/1695 train_time:68322ms step_avg:95.42ms
+step:717/1695 train_time:68420ms step_avg:95.43ms
+step:718/1695 train_time:68518ms step_avg:95.43ms
+step:719/1695 train_time:68615ms step_avg:95.43ms
+step:720/1695 train_time:68713ms step_avg:95.43ms
+step:721/1695 train_time:68810ms step_avg:95.44ms
+step:722/1695 train_time:68909ms step_avg:95.44ms
+step:723/1695 train_time:69006ms step_avg:95.44ms
+step:724/1695 train_time:69104ms step_avg:95.45ms
+step:725/1695 train_time:69201ms step_avg:95.45ms
+step:726/1695 train_time:69299ms step_avg:95.45ms
+step:727/1695 train_time:69398ms step_avg:95.46ms
+step:728/1695 train_time:69496ms step_avg:95.46ms
+step:729/1695 train_time:69594ms step_avg:95.46ms
+step:730/1695 train_time:69692ms step_avg:95.47ms
+step:731/1695 train_time:69790ms step_avg:95.47ms
+step:732/1695 train_time:69888ms step_avg:95.48ms
+step:733/1695 train_time:69986ms step_avg:95.48ms
+step:734/1695 train_time:70084ms step_avg:95.48ms
+step:735/1695 train_time:70181ms step_avg:95.49ms
+step:736/1695 train_time:70279ms step_avg:95.49ms
+step:737/1695 train_time:70377ms step_avg:95.49ms
+step:738/1695 train_time:70475ms step_avg:95.49ms
+step:739/1695 train_time:70572ms step_avg:95.50ms
+step:740/1695 train_time:70670ms step_avg:95.50ms
+step:741/1695 train_time:70768ms step_avg:95.50ms
+step:742/1695 train_time:70867ms step_avg:95.51ms
+step:743/1695 train_time:70965ms step_avg:95.51ms
+step:744/1695 train_time:71062ms step_avg:95.51ms
+step:745/1695 train_time:71160ms step_avg:95.52ms
+step:746/1695 train_time:71258ms step_avg:95.52ms
+step:747/1695 train_time:71356ms step_avg:95.52ms
+step:748/1695 train_time:71454ms step_avg:95.53ms
+step:749/1695 train_time:71552ms step_avg:95.53ms
+step:750/1695 train_time:71650ms step_avg:95.53ms
+step:750/1695 val_loss:3.5852 train_time:71745ms step_avg:95.66ms
+step:751/1695 train_time:71772ms step_avg:95.57ms
+step:752/1695 train_time:71856ms step_avg:95.55ms
+step:753/1695 train_time:71958ms step_avg:95.56ms
+step:754/1695 train_time:72057ms step_avg:95.57ms
+step:755/1695 train_time:72156ms step_avg:95.57ms
+step:756/1695 train_time:72254ms step_avg:95.57ms
+step:757/1695 train_time:72351ms step_avg:95.58ms
+step:758/1695 train_time:72449ms step_avg:95.58ms
+step:759/1695 train_time:72546ms step_avg:95.58ms
+step:760/1695 train_time:72643ms step_avg:95.58ms
+step:761/1695 train_time:72741ms step_avg:95.59ms
+step:762/1695 train_time:72840ms step_avg:95.59ms
+step:763/1695 train_time:72939ms step_avg:95.59ms
+step:764/1695 train_time:73037ms step_avg:95.60ms
+step:765/1695 train_time:73136ms step_avg:95.60ms
+step:766/1695 train_time:73234ms step_avg:95.61ms
+step:767/1695 train_time:73332ms step_avg:95.61ms
+step:768/1695 train_time:73430ms step_avg:95.61ms
+step:769/1695 train_time:73528ms step_avg:95.62ms
+step:770/1695 train_time:73626ms step_avg:95.62ms
+step:771/1695 train_time:73724ms step_avg:95.62ms
+step:772/1695 train_time:73823ms step_avg:95.63ms
+step:773/1695 train_time:73921ms step_avg:95.63ms
+step:774/1695 train_time:74020ms step_avg:95.63ms
+step:775/1695 train_time:74117ms step_avg:95.63ms
+step:776/1695 train_time:74215ms step_avg:95.64ms
+step:777/1695 train_time:74313ms step_avg:95.64ms
+step:778/1695 train_time:74411ms step_avg:95.64ms
+step:779/1695 train_time:74510ms step_avg:95.65ms
+step:780/1695 train_time:74607ms step_avg:95.65ms
+step:781/1695 train_time:74705ms step_avg:95.65ms
+step:782/1695 train_time:74804ms step_avg:95.66ms
+step:783/1695 train_time:74904ms step_avg:95.66ms
+step:784/1695 train_time:75002ms step_avg:95.67ms
+step:785/1695 train_time:75099ms step_avg:95.67ms
+step:786/1695 train_time:75197ms step_avg:95.67ms
+step:787/1695 train_time:75294ms step_avg:95.67ms
+step:788/1695 train_time:75392ms step_avg:95.68ms
+step:789/1695 train_time:75491ms step_avg:95.68ms
+step:790/1695 train_time:75590ms step_avg:95.68ms
+step:791/1695 train_time:75688ms step_avg:95.69ms
+step:792/1695 train_time:75787ms step_avg:95.69ms
+step:793/1695 train_time:75885ms step_avg:95.69ms
+step:794/1695 train_time:75984ms step_avg:95.70ms
+step:795/1695 train_time:76083ms step_avg:95.70ms
+step:796/1695 train_time:76181ms step_avg:95.71ms
+step:797/1695 train_time:76279ms step_avg:95.71ms
+step:798/1695 train_time:76376ms step_avg:95.71ms
+step:799/1695 train_time:76474ms step_avg:95.71ms
+step:800/1695 train_time:76572ms step_avg:95.72ms
+step:801/1695 train_time:76671ms step_avg:95.72ms
+step:802/1695 train_time:76771ms step_avg:95.72ms
+step:803/1695 train_time:76870ms step_avg:95.73ms
+step:804/1695 train_time:76970ms step_avg:95.73ms
+step:805/1695 train_time:77069ms step_avg:95.74ms
+step:806/1695 train_time:77169ms step_avg:95.74ms
+step:807/1695 train_time:77268ms step_avg:95.75ms
+step:808/1695 train_time:77367ms step_avg:95.75ms
+step:809/1695 train_time:77465ms step_avg:95.75ms
+step:810/1695 train_time:77562ms step_avg:95.76ms
+step:811/1695 train_time:77659ms step_avg:95.76ms
+step:812/1695 train_time:77757ms step_avg:95.76ms
+step:813/1695 train_time:77855ms step_avg:95.76ms
+step:814/1695 train_time:77952ms step_avg:95.76ms
+step:815/1695 train_time:78051ms step_avg:95.77ms
+step:816/1695 train_time:78150ms step_avg:95.77ms
+step:817/1695 train_time:78249ms step_avg:95.78ms
+step:818/1695 train_time:78348ms step_avg:95.78ms
+step:819/1695 train_time:78447ms step_avg:95.78ms
+step:820/1695 train_time:78545ms step_avg:95.79ms
+step:821/1695 train_time:78643ms step_avg:95.79ms
+step:822/1695 train_time:78742ms step_avg:95.79ms
+step:823/1695 train_time:78840ms step_avg:95.80ms
+step:824/1695 train_time:78937ms step_avg:95.80ms
+step:825/1695 train_time:79036ms step_avg:95.80ms
+step:826/1695 train_time:79134ms step_avg:95.80ms
+step:827/1695 train_time:79233ms step_avg:95.81ms
+step:828/1695 train_time:79332ms step_avg:95.81ms
+step:829/1695 train_time:79432ms step_avg:95.82ms
+step:830/1695 train_time:79531ms step_avg:95.82ms
+step:831/1695 train_time:79629ms step_avg:95.82ms
+step:832/1695 train_time:79728ms step_avg:95.83ms
+step:833/1695 train_time:79826ms step_avg:95.83ms
+step:834/1695 train_time:79925ms step_avg:95.83ms
+step:835/1695 train_time:80023ms step_avg:95.84ms
+step:836/1695 train_time:80122ms step_avg:95.84ms
+step:837/1695 train_time:80220ms step_avg:95.84ms
+step:838/1695 train_time:80319ms step_avg:95.85ms
+step:839/1695 train_time:80417ms step_avg:95.85ms
+step:840/1695 train_time:80517ms step_avg:95.85ms
+step:841/1695 train_time:80618ms step_avg:95.86ms
+step:842/1695 train_time:80717ms step_avg:95.86ms
+step:843/1695 train_time:80815ms step_avg:95.87ms
+step:844/1695 train_time:80914ms step_avg:95.87ms
+step:845/1695 train_time:81013ms step_avg:95.87ms
+step:846/1695 train_time:81113ms step_avg:95.88ms
+step:847/1695 train_time:81212ms step_avg:95.88ms
+step:848/1695 train_time:81312ms step_avg:95.89ms
+step:849/1695 train_time:81411ms step_avg:95.89ms
+step:850/1695 train_time:81510ms step_avg:95.89ms
+step:851/1695 train_time:81608ms step_avg:95.90ms
+step:852/1695 train_time:81707ms step_avg:95.90ms
+step:853/1695 train_time:81805ms step_avg:95.90ms
+step:854/1695 train_time:81903ms step_avg:95.91ms
+step:855/1695 train_time:82001ms step_avg:95.91ms
+step:856/1695 train_time:82099ms step_avg:95.91ms
+step:857/1695 train_time:82199ms step_avg:95.91ms
+step:858/1695 train_time:82297ms step_avg:95.92ms
+step:859/1695 train_time:82395ms step_avg:95.92ms
+step:860/1695 train_time:82495ms step_avg:95.92ms
+step:861/1695 train_time:82593ms step_avg:95.93ms
+step:862/1695 train_time:82692ms step_avg:95.93ms
+step:863/1695 train_time:82791ms step_avg:95.93ms
+step:864/1695 train_time:82889ms step_avg:95.94ms
+step:865/1695 train_time:82989ms step_avg:95.94ms
+step:866/1695 train_time:83088ms step_avg:95.94ms
+step:867/1695 train_time:83187ms step_avg:95.95ms
+step:868/1695 train_time:83285ms step_avg:95.95ms
+step:869/1695 train_time:83384ms step_avg:95.95ms
+step:870/1695 train_time:83481ms step_avg:95.96ms
+step:871/1695 train_time:83580ms step_avg:95.96ms
+step:872/1695 train_time:83677ms step_avg:95.96ms
+step:873/1695 train_time:83775ms step_avg:95.96ms
+step:874/1695 train_time:83873ms step_avg:95.96ms
+step:875/1695 train_time:83971ms step_avg:95.97ms
+step:875/1695 val_loss:3.5356 train_time:84068ms step_avg:96.08ms
+step:876/1695 train_time:84095ms step_avg:96.00ms
+step:877/1695 train_time:84181ms step_avg:95.99ms
+step:878/1695 train_time:84283ms step_avg:95.99ms
+step:879/1695 train_time:84381ms step_avg:96.00ms
+step:880/1695 train_time:84479ms step_avg:96.00ms
+step:881/1695 train_time:84578ms step_avg:96.00ms
+step:882/1695 train_time:84678ms step_avg:96.01ms
+step:883/1695 train_time:84777ms step_avg:96.01ms
+step:884/1695 train_time:84878ms step_avg:96.02ms
+step:885/1695 train_time:84976ms step_avg:96.02ms
+step:886/1695 train_time:85077ms step_avg:96.02ms
+step:887/1695 train_time:85179ms step_avg:96.03ms
+step:888/1695 train_time:85280ms step_avg:96.04ms
+step:889/1695 train_time:85381ms step_avg:96.04ms
+step:890/1695 train_time:85481ms step_avg:96.05ms
+step:891/1695 train_time:85580ms step_avg:96.05ms
+step:892/1695 train_time:85679ms step_avg:96.05ms
+step:893/1695 train_time:85778ms step_avg:96.06ms
+step:894/1695 train_time:85878ms step_avg:96.06ms
+step:895/1695 train_time:85978ms step_avg:96.06ms
+step:896/1695 train_time:86078ms step_avg:96.07ms
+step:897/1695 train_time:86179ms step_avg:96.07ms
+step:898/1695 train_time:86280ms step_avg:96.08ms
+step:899/1695 train_time:86380ms step_avg:96.08ms
+step:900/1695 train_time:86481ms step_avg:96.09ms
+step:901/1695 train_time:86581ms step_avg:96.09ms
+step:902/1695 train_time:86680ms step_avg:96.10ms
+step:903/1695 train_time:86780ms step_avg:96.10ms
+step:904/1695 train_time:86879ms step_avg:96.11ms
+step:905/1695 train_time:86978ms step_avg:96.11ms
+step:906/1695 train_time:87079ms step_avg:96.11ms
+step:907/1695 train_time:87179ms step_avg:96.12ms
+step:908/1695 train_time:87280ms step_avg:96.12ms
+step:909/1695 train_time:87380ms step_avg:96.13ms
+step:910/1695 train_time:87480ms step_avg:96.13ms
+step:911/1695 train_time:87580ms step_avg:96.14ms
+step:912/1695 train_time:87679ms step_avg:96.14ms
+step:913/1695 train_time:87780ms step_avg:96.14ms
+step:914/1695 train_time:87879ms step_avg:96.15ms
+step:915/1695 train_time:87978ms step_avg:96.15ms
+step:916/1695 train_time:88077ms step_avg:96.15ms
+step:917/1695 train_time:88178ms step_avg:96.16ms
+step:918/1695 train_time:88279ms step_avg:96.16ms
+step:919/1695 train_time:88380ms step_avg:96.17ms
+step:920/1695 train_time:88481ms step_avg:96.18ms
+step:921/1695 train_time:88581ms step_avg:96.18ms
+step:922/1695 train_time:88680ms step_avg:96.18ms
+step:923/1695 train_time:88779ms step_avg:96.19ms
+step:924/1695 train_time:88879ms step_avg:96.19ms
+step:925/1695 train_time:88980ms step_avg:96.19ms
+step:926/1695 train_time:89080ms step_avg:96.20ms
+step:927/1695 train_time:89179ms step_avg:96.20ms
+step:928/1695 train_time:89279ms step_avg:96.21ms
+step:929/1695 train_time:89379ms step_avg:96.21ms
+step:930/1695 train_time:89480ms step_avg:96.22ms
+step:931/1695 train_time:89580ms step_avg:96.22ms
+step:932/1695 train_time:89680ms step_avg:96.22ms
+step:933/1695 train_time:89781ms step_avg:96.23ms
+step:934/1695 train_time:89881ms step_avg:96.23ms
+step:935/1695 train_time:89980ms step_avg:96.24ms
+step:936/1695 train_time:90079ms step_avg:96.24ms
+step:937/1695 train_time:90179ms step_avg:96.24ms
+step:938/1695 train_time:90279ms step_avg:96.25ms
+step:939/1695 train_time:90381ms step_avg:96.25ms
+step:940/1695 train_time:90482ms step_avg:96.26ms
+step:941/1695 train_time:90583ms step_avg:96.26ms
+step:942/1695 train_time:90682ms step_avg:96.27ms
+step:943/1695 train_time:90784ms step_avg:96.27ms
+step:944/1695 train_time:90884ms step_avg:96.28ms
+step:945/1695 train_time:90984ms step_avg:96.28ms
+step:946/1695 train_time:91083ms step_avg:96.28ms
+step:947/1695 train_time:91183ms step_avg:96.29ms
+step:948/1695 train_time:91283ms step_avg:96.29ms
+step:949/1695 train_time:91382ms step_avg:96.29ms
+step:950/1695 train_time:91482ms step_avg:96.30ms
+step:951/1695 train_time:91582ms step_avg:96.30ms
+step:952/1695 train_time:91682ms step_avg:96.30ms
+step:953/1695 train_time:91783ms step_avg:96.31ms
+step:954/1695 train_time:91884ms step_avg:96.31ms
+step:955/1695 train_time:91984ms step_avg:96.32ms
+step:956/1695 train_time:92083ms step_avg:96.32ms
+step:957/1695 train_time:92182ms step_avg:96.32ms
+step:958/1695 train_time:92282ms step_avg:96.33ms
+step:959/1695 train_time:92381ms step_avg:96.33ms
+step:960/1695 train_time:92482ms step_avg:96.34ms
+step:961/1695 train_time:92581ms step_avg:96.34ms
+step:962/1695 train_time:92681ms step_avg:96.34ms
+step:963/1695 train_time:92781ms step_avg:96.35ms
+step:964/1695 train_time:92881ms step_avg:96.35ms
+step:965/1695 train_time:92982ms step_avg:96.35ms
+step:966/1695 train_time:93083ms step_avg:96.36ms
+step:967/1695 train_time:93184ms step_avg:96.36ms
+step:968/1695 train_time:93284ms step_avg:96.37ms
+step:969/1695 train_time:93384ms step_avg:96.37ms
+step:970/1695 train_time:93483ms step_avg:96.37ms
+step:971/1695 train_time:93584ms step_avg:96.38ms
+step:972/1695 train_time:93683ms step_avg:96.38ms
+step:973/1695 train_time:93782ms step_avg:96.38ms
+step:974/1695 train_time:93882ms step_avg:96.39ms
+step:975/1695 train_time:93981ms step_avg:96.39ms
+step:976/1695 train_time:94082ms step_avg:96.40ms
+step:977/1695 train_time:94182ms step_avg:96.40ms
+step:978/1695 train_time:94283ms step_avg:96.40ms
+step:979/1695 train_time:94383ms step_avg:96.41ms
+step:980/1695 train_time:94484ms step_avg:96.41ms
+step:981/1695 train_time:94583ms step_avg:96.41ms
+step:982/1695 train_time:94685ms step_avg:96.42ms
+step:983/1695 train_time:94785ms step_avg:96.42ms
+step:984/1695 train_time:94884ms step_avg:96.43ms
+step:985/1695 train_time:94983ms step_avg:96.43ms
+step:986/1695 train_time:95083ms step_avg:96.43ms
+step:987/1695 train_time:95183ms step_avg:96.44ms
+step:988/1695 train_time:95283ms step_avg:96.44ms
+step:989/1695 train_time:95383ms step_avg:96.44ms
+step:990/1695 train_time:95484ms step_avg:96.45ms
+step:991/1695 train_time:95584ms step_avg:96.45ms
+step:992/1695 train_time:95684ms step_avg:96.46ms
+step:993/1695 train_time:95784ms step_avg:96.46ms
+step:994/1695 train_time:95884ms step_avg:96.46ms
+step:995/1695 train_time:95983ms step_avg:96.47ms
+step:996/1695 train_time:96084ms step_avg:96.47ms
+step:997/1695 train_time:96184ms step_avg:96.47ms
+step:998/1695 train_time:96283ms step_avg:96.48ms
+step:999/1695 train_time:96383ms step_avg:96.48ms
+step:1000/1695 train_time:96484ms step_avg:96.48ms
+step:1000/1695 val_loss:3.4915 train_time:96581ms step_avg:96.58ms
+step:1001/1695 train_time:96608ms step_avg:96.51ms
+step:1002/1695 train_time:96695ms step_avg:96.50ms
+step:1003/1695 train_time:96797ms step_avg:96.51ms
+step:1004/1695 train_time:96897ms step_avg:96.51ms
+step:1005/1695 train_time:96996ms step_avg:96.51ms
+step:1006/1695 train_time:97095ms step_avg:96.52ms
+step:1007/1695 train_time:97194ms step_avg:96.52ms
+step:1008/1695 train_time:97293ms step_avg:96.52ms
+step:1009/1695 train_time:97393ms step_avg:96.52ms
+step:1010/1695 train_time:97492ms step_avg:96.53ms
+step:1011/1695 train_time:97594ms step_avg:96.53ms
+step:1012/1695 train_time:97696ms step_avg:96.54ms
+step:1013/1695 train_time:97798ms step_avg:96.54ms
+step:1014/1695 train_time:97899ms step_avg:96.55ms
+step:1015/1695 train_time:97998ms step_avg:96.55ms
+step:1016/1695 train_time:98097ms step_avg:96.55ms
+step:1017/1695 train_time:98197ms step_avg:96.56ms
+step:1018/1695 train_time:98296ms step_avg:96.56ms
+step:1019/1695 train_time:98396ms step_avg:96.56ms
+step:1020/1695 train_time:98496ms step_avg:96.56ms
+step:1021/1695 train_time:98597ms step_avg:96.57ms
+step:1022/1695 train_time:98698ms step_avg:96.57ms
+step:1023/1695 train_time:98799ms step_avg:96.58ms
+step:1024/1695 train_time:98902ms step_avg:96.58ms
+step:1025/1695 train_time:99001ms step_avg:96.59ms
+step:1026/1695 train_time:99101ms step_avg:96.59ms
+step:1027/1695 train_time:99200ms step_avg:96.59ms
+step:1028/1695 train_time:99299ms step_avg:96.59ms
+step:1029/1695 train_time:99399ms step_avg:96.60ms
+step:1030/1695 train_time:99499ms step_avg:96.60ms
+step:1031/1695 train_time:99600ms step_avg:96.60ms
+step:1032/1695 train_time:99699ms step_avg:96.61ms
+step:1033/1695 train_time:99800ms step_avg:96.61ms
+step:1034/1695 train_time:99899ms step_avg:96.61ms
+step:1035/1695 train_time:99999ms step_avg:96.62ms
+step:1036/1695 train_time:100100ms step_avg:96.62ms
+step:1037/1695 train_time:100199ms step_avg:96.62ms
+step:1038/1695 train_time:100299ms step_avg:96.63ms
+step:1039/1695 train_time:100399ms step_avg:96.63ms
+step:1040/1695 train_time:100499ms step_avg:96.63ms
+step:1041/1695 train_time:100599ms step_avg:96.64ms
+step:1042/1695 train_time:100699ms step_avg:96.64ms
+step:1043/1695 train_time:100799ms step_avg:96.64ms
+step:1044/1695 train_time:100899ms step_avg:96.65ms
+step:1045/1695 train_time:100998ms step_avg:96.65ms
+step:1046/1695 train_time:101099ms step_avg:96.65ms
+step:1047/1695 train_time:101198ms step_avg:96.66ms
+step:1048/1695 train_time:101298ms step_avg:96.66ms
+step:1049/1695 train_time:101399ms step_avg:96.66ms
+step:1050/1695 train_time:101499ms step_avg:96.67ms
+step:1051/1695 train_time:101599ms step_avg:96.67ms
+step:1052/1695 train_time:101700ms step_avg:96.67ms
+step:1053/1695 train_time:101801ms step_avg:96.68ms
+step:1054/1695 train_time:101901ms step_avg:96.68ms
+step:1055/1695 train_time:102000ms step_avg:96.68ms
+step:1056/1695 train_time:102101ms step_avg:96.69ms
+step:1057/1695 train_time:102200ms step_avg:96.69ms
+step:1058/1695 train_time:102299ms step_avg:96.69ms
+step:1059/1695 train_time:102399ms step_avg:96.69ms
+step:1060/1695 train_time:102498ms step_avg:96.70ms
+step:1061/1695 train_time:102598ms step_avg:96.70ms
+step:1062/1695 train_time:102699ms step_avg:96.70ms
+step:1063/1695 train_time:102799ms step_avg:96.71ms
+step:1064/1695 train_time:102899ms step_avg:96.71ms
+step:1065/1695 train_time:102998ms step_avg:96.71ms
+step:1066/1695 train_time:103098ms step_avg:96.71ms
+step:1067/1695 train_time:103199ms step_avg:96.72ms
+step:1068/1695 train_time:103299ms step_avg:96.72ms
+step:1069/1695 train_time:103399ms step_avg:96.73ms
+step:1070/1695 train_time:103500ms step_avg:96.73ms
+step:1071/1695 train_time:103601ms step_avg:96.73ms
+step:1072/1695 train_time:103701ms step_avg:96.74ms
+step:1073/1695 train_time:103800ms step_avg:96.74ms
+step:1074/1695 train_time:103899ms step_avg:96.74ms
+step:1075/1695 train_time:103999ms step_avg:96.74ms
+step:1076/1695 train_time:104098ms step_avg:96.75ms
+step:1077/1695 train_time:104199ms step_avg:96.75ms
+step:1078/1695 train_time:104299ms step_avg:96.75ms
+step:1079/1695 train_time:104400ms step_avg:96.76ms
+step:1080/1695 train_time:104500ms step_avg:96.76ms
+step:1081/1695 train_time:104599ms step_avg:96.76ms
+step:1082/1695 train_time:104699ms step_avg:96.76ms
+step:1083/1695 train_time:104800ms step_avg:96.77ms
+step:1084/1695 train_time:104899ms step_avg:96.77ms
+step:1085/1695 train_time:104999ms step_avg:96.77ms
+step:1086/1695 train_time:105098ms step_avg:96.78ms
+step:1087/1695 train_time:105198ms step_avg:96.78ms
+step:1088/1695 train_time:105299ms step_avg:96.78ms
+step:1089/1695 train_time:105399ms step_avg:96.78ms
+step:1090/1695 train_time:105499ms step_avg:96.79ms
+step:1091/1695 train_time:105600ms step_avg:96.79ms
+step:1092/1695 train_time:105701ms step_avg:96.80ms
+step:1093/1695 train_time:105800ms step_avg:96.80ms
+step:1094/1695 train_time:105900ms step_avg:96.80ms
+step:1095/1695 train_time:106000ms step_avg:96.80ms
+step:1096/1695 train_time:106099ms step_avg:96.81ms
+step:1097/1695 train_time:106199ms step_avg:96.81ms
+step:1098/1695 train_time:106299ms step_avg:96.81ms
+step:1099/1695 train_time:106398ms step_avg:96.81ms
+step:1100/1695 train_time:106498ms step_avg:96.82ms
+step:1101/1695 train_time:106598ms step_avg:96.82ms
+step:1102/1695 train_time:106699ms step_avg:96.82ms
+step:1103/1695 train_time:106799ms step_avg:96.83ms
+step:1104/1695 train_time:106899ms step_avg:96.83ms
+step:1105/1695 train_time:107000ms step_avg:96.83ms
+step:1106/1695 train_time:107100ms step_avg:96.84ms
+step:1107/1695 train_time:107199ms step_avg:96.84ms
+step:1108/1695 train_time:107299ms step_avg:96.84ms
+step:1109/1695 train_time:107399ms step_avg:96.84ms
+step:1110/1695 train_time:107499ms step_avg:96.85ms
+step:1111/1695 train_time:107598ms step_avg:96.85ms
+step:1112/1695 train_time:107698ms step_avg:96.85ms
+step:1113/1695 train_time:107799ms step_avg:96.85ms
+step:1114/1695 train_time:107899ms step_avg:96.86ms
+step:1115/1695 train_time:108000ms step_avg:96.86ms
+step:1116/1695 train_time:108099ms step_avg:96.86ms
+step:1117/1695 train_time:108199ms step_avg:96.87ms
+step:1118/1695 train_time:108299ms step_avg:96.87ms
+step:1119/1695 train_time:108399ms step_avg:96.87ms
+step:1120/1695 train_time:108499ms step_avg:96.87ms
+step:1121/1695 train_time:108599ms step_avg:96.88ms
+step:1122/1695 train_time:108700ms step_avg:96.88ms
+step:1123/1695 train_time:108800ms step_avg:96.88ms
+step:1124/1695 train_time:108900ms step_avg:96.89ms
+step:1125/1695 train_time:108999ms step_avg:96.89ms
+step:1125/1695 val_loss:3.4400 train_time:109096ms step_avg:96.97ms
+step:1126/1695 train_time:109124ms step_avg:96.91ms
+step:1127/1695 train_time:109210ms step_avg:96.90ms
+step:1128/1695 train_time:109311ms step_avg:96.91ms
+step:1129/1695 train_time:109411ms step_avg:96.91ms
+step:1130/1695 train_time:109510ms step_avg:96.91ms
+step:1131/1695 train_time:109609ms step_avg:96.91ms
+step:1132/1695 train_time:109709ms step_avg:96.92ms
+step:1133/1695 train_time:109808ms step_avg:96.92ms
+step:1134/1695 train_time:109908ms step_avg:96.92ms
+step:1135/1695 train_time:110008ms step_avg:96.92ms
+step:1136/1695 train_time:110109ms step_avg:96.93ms
+step:1137/1695 train_time:110211ms step_avg:96.93ms
+step:1138/1695 train_time:110313ms step_avg:96.94ms
+step:1139/1695 train_time:110413ms step_avg:96.94ms
+step:1140/1695 train_time:110513ms step_avg:96.94ms
+step:1141/1695 train_time:110613ms step_avg:96.94ms
+step:1142/1695 train_time:110713ms step_avg:96.95ms
+step:1143/1695 train_time:110813ms step_avg:96.95ms
+step:1144/1695 train_time:110913ms step_avg:96.95ms
+step:1145/1695 train_time:111014ms step_avg:96.96ms
+step:1146/1695 train_time:111115ms step_avg:96.96ms
+step:1147/1695 train_time:111216ms step_avg:96.96ms
+step:1148/1695 train_time:111317ms step_avg:96.97ms
+step:1149/1695 train_time:111418ms step_avg:96.97ms
+step:1150/1695 train_time:111519ms step_avg:96.97ms
+step:1151/1695 train_time:111621ms step_avg:96.98ms
+step:1152/1695 train_time:111722ms step_avg:96.98ms
+step:1153/1695 train_time:111824ms step_avg:96.99ms
+step:1154/1695 train_time:111924ms step_avg:96.99ms
+step:1155/1695 train_time:112025ms step_avg:96.99ms
+step:1156/1695 train_time:112126ms step_avg:96.99ms
+step:1157/1695 train_time:112228ms step_avg:97.00ms
+step:1158/1695 train_time:112330ms step_avg:97.00ms
+step:1159/1695 train_time:112431ms step_avg:97.01ms
+step:1160/1695 train_time:112530ms step_avg:97.01ms
+step:1161/1695 train_time:112631ms step_avg:97.01ms
+step:1162/1695 train_time:112731ms step_avg:97.01ms
+step:1163/1695 train_time:112833ms step_avg:97.02ms
+step:1164/1695 train_time:112933ms step_avg:97.02ms
+step:1165/1695 train_time:113033ms step_avg:97.02ms
+step:1166/1695 train_time:113134ms step_avg:97.03ms
+step:1167/1695 train_time:113234ms step_avg:97.03ms
+step:1168/1695 train_time:113335ms step_avg:97.03ms
+step:1169/1695 train_time:113436ms step_avg:97.04ms
+step:1170/1695 train_time:113538ms step_avg:97.04ms
+step:1171/1695 train_time:113637ms step_avg:97.04ms
+step:1172/1695 train_time:113740ms step_avg:97.05ms
+step:1173/1695 train_time:113840ms step_avg:97.05ms
+step:1174/1695 train_time:113942ms step_avg:97.05ms
+step:1175/1695 train_time:114043ms step_avg:97.06ms
+step:1176/1695 train_time:114145ms step_avg:97.06ms
+step:1177/1695 train_time:114247ms step_avg:97.07ms
+step:1178/1695 train_time:114349ms step_avg:97.07ms
+step:1179/1695 train_time:114453ms step_avg:97.08ms
+step:1180/1695 train_time:114553ms step_avg:97.08ms
+step:1181/1695 train_time:114653ms step_avg:97.08ms
+step:1182/1695 train_time:114754ms step_avg:97.08ms
+step:1183/1695 train_time:114853ms step_avg:97.09ms
+step:1184/1695 train_time:114955ms step_avg:97.09ms
+step:1185/1695 train_time:115057ms step_avg:97.09ms
+step:1186/1695 train_time:115159ms step_avg:97.10ms
+step:1187/1695 train_time:115260ms step_avg:97.10ms
+step:1188/1695 train_time:115362ms step_avg:97.11ms
+step:1189/1695 train_time:115463ms step_avg:97.11ms
+step:1190/1695 train_time:115564ms step_avg:97.11ms
+step:1191/1695 train_time:115664ms step_avg:97.12ms
+step:1192/1695 train_time:115765ms step_avg:97.12ms
+step:1193/1695 train_time:115866ms step_avg:97.12ms
+step:1194/1695 train_time:115969ms step_avg:97.13ms
+step:1195/1695 train_time:116070ms step_avg:97.13ms
+step:1196/1695 train_time:116171ms step_avg:97.13ms
+step:1197/1695 train_time:116273ms step_avg:97.14ms
+step:1198/1695 train_time:116373ms step_avg:97.14ms
+step:1199/1695 train_time:116473ms step_avg:97.14ms
+step:1200/1695 train_time:116573ms step_avg:97.14ms
+step:1201/1695 train_time:116673ms step_avg:97.15ms
+step:1202/1695 train_time:116775ms step_avg:97.15ms
+step:1203/1695 train_time:116877ms step_avg:97.15ms
+step:1204/1695 train_time:116979ms step_avg:97.16ms
+step:1205/1695 train_time:117080ms step_avg:97.16ms
+step:1206/1695 train_time:117181ms step_avg:97.16ms
+step:1207/1695 train_time:117282ms step_avg:97.17ms
+step:1208/1695 train_time:117384ms step_avg:97.17ms
+step:1209/1695 train_time:117485ms step_avg:97.18ms
+step:1210/1695 train_time:117586ms step_avg:97.18ms
+step:1211/1695 train_time:117687ms step_avg:97.18ms
+step:1212/1695 train_time:117789ms step_avg:97.19ms
+step:1213/1695 train_time:117890ms step_avg:97.19ms
+step:1214/1695 train_time:117991ms step_avg:97.19ms
+step:1215/1695 train_time:118092ms step_avg:97.20ms
+step:1216/1695 train_time:118194ms step_avg:97.20ms
+step:1217/1695 train_time:118293ms step_avg:97.20ms
+step:1218/1695 train_time:118395ms step_avg:97.20ms
+step:1219/1695 train_time:118495ms step_avg:97.21ms
+step:1220/1695 train_time:118597ms step_avg:97.21ms
+step:1221/1695 train_time:118698ms step_avg:97.21ms
+step:1222/1695 train_time:118800ms step_avg:97.22ms
+step:1223/1695 train_time:118903ms step_avg:97.22ms
+step:1224/1695 train_time:119006ms step_avg:97.23ms
+step:1225/1695 train_time:119107ms step_avg:97.23ms
+step:1226/1695 train_time:119207ms step_avg:97.23ms
+step:1227/1695 train_time:119308ms step_avg:97.24ms
+step:1228/1695 train_time:119409ms step_avg:97.24ms
+step:1229/1695 train_time:119510ms step_avg:97.24ms
+step:1230/1695 train_time:119612ms step_avg:97.25ms
+step:1231/1695 train_time:119713ms step_avg:97.25ms
+step:1232/1695 train_time:119813ms step_avg:97.25ms
+step:1233/1695 train_time:119914ms step_avg:97.25ms
+step:1234/1695 train_time:120015ms step_avg:97.26ms
+step:1235/1695 train_time:120115ms step_avg:97.26ms
+step:1236/1695 train_time:120216ms step_avg:97.26ms
+step:1237/1695 train_time:120318ms step_avg:97.27ms
+step:1238/1695 train_time:120421ms step_avg:97.27ms
+step:1239/1695 train_time:120522ms step_avg:97.27ms
+step:1240/1695 train_time:120624ms step_avg:97.28ms
+step:1241/1695 train_time:120725ms step_avg:97.28ms
+step:1242/1695 train_time:120826ms step_avg:97.28ms
+step:1243/1695 train_time:120927ms step_avg:97.29ms
+step:1244/1695 train_time:121028ms step_avg:97.29ms
+step:1245/1695 train_time:121130ms step_avg:97.29ms
+step:1246/1695 train_time:121232ms step_avg:97.30ms
+step:1247/1695 train_time:121332ms step_avg:97.30ms
+step:1248/1695 train_time:121433ms step_avg:97.30ms
+step:1249/1695 train_time:121533ms step_avg:97.30ms
+step:1250/1695 train_time:121633ms step_avg:97.31ms
+step:1250/1695 val_loss:3.3953 train_time:121731ms step_avg:97.38ms
+step:1251/1695 train_time:121758ms step_avg:97.33ms
+step:1252/1695 train_time:121845ms step_avg:97.32ms
+step:1253/1695 train_time:121947ms step_avg:97.32ms
+step:1254/1695 train_time:122048ms step_avg:97.33ms
+step:1255/1695 train_time:122149ms step_avg:97.33ms
+step:1256/1695 train_time:122249ms step_avg:97.33ms
+step:1257/1695 train_time:122349ms step_avg:97.33ms
+step:1258/1695 train_time:122450ms step_avg:97.34ms
+step:1259/1695 train_time:122551ms step_avg:97.34ms
+step:1260/1695 train_time:122651ms step_avg:97.34ms
+step:1261/1695 train_time:122754ms step_avg:97.35ms
+step:1262/1695 train_time:122855ms step_avg:97.35ms
+step:1263/1695 train_time:122957ms step_avg:97.35ms
+step:1264/1695 train_time:123057ms step_avg:97.36ms
+step:1265/1695 train_time:123157ms step_avg:97.36ms
+step:1266/1695 train_time:123258ms step_avg:97.36ms
+step:1267/1695 train_time:123359ms step_avg:97.36ms
+step:1268/1695 train_time:123460ms step_avg:97.37ms
+step:1269/1695 train_time:123561ms step_avg:97.37ms
+step:1270/1695 train_time:123662ms step_avg:97.37ms
+step:1271/1695 train_time:123764ms step_avg:97.38ms
+step:1272/1695 train_time:123864ms step_avg:97.38ms
+step:1273/1695 train_time:123965ms step_avg:97.38ms
+step:1274/1695 train_time:124066ms step_avg:97.38ms
+step:1275/1695 train_time:124168ms step_avg:97.39ms
+step:1276/1695 train_time:124272ms step_avg:97.39ms
+step:1277/1695 train_time:124372ms step_avg:97.39ms
+step:1278/1695 train_time:124473ms step_avg:97.40ms
+step:1279/1695 train_time:124574ms step_avg:97.40ms
+step:1280/1695 train_time:124674ms step_avg:97.40ms
+step:1281/1695 train_time:124775ms step_avg:97.40ms
+step:1282/1695 train_time:124875ms step_avg:97.41ms
+step:1283/1695 train_time:124975ms step_avg:97.41ms
+step:1284/1695 train_time:125075ms step_avg:97.41ms
+step:1285/1695 train_time:125176ms step_avg:97.41ms
+step:1286/1695 train_time:125277ms step_avg:97.42ms
+step:1287/1695 train_time:125379ms step_avg:97.42ms
+step:1288/1695 train_time:125480ms step_avg:97.42ms
+step:1289/1695 train_time:125582ms step_avg:97.43ms
+step:1290/1695 train_time:125683ms step_avg:97.43ms
+step:1291/1695 train_time:125786ms step_avg:97.43ms
+step:1292/1695 train_time:125886ms step_avg:97.43ms
+step:1293/1695 train_time:125986ms step_avg:97.44ms
+step:1294/1695 train_time:126089ms step_avg:97.44ms
+step:1295/1695 train_time:126191ms step_avg:97.44ms
+step:1296/1695 train_time:126292ms step_avg:97.45ms
+step:1297/1695 train_time:126393ms step_avg:97.45ms
+step:1298/1695 train_time:126494ms step_avg:97.45ms
+step:1299/1695 train_time:126594ms step_avg:97.45ms
+step:1300/1695 train_time:126694ms step_avg:97.46ms
+step:1301/1695 train_time:126795ms step_avg:97.46ms
+step:1302/1695 train_time:126896ms step_avg:97.46ms
+step:1303/1695 train_time:126997ms step_avg:97.47ms
+step:1304/1695 train_time:127098ms step_avg:97.47ms
+step:1305/1695 train_time:127200ms step_avg:97.47ms
+step:1306/1695 train_time:127303ms step_avg:97.48ms
+step:1307/1695 train_time:127405ms step_avg:97.48ms
+step:1308/1695 train_time:127506ms step_avg:97.48ms
+step:1309/1695 train_time:127607ms step_avg:97.48ms
+step:1310/1695 train_time:127709ms step_avg:97.49ms
+step:1311/1695 train_time:127812ms step_avg:97.49ms
+step:1312/1695 train_time:127913ms step_avg:97.49ms
+step:1313/1695 train_time:128014ms step_avg:97.50ms
+step:1314/1695 train_time:128114ms step_avg:97.50ms
+step:1315/1695 train_time:128215ms step_avg:97.50ms
+step:1316/1695 train_time:128315ms step_avg:97.50ms
+step:1317/1695 train_time:128415ms step_avg:97.51ms
+step:1318/1695 train_time:128517ms step_avg:97.51ms
+step:1319/1695 train_time:128618ms step_avg:97.51ms
+step:1320/1695 train_time:128720ms step_avg:97.51ms
+step:1321/1695 train_time:128823ms step_avg:97.52ms
+step:1322/1695 train_time:128924ms step_avg:97.52ms
+step:1323/1695 train_time:129025ms step_avg:97.52ms
+step:1324/1695 train_time:129127ms step_avg:97.53ms
+step:1325/1695 train_time:129227ms step_avg:97.53ms
+step:1326/1695 train_time:129329ms step_avg:97.53ms
+step:1327/1695 train_time:129432ms step_avg:97.54ms
+step:1328/1695 train_time:129533ms step_avg:97.54ms
+step:1329/1695 train_time:129633ms step_avg:97.54ms
+step:1330/1695 train_time:129733ms step_avg:97.54ms
+step:1331/1695 train_time:129834ms step_avg:97.55ms
+step:1332/1695 train_time:129936ms step_avg:97.55ms
+step:1333/1695 train_time:130037ms step_avg:97.55ms
+step:1334/1695 train_time:130138ms step_avg:97.55ms
+step:1335/1695 train_time:130239ms step_avg:97.56ms
+step:1336/1695 train_time:130342ms step_avg:97.56ms
+step:1337/1695 train_time:130444ms step_avg:97.57ms
+step:1338/1695 train_time:130545ms step_avg:97.57ms
+step:1339/1695 train_time:130647ms step_avg:97.57ms
+step:1340/1695 train_time:130748ms step_avg:97.57ms
+step:1341/1695 train_time:130848ms step_avg:97.58ms
+step:1342/1695 train_time:130949ms step_avg:97.58ms
+step:1343/1695 train_time:131050ms step_avg:97.58ms
+step:1344/1695 train_time:131152ms step_avg:97.58ms
+step:1345/1695 train_time:131253ms step_avg:97.59ms
+step:1346/1695 train_time:131355ms step_avg:97.59ms
+step:1347/1695 train_time:131455ms step_avg:97.59ms
+step:1348/1695 train_time:131556ms step_avg:97.59ms
+step:1349/1695 train_time:131658ms step_avg:97.60ms
+step:1350/1695 train_time:131759ms step_avg:97.60ms
+step:1351/1695 train_time:131859ms step_avg:97.60ms
+step:1352/1695 train_time:131961ms step_avg:97.60ms
+step:1353/1695 train_time:132064ms step_avg:97.61ms
+step:1354/1695 train_time:132165ms step_avg:97.61ms
+step:1355/1695 train_time:132266ms step_avg:97.61ms
+step:1356/1695 train_time:132369ms step_avg:97.62ms
+step:1357/1695 train_time:132470ms step_avg:97.62ms
+step:1358/1695 train_time:132571ms step_avg:97.62ms
+step:1359/1695 train_time:132673ms step_avg:97.63ms
+step:1360/1695 train_time:132773ms step_avg:97.63ms
+step:1361/1695 train_time:132873ms step_avg:97.63ms
+step:1362/1695 train_time:132973ms step_avg:97.63ms
+step:1363/1695 train_time:133075ms step_avg:97.63ms
+step:1364/1695 train_time:133176ms step_avg:97.64ms
+step:1365/1695 train_time:133278ms step_avg:97.64ms
+step:1366/1695 train_time:133379ms step_avg:97.64ms
+step:1367/1695 train_time:133481ms step_avg:97.65ms
+step:1368/1695 train_time:133584ms step_avg:97.65ms
+step:1369/1695 train_time:133685ms step_avg:97.65ms
+step:1370/1695 train_time:133785ms step_avg:97.65ms
+step:1371/1695 train_time:133886ms step_avg:97.66ms
+step:1372/1695 train_time:133987ms step_avg:97.66ms
+step:1373/1695 train_time:134090ms step_avg:97.66ms
+step:1374/1695 train_time:134192ms step_avg:97.66ms
+step:1375/1695 train_time:134293ms step_avg:97.67ms
+step:1375/1695 val_loss:3.3553 train_time:134391ms step_avg:97.74ms
+step:1376/1695 train_time:134418ms step_avg:97.69ms
+step:1377/1695 train_time:134504ms step_avg:97.68ms
+step:1378/1695 train_time:134605ms step_avg:97.68ms
+step:1379/1695 train_time:134706ms step_avg:97.68ms
+step:1380/1695 train_time:134809ms step_avg:97.69ms
+step:1381/1695 train_time:134910ms step_avg:97.69ms
+step:1382/1695 train_time:135010ms step_avg:97.69ms
+step:1383/1695 train_time:135110ms step_avg:97.69ms
+step:1384/1695 train_time:135211ms step_avg:97.70ms
+step:1385/1695 train_time:135313ms step_avg:97.70ms
+step:1386/1695 train_time:135418ms step_avg:97.70ms
+step:1387/1695 train_time:135520ms step_avg:97.71ms
+step:1388/1695 train_time:135622ms step_avg:97.71ms
+step:1389/1695 train_time:135725ms step_avg:97.71ms
+step:1390/1695 train_time:135827ms step_avg:97.72ms
+step:1391/1695 train_time:135929ms step_avg:97.72ms
+step:1392/1695 train_time:136031ms step_avg:97.72ms
+step:1393/1695 train_time:136133ms step_avg:97.73ms
+step:1394/1695 train_time:136234ms step_avg:97.73ms
+step:1395/1695 train_time:136335ms step_avg:97.73ms
+step:1396/1695 train_time:136437ms step_avg:97.73ms
+step:1397/1695 train_time:136541ms step_avg:97.74ms
+step:1398/1695 train_time:136643ms step_avg:97.74ms
+step:1399/1695 train_time:136744ms step_avg:97.74ms
+step:1400/1695 train_time:136847ms step_avg:97.75ms
+step:1401/1695 train_time:136948ms step_avg:97.75ms
+step:1402/1695 train_time:137050ms step_avg:97.75ms
+step:1403/1695 train_time:137152ms step_avg:97.76ms
+step:1404/1695 train_time:137256ms step_avg:97.76ms
+step:1405/1695 train_time:137357ms step_avg:97.76ms
+step:1406/1695 train_time:137459ms step_avg:97.77ms
+step:1407/1695 train_time:137561ms step_avg:97.77ms
+step:1408/1695 train_time:137662ms step_avg:97.77ms
+step:1409/1695 train_time:137766ms step_avg:97.78ms
+step:1410/1695 train_time:137869ms step_avg:97.78ms
+step:1411/1695 train_time:137970ms step_avg:97.78ms
+step:1412/1695 train_time:138073ms step_avg:97.79ms
+step:1413/1695 train_time:138174ms step_avg:97.79ms
+step:1414/1695 train_time:138276ms step_avg:97.79ms
+step:1415/1695 train_time:138378ms step_avg:97.79ms
+step:1416/1695 train_time:138479ms step_avg:97.80ms
+step:1417/1695 train_time:138579ms step_avg:97.80ms
+step:1418/1695 train_time:138680ms step_avg:97.80ms
+step:1419/1695 train_time:138782ms step_avg:97.80ms
+step:1420/1695 train_time:138882ms step_avg:97.80ms
+step:1421/1695 train_time:138984ms step_avg:97.81ms
+step:1422/1695 train_time:139087ms step_avg:97.81ms
+step:1423/1695 train_time:139190ms step_avg:97.81ms
+step:1424/1695 train_time:139293ms step_avg:97.82ms
+step:1425/1695 train_time:139395ms step_avg:97.82ms
+step:1426/1695 train_time:139498ms step_avg:97.82ms
+step:1427/1695 train_time:139599ms step_avg:97.83ms
+step:1428/1695 train_time:139700ms step_avg:97.83ms
+step:1429/1695 train_time:139801ms step_avg:97.83ms
+step:1430/1695 train_time:139902ms step_avg:97.83ms
+step:1431/1695 train_time:140005ms step_avg:97.84ms
+step:1432/1695 train_time:140106ms step_avg:97.84ms
+step:1433/1695 train_time:140209ms step_avg:97.84ms
+step:1434/1695 train_time:140312ms step_avg:97.85ms
+step:1435/1695 train_time:140417ms step_avg:97.85ms
+step:1436/1695 train_time:140520ms step_avg:97.86ms
+step:1437/1695 train_time:140621ms step_avg:97.86ms
+step:1438/1695 train_time:140723ms step_avg:97.86ms
+step:1439/1695 train_time:140826ms step_avg:97.86ms
+step:1440/1695 train_time:140928ms step_avg:97.87ms
+step:1441/1695 train_time:141030ms step_avg:97.87ms
+step:1442/1695 train_time:141131ms step_avg:97.87ms
+step:1443/1695 train_time:141233ms step_avg:97.87ms
+step:1444/1695 train_time:141335ms step_avg:97.88ms
+step:1445/1695 train_time:141436ms step_avg:97.88ms
+step:1446/1695 train_time:141538ms step_avg:97.88ms
+step:1447/1695 train_time:141639ms step_avg:97.88ms
+step:1448/1695 train_time:141743ms step_avg:97.89ms
+step:1449/1695 train_time:141844ms step_avg:97.89ms
+step:1450/1695 train_time:141946ms step_avg:97.89ms
+step:1451/1695 train_time:142048ms step_avg:97.90ms
+step:1452/1695 train_time:142150ms step_avg:97.90ms
+step:1453/1695 train_time:142253ms step_avg:97.90ms
+step:1454/1695 train_time:142356ms step_avg:97.91ms
+step:1455/1695 train_time:142458ms step_avg:97.91ms
+step:1456/1695 train_time:142560ms step_avg:97.91ms
+step:1457/1695 train_time:142662ms step_avg:97.92ms
+step:1458/1695 train_time:142765ms step_avg:97.92ms
+step:1459/1695 train_time:142867ms step_avg:97.92ms
+step:1460/1695 train_time:142968ms step_avg:97.92ms
+step:1461/1695 train_time:143071ms step_avg:97.93ms
+step:1462/1695 train_time:143173ms step_avg:97.93ms
+step:1463/1695 train_time:143275ms step_avg:97.93ms
+step:1464/1695 train_time:143377ms step_avg:97.94ms
+step:1465/1695 train_time:143477ms step_avg:97.94ms
+step:1466/1695 train_time:143579ms step_avg:97.94ms
+step:1467/1695 train_time:143679ms step_avg:97.94ms
+step:1468/1695 train_time:143781ms step_avg:97.94ms
+step:1469/1695 train_time:143884ms step_avg:97.95ms
+step:1470/1695 train_time:143986ms step_avg:97.95ms
+step:1471/1695 train_time:144088ms step_avg:97.95ms
+step:1472/1695 train_time:144190ms step_avg:97.96ms
+step:1473/1695 train_time:144292ms step_avg:97.96ms
+step:1474/1695 train_time:144393ms step_avg:97.96ms
+step:1475/1695 train_time:144495ms step_avg:97.96ms
+step:1476/1695 train_time:144597ms step_avg:97.97ms
+step:1477/1695 train_time:144699ms step_avg:97.97ms
+step:1478/1695 train_time:144800ms step_avg:97.97ms
+step:1479/1695 train_time:144901ms step_avg:97.97ms
+step:1480/1695 train_time:145003ms step_avg:97.98ms
+step:1481/1695 train_time:145107ms step_avg:97.98ms
+step:1482/1695 train_time:145208ms step_avg:97.98ms
+step:1483/1695 train_time:145310ms step_avg:97.98ms
+step:1484/1695 train_time:145413ms step_avg:97.99ms
+step:1485/1695 train_time:145515ms step_avg:97.99ms
+step:1486/1695 train_time:145616ms step_avg:97.99ms
+step:1487/1695 train_time:145717ms step_avg:97.99ms
+step:1488/1695 train_time:145820ms step_avg:98.00ms
+step:1489/1695 train_time:145923ms step_avg:98.00ms
+step:1490/1695 train_time:146026ms step_avg:98.00ms
+step:1491/1695 train_time:146127ms step_avg:98.01ms
+step:1492/1695 train_time:146229ms step_avg:98.01ms
+step:1493/1695 train_time:146330ms step_avg:98.01ms
+step:1494/1695 train_time:146432ms step_avg:98.01ms
+step:1495/1695 train_time:146535ms step_avg:98.02ms
+step:1496/1695 train_time:146637ms step_avg:98.02ms
+step:1497/1695 train_time:146738ms step_avg:98.02ms
+step:1498/1695 train_time:146840ms step_avg:98.02ms
+step:1499/1695 train_time:146942ms step_avg:98.03ms
+step:1500/1695 train_time:147044ms step_avg:98.03ms
+step:1500/1695 val_loss:3.3201 train_time:147143ms step_avg:98.10ms
+step:1501/1695 train_time:147170ms step_avg:98.05ms
+step:1502/1695 train_time:147256ms step_avg:98.04ms
+step:1503/1695 train_time:147358ms step_avg:98.04ms
+step:1504/1695 train_time:147460ms step_avg:98.05ms
+step:1505/1695 train_time:147562ms step_avg:98.05ms
+step:1506/1695 train_time:147663ms step_avg:98.05ms
+step:1507/1695 train_time:147764ms step_avg:98.05ms
+step:1508/1695
train_time:147865ms step_avg:98.05ms +step:1509/1695 train_time:147968ms step_avg:98.06ms +step:1510/1695 train_time:148070ms step_avg:98.06ms +step:1511/1695 train_time:148173ms step_avg:98.06ms +step:1512/1695 train_time:148275ms step_avg:98.07ms +step:1513/1695 train_time:148376ms step_avg:98.07ms +step:1514/1695 train_time:148479ms step_avg:98.07ms +step:1515/1695 train_time:148583ms step_avg:98.07ms +step:1516/1695 train_time:148685ms step_avg:98.08ms +step:1517/1695 train_time:148785ms step_avg:98.08ms +step:1518/1695 train_time:148887ms step_avg:98.08ms +step:1519/1695 train_time:148990ms step_avg:98.08ms +step:1520/1695 train_time:149091ms step_avg:98.09ms +step:1521/1695 train_time:149192ms step_avg:98.09ms +step:1522/1695 train_time:149294ms step_avg:98.09ms +step:1523/1695 train_time:149398ms step_avg:98.09ms +step:1524/1695 train_time:149503ms step_avg:98.10ms +step:1525/1695 train_time:149606ms step_avg:98.10ms +step:1526/1695 train_time:149708ms step_avg:98.10ms +step:1527/1695 train_time:149809ms step_avg:98.11ms +step:1528/1695 train_time:149915ms step_avg:98.11ms +step:1529/1695 train_time:150016ms step_avg:98.11ms +step:1530/1695 train_time:150118ms step_avg:98.12ms +step:1531/1695 train_time:150220ms step_avg:98.12ms +step:1532/1695 train_time:150322ms step_avg:98.12ms +step:1533/1695 train_time:150424ms step_avg:98.12ms +step:1534/1695 train_time:150526ms step_avg:98.13ms +step:1535/1695 train_time:150628ms step_avg:98.13ms +step:1536/1695 train_time:150731ms step_avg:98.13ms +step:1537/1695 train_time:150833ms step_avg:98.13ms +step:1538/1695 train_time:150934ms step_avg:98.14ms +step:1539/1695 train_time:151035ms step_avg:98.14ms +step:1540/1695 train_time:151136ms step_avg:98.14ms +step:1541/1695 train_time:151239ms step_avg:98.14ms +step:1542/1695 train_time:151344ms step_avg:98.15ms +step:1543/1695 train_time:151446ms step_avg:98.15ms +step:1544/1695 train_time:151548ms step_avg:98.15ms +step:1545/1695 train_time:151649ms step_avg:98.15ms +step:1546/1695 train_time:151751ms step_avg:98.16ms +step:1547/1695 train_time:151855ms step_avg:98.16ms +step:1548/1695 train_time:151956ms step_avg:98.16ms +step:1549/1695 train_time:152058ms step_avg:98.17ms +step:1550/1695 train_time:152160ms step_avg:98.17ms +step:1551/1695 train_time:152262ms step_avg:98.17ms +step:1552/1695 train_time:152364ms step_avg:98.17ms +step:1553/1695 train_time:152467ms step_avg:98.18ms +step:1554/1695 train_time:152569ms step_avg:98.18ms +step:1555/1695 train_time:152670ms step_avg:98.18ms +step:1556/1695 train_time:152773ms step_avg:98.18ms +step:1557/1695 train_time:152876ms step_avg:98.19ms +step:1558/1695 train_time:152978ms step_avg:98.19ms +step:1559/1695 train_time:153081ms step_avg:98.19ms +step:1560/1695 train_time:153181ms step_avg:98.19ms +step:1561/1695 train_time:153283ms step_avg:98.20ms +step:1562/1695 train_time:153385ms step_avg:98.20ms +step:1563/1695 train_time:153488ms step_avg:98.20ms +step:1564/1695 train_time:153590ms step_avg:98.20ms +step:1565/1695 train_time:153691ms step_avg:98.21ms +step:1566/1695 train_time:153793ms step_avg:98.21ms +step:1567/1695 train_time:153894ms step_avg:98.21ms +step:1568/1695 train_time:153995ms step_avg:98.21ms +step:1569/1695 train_time:154096ms step_avg:98.21ms +step:1570/1695 train_time:154198ms step_avg:98.22ms +step:1571/1695 train_time:154299ms step_avg:98.22ms +step:1572/1695 train_time:154401ms step_avg:98.22ms +step:1573/1695 train_time:154504ms step_avg:98.22ms +step:1574/1695 train_time:154606ms step_avg:98.22ms +step:1575/1695 
train_time:154708ms step_avg:98.23ms +step:1576/1695 train_time:154810ms step_avg:98.23ms +step:1577/1695 train_time:154914ms step_avg:98.23ms +step:1578/1695 train_time:155015ms step_avg:98.23ms +step:1579/1695 train_time:155116ms step_avg:98.24ms +step:1580/1695 train_time:155218ms step_avg:98.24ms +step:1581/1695 train_time:155319ms step_avg:98.24ms +step:1582/1695 train_time:155421ms step_avg:98.24ms +step:1583/1695 train_time:155524ms step_avg:98.25ms +step:1584/1695 train_time:155626ms step_avg:98.25ms +step:1585/1695 train_time:155728ms step_avg:98.25ms +step:1586/1695 train_time:155831ms step_avg:98.25ms +step:1587/1695 train_time:155933ms step_avg:98.26ms +step:1588/1695 train_time:156034ms step_avg:98.26ms +step:1589/1695 train_time:156135ms step_avg:98.26ms +step:1590/1695 train_time:156237ms step_avg:98.26ms +step:1591/1695 train_time:156340ms step_avg:98.26ms +step:1592/1695 train_time:156442ms step_avg:98.27ms +step:1593/1695 train_time:156543ms step_avg:98.27ms +step:1594/1695 train_time:156647ms step_avg:98.27ms +step:1595/1695 train_time:156748ms step_avg:98.27ms +step:1596/1695 train_time:156850ms step_avg:98.28ms +step:1597/1695 train_time:156952ms step_avg:98.28ms +step:1598/1695 train_time:157055ms step_avg:98.28ms +step:1599/1695 train_time:157156ms step_avg:98.28ms +step:1600/1695 train_time:157257ms step_avg:98.29ms +step:1601/1695 train_time:157360ms step_avg:98.29ms +step:1602/1695 train_time:157461ms step_avg:98.29ms +step:1603/1695 train_time:157563ms step_avg:98.29ms +step:1604/1695 train_time:157664ms step_avg:98.29ms +step:1605/1695 train_time:157767ms step_avg:98.30ms +step:1606/1695 train_time:157870ms step_avg:98.30ms +step:1607/1695 train_time:157972ms step_avg:98.30ms +step:1608/1695 train_time:158074ms step_avg:98.30ms +step:1609/1695 train_time:158175ms step_avg:98.31ms +step:1610/1695 train_time:158277ms step_avg:98.31ms +step:1611/1695 train_time:158380ms step_avg:98.31ms +step:1612/1695 train_time:158481ms step_avg:98.31ms +step:1613/1695 train_time:158582ms step_avg:98.32ms +step:1614/1695 train_time:158683ms step_avg:98.32ms +step:1615/1695 train_time:158784ms step_avg:98.32ms +step:1616/1695 train_time:158887ms step_avg:98.32ms +step:1617/1695 train_time:158990ms step_avg:98.32ms +step:1618/1695 train_time:159093ms step_avg:98.33ms +step:1619/1695 train_time:159195ms step_avg:98.33ms +step:1620/1695 train_time:159297ms step_avg:98.33ms +step:1621/1695 train_time:159399ms step_avg:98.33ms +step:1622/1695 train_time:159499ms step_avg:98.34ms +step:1623/1695 train_time:159600ms step_avg:98.34ms +step:1624/1695 train_time:159702ms step_avg:98.34ms +step:1625/1695 train_time:159806ms step_avg:98.34ms +step:1625/1695 val_loss:3.2915 train_time:159906ms step_avg:98.40ms +step:1626/1695 train_time:159932ms step_avg:98.36ms +step:1627/1695 train_time:160019ms step_avg:98.35ms +step:1628/1695 train_time:160122ms step_avg:98.36ms +step:1629/1695 train_time:160225ms step_avg:98.36ms +step:1630/1695 train_time:160326ms step_avg:98.36ms +step:1631/1695 train_time:160428ms step_avg:98.36ms +step:1632/1695 train_time:160529ms step_avg:98.36ms +step:1633/1695 train_time:160629ms step_avg:98.36ms +step:1634/1695 train_time:160731ms step_avg:98.37ms +step:1635/1695 train_time:160834ms step_avg:98.37ms +step:1636/1695 train_time:160938ms step_avg:98.37ms +step:1637/1695 train_time:161041ms step_avg:98.38ms +step:1638/1695 train_time:161143ms step_avg:98.38ms +step:1639/1695 train_time:161246ms step_avg:98.38ms +step:1640/1695 train_time:161349ms step_avg:98.38ms 
+step:1641/1695 train_time:161452ms step_avg:98.39ms +step:1642/1695 train_time:161554ms step_avg:98.39ms +step:1643/1695 train_time:161656ms step_avg:98.39ms +step:1644/1695 train_time:161759ms step_avg:98.39ms +step:1645/1695 train_time:161861ms step_avg:98.40ms +step:1646/1695 train_time:161965ms step_avg:98.40ms +step:1647/1695 train_time:162069ms step_avg:98.40ms +step:1648/1695 train_time:162173ms step_avg:98.41ms +step:1649/1695 train_time:162276ms step_avg:98.41ms +step:1650/1695 train_time:162379ms step_avg:98.41ms +step:1651/1695 train_time:162482ms step_avg:98.41ms +step:1652/1695 train_time:162585ms step_avg:98.42ms +step:1653/1695 train_time:162688ms step_avg:98.42ms +step:1654/1695 train_time:162790ms step_avg:98.42ms +step:1655/1695 train_time:162893ms step_avg:98.42ms +step:1656/1695 train_time:162998ms step_avg:98.43ms +step:1657/1695 train_time:163100ms step_avg:98.43ms +step:1658/1695 train_time:163203ms step_avg:98.43ms +step:1659/1695 train_time:163308ms step_avg:98.44ms +step:1660/1695 train_time:163411ms step_avg:98.44ms +step:1661/1695 train_time:163516ms step_avg:98.44ms +step:1662/1695 train_time:163620ms step_avg:98.45ms +step:1663/1695 train_time:163723ms step_avg:98.45ms +step:1664/1695 train_time:163825ms step_avg:98.45ms +step:1665/1695 train_time:163931ms step_avg:98.46ms +step:1666/1695 train_time:164034ms step_avg:98.46ms +step:1667/1695 train_time:164136ms step_avg:98.46ms +step:1668/1695 train_time:164242ms step_avg:98.47ms +step:1669/1695 train_time:164346ms step_avg:98.47ms +step:1670/1695 train_time:164448ms step_avg:98.47ms +step:1671/1695 train_time:164550ms step_avg:98.47ms +step:1672/1695 train_time:164654ms step_avg:98.48ms +step:1673/1695 train_time:164757ms step_avg:98.48ms +step:1674/1695 train_time:164860ms step_avg:98.48ms +step:1675/1695 train_time:164963ms step_avg:98.49ms +step:1676/1695 train_time:165067ms step_avg:98.49ms +step:1677/1695 train_time:165168ms step_avg:98.49ms +step:1678/1695 train_time:165272ms step_avg:98.49ms +step:1679/1695 train_time:165377ms step_avg:98.50ms +step:1680/1695 train_time:165480ms step_avg:98.50ms +step:1681/1695 train_time:165584ms step_avg:98.50ms +step:1682/1695 train_time:165690ms step_avg:98.51ms +step:1683/1695 train_time:165791ms step_avg:98.51ms +step:1684/1695 train_time:165894ms step_avg:98.51ms +step:1685/1695 train_time:165997ms step_avg:98.51ms +step:1686/1695 train_time:166100ms step_avg:98.52ms +step:1687/1695 train_time:166203ms step_avg:98.52ms +step:1688/1695 train_time:166305ms step_avg:98.52ms +step:1689/1695 train_time:166406ms step_avg:98.52ms +step:1690/1695 train_time:166508ms step_avg:98.53ms +step:1691/1695 train_time:166611ms step_avg:98.53ms +step:1692/1695 train_time:166713ms step_avg:98.53ms +step:1693/1695 train_time:166816ms step_avg:98.53ms +step:1694/1695 train_time:166920ms step_avg:98.54ms +step:1695/1695 train_time:167023ms step_avg:98.54ms +step:1695/1695 val_loss:3.2786 train_time:167122ms step_avg:98.60ms +peak memory allocated: 34004 MiB reserved: 49180 MiB diff --git a/records/082325_SparseAttnGate/README.md b/records/082325_SparseAttnGate/README.md new file mode 100644 index 000000000..89f8d1182 --- /dev/null +++ b/records/082325_SparseAttnGate/README.md @@ -0,0 +1,45 @@ +## New record 08/23/25 + +1. Included WR improvements on Triton and grad batching from https://github.com/KellerJordan/modded-nanogpt/pull/109 by @byronxu99 +2. Added a sparse attention gate on the attention output to enable a context based no-op. 
Found the mechanism was performant with 12 active dimensions from the residual stream. If curious, here is a related blog post from an earlier investigation into a non-sparse attention gate, with detailed plots: https://medium.com/@larry36d/modulating-attention-scores-cc0bcd853f06. The blog demonstrates how the attention gate reduces the need for the bos_token to function as an attention sink. This is particularly relevant in a sliding window attention context, because the bos_token is not always in the context window. RoPE embeddings cause the bos_token attention sink to change based on relative distance, whereas a sparse attention gate is indifferent to the distance from the start of a sample. Estimate of impact: 50 fewer steps, with a slight increase in time per step. A minimal sketch of the gate appears after the test-results list below.
+3. As a follow-on from 2: Reduced the number of iterations from 1750 to 1695.
+4. Reverted the lm_head scaling changes made on Feb 10th: https://github.com/KellerJordan/modded-nanogpt/commit/85a0a5201f08c4d6bb288ef348bb252d9c33e132. When tested on a single A100, reverting this change drops the L2 norm of the lm_head weights from 250 down to 10. The logits need to express values roughly from -10 to 10 in order to capture the range of token probabilities. Dividing by 27.5 (x.size(-1)**0.5) was causing the weights to grow substantially to accomplish this, since the residual stream was being normed prior to the lm_head. The second-moment estimate of Adam depends on the parameter scale, and the Adam learning rates were likely heavily tuned prior to the Feb 10th update. If curious, more details are near the end of this blog post: https://medium.com/@larry36d/exploration-log-exploring-initializing-transformers-with-bigram-distribution-70f9c8800b21. Estimate of impact: 5-10 steps (in this case, mainly a cleaner cut below 3.28).
+5. Chose to keep the minimum lr at 0.1. The bos_align record decreased the minimum lr to 0.05, and a later refactor, perhaps unintentionally, moved it back to 0.1. On further testing, the impact of this value on the mean loss is marginal, but a lower minimum lr appears to increase the variance of the final loss, making testing more challenging. A lower minimum lr may have higher variance because it commits to descending deep into a local region earlier, somewhat rolling the dice on whether that region is promising. On reflection, I likely originally picked 0.05 because taking the min loss over a grid search will naturally bias toward higher-variance configurations, which is the opposite of what we want.
+
+
+Validated results (p=0.0059) with 14 runs:
+```
+import scipy.stats
+import torch
+
+accs = [3.2774, 3.2782, 3.2796, 3.2815, 3.276 , 3.2777, 3.2784, 3.2795,
+        3.281 , 3.2802, 3.2767, 3.2772, 3.28  , 3.2786
+        ]
+times = [
+    168.627, 169.037, 169.003, 168.727, 168.647, 169.024, 168.917,
+    168.999, 168.728, 169.07 , 168.981, 168.938, 168.718, 167.122]
+
+print('p=%.4f' % scipy.stats.ttest_1samp(accs, 3.28, alternative='less').pvalue)
+# p=0.0059
+
+print('acc:',torch.std_mean(torch.tensor(accs)))
+# acc: (tensor(0.0016), tensor(3.2787))
+
+print('time:',torch.std_mean(torch.tensor(times)))
+# time: (tensor(0.4946), tensor(168.7527))
+# Running on a fresh cluster gave 167.695. Actively working in Jupyter notebooks on the same machines during these runs may be adding variance to the timing.
+```
+
+### Negative and neutral test results during this process:
+
+1. Initialize embedding tokens using the bigram distribution. Bigram statistics can be calculated for 100 million tokens in ~1 second or less.
I tested initializing the embedding layer using `z = relu(log(p(y|x)/p(y))); embed = norm(rand_linear(z))`, where p(y|x) is the bigram probability of token y given x. This initialization makes tokens with similar bigram statistics have similar embeddings. If I froze the embedding layer, this initialization performed better than random initialization. However, for non-frozen embeddings, the impact was not statistically significant. (A sketch of this initialization appears after this list.)
+2. Weight freezing during training. Since the majority of the time on each step is spent computing the gradient, freezing a subset of weights can substantially decrease the time per step. Unfortunately, every combination of this that I tested failed to yield an improvement. Typical matmul ops require N FLOPs on the forward pass and 2N FLOPs on the backward pass: N to compute the gradient with respect to the weights (to update the weights), and N to compute the gradient with respect to the data (to pass the gradient onwards). The torch compiler is smart enough to compute only N FLOPs on the backward pass for leaf operations. To leverage this, I tested updating the first 3 layers to run in parallel, and then froze the embedding after a portion of training, such that 3 layers became leaf operations. The change was not kept, as the performance drop outweighed the speedup.
+3. Logit shift parameter. The residual-space activations for all positions are heavily aligned away (>120 degrees) from the lm_head vectors of tokens that never appear in the training set. In other words, the ~400 tokens that never appear in the 50348 vocab size (including the 91 padding vocab entries) may be skewing the topology of the activations in the residual stream. Adding a simple logits += logit_shift enables the model to learn the unigram distribution directly (or even just a static variable that is -inf on padding tokens), without disrupting the residual space. Unfortunately, my implementation of this change was giving memory issues on an A100. On the H100 setup, the change dropped the loss by 0.01 but was slightly edged out by the increase in time per step. I don't have the budget to fiddle substantially with params I can't test on an A100. If a more compute-optimized version can be found, this is an easy improvement to the loss, likely equivalent to 50+ steps.
+4. Removing torch.compile on zeropower_via_newtonschulz5(). Surprisingly, the torch compiler makes the output of newtonschulz() vary based on the batch dimension size, with a 2% change depending on the batch size. This is relevant when we are batching QKV in one op. This appears to occur because of rounding issues with bfloat16 and some internal accumulations the compiler is altering, as the percent diff drops to less than 0.1% for float32. On an A100, removing the compile gave an improvement when I was testing different batch sizes, but the change was not statistically significant on the H100 setup w/ the fp8 lm_head. It is unclear exactly what is going on here, but note that bfloat16 can lead to very unintuitive consequences.
+5. Megabatch Newton-Schulz. Inspired by @byronxu99, I tested further impacts of batching for zeropower_via_newtonschulz5(). The results were quite surprising on an A100. The run time was heavily dependent on the batch size, with larger batch sizes running up to twice as fast, based on initial testing (honestly, this needs a sanity check; it seemed too crazy).
As a result, I experimented with storing all MLP params in one contiguous variable and doing a single call to zeropower_via_newtonschulz5(), with a [3, 4*768, 768] input to each GPU as a single pass, and [6, 768, 768] for attention to each GPU as a second pass. This gave a total of only 2 calls to zeropower_via_newtonschulz5() on each GPU per step. I was running into memory errors on the 8xH100 setup, and need to get a cheaper distributed setup before I test further. (See the layout sketch after this list.)
+6. 0.5 init weighting for the x0 stream instead of 0. At the end of training on an A100, the x0 weight for many layers is 50x higher than the x weight. Updating the init weighting to 0.5 gave a statistically significant improvement on an A100, but this was not replicated on the 8xH100 setup with the fp8 lm_head.
+7. Normalize value embedding inputs during the forward pass. This seemed like a natural thing to do, given the norms on the input embedding and the existing lambda to scale the value weights. However, it yielded worse performance, perhaps because the value embeddings need to have much higher weight than the values, and the lambda scaling parameter was not tuned to handle this itself.
+8. Renormalize the embedding in place between each forward pass. The L2 norm of the embedding layer climbs from 27 to 500 over the course of training, leading to a different effective learning rate depending on the stage of training. Normalizing this parameter may enable the lr to be tuned more precisely. However, I found norm() still needed to be included in the forward pass for an accurate grad calc, at which point the compute penalty of a second norm outside the forward pass made the change not worthwhile.
+9. Removing value entirely (only using the value embedding) for the first and last 3 layers. The trained weights indicate that the value embedding dominates the calculated attention value, and I can save some matmul ops if I can drop 6 layers of value calcs. The change cost roughly 0.015 loss, which unfortunately outweighed the speedup achieved with the parameters used.
+10. Bigram full initialization. Similar to 1, I tested initializing the lm_head and embedding layer to approximate the bigram distribution. (A bigram init could in theory start learning around 5.7 loss, with potentially better generalization during training.) Unfortunately, it is not analytically simple to set embed and lm_head to achieve a known bigram distribution, because of the nonlinearity of the softmax. Attempting to approximate it yielded worse performance than random initialization.
+11. Dual loss on the bigram distribution. I tested having the first X iterations minimize a combination of the next-token prediction loss and a loss against the bigram distribution for that token. The intuition was that since I can compute the bigram distribution of 100 million tokens in ~1s, it encodes a higher density of information than the single high-variance loss signal of a 500,000-token batch. However, the 50,000x50,000 bigram matrix proved too bulky for compute-efficient steps.
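+
+For reference, here is a minimal, illustrative sketch of the gate from change 2, in isolation. The class name and defaults are made up for this sketch (the record's actual implementation is the `dampen` projection inside `CausalSelfAttention` in the training script), but the mechanism matches: a per-head sigmoid gate computed from only the first `dim // dampen_factor = 12` dimensions of the attention block's (normed) input, zero-initialized so every gate starts at a neutral 0.5.
+
+```
+import torch
+import torch.nn as nn
+
+class SparseAttnGate(nn.Module):
+    # Per-head sigmoid gate driven by a small slice of the residual stream,
+    # letting each head learn a cheap, context-dependent no-op instead of
+    # relying on the bos_token as an attention sink.
+    def __init__(self, dim: int = 768, num_heads: int = 6, dampen_factor: int = 64):
+        super().__init__()
+        self.gate_dims = dim // dampen_factor  # 12 active dimensions
+        self.gate = nn.Linear(self.gate_dims, num_heads, bias=False)
+        nn.init.zeros_(self.gate.weight)  # sigmoid(0) = 0.5, i.e. neutral at init
+
+    def forward(self, x: torch.Tensor, attn_out: torch.Tensor) -> torch.Tensor:
+        # x: (B, T, dim) block input; attn_out: (B, T, num_heads, head_dim)
+        g = torch.sigmoid(self.gate(x[..., :self.gate_dims]))  # (B, T, num_heads)
+        return attn_out * g.unsqueeze(-1)  # gate each head before the output proj
+```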
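+
+Below is a sketch of the bigram-based embedding initialization from negative result 1, following the stated formula `z = relu(log(p(y|x)/p(y)))`. The helper is hypothetical (it is not taken from the record's code) and is only practical for a small vocab: at the full ~50k vocab, the dense (V, V) matrix is exactly the memory bottleneck that result 11 runs into.
+
+```
+import torch
+import torch.nn.functional as F
+
+def bigram_embed_init(tokens: torch.Tensor, vocab_size: int, model_dim: int) -> torch.Tensor:
+    # Bigram counts via one bincount over encoded (x, y) pairs -- this is the
+    # step that stays fast even for ~100M tokens.
+    pairs = tokens[:-1].long() * vocab_size + tokens[1:].long()
+    counts = torch.bincount(pairs, minlength=vocab_size * vocab_size).float()
+    counts = counts.view(vocab_size, vocab_size)  # counts[x, y]
+
+    p_y_given_x = counts / counts.sum(dim=1, keepdim=True).clamp_min(1.0)
+    p_y = counts.sum(dim=0) / counts.sum().clamp_min(1.0)
+
+    # z = relu(log(p(y|x) / p(y))): clipped pointwise mutual information, shape (V, V)
+    z = F.relu((p_y_given_x.clamp_min(1e-9) / p_y.clamp_min(1e-9)).log())
+
+    # embed = norm(rand_linear(z)): random projection to model_dim, then row-normalize,
+    # so tokens with similar bigram statistics land near each other.
+    proj = torch.randn(vocab_size, model_dim) / vocab_size ** 0.5
+    return F.normalize(z @ proj, dim=-1)
+
+# toy usage: embed = bigram_embed_init(torch.randint(0, 256, (100_000,)), 256, 64)
+```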
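+
+And a sketch of the megabatch Newton-Schulz layout from negative result 5. The tensor names are invented for illustration; the shapes follow the description above, and the helper reimplements the script's quintic Newton-Schulz iteration (same coefficients as `newton_schulz_triton`) in plain PyTorch so the snippet stands alone.
+
+```
+import torch
+
+def newtonschulz5(G: torch.Tensor, steps: int = 5) -> torch.Tensor:
+    # Quintic Newton-Schulz orthogonalization, batched over the leading dim.
+    a, b, c = (3.4445, -4.7750, 2.0315)
+    X = G.bfloat16()
+    transpose = G.size(-2) > G.size(-1)
+    if transpose:
+        X = X.mT  # work in the wide orientation so X @ X.mT stays small
+    X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)  # spectral norm <= ~1
+    for _ in range(steps):
+        A = X @ X.mT
+        B = b * A + c * A @ A
+        X = a * X + B @ X
+    return X.mT if transpose else X
+
+# Stand-ins for the per-GPU momentum-processed grads: three MLP matrices and
+# six attention matrices, each group orthogonalized in one batched call, so
+# each step needs only two Newton-Schulz launches per GPU.
+mlp_grads = [torch.randn(4 * 768, 768) for _ in range(3)]
+attn_grads = [torch.randn(768, 768) for _ in range(6)]
+
+mlp_orth = newtonschulz5(torch.stack(mlp_grads))    # (3, 4*768, 768), one pass
+attn_orth = newtonschulz5(torch.stack(attn_grads))  # (6, 768, 768), a second pass
+```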
diff --git a/records/082325_SparseAttnGate/a39b1ae8-3a2a-4952-8032-13183b157053.txt b/records/082325_SparseAttnGate/a39b1ae8-3a2a-4952-8032-13183b157053.txt new file mode 100644 index 000000000..080453143 --- /dev/null +++ b/records/082325_SparseAttnGate/a39b1ae8-3a2a-4952-8032-13183b157053.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 
0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + 
num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + 
c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = 
lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + 
state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = 
Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value 
embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapated from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), 
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor):
+        assert input_seq.ndim == 1
+
+        ve = [value_embed(input_seq) for value_embed in self.value_embeds]
+        # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks)
+        block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(block_masks) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following the Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x) = tanh(x) + 1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+# find world_size starting indices, such that each begins with token 50256 and local batches don't overlap
+def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int):
+    boundary_mask = tokens[pos : pos + token_window] == 50256
+    boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos
+    start = boundary_positions[0].item()
+    starts = []
+    for i in range(1, len(boundary_positions)):
+        end = boundary_positions[i].item()
+        if end - start >= seq_len:
+            starts.append(start) # append start once end pos is confirmed
+            if len(starts) == dist.get_world_size():
+                return starts, end - pos
+            start = end
+    assert False # increase token_window if necessary
+
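# A toy run of the find_batch_starts logic above on a made-up token stream,
# with world_size passed in explicitly so the sketch needs no process group.
# Illustration only; the values are invented.
import torch

EOS = 50256

def toy_find_batch_starts(tokens, pos, seq_len, token_window, world_size):
    boundary_positions = (torch.nonzero(tokens[pos:pos + token_window] == EOS).squeeze(-1) + pos).tolist()
    start, starts = boundary_positions[0], []
    for end in boundary_positions[1:]:
        if end - start >= seq_len:
            starts.append(start)  # start is confirmed to have >= seq_len tokens after it
            if len(starts) == world_size:
                return starts, end - pos
            start = end
    raise AssertionError("increase token_window")

tokens = torch.tensor([EOS, 1, 2, EOS, 3, 4, 5, 6, EOS, 7, EOS, 8, 9, 10, 11, 12, EOS])
print(toy_find_batch_starts(tokens, pos=0, seq_len=4, token_window=17, world_size=2))
# ([0, 8], 16): rank 0 reads from index 0, rank 1 from index 8; both sit on an EOS boundary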
+def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool):
+    rank = dist.get_rank()
+    world_size = dist.get_world_size()
+    batch_size = seq_len * world_size
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length seq_len
+        if pos + token_window + 1 >= len(tokens):
+            tokens = _load_data_shard(next(file_iter))
+            pos = 0
+        for _ in range(grad_accum_steps):
+            if align_to_bos:
+                batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window)
+                start_idx = batch_starts[rank]
+            else:
+                tokens_consumed = batch_size
+                start_idx = pos + rank * seq_len
+            buf = tokens[start_idx:][:seq_len + 1]
+            inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+            targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+            pos += tokens_consumed
+            token_window -= tokens_consumed
+            yield inputs, targets
+
+# -----------------------------------------------------------------------------
+# int main
+
+
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    assert 0 <= x < 1
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        w = (1 - x) / args.cooldown_frac
+        return w * 1.0 + (1 - w) * 0.1
+
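# A worked pass through the stable-then-decay schedule above. num_iterations
# = 1695 matches the run logged below; cooldown_frac = 0.45 is only an assumed
# value for illustration (the real one comes from the args dataclass earlier
# in this file).
num_iterations, cooldown_frac = 1695, 0.45

def lr_mult(step: int) -> float:
    x = step / num_iterations
    if x < 1 - cooldown_frac:
        return 1.0  # stable phase: full learning rate
    w = (1 - x) / cooldown_frac
    return w * 1.0 + (1 - w) * 0.1  # linear decay toward 0.1x

for s in [0, 900, 1200, 1500, 1694]:
    print(s, round(lr_mult(s), 3))  # 1.0 through the stable phase, then falls toward ~0.1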
+# attention window size schedule: linearly increase
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    assert 0 <= x <= 1
+    # Linearly increase the block-wise sliding window size over training 128 -> 1792
+    # increase by @fernbear.bsky.social; block-wise by @YouJiacheng
+    window_size = next_multiple_of_n(1728 * x, n=128)
+    return get_window_size_blocks_helper(window_size)
+
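# A quick numeric check of the window schedule above, with num_iterations =
# 1695 as in the logged run; next_multiple_of_n is redefined so this snippet
# stands alone. Illustration only.
num_iterations = 1695

def next_multiple_of_n(v, *, n):
    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)

for step in [0, 424, 848, 1271, 1695]:
    x = step / num_iterations
    print(step, next_multiple_of_n(1728 * x, n=128))
# the window grows from 128 tokens (1 block) to 1792 tokens (14 blocks of 128)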
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+# Warmup kernels #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 10
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True)
+for _ in range(warmup_steps):
+    inputs, targets = next(train_loader)
+    model(inputs, targets, get_window_size_blocks(1)).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+# Training and validation #
+########################################
+
+train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True)
+training_time_ms = 0
+# start the clock
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+# begin training
+train_steps = args.num_iterations
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------------- VALIDATION SECTION -----------------
+    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        # stop the clock
+        torch.cuda.synchronize()
+        training_time_ms += 1000 * (time.perf_counter() - t0)
+        model.eval()
+        val_batch_size = world_size * args.val_seq_len
+        assert args.val_tokens % val_batch_size == 0
+        val_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False)
+        val_loss = 0
+        with torch.no_grad():
+            for _ in range(val_steps):
+                inputs, targets = next(val_loader)
+                val_loss += model(inputs, targets, get_window_size_blocks(step))
+        val_loss /= val_steps
+        del val_loader
+        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
+        print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True)
+        model.train()
+        # start the clock again
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    if last_step:
+        if master_process and args.save_checkpoint:
+            log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
+            os.makedirs(f"logs/{run_id}", exist_ok=True)
+            torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt")
+        # the last step only has the validation loop, so break to avoid training
+        break
+
+    # --------------- TRAINING SECTION -----------------
+    for _ in range(grad_accum_steps):
+        inputs, targets = next(train_loader)
+        model(inputs, targets, get_window_size_blocks(step)).backward()
+    # set optimization hyperparameters
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * get_lr(step)
+    for group in optimizer2.param_groups:
+        frac = min(step / 300, 1) # momentum warmup for Muon
+        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+    # step the optimizers
+    for opt in optimizers:
+        opt.step()
+    # null the gradients
+    model.zero_grad(set_to_none=True)
+    # logging
+    approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0)
+    print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True)
+
+print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+dist.destroy_process_group()
+====================================================================================================
+Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]
+Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6
+Running Triton version 3.4.0
+Sat Aug 23 13:40:05 2025
++-----------------------------------------------------------------------------------------+
+| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 |
+|-----------------------------------------+------------------------+----------------------+
+| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
+| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
+| | | MIG M.
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 32C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 124W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 31C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 314814 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 314815 C /usr/bin/python3 614MiB | +| 0 N/A N/A 314816 C /usr/bin/python3 614MiB | +| 0 N/A N/A 314817 C /usr/bin/python3 614MiB | +| 0 N/A N/A 314818 C /usr/bin/python3 614MiB | +| 0 N/A N/A 314819 C /usr/bin/python3 614MiB | +| 0 N/A N/A 314820 C /usr/bin/python3 614MiB | +| 0 N/A N/A 314821 C /usr/bin/python3 614MiB | +| 1 N/A N/A 314815 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 314816 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 314817 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 314818 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 314819 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 314820 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 314821 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:153ms step_avg:152.82ms +step:2/1695 train_time:177ms step_avg:88.47ms +step:3/1695 train_time:250ms step_avg:83.26ms +step:4/1695 train_time:341ms step_avg:85.31ms +step:5/1695 train_time:435ms step_avg:86.95ms +step:6/1695 train_time:526ms step_avg:87.74ms +step:7/1695 train_time:619ms step_avg:88.47ms +step:8/1695 
train_time:713ms step_avg:89.11ms +step:9/1695 train_time:806ms step_avg:89.59ms +step:10/1695 train_time:900ms step_avg:89.95ms +step:11/1695 train_time:993ms step_avg:90.28ms +step:12/1695 train_time:1088ms step_avg:90.64ms +step:13/1695 train_time:1182ms step_avg:90.95ms +step:14/1695 train_time:1278ms step_avg:91.28ms +step:15/1695 train_time:1372ms step_avg:91.44ms +step:16/1695 train_time:1465ms step_avg:91.59ms +step:17/1695 train_time:1559ms step_avg:91.70ms +step:18/1695 train_time:1653ms step_avg:91.82ms +step:19/1695 train_time:1746ms step_avg:91.89ms +step:20/1695 train_time:1839ms step_avg:91.96ms +step:21/1695 train_time:1932ms step_avg:92.01ms +step:22/1695 train_time:2026ms step_avg:92.09ms +step:23/1695 train_time:2119ms step_avg:92.15ms +step:24/1695 train_time:2214ms step_avg:92.26ms +step:25/1695 train_time:2309ms step_avg:92.34ms +step:26/1695 train_time:2403ms step_avg:92.41ms +step:27/1695 train_time:2496ms step_avg:92.45ms +step:28/1695 train_time:2590ms step_avg:92.51ms +step:29/1695 train_time:2684ms step_avg:92.56ms +step:30/1695 train_time:2777ms step_avg:92.58ms +step:31/1695 train_time:2871ms step_avg:92.62ms +step:32/1695 train_time:2964ms step_avg:92.64ms +step:33/1695 train_time:3058ms step_avg:92.65ms +step:34/1695 train_time:3151ms step_avg:92.69ms +step:35/1695 train_time:3245ms step_avg:92.72ms +step:36/1695 train_time:3339ms step_avg:92.75ms +step:37/1695 train_time:3434ms step_avg:92.80ms +step:38/1695 train_time:3528ms step_avg:92.84ms +step:39/1695 train_time:3622ms step_avg:92.87ms +step:40/1695 train_time:3716ms step_avg:92.89ms +step:41/1695 train_time:3810ms step_avg:92.93ms +step:42/1695 train_time:3905ms step_avg:92.99ms +step:43/1695 train_time:3999ms step_avg:93.00ms +step:44/1695 train_time:4093ms step_avg:93.03ms +step:45/1695 train_time:4187ms step_avg:93.03ms +step:46/1695 train_time:4279ms step_avg:93.03ms +step:47/1695 train_time:4373ms step_avg:93.04ms +step:48/1695 train_time:4467ms step_avg:93.05ms +step:49/1695 train_time:4561ms step_avg:93.08ms +step:50/1695 train_time:4655ms step_avg:93.10ms +step:51/1695 train_time:4750ms step_avg:93.13ms +step:52/1695 train_time:4844ms step_avg:93.15ms +step:53/1695 train_time:4938ms step_avg:93.16ms +step:54/1695 train_time:5032ms step_avg:93.19ms +step:55/1695 train_time:5127ms step_avg:93.22ms +step:56/1695 train_time:5221ms step_avg:93.22ms +step:57/1695 train_time:5314ms step_avg:93.23ms +step:58/1695 train_time:5409ms step_avg:93.25ms +step:59/1695 train_time:5502ms step_avg:93.26ms +step:60/1695 train_time:5596ms step_avg:93.27ms +step:61/1695 train_time:5690ms step_avg:93.28ms +step:62/1695 train_time:5783ms step_avg:93.28ms +step:63/1695 train_time:5877ms step_avg:93.29ms +step:64/1695 train_time:5971ms step_avg:93.30ms +step:65/1695 train_time:6066ms step_avg:93.32ms +step:66/1695 train_time:6159ms step_avg:93.32ms +step:67/1695 train_time:6254ms step_avg:93.35ms +step:68/1695 train_time:6349ms step_avg:93.37ms +step:69/1695 train_time:6443ms step_avg:93.37ms +step:70/1695 train_time:6536ms step_avg:93.37ms +step:71/1695 train_time:6630ms step_avg:93.39ms +step:72/1695 train_time:6724ms step_avg:93.40ms +step:73/1695 train_time:6818ms step_avg:93.39ms +step:74/1695 train_time:6911ms step_avg:93.39ms +step:75/1695 train_time:7005ms step_avg:93.40ms +step:76/1695 train_time:7098ms step_avg:93.40ms +step:77/1695 train_time:7192ms step_avg:93.41ms +step:78/1695 train_time:7287ms step_avg:93.42ms +step:79/1695 train_time:7381ms step_avg:93.42ms +step:80/1695 train_time:7474ms 
step_avg:93.42ms +step:81/1695 train_time:7568ms step_avg:93.43ms +step:82/1695 train_time:7662ms step_avg:93.43ms +step:83/1695 train_time:7755ms step_avg:93.44ms +step:84/1695 train_time:7851ms step_avg:93.46ms +step:85/1695 train_time:7944ms step_avg:93.46ms +step:86/1695 train_time:8037ms step_avg:93.46ms +step:87/1695 train_time:8131ms step_avg:93.46ms +step:88/1695 train_time:8225ms step_avg:93.47ms +step:89/1695 train_time:8318ms step_avg:93.46ms +step:90/1695 train_time:8412ms step_avg:93.47ms +step:91/1695 train_time:8506ms step_avg:93.47ms +step:92/1695 train_time:8600ms step_avg:93.48ms +step:93/1695 train_time:8694ms step_avg:93.48ms +step:94/1695 train_time:8788ms step_avg:93.49ms +step:95/1695 train_time:8881ms step_avg:93.49ms +step:96/1695 train_time:8975ms step_avg:93.49ms +step:97/1695 train_time:9069ms step_avg:93.50ms +step:98/1695 train_time:9163ms step_avg:93.50ms +step:99/1695 train_time:9257ms step_avg:93.51ms +step:100/1695 train_time:9351ms step_avg:93.51ms +step:101/1695 train_time:9444ms step_avg:93.51ms +step:102/1695 train_time:9538ms step_avg:93.51ms +step:103/1695 train_time:9632ms step_avg:93.51ms +step:104/1695 train_time:9726ms step_avg:93.52ms +step:105/1695 train_time:9820ms step_avg:93.52ms +step:106/1695 train_time:9914ms step_avg:93.53ms +step:107/1695 train_time:10008ms step_avg:93.53ms +step:108/1695 train_time:10101ms step_avg:93.53ms +step:109/1695 train_time:10195ms step_avg:93.53ms +step:110/1695 train_time:10289ms step_avg:93.53ms +step:111/1695 train_time:10382ms step_avg:93.53ms +step:112/1695 train_time:10475ms step_avg:93.53ms +step:113/1695 train_time:10569ms step_avg:93.53ms +step:114/1695 train_time:10662ms step_avg:93.53ms +step:115/1695 train_time:10757ms step_avg:93.54ms +step:116/1695 train_time:10850ms step_avg:93.54ms +step:117/1695 train_time:10943ms step_avg:93.53ms +step:118/1695 train_time:11037ms step_avg:93.54ms +step:119/1695 train_time:11131ms step_avg:93.54ms +step:120/1695 train_time:11225ms step_avg:93.54ms +step:121/1695 train_time:11318ms step_avg:93.54ms +step:122/1695 train_time:11413ms step_avg:93.55ms +step:123/1695 train_time:11507ms step_avg:93.55ms +step:124/1695 train_time:11601ms step_avg:93.56ms +step:125/1695 train_time:11695ms step_avg:93.56ms +step:125/1695 val_loss:4.5970 train_time:11787ms step_avg:94.30ms +step:126/1695 train_time:11813ms step_avg:93.75ms +step:127/1695 train_time:11890ms step_avg:93.62ms +step:128/1695 train_time:11993ms step_avg:93.70ms +step:129/1695 train_time:12090ms step_avg:93.72ms +step:130/1695 train_time:12184ms step_avg:93.72ms +step:131/1695 train_time:12277ms step_avg:93.72ms +step:132/1695 train_time:12371ms step_avg:93.72ms +step:133/1695 train_time:12465ms step_avg:93.72ms +step:134/1695 train_time:12558ms step_avg:93.72ms +step:135/1695 train_time:12652ms step_avg:93.72ms +step:136/1695 train_time:12745ms step_avg:93.71ms +step:137/1695 train_time:12839ms step_avg:93.72ms +step:138/1695 train_time:12936ms step_avg:93.74ms +step:139/1695 train_time:13034ms step_avg:93.77ms +step:140/1695 train_time:13130ms step_avg:93.78ms +step:141/1695 train_time:13224ms step_avg:93.79ms +step:142/1695 train_time:13318ms step_avg:93.79ms +step:143/1695 train_time:13412ms step_avg:93.79ms +step:144/1695 train_time:13505ms step_avg:93.79ms +step:145/1695 train_time:13599ms step_avg:93.79ms +step:146/1695 train_time:13693ms step_avg:93.79ms +step:147/1695 train_time:13788ms step_avg:93.79ms +step:148/1695 train_time:13882ms step_avg:93.80ms +step:149/1695 train_time:13977ms 
step_avg:93.80ms +step:150/1695 train_time:14072ms step_avg:93.82ms +step:151/1695 train_time:14168ms step_avg:93.83ms +step:152/1695 train_time:14262ms step_avg:93.83ms +step:153/1695 train_time:14356ms step_avg:93.83ms +step:154/1695 train_time:14451ms step_avg:93.84ms +step:155/1695 train_time:14545ms step_avg:93.84ms +step:156/1695 train_time:14638ms step_avg:93.83ms +step:157/1695 train_time:14732ms step_avg:93.83ms +step:158/1695 train_time:14826ms step_avg:93.84ms +step:159/1695 train_time:14921ms step_avg:93.84ms +step:160/1695 train_time:15016ms step_avg:93.85ms +step:161/1695 train_time:15112ms step_avg:93.86ms +step:162/1695 train_time:15207ms step_avg:93.87ms +step:163/1695 train_time:15301ms step_avg:93.87ms +step:164/1695 train_time:15395ms step_avg:93.87ms +step:165/1695 train_time:15490ms step_avg:93.88ms +step:166/1695 train_time:15584ms step_avg:93.88ms +step:167/1695 train_time:15677ms step_avg:93.88ms +step:168/1695 train_time:15772ms step_avg:93.88ms +step:169/1695 train_time:15867ms step_avg:93.89ms +step:170/1695 train_time:15961ms step_avg:93.89ms +step:171/1695 train_time:16055ms step_avg:93.89ms +step:172/1695 train_time:16149ms step_avg:93.89ms +step:173/1695 train_time:16244ms step_avg:93.90ms +step:174/1695 train_time:16338ms step_avg:93.89ms +step:175/1695 train_time:16433ms step_avg:93.90ms +step:176/1695 train_time:16528ms step_avg:93.91ms +step:177/1695 train_time:16623ms step_avg:93.91ms +step:178/1695 train_time:16717ms step_avg:93.91ms +step:179/1695 train_time:16811ms step_avg:93.92ms +step:180/1695 train_time:16905ms step_avg:93.92ms +step:181/1695 train_time:16999ms step_avg:93.92ms +step:182/1695 train_time:17094ms step_avg:93.93ms +step:183/1695 train_time:17189ms step_avg:93.93ms +step:184/1695 train_time:17283ms step_avg:93.93ms +step:185/1695 train_time:17376ms step_avg:93.93ms +step:186/1695 train_time:17471ms step_avg:93.93ms +step:187/1695 train_time:17567ms step_avg:93.94ms +step:188/1695 train_time:17660ms step_avg:93.94ms +step:189/1695 train_time:17754ms step_avg:93.94ms +step:190/1695 train_time:17849ms step_avg:93.94ms +step:191/1695 train_time:17943ms step_avg:93.94ms +step:192/1695 train_time:18037ms step_avg:93.94ms +step:193/1695 train_time:18134ms step_avg:93.96ms +step:194/1695 train_time:18229ms step_avg:93.96ms +step:195/1695 train_time:18322ms step_avg:93.96ms +step:196/1695 train_time:18416ms step_avg:93.96ms +step:197/1695 train_time:18511ms step_avg:93.96ms +step:198/1695 train_time:18605ms step_avg:93.96ms +step:199/1695 train_time:18698ms step_avg:93.96ms +step:200/1695 train_time:18793ms step_avg:93.96ms +step:201/1695 train_time:18888ms step_avg:93.97ms +step:202/1695 train_time:18982ms step_avg:93.97ms +step:203/1695 train_time:19076ms step_avg:93.97ms +step:204/1695 train_time:19171ms step_avg:93.98ms +step:205/1695 train_time:19265ms step_avg:93.98ms +step:206/1695 train_time:19359ms step_avg:93.97ms +step:207/1695 train_time:19453ms step_avg:93.98ms +step:208/1695 train_time:19548ms step_avg:93.98ms +step:209/1695 train_time:19641ms step_avg:93.98ms +step:210/1695 train_time:19735ms step_avg:93.98ms +step:211/1695 train_time:19829ms step_avg:93.98ms +step:212/1695 train_time:19923ms step_avg:93.98ms +step:213/1695 train_time:20018ms step_avg:93.98ms +step:214/1695 train_time:20113ms step_avg:93.98ms +step:215/1695 train_time:20208ms step_avg:93.99ms +step:216/1695 train_time:20302ms step_avg:93.99ms +step:217/1695 train_time:20395ms step_avg:93.99ms +step:218/1695 train_time:20489ms step_avg:93.99ms +step:219/1695 
train_time:20582ms step_avg:93.98ms +step:220/1695 train_time:20676ms step_avg:93.98ms +step:221/1695 train_time:20770ms step_avg:93.98ms +step:222/1695 train_time:20866ms step_avg:93.99ms +step:223/1695 train_time:20961ms step_avg:93.99ms +step:224/1695 train_time:21054ms step_avg:93.99ms +step:225/1695 train_time:21149ms step_avg:93.99ms +step:226/1695 train_time:21243ms step_avg:93.99ms +step:227/1695 train_time:21337ms step_avg:93.99ms +step:228/1695 train_time:21432ms step_avg:94.00ms +step:229/1695 train_time:21527ms step_avg:94.00ms +step:230/1695 train_time:21620ms step_avg:94.00ms +step:231/1695 train_time:21714ms step_avg:94.00ms +step:232/1695 train_time:21808ms step_avg:94.00ms +step:233/1695 train_time:21903ms step_avg:94.00ms +step:234/1695 train_time:21996ms step_avg:94.00ms +step:235/1695 train_time:22091ms step_avg:94.00ms +step:236/1695 train_time:22184ms step_avg:94.00ms +step:237/1695 train_time:22278ms step_avg:94.00ms +step:238/1695 train_time:22372ms step_avg:94.00ms +step:239/1695 train_time:22467ms step_avg:94.00ms +step:240/1695 train_time:22562ms step_avg:94.01ms +step:241/1695 train_time:22657ms step_avg:94.01ms +step:242/1695 train_time:22752ms step_avg:94.02ms +step:243/1695 train_time:22847ms step_avg:94.02ms +step:244/1695 train_time:22941ms step_avg:94.02ms +step:245/1695 train_time:23035ms step_avg:94.02ms +step:246/1695 train_time:23130ms step_avg:94.03ms +step:247/1695 train_time:23225ms step_avg:94.03ms +step:248/1695 train_time:23318ms step_avg:94.02ms +step:249/1695 train_time:23413ms step_avg:94.03ms +step:250/1695 train_time:23508ms step_avg:94.03ms +step:250/1695 val_loss:4.0722 train_time:23600ms step_avg:94.40ms +step:251/1695 train_time:23626ms step_avg:94.13ms +step:252/1695 train_time:23704ms step_avg:94.07ms +step:253/1695 train_time:23806ms step_avg:94.09ms +step:254/1695 train_time:23901ms step_avg:94.10ms +step:255/1695 train_time:23995ms step_avg:94.10ms +step:256/1695 train_time:24088ms step_avg:94.10ms +step:257/1695 train_time:24182ms step_avg:94.09ms +step:258/1695 train_time:24276ms step_avg:94.09ms +step:259/1695 train_time:24369ms step_avg:94.09ms +step:260/1695 train_time:24464ms step_avg:94.09ms +step:261/1695 train_time:24558ms step_avg:94.09ms +step:262/1695 train_time:24653ms step_avg:94.09ms +step:263/1695 train_time:24749ms step_avg:94.10ms +step:264/1695 train_time:24846ms step_avg:94.11ms +step:265/1695 train_time:24942ms step_avg:94.12ms +step:266/1695 train_time:25037ms step_avg:94.12ms +step:267/1695 train_time:25130ms step_avg:94.12ms +step:268/1695 train_time:25225ms step_avg:94.12ms +step:269/1695 train_time:25319ms step_avg:94.12ms +step:270/1695 train_time:25413ms step_avg:94.12ms +step:271/1695 train_time:25507ms step_avg:94.12ms +step:272/1695 train_time:25602ms step_avg:94.13ms +step:273/1695 train_time:25698ms step_avg:94.13ms +step:274/1695 train_time:25792ms step_avg:94.13ms +step:275/1695 train_time:25888ms step_avg:94.14ms +step:276/1695 train_time:25983ms step_avg:94.14ms +step:277/1695 train_time:26079ms step_avg:94.15ms +step:278/1695 train_time:26173ms step_avg:94.15ms +step:279/1695 train_time:26267ms step_avg:94.15ms +step:280/1695 train_time:26362ms step_avg:94.15ms +step:281/1695 train_time:26457ms step_avg:94.15ms +step:282/1695 train_time:26550ms step_avg:94.15ms +step:283/1695 train_time:26645ms step_avg:94.15ms +step:284/1695 train_time:26740ms step_avg:94.15ms +step:285/1695 train_time:26835ms step_avg:94.16ms +step:286/1695 train_time:26930ms step_avg:94.16ms +step:287/1695 train_time:27026ms 
step_avg:94.17ms +step:288/1695 train_time:27121ms step_avg:94.17ms +step:289/1695 train_time:27215ms step_avg:94.17ms +step:290/1695 train_time:27309ms step_avg:94.17ms +step:291/1695 train_time:27404ms step_avg:94.17ms +step:292/1695 train_time:27499ms step_avg:94.17ms +step:293/1695 train_time:27592ms step_avg:94.17ms +step:294/1695 train_time:27686ms step_avg:94.17ms +step:295/1695 train_time:27781ms step_avg:94.17ms +step:296/1695 train_time:27875ms step_avg:94.17ms +step:297/1695 train_time:27969ms step_avg:94.17ms +step:298/1695 train_time:28066ms step_avg:94.18ms +step:299/1695 train_time:28162ms step_avg:94.19ms +step:300/1695 train_time:28256ms step_avg:94.19ms +step:301/1695 train_time:28350ms step_avg:94.19ms +step:302/1695 train_time:28445ms step_avg:94.19ms +step:303/1695 train_time:28539ms step_avg:94.19ms +step:304/1695 train_time:28633ms step_avg:94.19ms +step:305/1695 train_time:28728ms step_avg:94.19ms +step:306/1695 train_time:28823ms step_avg:94.19ms +step:307/1695 train_time:28917ms step_avg:94.19ms +step:308/1695 train_time:29012ms step_avg:94.20ms +step:309/1695 train_time:29107ms step_avg:94.20ms +step:310/1695 train_time:29201ms step_avg:94.20ms +step:311/1695 train_time:29296ms step_avg:94.20ms +step:312/1695 train_time:29390ms step_avg:94.20ms +step:313/1695 train_time:29485ms step_avg:94.20ms +step:314/1695 train_time:29580ms step_avg:94.20ms +step:315/1695 train_time:29674ms step_avg:94.20ms +step:316/1695 train_time:29768ms step_avg:94.20ms +step:317/1695 train_time:29865ms step_avg:94.21ms +step:318/1695 train_time:29960ms step_avg:94.21ms +step:319/1695 train_time:30054ms step_avg:94.21ms +step:320/1695 train_time:30149ms step_avg:94.21ms +step:321/1695 train_time:30243ms step_avg:94.21ms +step:322/1695 train_time:30338ms step_avg:94.22ms +step:323/1695 train_time:30432ms step_avg:94.22ms +step:324/1695 train_time:30527ms step_avg:94.22ms +step:325/1695 train_time:30622ms step_avg:94.22ms +step:326/1695 train_time:30717ms step_avg:94.22ms +step:327/1695 train_time:30812ms step_avg:94.23ms +step:328/1695 train_time:30907ms step_avg:94.23ms +step:329/1695 train_time:31001ms step_avg:94.23ms +step:330/1695 train_time:31096ms step_avg:94.23ms +step:331/1695 train_time:31190ms step_avg:94.23ms +step:332/1695 train_time:31285ms step_avg:94.23ms +step:333/1695 train_time:31379ms step_avg:94.23ms +step:334/1695 train_time:31475ms step_avg:94.24ms +step:335/1695 train_time:31569ms step_avg:94.23ms +step:336/1695 train_time:31664ms step_avg:94.24ms +step:337/1695 train_time:31759ms step_avg:94.24ms +step:338/1695 train_time:31853ms step_avg:94.24ms +step:339/1695 train_time:31947ms step_avg:94.24ms +step:340/1695 train_time:32043ms step_avg:94.24ms +step:341/1695 train_time:32138ms step_avg:94.25ms +step:342/1695 train_time:32233ms step_avg:94.25ms +step:343/1695 train_time:32327ms step_avg:94.25ms +step:344/1695 train_time:32422ms step_avg:94.25ms +step:345/1695 train_time:32517ms step_avg:94.25ms +step:346/1695 train_time:32611ms step_avg:94.25ms +step:347/1695 train_time:32705ms step_avg:94.25ms +step:348/1695 train_time:32800ms step_avg:94.25ms +step:349/1695 train_time:32895ms step_avg:94.25ms +step:350/1695 train_time:32989ms step_avg:94.25ms +step:351/1695 train_time:33083ms step_avg:94.25ms +step:352/1695 train_time:33178ms step_avg:94.26ms +step:353/1695 train_time:33273ms step_avg:94.26ms +step:354/1695 train_time:33367ms step_avg:94.26ms +step:355/1695 train_time:33463ms step_avg:94.26ms +step:356/1695 train_time:33557ms step_avg:94.26ms +step:357/1695 
train_time:33652ms step_avg:94.26ms +step:358/1695 train_time:33746ms step_avg:94.26ms +step:359/1695 train_time:33841ms step_avg:94.27ms +step:360/1695 train_time:33936ms step_avg:94.27ms +step:361/1695 train_time:34031ms step_avg:94.27ms +step:362/1695 train_time:34126ms step_avg:94.27ms +step:363/1695 train_time:34221ms step_avg:94.27ms +step:364/1695 train_time:34315ms step_avg:94.27ms +step:365/1695 train_time:34409ms step_avg:94.27ms +step:366/1695 train_time:34504ms step_avg:94.27ms +step:367/1695 train_time:34599ms step_avg:94.27ms +step:368/1695 train_time:34693ms step_avg:94.27ms +step:369/1695 train_time:34787ms step_avg:94.27ms +step:370/1695 train_time:34882ms step_avg:94.28ms +step:371/1695 train_time:34978ms step_avg:94.28ms +step:372/1695 train_time:35072ms step_avg:94.28ms +step:373/1695 train_time:35167ms step_avg:94.28ms +step:374/1695 train_time:35261ms step_avg:94.28ms +step:375/1695 train_time:35355ms step_avg:94.28ms +step:375/1695 val_loss:3.8750 train_time:35447ms step_avg:94.53ms +step:376/1695 train_time:35473ms step_avg:94.34ms +step:377/1695 train_time:35551ms step_avg:94.30ms +step:378/1695 train_time:35650ms step_avg:94.31ms +step:379/1695 train_time:35747ms step_avg:94.32ms +step:380/1695 train_time:35843ms step_avg:94.32ms +step:381/1695 train_time:35940ms step_avg:94.33ms +step:382/1695 train_time:36035ms step_avg:94.33ms +step:383/1695 train_time:36130ms step_avg:94.33ms +step:384/1695 train_time:36226ms step_avg:94.34ms +step:385/1695 train_time:36322ms step_avg:94.34ms +step:386/1695 train_time:36418ms step_avg:94.35ms +step:387/1695 train_time:36516ms step_avg:94.36ms +step:388/1695 train_time:36613ms step_avg:94.36ms +step:389/1695 train_time:36710ms step_avg:94.37ms +step:390/1695 train_time:36807ms step_avg:94.38ms +step:391/1695 train_time:36902ms step_avg:94.38ms +step:392/1695 train_time:36998ms step_avg:94.38ms +step:393/1695 train_time:37095ms step_avg:94.39ms +step:394/1695 train_time:37191ms step_avg:94.39ms +step:395/1695 train_time:37286ms step_avg:94.40ms +step:396/1695 train_time:37382ms step_avg:94.40ms +step:397/1695 train_time:37479ms step_avg:94.41ms +step:398/1695 train_time:37577ms step_avg:94.42ms +step:399/1695 train_time:37675ms step_avg:94.42ms +step:400/1695 train_time:37771ms step_avg:94.43ms +step:401/1695 train_time:37867ms step_avg:94.43ms +step:402/1695 train_time:37963ms step_avg:94.44ms +step:403/1695 train_time:38059ms step_avg:94.44ms +step:404/1695 train_time:38156ms step_avg:94.45ms +step:405/1695 train_time:38252ms step_avg:94.45ms +step:406/1695 train_time:38347ms step_avg:94.45ms +step:407/1695 train_time:38443ms step_avg:94.46ms +step:408/1695 train_time:38541ms step_avg:94.46ms +step:409/1695 train_time:38637ms step_avg:94.47ms +step:410/1695 train_time:38734ms step_avg:94.47ms +step:411/1695 train_time:38830ms step_avg:94.48ms +step:412/1695 train_time:38926ms step_avg:94.48ms +step:413/1695 train_time:39022ms step_avg:94.48ms +step:414/1695 train_time:39118ms step_avg:94.49ms +step:415/1695 train_time:39215ms step_avg:94.49ms +step:416/1695 train_time:39311ms step_avg:94.50ms +step:417/1695 train_time:39407ms step_avg:94.50ms +step:418/1695 train_time:39504ms step_avg:94.51ms +step:419/1695 train_time:39600ms step_avg:94.51ms +step:420/1695 train_time:39697ms step_avg:94.52ms +step:421/1695 train_time:39793ms step_avg:94.52ms +step:422/1695 train_time:39890ms step_avg:94.53ms +step:423/1695 train_time:39986ms step_avg:94.53ms +step:424/1695 train_time:40082ms step_avg:94.53ms +step:425/1695 train_time:40179ms 
step_avg:94.54ms +step:426/1695 train_time:40276ms step_avg:94.55ms +step:427/1695 train_time:40372ms step_avg:94.55ms +step:428/1695 train_time:40468ms step_avg:94.55ms +step:429/1695 train_time:40564ms step_avg:94.55ms +step:430/1695 train_time:40660ms step_avg:94.56ms +step:431/1695 train_time:40757ms step_avg:94.56ms +step:432/1695 train_time:40854ms step_avg:94.57ms +step:433/1695 train_time:40951ms step_avg:94.57ms +step:434/1695 train_time:41047ms step_avg:94.58ms +step:435/1695 train_time:41143ms step_avg:94.58ms +step:436/1695 train_time:41239ms step_avg:94.59ms +step:437/1695 train_time:41336ms step_avg:94.59ms +step:438/1695 train_time:41432ms step_avg:94.59ms +step:439/1695 train_time:41528ms step_avg:94.60ms +step:440/1695 train_time:41624ms step_avg:94.60ms +step:441/1695 train_time:41721ms step_avg:94.60ms +step:442/1695 train_time:41818ms step_avg:94.61ms +step:443/1695 train_time:41915ms step_avg:94.62ms +step:444/1695 train_time:42011ms step_avg:94.62ms +step:445/1695 train_time:42107ms step_avg:94.62ms +step:446/1695 train_time:42203ms step_avg:94.62ms +step:447/1695 train_time:42299ms step_avg:94.63ms +step:448/1695 train_time:42395ms step_avg:94.63ms +step:449/1695 train_time:42491ms step_avg:94.63ms +step:450/1695 train_time:42586ms step_avg:94.64ms +step:451/1695 train_time:42683ms step_avg:94.64ms +step:452/1695 train_time:42780ms step_avg:94.65ms +step:453/1695 train_time:42877ms step_avg:94.65ms +step:454/1695 train_time:42974ms step_avg:94.66ms +step:455/1695 train_time:43071ms step_avg:94.66ms +step:456/1695 train_time:43167ms step_avg:94.66ms +step:457/1695 train_time:43263ms step_avg:94.67ms +step:458/1695 train_time:43360ms step_avg:94.67ms +step:459/1695 train_time:43457ms step_avg:94.68ms +step:460/1695 train_time:43553ms step_avg:94.68ms +step:461/1695 train_time:43649ms step_avg:94.68ms +step:462/1695 train_time:43745ms step_avg:94.69ms +step:463/1695 train_time:43841ms step_avg:94.69ms +step:464/1695 train_time:43938ms step_avg:94.69ms +step:465/1695 train_time:44035ms step_avg:94.70ms +step:466/1695 train_time:44131ms step_avg:94.70ms +step:467/1695 train_time:44227ms step_avg:94.70ms +step:468/1695 train_time:44323ms step_avg:94.71ms +step:469/1695 train_time:44420ms step_avg:94.71ms +step:470/1695 train_time:44517ms step_avg:94.72ms +step:471/1695 train_time:44613ms step_avg:94.72ms +step:472/1695 train_time:44709ms step_avg:94.72ms +step:473/1695 train_time:44804ms step_avg:94.72ms +step:474/1695 train_time:44901ms step_avg:94.73ms +step:475/1695 train_time:44998ms step_avg:94.73ms +step:476/1695 train_time:45095ms step_avg:94.74ms +step:477/1695 train_time:45191ms step_avg:94.74ms +step:478/1695 train_time:45287ms step_avg:94.74ms +step:479/1695 train_time:45382ms step_avg:94.74ms +step:480/1695 train_time:45479ms step_avg:94.75ms +step:481/1695 train_time:45575ms step_avg:94.75ms +step:482/1695 train_time:45671ms step_avg:94.75ms +step:483/1695 train_time:45767ms step_avg:94.76ms +step:484/1695 train_time:45863ms step_avg:94.76ms +step:485/1695 train_time:45960ms step_avg:94.76ms +step:486/1695 train_time:46057ms step_avg:94.77ms +step:487/1695 train_time:46154ms step_avg:94.77ms +step:488/1695 train_time:46250ms step_avg:94.78ms +step:489/1695 train_time:46346ms step_avg:94.78ms +step:490/1695 train_time:46442ms step_avg:94.78ms +step:491/1695 train_time:46539ms step_avg:94.78ms +step:492/1695 train_time:46635ms step_avg:94.79ms +step:493/1695 train_time:46732ms step_avg:94.79ms +step:494/1695 train_time:46827ms step_avg:94.79ms +step:495/1695 
train_time:46923ms step_avg:94.79ms +step:496/1695 train_time:47020ms step_avg:94.80ms +step:497/1695 train_time:47117ms step_avg:94.80ms +step:498/1695 train_time:47214ms step_avg:94.81ms +step:499/1695 train_time:47310ms step_avg:94.81ms +step:500/1695 train_time:47406ms step_avg:94.81ms +step:500/1695 val_loss:3.7286 train_time:47501ms step_avg:95.00ms +step:501/1695 train_time:47526ms step_avg:94.86ms +step:502/1695 train_time:47607ms step_avg:94.83ms +step:503/1695 train_time:47709ms step_avg:94.85ms +step:504/1695 train_time:47805ms step_avg:94.85ms +step:505/1695 train_time:47900ms step_avg:94.85ms +step:506/1695 train_time:47997ms step_avg:94.86ms +step:507/1695 train_time:48093ms step_avg:94.86ms +step:508/1695 train_time:48188ms step_avg:94.86ms +step:509/1695 train_time:48284ms step_avg:94.86ms +step:510/1695 train_time:48380ms step_avg:94.86ms +step:511/1695 train_time:48478ms step_avg:94.87ms +step:512/1695 train_time:48577ms step_avg:94.88ms +step:513/1695 train_time:48678ms step_avg:94.89ms +step:514/1695 train_time:48775ms step_avg:94.89ms +step:515/1695 train_time:48871ms step_avg:94.90ms +step:516/1695 train_time:48967ms step_avg:94.90ms +step:517/1695 train_time:49063ms step_avg:94.90ms +step:518/1695 train_time:49159ms step_avg:94.90ms +step:519/1695 train_time:49256ms step_avg:94.91ms +step:520/1695 train_time:49351ms step_avg:94.91ms +step:521/1695 train_time:49447ms step_avg:94.91ms +step:522/1695 train_time:49544ms step_avg:94.91ms +step:523/1695 train_time:49642ms step_avg:94.92ms +step:524/1695 train_time:49740ms step_avg:94.92ms +step:525/1695 train_time:49839ms step_avg:94.93ms +step:526/1695 train_time:49936ms step_avg:94.94ms +step:527/1695 train_time:50033ms step_avg:94.94ms +step:528/1695 train_time:50129ms step_avg:94.94ms +step:529/1695 train_time:50225ms step_avg:94.94ms +step:530/1695 train_time:50321ms step_avg:94.94ms +step:531/1695 train_time:50418ms step_avg:94.95ms +step:532/1695 train_time:50516ms step_avg:94.95ms +step:533/1695 train_time:50613ms step_avg:94.96ms +step:534/1695 train_time:50710ms step_avg:94.96ms +step:535/1695 train_time:50808ms step_avg:94.97ms +step:536/1695 train_time:50904ms step_avg:94.97ms +step:537/1695 train_time:51001ms step_avg:94.97ms +step:538/1695 train_time:51098ms step_avg:94.98ms +step:539/1695 train_time:51194ms step_avg:94.98ms +step:540/1695 train_time:51290ms step_avg:94.98ms +step:541/1695 train_time:51386ms step_avg:94.98ms +step:542/1695 train_time:51483ms step_avg:94.99ms +step:543/1695 train_time:51580ms step_avg:94.99ms +step:544/1695 train_time:51678ms step_avg:95.00ms +step:545/1695 train_time:51776ms step_avg:95.00ms +step:546/1695 train_time:51873ms step_avg:95.00ms +step:547/1695 train_time:51969ms step_avg:95.01ms +step:548/1695 train_time:52065ms step_avg:95.01ms +step:549/1695 train_time:52161ms step_avg:95.01ms +step:550/1695 train_time:52258ms step_avg:95.01ms +step:551/1695 train_time:52354ms step_avg:95.02ms +step:552/1695 train_time:52451ms step_avg:95.02ms +step:553/1695 train_time:52547ms step_avg:95.02ms +step:554/1695 train_time:52644ms step_avg:95.03ms +step:555/1695 train_time:52741ms step_avg:95.03ms +step:556/1695 train_time:52839ms step_avg:95.03ms +step:557/1695 train_time:52937ms step_avg:95.04ms +step:558/1695 train_time:53034ms step_avg:95.04ms +step:559/1695 train_time:53132ms step_avg:95.05ms +step:560/1695 train_time:53227ms step_avg:95.05ms +step:561/1695 train_time:53324ms step_avg:95.05ms +step:562/1695 train_time:53420ms step_avg:95.05ms +step:563/1695 train_time:53518ms 
step_avg:95.06ms +step:564/1695 train_time:53614ms step_avg:95.06ms +step:565/1695 train_time:53711ms step_avg:95.06ms +step:566/1695 train_time:53807ms step_avg:95.07ms +step:567/1695 train_time:53904ms step_avg:95.07ms +step:568/1695 train_time:54002ms step_avg:95.07ms +step:569/1695 train_time:54099ms step_avg:95.08ms +step:570/1695 train_time:54196ms step_avg:95.08ms +step:571/1695 train_time:54292ms step_avg:95.08ms +step:572/1695 train_time:54388ms step_avg:95.08ms +step:573/1695 train_time:54484ms step_avg:95.09ms +step:574/1695 train_time:54580ms step_avg:95.09ms +step:575/1695 train_time:54677ms step_avg:95.09ms +step:576/1695 train_time:54774ms step_avg:95.09ms +step:577/1695 train_time:54871ms step_avg:95.10ms +step:578/1695 train_time:54967ms step_avg:95.10ms +step:579/1695 train_time:55063ms step_avg:95.10ms +step:580/1695 train_time:55160ms step_avg:95.10ms +step:581/1695 train_time:55257ms step_avg:95.11ms +step:582/1695 train_time:55355ms step_avg:95.11ms +step:583/1695 train_time:55452ms step_avg:95.11ms +step:584/1695 train_time:55548ms step_avg:95.12ms +step:585/1695 train_time:55644ms step_avg:95.12ms +step:586/1695 train_time:55741ms step_avg:95.12ms +step:587/1695 train_time:55838ms step_avg:95.12ms +step:588/1695 train_time:55935ms step_avg:95.13ms +step:589/1695 train_time:56032ms step_avg:95.13ms +step:590/1695 train_time:56128ms step_avg:95.13ms +step:591/1695 train_time:56225ms step_avg:95.13ms +step:592/1695 train_time:56322ms step_avg:95.14ms +step:593/1695 train_time:56419ms step_avg:95.14ms +step:594/1695 train_time:56516ms step_avg:95.15ms +step:595/1695 train_time:56614ms step_avg:95.15ms +step:596/1695 train_time:56710ms step_avg:95.15ms +step:597/1695 train_time:56806ms step_avg:95.15ms +step:598/1695 train_time:56902ms step_avg:95.15ms +step:599/1695 train_time:56999ms step_avg:95.16ms +step:600/1695 train_time:57096ms step_avg:95.16ms +step:601/1695 train_time:57192ms step_avg:95.16ms +step:602/1695 train_time:57289ms step_avg:95.16ms +step:603/1695 train_time:57386ms step_avg:95.17ms +step:604/1695 train_time:57482ms step_avg:95.17ms +step:605/1695 train_time:57580ms step_avg:95.17ms +step:606/1695 train_time:57677ms step_avg:95.18ms +step:607/1695 train_time:57774ms step_avg:95.18ms +step:608/1695 train_time:57870ms step_avg:95.18ms +step:609/1695 train_time:57966ms step_avg:95.18ms +step:610/1695 train_time:58062ms step_avg:95.18ms +step:611/1695 train_time:58159ms step_avg:95.19ms +step:612/1695 train_time:58257ms step_avg:95.19ms +step:613/1695 train_time:58354ms step_avg:95.19ms +step:614/1695 train_time:58451ms step_avg:95.20ms +step:615/1695 train_time:58548ms step_avg:95.20ms +step:616/1695 train_time:58645ms step_avg:95.20ms +step:617/1695 train_time:58741ms step_avg:95.20ms +step:618/1695 train_time:58839ms step_avg:95.21ms +step:619/1695 train_time:58936ms step_avg:95.21ms +step:620/1695 train_time:59032ms step_avg:95.21ms +step:621/1695 train_time:59128ms step_avg:95.21ms +step:622/1695 train_time:59225ms step_avg:95.22ms +step:623/1695 train_time:59321ms step_avg:95.22ms +step:624/1695 train_time:59419ms step_avg:95.22ms +step:625/1695 train_time:59516ms step_avg:95.23ms +step:625/1695 val_loss:3.6445 train_time:59612ms step_avg:95.38ms +step:626/1695 train_time:59637ms step_avg:95.27ms +step:627/1695 train_time:59719ms step_avg:95.25ms +step:628/1695 train_time:59822ms step_avg:95.26ms +step:629/1695 train_time:60137ms step_avg:95.61ms +step:630/1695 train_time:60233ms step_avg:95.61ms +step:631/1695 train_time:60330ms step_avg:95.61ms 
+step:632/1695 train_time:60427ms step_avg:95.61ms +step:633/1695 train_time:60523ms step_avg:95.61ms +step:634/1695 train_time:60620ms step_avg:95.62ms +step:635/1695 train_time:60718ms step_avg:95.62ms +step:636/1695 train_time:60815ms step_avg:95.62ms +step:637/1695 train_time:60912ms step_avg:95.62ms +step:638/1695 train_time:61010ms step_avg:95.63ms +step:639/1695 train_time:61113ms step_avg:95.64ms +step:640/1695 train_time:61212ms step_avg:95.64ms +step:641/1695 train_time:61310ms step_avg:95.65ms +step:642/1695 train_time:61407ms step_avg:95.65ms +step:643/1695 train_time:61831ms step_avg:96.16ms +step:644/1695 train_time:61927ms step_avg:96.16ms +step:645/1695 train_time:62024ms step_avg:96.16ms +step:646/1695 train_time:62121ms step_avg:96.16ms +step:647/1695 train_time:62218ms step_avg:96.16ms +step:648/1695 train_time:62316ms step_avg:96.17ms +step:649/1695 train_time:62413ms step_avg:96.17ms +step:650/1695 train_time:62511ms step_avg:96.17ms +step:651/1695 train_time:62607ms step_avg:96.17ms +step:652/1695 train_time:62705ms step_avg:96.17ms +step:653/1695 train_time:63092ms step_avg:96.62ms +step:654/1695 train_time:63188ms step_avg:96.62ms +step:655/1695 train_time:63285ms step_avg:96.62ms +step:656/1695 train_time:63382ms step_avg:96.62ms +step:657/1695 train_time:63480ms step_avg:96.62ms +step:658/1695 train_time:63577ms step_avg:96.62ms +step:659/1695 train_time:63920ms step_avg:96.99ms +step:660/1695 train_time:64016ms step_avg:96.99ms +step:661/1695 train_time:64113ms step_avg:96.99ms +step:662/1695 train_time:64210ms step_avg:96.99ms +step:663/1695 train_time:64307ms step_avg:96.99ms +step:664/1695 train_time:64404ms step_avg:96.99ms +step:665/1695 train_time:64501ms step_avg:96.99ms +step:666/1695 train_time:64598ms step_avg:96.99ms +step:667/1695 train_time:64696ms step_avg:96.99ms +step:668/1695 train_time:64793ms step_avg:97.00ms +step:669/1695 train_time:64893ms step_avg:97.00ms +step:670/1695 train_time:64993ms step_avg:97.00ms +step:671/1695 train_time:65091ms step_avg:97.01ms +step:672/1695 train_time:65189ms step_avg:97.01ms +step:673/1695 train_time:65286ms step_avg:97.01ms +step:674/1695 train_time:65383ms step_avg:97.01ms +step:675/1695 train_time:65480ms step_avg:97.01ms +step:676/1695 train_time:65578ms step_avg:97.01ms +step:677/1695 train_time:65675ms step_avg:97.01ms +step:678/1695 train_time:65773ms step_avg:97.01ms +step:679/1695 train_time:65871ms step_avg:97.01ms +step:680/1695 train_time:65970ms step_avg:97.01ms +step:681/1695 train_time:66068ms step_avg:97.02ms +step:682/1695 train_time:66166ms step_avg:97.02ms +step:683/1695 train_time:66264ms step_avg:97.02ms +step:684/1695 train_time:66362ms step_avg:97.02ms +step:685/1695 train_time:66459ms step_avg:97.02ms +step:686/1695 train_time:66557ms step_avg:97.02ms +step:687/1695 train_time:66655ms step_avg:97.02ms +step:688/1695 train_time:66753ms step_avg:97.02ms +step:689/1695 train_time:66851ms step_avg:97.03ms +step:690/1695 train_time:66949ms step_avg:97.03ms +step:691/1695 train_time:67047ms step_avg:97.03ms +step:692/1695 train_time:67145ms step_avg:97.03ms +step:693/1695 train_time:67243ms step_avg:97.03ms +step:694/1695 train_time:67341ms step_avg:97.03ms +step:695/1695 train_time:67439ms step_avg:97.03ms +step:696/1695 train_time:67538ms step_avg:97.04ms +step:697/1695 train_time:67636ms step_avg:97.04ms +step:698/1695 train_time:67734ms step_avg:97.04ms +step:699/1695 train_time:67832ms step_avg:97.04ms +step:700/1695 train_time:67930ms step_avg:97.04ms +step:701/1695 train_time:68028ms 
step_avg:97.04ms +step:702/1695 train_time:68126ms step_avg:97.05ms +step:703/1695 train_time:68224ms step_avg:97.05ms +step:704/1695 train_time:68322ms step_avg:97.05ms +step:705/1695 train_time:68420ms step_avg:97.05ms +step:706/1695 train_time:68518ms step_avg:97.05ms +step:707/1695 train_time:68616ms step_avg:97.05ms +step:708/1695 train_time:68714ms step_avg:97.05ms +step:709/1695 train_time:68812ms step_avg:97.05ms +step:710/1695 train_time:68910ms step_avg:97.06ms +step:711/1695 train_time:69008ms step_avg:97.06ms +step:712/1695 train_time:69106ms step_avg:97.06ms +step:713/1695 train_time:69424ms step_avg:97.37ms +step:714/1695 train_time:69520ms step_avg:97.37ms +step:715/1695 train_time:69617ms step_avg:97.37ms +step:716/1695 train_time:69715ms step_avg:97.37ms +step:717/1695 train_time:69812ms step_avg:97.37ms +step:718/1695 train_time:69909ms step_avg:97.37ms +step:719/1695 train_time:70006ms step_avg:97.37ms +step:720/1695 train_time:70103ms step_avg:97.36ms +step:721/1695 train_time:70199ms step_avg:97.36ms +step:722/1695 train_time:70299ms step_avg:97.37ms +step:723/1695 train_time:70402ms step_avg:97.37ms +step:724/1695 train_time:70500ms step_avg:97.38ms +step:725/1695 train_time:70598ms step_avg:97.38ms +step:726/1695 train_time:70696ms step_avg:97.38ms +step:727/1695 train_time:70794ms step_avg:97.38ms +step:728/1695 train_time:70892ms step_avg:97.38ms +step:729/1695 train_time:70990ms step_avg:97.38ms +step:730/1695 train_time:71087ms step_avg:97.38ms +step:731/1695 train_time:71184ms step_avg:97.38ms +step:732/1695 train_time:71282ms step_avg:97.38ms +step:733/1695 train_time:71382ms step_avg:97.38ms +step:734/1695 train_time:71481ms step_avg:97.39ms +step:735/1695 train_time:71579ms step_avg:97.39ms +step:736/1695 train_time:71677ms step_avg:97.39ms +step:737/1695 train_time:71775ms step_avg:97.39ms +step:738/1695 train_time:71874ms step_avg:97.39ms +step:739/1695 train_time:71972ms step_avg:97.39ms +step:740/1695 train_time:72070ms step_avg:97.39ms +step:741/1695 train_time:72168ms step_avg:97.39ms +step:742/1695 train_time:72267ms step_avg:97.39ms +step:743/1695 train_time:72365ms step_avg:97.40ms +step:744/1695 train_time:72463ms step_avg:97.40ms +step:745/1695 train_time:72561ms step_avg:97.40ms +step:746/1695 train_time:72659ms step_avg:97.40ms +step:747/1695 train_time:72757ms step_avg:97.40ms +step:748/1695 train_time:72855ms step_avg:97.40ms +step:749/1695 train_time:72952ms step_avg:97.40ms +step:750/1695 train_time:73050ms step_avg:97.40ms +step:750/1695 val_loss:3.5832 train_time:73147ms step_avg:97.53ms +step:751/1695 train_time:73173ms step_avg:97.43ms +step:752/1695 train_time:73256ms step_avg:97.41ms +step:753/1695 train_time:73356ms step_avg:97.42ms +step:754/1695 train_time:73454ms step_avg:97.42ms +step:755/1695 train_time:73552ms step_avg:97.42ms +step:756/1695 train_time:73650ms step_avg:97.42ms +step:757/1695 train_time:73747ms step_avg:97.42ms +step:758/1695 train_time:73845ms step_avg:97.42ms +step:759/1695 train_time:73943ms step_avg:97.42ms +step:760/1695 train_time:74041ms step_avg:97.42ms +step:761/1695 train_time:74138ms step_avg:97.42ms +step:762/1695 train_time:74237ms step_avg:97.42ms +step:763/1695 train_time:74336ms step_avg:97.43ms +step:764/1695 train_time:74434ms step_avg:97.43ms +step:765/1695 train_time:74533ms step_avg:97.43ms +step:766/1695 train_time:74630ms step_avg:97.43ms +step:767/1695 train_time:74728ms step_avg:97.43ms +step:768/1695 train_time:74827ms step_avg:97.43ms +step:769/1695 train_time:74925ms step_avg:97.43ms 
+step:770/1695 train_time:75023ms step_avg:97.43ms +step:771/1695 train_time:75122ms step_avg:97.43ms +step:772/1695 train_time:75221ms step_avg:97.44ms +step:773/1695 train_time:75321ms step_avg:97.44ms +step:774/1695 train_time:75420ms step_avg:97.44ms +step:775/1695 train_time:75521ms step_avg:97.45ms +step:776/1695 train_time:75620ms step_avg:97.45ms +step:777/1695 train_time:75719ms step_avg:97.45ms +step:778/1695 train_time:75817ms step_avg:97.45ms +step:779/1695 train_time:75915ms step_avg:97.45ms +step:780/1695 train_time:76012ms step_avg:97.45ms +step:781/1695 train_time:76110ms step_avg:97.45ms +step:782/1695 train_time:76207ms step_avg:97.45ms +step:783/1695 train_time:76305ms step_avg:97.45ms +step:784/1695 train_time:76663ms step_avg:97.78ms +step:785/1695 train_time:76832ms step_avg:97.87ms +step:786/1695 train_time:76928ms step_avg:97.87ms +step:787/1695 train_time:77025ms step_avg:97.87ms +step:788/1695 train_time:77122ms step_avg:97.87ms +step:789/1695 train_time:77511ms step_avg:98.24ms +step:790/1695 train_time:77562ms step_avg:98.18ms +step:791/1695 train_time:77659ms step_avg:98.18ms +step:792/1695 train_time:77756ms step_avg:98.18ms +step:793/1695 train_time:77854ms step_avg:98.18ms +step:794/1695 train_time:77951ms step_avg:98.17ms +step:795/1695 train_time:78048ms step_avg:98.17ms +step:796/1695 train_time:78146ms step_avg:98.17ms +step:797/1695 train_time:78243ms step_avg:98.17ms +step:798/1695 train_time:78340ms step_avg:98.17ms +step:799/1695 train_time:78440ms step_avg:98.17ms +step:800/1695 train_time:78541ms step_avg:98.18ms +step:801/1695 train_time:78641ms step_avg:98.18ms +step:802/1695 train_time:78739ms step_avg:98.18ms +step:803/1695 train_time:78837ms step_avg:98.18ms +step:804/1695 train_time:78935ms step_avg:98.18ms +step:805/1695 train_time:79033ms step_avg:98.18ms +step:806/1695 train_time:79130ms step_avg:98.18ms +step:807/1695 train_time:79228ms step_avg:98.18ms +step:808/1695 train_time:79326ms step_avg:98.18ms +step:809/1695 train_time:79426ms step_avg:98.18ms +step:810/1695 train_time:79525ms step_avg:98.18ms +step:811/1695 train_time:79625ms step_avg:98.18ms +step:812/1695 train_time:79725ms step_avg:98.18ms +step:813/1695 train_time:79825ms step_avg:98.19ms +step:814/1695 train_time:79924ms step_avg:98.19ms +step:815/1695 train_time:80024ms step_avg:98.19ms +step:816/1695 train_time:80123ms step_avg:98.19ms +step:817/1695 train_time:80221ms step_avg:98.19ms +step:818/1695 train_time:80319ms step_avg:98.19ms +step:819/1695 train_time:80417ms step_avg:98.19ms +step:820/1695 train_time:80515ms step_avg:98.19ms +step:821/1695 train_time:80613ms step_avg:98.19ms +step:822/1695 train_time:80711ms step_avg:98.19ms +step:823/1695 train_time:80809ms step_avg:98.19ms +step:824/1695 train_time:80908ms step_avg:98.19ms +step:825/1695 train_time:81007ms step_avg:98.19ms +step:826/1695 train_time:81105ms step_avg:98.19ms +step:827/1695 train_time:81204ms step_avg:98.19ms +step:828/1695 train_time:81303ms step_avg:98.19ms +step:829/1695 train_time:81402ms step_avg:98.19ms +step:830/1695 train_time:81500ms step_avg:98.19ms +step:831/1695 train_time:81599ms step_avg:98.19ms +step:832/1695 train_time:81697ms step_avg:98.19ms +step:833/1695 train_time:81796ms step_avg:98.19ms +step:834/1695 train_time:81893ms step_avg:98.19ms +step:835/1695 train_time:81990ms step_avg:98.19ms +step:836/1695 train_time:82088ms step_avg:98.19ms +step:837/1695 train_time:82186ms step_avg:98.19ms +step:838/1695 train_time:82284ms step_avg:98.19ms +step:839/1695 train_time:82383ms 
step_avg:98.19ms +step:840/1695 train_time:82483ms step_avg:98.19ms +step:841/1695 train_time:82581ms step_avg:98.19ms +step:842/1695 train_time:82680ms step_avg:98.20ms +step:843/1695 train_time:82779ms step_avg:98.20ms +step:844/1695 train_time:82879ms step_avg:98.20ms +step:845/1695 train_time:82977ms step_avg:98.20ms +step:846/1695 train_time:83075ms step_avg:98.20ms +step:847/1695 train_time:83173ms step_avg:98.20ms +step:848/1695 train_time:83271ms step_avg:98.20ms +step:849/1695 train_time:83369ms step_avg:98.20ms +step:850/1695 train_time:83467ms step_avg:98.20ms +step:851/1695 train_time:83566ms step_avg:98.20ms +step:852/1695 train_time:83665ms step_avg:98.20ms +step:853/1695 train_time:83765ms step_avg:98.20ms +step:854/1695 train_time:83865ms step_avg:98.20ms +step:855/1695 train_time:83964ms step_avg:98.20ms +step:856/1695 train_time:84064ms step_avg:98.21ms +step:857/1695 train_time:84164ms step_avg:98.21ms +step:858/1695 train_time:84264ms step_avg:98.21ms +step:859/1695 train_time:84364ms step_avg:98.21ms +step:860/1695 train_time:84462ms step_avg:98.21ms +step:861/1695 train_time:84560ms step_avg:98.21ms +step:862/1695 train_time:84664ms step_avg:98.22ms +step:863/1695 train_time:84757ms step_avg:98.21ms +step:864/1695 train_time:84855ms step_avg:98.21ms +step:865/1695 train_time:84953ms step_avg:98.21ms +step:866/1695 train_time:85051ms step_avg:98.21ms +step:867/1695 train_time:85149ms step_avg:98.21ms +step:868/1695 train_time:85247ms step_avg:98.21ms +step:869/1695 train_time:85346ms step_avg:98.21ms +step:870/1695 train_time:85445ms step_avg:98.21ms +step:871/1695 train_time:85543ms step_avg:98.21ms +step:872/1695 train_time:85642ms step_avg:98.21ms +step:873/1695 train_time:85740ms step_avg:98.21ms +step:874/1695 train_time:85839ms step_avg:98.21ms +step:875/1695 train_time:85938ms step_avg:98.22ms +step:875/1695 val_loss:3.5364 train_time:86035ms step_avg:98.33ms +step:876/1695 train_time:86061ms step_avg:98.24ms +step:877/1695 train_time:86146ms step_avg:98.23ms +step:878/1695 train_time:86245ms step_avg:98.23ms +step:879/1695 train_time:86344ms step_avg:98.23ms +step:880/1695 train_time:86443ms step_avg:98.23ms +step:881/1695 train_time:86541ms step_avg:98.23ms +step:882/1695 train_time:86639ms step_avg:98.23ms +step:883/1695 train_time:86738ms step_avg:98.23ms +step:884/1695 train_time:86838ms step_avg:98.23ms +step:885/1695 train_time:86936ms step_avg:98.23ms +step:886/1695 train_time:87037ms step_avg:98.24ms +step:887/1695 train_time:87137ms step_avg:98.24ms +step:888/1695 train_time:87239ms step_avg:98.24ms +step:889/1695 train_time:87339ms step_avg:98.24ms +step:890/1695 train_time:87439ms step_avg:98.25ms +step:891/1695 train_time:87538ms step_avg:98.25ms +step:892/1695 train_time:87637ms step_avg:98.25ms +step:893/1695 train_time:87736ms step_avg:98.25ms +step:894/1695 train_time:87834ms step_avg:98.25ms +step:895/1695 train_time:87934ms step_avg:98.25ms +step:896/1695 train_time:88034ms step_avg:98.25ms +step:897/1695 train_time:88133ms step_avg:98.25ms +step:898/1695 train_time:88233ms step_avg:98.26ms +step:899/1695 train_time:88334ms step_avg:98.26ms +step:900/1695 train_time:88436ms step_avg:98.26ms +step:901/1695 train_time:88536ms step_avg:98.26ms +step:902/1695 train_time:88637ms step_avg:98.27ms +step:903/1695 train_time:88736ms step_avg:98.27ms +step:904/1695 train_time:88835ms step_avg:98.27ms +step:905/1695 train_time:88933ms step_avg:98.27ms +step:906/1695 train_time:89033ms step_avg:98.27ms +step:907/1695 train_time:89133ms step_avg:98.27ms 
+step:908/1695 train_time:89233ms step_avg:98.27ms +step:909/1695 train_time:89333ms step_avg:98.28ms +step:910/1695 train_time:89433ms step_avg:98.28ms +step:911/1695 train_time:89535ms step_avg:98.28ms +step:912/1695 train_time:89635ms step_avg:98.28ms +step:913/1695 train_time:89734ms step_avg:98.29ms +step:914/1695 train_time:89834ms step_avg:98.29ms +step:915/1695 train_time:89933ms step_avg:98.29ms +step:916/1695 train_time:90032ms step_avg:98.29ms +step:917/1695 train_time:90133ms step_avg:98.29ms +step:918/1695 train_time:90234ms step_avg:98.29ms +step:919/1695 train_time:90334ms step_avg:98.30ms +step:920/1695 train_time:90434ms step_avg:98.30ms +step:921/1695 train_time:90535ms step_avg:98.30ms +step:922/1695 train_time:90636ms step_avg:98.30ms +step:923/1695 train_time:90736ms step_avg:98.31ms +step:924/1695 train_time:90835ms step_avg:98.31ms +step:925/1695 train_time:90935ms step_avg:98.31ms +step:926/1695 train_time:91035ms step_avg:98.31ms +step:927/1695 train_time:91135ms step_avg:98.31ms +step:928/1695 train_time:91234ms step_avg:98.31ms +step:929/1695 train_time:91334ms step_avg:98.31ms +step:930/1695 train_time:91435ms step_avg:98.32ms +step:931/1695 train_time:91537ms step_avg:98.32ms +step:932/1695 train_time:91637ms step_avg:98.32ms +step:933/1695 train_time:91736ms step_avg:98.32ms +step:934/1695 train_time:91836ms step_avg:98.33ms +step:935/1695 train_time:91935ms step_avg:98.33ms +step:936/1695 train_time:92035ms step_avg:98.33ms +step:937/1695 train_time:92134ms step_avg:98.33ms +step:938/1695 train_time:92234ms step_avg:98.33ms +step:939/1695 train_time:92334ms step_avg:98.33ms +step:940/1695 train_time:92434ms step_avg:98.33ms +step:941/1695 train_time:92535ms step_avg:98.34ms +step:942/1695 train_time:92636ms step_avg:98.34ms +step:943/1695 train_time:92736ms step_avg:98.34ms +step:944/1695 train_time:92835ms step_avg:98.34ms +step:945/1695 train_time:92936ms step_avg:98.34ms +step:946/1695 train_time:93036ms step_avg:98.35ms +step:947/1695 train_time:93135ms step_avg:98.35ms +step:948/1695 train_time:93235ms step_avg:98.35ms +step:949/1695 train_time:93334ms step_avg:98.35ms +step:950/1695 train_time:93435ms step_avg:98.35ms +step:951/1695 train_time:93535ms step_avg:98.35ms +step:952/1695 train_time:93635ms step_avg:98.36ms +step:953/1695 train_time:93736ms step_avg:98.36ms +step:954/1695 train_time:93835ms step_avg:98.36ms +step:955/1695 train_time:93935ms step_avg:98.36ms +step:956/1695 train_time:94034ms step_avg:98.36ms +step:957/1695 train_time:94135ms step_avg:98.36ms +step:958/1695 train_time:94234ms step_avg:98.37ms +step:959/1695 train_time:94334ms step_avg:98.37ms +step:960/1695 train_time:94434ms step_avg:98.37ms +step:961/1695 train_time:94535ms step_avg:98.37ms +step:962/1695 train_time:94635ms step_avg:98.37ms +step:963/1695 train_time:94735ms step_avg:98.37ms +step:964/1695 train_time:94834ms step_avg:98.38ms +step:965/1695 train_time:94934ms step_avg:98.38ms +step:966/1695 train_time:95035ms step_avg:98.38ms +step:967/1695 train_time:95136ms step_avg:98.38ms +step:968/1695 train_time:95235ms step_avg:98.38ms +step:969/1695 train_time:95335ms step_avg:98.39ms +step:970/1695 train_time:95435ms step_avg:98.39ms +step:971/1695 train_time:95535ms step_avg:98.39ms +step:972/1695 train_time:95635ms step_avg:98.39ms +step:973/1695 train_time:95736ms step_avg:98.39ms +step:974/1695 train_time:95835ms step_avg:98.39ms +step:975/1695 train_time:95935ms step_avg:98.39ms +step:976/1695 train_time:96034ms step_avg:98.40ms +step:977/1695 train_time:96134ms 
step_avg:98.40ms +step:978/1695 train_time:96234ms step_avg:98.40ms +step:979/1695 train_time:96334ms step_avg:98.40ms +step:980/1695 train_time:96435ms step_avg:98.40ms +step:981/1695 train_time:96536ms step_avg:98.41ms +step:982/1695 train_time:96638ms step_avg:98.41ms +step:983/1695 train_time:96737ms step_avg:98.41ms +step:984/1695 train_time:96836ms step_avg:98.41ms +step:985/1695 train_time:96937ms step_avg:98.41ms +step:986/1695 train_time:97037ms step_avg:98.41ms +step:987/1695 train_time:97137ms step_avg:98.42ms +step:988/1695 train_time:97236ms step_avg:98.42ms +step:989/1695 train_time:97336ms step_avg:98.42ms +step:990/1695 train_time:97436ms step_avg:98.42ms +step:991/1695 train_time:97536ms step_avg:98.42ms +step:992/1695 train_time:97636ms step_avg:98.42ms +step:993/1695 train_time:97735ms step_avg:98.42ms +step:994/1695 train_time:97835ms step_avg:98.43ms +step:995/1695 train_time:97935ms step_avg:98.43ms +step:996/1695 train_time:98034ms step_avg:98.43ms +step:997/1695 train_time:98135ms step_avg:98.43ms +step:998/1695 train_time:98235ms step_avg:98.43ms +step:999/1695 train_time:98335ms step_avg:98.43ms +step:1000/1695 train_time:98434ms step_avg:98.43ms +step:1000/1695 val_loss:3.4915 train_time:98532ms step_avg:98.53ms +step:1001/1695 train_time:98558ms step_avg:98.46ms +step:1002/1695 train_time:98645ms step_avg:98.45ms +step:1003/1695 train_time:98749ms step_avg:98.45ms +step:1004/1695 train_time:98849ms step_avg:98.46ms +step:1005/1695 train_time:98949ms step_avg:98.46ms +step:1006/1695 train_time:99049ms step_avg:98.46ms +step:1007/1695 train_time:99149ms step_avg:98.46ms +step:1008/1695 train_time:99248ms step_avg:98.46ms +step:1009/1695 train_time:99347ms step_avg:98.46ms +step:1010/1695 train_time:99445ms step_avg:98.46ms +step:1011/1695 train_time:99547ms step_avg:98.46ms +step:1012/1695 train_time:99650ms step_avg:98.47ms +step:1013/1695 train_time:99752ms step_avg:98.47ms +step:1014/1695 train_time:99856ms step_avg:98.48ms +step:1015/1695 train_time:99955ms step_avg:98.48ms +step:1016/1695 train_time:100054ms step_avg:98.48ms +step:1017/1695 train_time:100154ms step_avg:98.48ms +step:1018/1695 train_time:100253ms step_avg:98.48ms +step:1019/1695 train_time:100352ms step_avg:98.48ms +step:1020/1695 train_time:100453ms step_avg:98.48ms +step:1021/1695 train_time:100555ms step_avg:98.49ms +step:1022/1695 train_time:100656ms step_avg:98.49ms +step:1023/1695 train_time:100756ms step_avg:98.49ms +step:1024/1695 train_time:100858ms step_avg:98.49ms +step:1025/1695 train_time:100958ms step_avg:98.50ms +step:1026/1695 train_time:101058ms step_avg:98.50ms +step:1027/1695 train_time:101156ms step_avg:98.50ms +step:1028/1695 train_time:101256ms step_avg:98.50ms +step:1029/1695 train_time:101355ms step_avg:98.50ms +step:1030/1695 train_time:101456ms step_avg:98.50ms +step:1031/1695 train_time:101556ms step_avg:98.50ms +step:1032/1695 train_time:101656ms step_avg:98.50ms +step:1033/1695 train_time:101757ms step_avg:98.51ms +step:1034/1695 train_time:101858ms step_avg:98.51ms +step:1035/1695 train_time:101957ms step_avg:98.51ms +step:1036/1695 train_time:102057ms step_avg:98.51ms +step:1037/1695 train_time:102157ms step_avg:98.51ms +step:1038/1695 train_time:102256ms step_avg:98.51ms +step:1039/1695 train_time:102355ms step_avg:98.51ms +step:1040/1695 train_time:102456ms step_avg:98.52ms +step:1041/1695 train_time:102556ms step_avg:98.52ms +step:1042/1695 train_time:102657ms step_avg:98.52ms +step:1043/1695 train_time:102757ms step_avg:98.52ms +step:1044/1695 
train_time:102856ms step_avg:98.52ms +step:1045/1695 train_time:102956ms step_avg:98.52ms +step:1046/1695 train_time:103056ms step_avg:98.52ms +step:1047/1695 train_time:103156ms step_avg:98.52ms +step:1048/1695 train_time:103255ms step_avg:98.53ms +step:1049/1695 train_time:103355ms step_avg:98.53ms +step:1050/1695 train_time:103454ms step_avg:98.53ms +step:1051/1695 train_time:103556ms step_avg:98.53ms +step:1052/1695 train_time:103655ms step_avg:98.53ms +step:1053/1695 train_time:103754ms step_avg:98.53ms +step:1054/1695 train_time:103855ms step_avg:98.53ms +step:1055/1695 train_time:103956ms step_avg:98.54ms +step:1056/1695 train_time:104055ms step_avg:98.54ms +step:1057/1695 train_time:104155ms step_avg:98.54ms +step:1058/1695 train_time:104254ms step_avg:98.54ms +step:1059/1695 train_time:104353ms step_avg:98.54ms +step:1060/1695 train_time:104452ms step_avg:98.54ms +step:1061/1695 train_time:104553ms step_avg:98.54ms +step:1062/1695 train_time:104654ms step_avg:98.54ms +step:1063/1695 train_time:104756ms step_avg:98.55ms +step:1064/1695 train_time:104856ms step_avg:98.55ms +step:1065/1695 train_time:104956ms step_avg:98.55ms +step:1066/1695 train_time:105056ms step_avg:98.55ms +step:1067/1695 train_time:105155ms step_avg:98.55ms +step:1068/1695 train_time:105255ms step_avg:98.55ms +step:1069/1695 train_time:105355ms step_avg:98.55ms +step:1070/1695 train_time:105454ms step_avg:98.56ms +step:1071/1695 train_time:105554ms step_avg:98.56ms +step:1072/1695 train_time:105655ms step_avg:98.56ms +step:1073/1695 train_time:105755ms step_avg:98.56ms +step:1074/1695 train_time:105855ms step_avg:98.56ms +step:1075/1695 train_time:105955ms step_avg:98.56ms +step:1076/1695 train_time:106055ms step_avg:98.56ms +step:1077/1695 train_time:106156ms step_avg:98.57ms +step:1078/1695 train_time:106255ms step_avg:98.57ms +step:1079/1695 train_time:106354ms step_avg:98.57ms +step:1080/1695 train_time:106453ms step_avg:98.57ms +step:1081/1695 train_time:106552ms step_avg:98.57ms +step:1082/1695 train_time:106652ms step_avg:98.57ms +step:1083/1695 train_time:106752ms step_avg:98.57ms +step:1084/1695 train_time:106852ms step_avg:98.57ms +step:1085/1695 train_time:106953ms step_avg:98.57ms +step:1086/1695 train_time:107053ms step_avg:98.58ms +step:1087/1695 train_time:107154ms step_avg:98.58ms +step:1088/1695 train_time:107254ms step_avg:98.58ms +step:1089/1695 train_time:107355ms step_avg:98.58ms +step:1090/1695 train_time:107455ms step_avg:98.58ms +step:1091/1695 train_time:107556ms step_avg:98.59ms +step:1092/1695 train_time:107655ms step_avg:98.59ms +step:1093/1695 train_time:107755ms step_avg:98.59ms +step:1094/1695 train_time:107855ms step_avg:98.59ms +step:1095/1695 train_time:107955ms step_avg:98.59ms +step:1096/1695 train_time:108054ms step_avg:98.59ms +step:1097/1695 train_time:108153ms step_avg:98.59ms +step:1098/1695 train_time:108254ms step_avg:98.59ms +step:1099/1695 train_time:108353ms step_avg:98.59ms +step:1100/1695 train_time:108454ms step_avg:98.59ms +step:1101/1695 train_time:108554ms step_avg:98.60ms +step:1102/1695 train_time:108654ms step_avg:98.60ms +step:1103/1695 train_time:108755ms step_avg:98.60ms +step:1104/1695 train_time:108855ms step_avg:98.60ms +step:1105/1695 train_time:108955ms step_avg:98.60ms +step:1106/1695 train_time:109056ms step_avg:98.60ms +step:1107/1695 train_time:109155ms step_avg:98.60ms +step:1108/1695 train_time:109255ms step_avg:98.61ms +step:1109/1695 train_time:109354ms step_avg:98.61ms +step:1110/1695 train_time:109454ms step_avg:98.61ms +step:1111/1695 
train_time:109555ms step_avg:98.61ms +step:1112/1695 train_time:109656ms step_avg:98.61ms +step:1113/1695 train_time:109756ms step_avg:98.61ms +step:1114/1695 train_time:109856ms step_avg:98.61ms +step:1115/1695 train_time:109956ms step_avg:98.62ms +step:1116/1695 train_time:110056ms step_avg:98.62ms +step:1117/1695 train_time:110155ms step_avg:98.62ms +step:1118/1695 train_time:110255ms step_avg:98.62ms +step:1119/1695 train_time:110354ms step_avg:98.62ms +step:1120/1695 train_time:110454ms step_avg:98.62ms +step:1121/1695 train_time:110554ms step_avg:98.62ms +step:1122/1695 train_time:110654ms step_avg:98.62ms +step:1123/1695 train_time:110754ms step_avg:98.62ms +step:1124/1695 train_time:110854ms step_avg:98.62ms +step:1125/1695 train_time:110955ms step_avg:98.63ms +step:1125/1695 val_loss:3.4410 train_time:111053ms step_avg:98.71ms +step:1126/1695 train_time:111079ms step_avg:98.65ms +step:1127/1695 train_time:111166ms step_avg:98.64ms +step:1128/1695 train_time:111269ms step_avg:98.64ms +step:1129/1695 train_time:111369ms step_avg:98.64ms +step:1130/1695 train_time:111468ms step_avg:98.64ms +step:1131/1695 train_time:111567ms step_avg:98.64ms +step:1132/1695 train_time:111666ms step_avg:98.64ms +step:1133/1695 train_time:111766ms step_avg:98.65ms +step:1134/1695 train_time:111866ms step_avg:98.65ms +step:1135/1695 train_time:111966ms step_avg:98.65ms +step:1136/1695 train_time:112068ms step_avg:98.65ms +step:1137/1695 train_time:112171ms step_avg:98.66ms +step:1138/1695 train_time:112272ms step_avg:98.66ms +step:1139/1695 train_time:112373ms step_avg:98.66ms +step:1140/1695 train_time:112473ms step_avg:98.66ms +step:1141/1695 train_time:112574ms step_avg:98.66ms +step:1142/1695 train_time:112675ms step_avg:98.66ms +step:1143/1695 train_time:112775ms step_avg:98.67ms +step:1144/1695 train_time:112877ms step_avg:98.67ms +step:1145/1695 train_time:112979ms step_avg:98.67ms +step:1146/1695 train_time:113080ms step_avg:98.67ms +step:1147/1695 train_time:113181ms step_avg:98.68ms +step:1148/1695 train_time:113283ms step_avg:98.68ms +step:1149/1695 train_time:113384ms step_avg:98.68ms +step:1150/1695 train_time:113486ms step_avg:98.68ms +step:1151/1695 train_time:113587ms step_avg:98.69ms +step:1152/1695 train_time:113688ms step_avg:98.69ms +step:1153/1695 train_time:113789ms step_avg:98.69ms +step:1154/1695 train_time:113889ms step_avg:98.69ms +step:1155/1695 train_time:113989ms step_avg:98.69ms +step:1156/1695 train_time:114090ms step_avg:98.69ms +step:1157/1695 train_time:114191ms step_avg:98.70ms +step:1158/1695 train_time:114291ms step_avg:98.70ms +step:1159/1695 train_time:114391ms step_avg:98.70ms +step:1160/1695 train_time:114491ms step_avg:98.70ms +step:1161/1695 train_time:114591ms step_avg:98.70ms +step:1162/1695 train_time:114692ms step_avg:98.70ms +step:1163/1695 train_time:114793ms step_avg:98.70ms +step:1164/1695 train_time:114893ms step_avg:98.71ms +step:1165/1695 train_time:114996ms step_avg:98.71ms +step:1166/1695 train_time:115098ms step_avg:98.71ms +step:1167/1695 train_time:115197ms step_avg:98.71ms +step:1168/1695 train_time:115299ms step_avg:98.72ms +step:1169/1695 train_time:115401ms step_avg:98.72ms +step:1170/1695 train_time:115502ms step_avg:98.72ms +step:1171/1695 train_time:115603ms step_avg:98.72ms +step:1172/1695 train_time:115705ms step_avg:98.72ms +step:1173/1695 train_time:115805ms step_avg:98.73ms +step:1174/1695 train_time:115907ms step_avg:98.73ms +step:1175/1695 train_time:116008ms step_avg:98.73ms +step:1176/1695 train_time:116109ms step_avg:98.73ms 
+step:1177/1695 train_time:116209ms step_avg:98.73ms +step:1178/1695 train_time:116310ms step_avg:98.74ms +step:1179/1695 train_time:116414ms step_avg:98.74ms +step:1180/1695 train_time:116513ms step_avg:98.74ms +step:1181/1695 train_time:116613ms step_avg:98.74ms +step:1182/1695 train_time:116714ms step_avg:98.74ms +step:1183/1695 train_time:116815ms step_avg:98.74ms +step:1184/1695 train_time:116918ms step_avg:98.75ms +step:1185/1695 train_time:117020ms step_avg:98.75ms +step:1186/1695 train_time:117121ms step_avg:98.75ms +step:1187/1695 train_time:117222ms step_avg:98.75ms +step:1188/1695 train_time:117323ms step_avg:98.76ms +step:1189/1695 train_time:117424ms step_avg:98.76ms +step:1190/1695 train_time:117525ms step_avg:98.76ms +step:1191/1695 train_time:117625ms step_avg:98.76ms +step:1192/1695 train_time:117726ms step_avg:98.76ms +step:1193/1695 train_time:117827ms step_avg:98.77ms +step:1194/1695 train_time:117928ms step_avg:98.77ms +step:1195/1695 train_time:118029ms step_avg:98.77ms +step:1196/1695 train_time:118131ms step_avg:98.77ms +step:1197/1695 train_time:118231ms step_avg:98.77ms +step:1198/1695 train_time:118331ms step_avg:98.77ms +step:1199/1695 train_time:118432ms step_avg:98.78ms +step:1200/1695 train_time:118531ms step_avg:98.78ms +step:1201/1695 train_time:118630ms step_avg:98.78ms +step:1202/1695 train_time:118731ms step_avg:98.78ms +step:1203/1695 train_time:118832ms step_avg:98.78ms +step:1204/1695 train_time:118933ms step_avg:98.78ms +step:1205/1695 train_time:119033ms step_avg:98.78ms +step:1206/1695 train_time:119135ms step_avg:98.79ms +step:1207/1695 train_time:119236ms step_avg:98.79ms +step:1208/1695 train_time:119338ms step_avg:98.79ms +step:1209/1695 train_time:119439ms step_avg:98.79ms +step:1210/1695 train_time:119540ms step_avg:98.79ms +step:1211/1695 train_time:119641ms step_avg:98.80ms +step:1212/1695 train_time:119743ms step_avg:98.80ms +step:1213/1695 train_time:119844ms step_avg:98.80ms +step:1214/1695 train_time:119945ms step_avg:98.80ms +step:1215/1695 train_time:120047ms step_avg:98.80ms +step:1216/1695 train_time:120149ms step_avg:98.81ms +step:1217/1695 train_time:120249ms step_avg:98.81ms +step:1218/1695 train_time:120350ms step_avg:98.81ms +step:1219/1695 train_time:120450ms step_avg:98.81ms +step:1220/1695 train_time:120551ms step_avg:98.81ms +step:1221/1695 train_time:120652ms step_avg:98.81ms +step:1222/1695 train_time:120752ms step_avg:98.82ms +step:1223/1695 train_time:120852ms step_avg:98.82ms +step:1224/1695 train_time:120952ms step_avg:98.82ms +step:1225/1695 train_time:121053ms step_avg:98.82ms +step:1226/1695 train_time:121153ms step_avg:98.82ms +step:1227/1695 train_time:121254ms step_avg:98.82ms +step:1228/1695 train_time:121355ms step_avg:98.82ms +step:1229/1695 train_time:121456ms step_avg:98.82ms +step:1230/1695 train_time:121556ms step_avg:98.83ms +step:1231/1695 train_time:121657ms step_avg:98.83ms +step:1232/1695 train_time:121758ms step_avg:98.83ms +step:1233/1695 train_time:121859ms step_avg:98.83ms +step:1234/1695 train_time:121962ms step_avg:98.83ms +step:1235/1695 train_time:122063ms step_avg:98.84ms +step:1236/1695 train_time:122166ms step_avg:98.84ms +step:1237/1695 train_time:122267ms step_avg:98.84ms +step:1238/1695 train_time:122369ms step_avg:98.84ms +step:1239/1695 train_time:122469ms step_avg:98.85ms +step:1240/1695 train_time:122570ms step_avg:98.85ms +step:1241/1695 train_time:122671ms step_avg:98.85ms +step:1242/1695 train_time:122771ms step_avg:98.85ms +step:1243/1695 train_time:122871ms step_avg:98.85ms 
+step:1244/1695 train_time:122971ms step_avg:98.85ms +step:1245/1695 train_time:123071ms step_avg:98.85ms +step:1246/1695 train_time:123172ms step_avg:98.85ms +step:1247/1695 train_time:123273ms step_avg:98.86ms +step:1248/1695 train_time:123373ms step_avg:98.86ms +step:1249/1695 train_time:123473ms step_avg:98.86ms +step:1250/1695 train_time:123573ms step_avg:98.86ms +step:1250/1695 val_loss:3.3958 train_time:123671ms step_avg:98.94ms +step:1251/1695 train_time:123697ms step_avg:98.88ms +step:1252/1695 train_time:123786ms step_avg:98.87ms +step:1253/1695 train_time:123887ms step_avg:98.87ms +step:1254/1695 train_time:123989ms step_avg:98.87ms +step:1255/1695 train_time:124090ms step_avg:98.88ms +step:1256/1695 train_time:124190ms step_avg:98.88ms +step:1257/1695 train_time:124289ms step_avg:98.88ms +step:1258/1695 train_time:124390ms step_avg:98.88ms +step:1259/1695 train_time:124490ms step_avg:98.88ms +step:1260/1695 train_time:124591ms step_avg:98.88ms +step:1261/1695 train_time:124693ms step_avg:98.88ms +step:1262/1695 train_time:124797ms step_avg:98.89ms +step:1263/1695 train_time:124897ms step_avg:98.89ms +step:1264/1695 train_time:124997ms step_avg:98.89ms +step:1265/1695 train_time:125097ms step_avg:98.89ms +step:1266/1695 train_time:125196ms step_avg:98.89ms +step:1267/1695 train_time:125296ms step_avg:98.89ms +step:1268/1695 train_time:125396ms step_avg:98.89ms +step:1269/1695 train_time:125497ms step_avg:98.89ms +step:1270/1695 train_time:125598ms step_avg:98.90ms +step:1271/1695 train_time:125699ms step_avg:98.90ms +step:1272/1695 train_time:125800ms step_avg:98.90ms +step:1273/1695 train_time:125902ms step_avg:98.90ms +step:1274/1695 train_time:126002ms step_avg:98.90ms +step:1275/1695 train_time:126104ms step_avg:98.91ms +step:1276/1695 train_time:126209ms step_avg:98.91ms +step:1277/1695 train_time:126310ms step_avg:98.91ms +step:1278/1695 train_time:126411ms step_avg:98.91ms +step:1279/1695 train_time:126512ms step_avg:98.91ms +step:1280/1695 train_time:126613ms step_avg:98.92ms +step:1281/1695 train_time:126714ms step_avg:98.92ms +step:1282/1695 train_time:126814ms step_avg:98.92ms +step:1283/1695 train_time:126915ms step_avg:98.92ms +step:1284/1695 train_time:127015ms step_avg:98.92ms +step:1285/1695 train_time:127115ms step_avg:98.92ms +step:1286/1695 train_time:127216ms step_avg:98.92ms +step:1287/1695 train_time:127318ms step_avg:98.93ms +step:1288/1695 train_time:127418ms step_avg:98.93ms +step:1289/1695 train_time:127518ms step_avg:98.93ms +step:1290/1695 train_time:127619ms step_avg:98.93ms +step:1291/1695 train_time:127720ms step_avg:98.93ms +step:1292/1695 train_time:127821ms step_avg:98.93ms +step:1293/1695 train_time:127924ms step_avg:98.94ms +step:1294/1695 train_time:128026ms step_avg:98.94ms +step:1295/1695 train_time:128128ms step_avg:98.94ms +step:1296/1695 train_time:128229ms step_avg:98.94ms +step:1297/1695 train_time:128330ms step_avg:98.94ms +step:1298/1695 train_time:128431ms step_avg:98.95ms +step:1299/1695 train_time:128532ms step_avg:98.95ms +step:1300/1695 train_time:128633ms step_avg:98.95ms +step:1301/1695 train_time:128734ms step_avg:98.95ms +step:1302/1695 train_time:128836ms step_avg:98.95ms +step:1303/1695 train_time:128936ms step_avg:98.95ms +step:1304/1695 train_time:129037ms step_avg:98.95ms +step:1305/1695 train_time:129137ms step_avg:98.96ms +step:1306/1695 train_time:129237ms step_avg:98.96ms +step:1307/1695 train_time:129338ms step_avg:98.96ms +step:1308/1695 train_time:129439ms step_avg:98.96ms +step:1309/1695 train_time:129539ms 
step_avg:98.96ms +step:1310/1695 train_time:129640ms step_avg:98.96ms +step:1311/1695 train_time:129742ms step_avg:98.96ms +step:1312/1695 train_time:129843ms step_avg:98.97ms +step:1313/1695 train_time:129946ms step_avg:98.97ms +step:1314/1695 train_time:130047ms step_avg:98.97ms +step:1315/1695 train_time:130147ms step_avg:98.97ms +step:1316/1695 train_time:130249ms step_avg:98.97ms +step:1317/1695 train_time:130351ms step_avg:98.98ms +step:1318/1695 train_time:130451ms step_avg:98.98ms +step:1319/1695 train_time:130553ms step_avg:98.98ms +step:1320/1695 train_time:130655ms step_avg:98.98ms +step:1321/1695 train_time:130756ms step_avg:98.98ms +step:1322/1695 train_time:130857ms step_avg:98.98ms +step:1323/1695 train_time:130956ms step_avg:98.98ms +step:1324/1695 train_time:131057ms step_avg:98.99ms +step:1325/1695 train_time:131158ms step_avg:98.99ms +step:1326/1695 train_time:131259ms step_avg:98.99ms +step:1327/1695 train_time:131361ms step_avg:98.99ms +step:1328/1695 train_time:131463ms step_avg:98.99ms +step:1329/1695 train_time:131564ms step_avg:98.99ms +step:1330/1695 train_time:131664ms step_avg:99.00ms +step:1331/1695 train_time:131766ms step_avg:99.00ms +step:1332/1695 train_time:131867ms step_avg:99.00ms +step:1333/1695 train_time:131969ms step_avg:99.00ms +step:1334/1695 train_time:132070ms step_avg:99.00ms +step:1335/1695 train_time:132172ms step_avg:99.01ms +step:1336/1695 train_time:132272ms step_avg:99.01ms +step:1337/1695 train_time:132374ms step_avg:99.01ms +step:1338/1695 train_time:132474ms step_avg:99.01ms +step:1339/1695 train_time:132575ms step_avg:99.01ms +step:1340/1695 train_time:132675ms step_avg:99.01ms +step:1341/1695 train_time:132777ms step_avg:99.01ms +step:1342/1695 train_time:132877ms step_avg:99.01ms +step:1343/1695 train_time:132978ms step_avg:99.02ms +step:1344/1695 train_time:133078ms step_avg:99.02ms +step:1345/1695 train_time:133179ms step_avg:99.02ms +step:1346/1695 train_time:133280ms step_avg:99.02ms +step:1347/1695 train_time:133382ms step_avg:99.02ms +step:1348/1695 train_time:133484ms step_avg:99.02ms +step:1349/1695 train_time:133586ms step_avg:99.03ms +step:1350/1695 train_time:133688ms step_avg:99.03ms +step:1351/1695 train_time:133789ms step_avg:99.03ms +step:1352/1695 train_time:133889ms step_avg:99.03ms +step:1353/1695 train_time:133990ms step_avg:99.03ms +step:1354/1695 train_time:134090ms step_avg:99.03ms +step:1355/1695 train_time:134190ms step_avg:99.03ms +step:1356/1695 train_time:134292ms step_avg:99.04ms +step:1357/1695 train_time:134393ms step_avg:99.04ms +step:1358/1695 train_time:134494ms step_avg:99.04ms +step:1359/1695 train_time:134595ms step_avg:99.04ms +step:1360/1695 train_time:134696ms step_avg:99.04ms +step:1361/1695 train_time:134796ms step_avg:99.04ms +step:1362/1695 train_time:134896ms step_avg:99.04ms +step:1363/1695 train_time:134997ms step_avg:99.04ms +step:1364/1695 train_time:135098ms step_avg:99.05ms +step:1365/1695 train_time:135199ms step_avg:99.05ms +step:1366/1695 train_time:135301ms step_avg:99.05ms +step:1367/1695 train_time:135403ms step_avg:99.05ms +step:1368/1695 train_time:135505ms step_avg:99.05ms +step:1369/1695 train_time:135607ms step_avg:99.06ms +step:1370/1695 train_time:135709ms step_avg:99.06ms +step:1371/1695 train_time:135809ms step_avg:99.06ms +step:1372/1695 train_time:135910ms step_avg:99.06ms +step:1373/1695 train_time:136011ms step_avg:99.06ms +step:1374/1695 train_time:136113ms step_avg:99.06ms +step:1375/1695 train_time:136215ms step_avg:99.07ms +step:1375/1695 val_loss:3.3554 
train_time:136313ms step_avg:99.14ms +step:1376/1695 train_time:136338ms step_avg:99.08ms +step:1377/1695 train_time:136429ms step_avg:99.08ms +step:1378/1695 train_time:136531ms step_avg:99.08ms +step:1379/1695 train_time:136632ms step_avg:99.08ms +step:1380/1695 train_time:136735ms step_avg:99.08ms +step:1381/1695 train_time:136835ms step_avg:99.08ms +step:1382/1695 train_time:136935ms step_avg:99.08ms +step:1383/1695 train_time:137035ms step_avg:99.09ms +step:1384/1695 train_time:137136ms step_avg:99.09ms +step:1385/1695 train_time:137237ms step_avg:99.09ms +step:1386/1695 train_time:137341ms step_avg:99.09ms +step:1387/1695 train_time:137443ms step_avg:99.09ms +step:1388/1695 train_time:137545ms step_avg:99.10ms +step:1389/1695 train_time:137647ms step_avg:99.10ms +step:1390/1695 train_time:137748ms step_avg:99.10ms +step:1391/1695 train_time:137849ms step_avg:99.10ms +step:1392/1695 train_time:137951ms step_avg:99.10ms +step:1393/1695 train_time:138053ms step_avg:99.10ms +step:1394/1695 train_time:138154ms step_avg:99.11ms +step:1395/1695 train_time:138255ms step_avg:99.11ms +step:1396/1695 train_time:138359ms step_avg:99.11ms +step:1397/1695 train_time:138462ms step_avg:99.11ms +step:1398/1695 train_time:138565ms step_avg:99.12ms +step:1399/1695 train_time:138667ms step_avg:99.12ms +step:1400/1695 train_time:138769ms step_avg:99.12ms +step:1401/1695 train_time:138870ms step_avg:99.12ms +step:1402/1695 train_time:138972ms step_avg:99.12ms +step:1403/1695 train_time:139075ms step_avg:99.13ms +step:1404/1695 train_time:139178ms step_avg:99.13ms +step:1405/1695 train_time:139279ms step_avg:99.13ms +step:1406/1695 train_time:139382ms step_avg:99.13ms +step:1407/1695 train_time:139483ms step_avg:99.14ms +step:1408/1695 train_time:139584ms step_avg:99.14ms +step:1409/1695 train_time:139688ms step_avg:99.14ms +step:1410/1695 train_time:139790ms step_avg:99.14ms +step:1411/1695 train_time:139891ms step_avg:99.14ms +step:1412/1695 train_time:139995ms step_avg:99.15ms +step:1413/1695 train_time:140096ms step_avg:99.15ms +step:1414/1695 train_time:140199ms step_avg:99.15ms +step:1415/1695 train_time:140302ms step_avg:99.15ms +step:1416/1695 train_time:140402ms step_avg:99.15ms +step:1417/1695 train_time:140503ms step_avg:99.15ms +step:1418/1695 train_time:140604ms step_avg:99.16ms +step:1419/1695 train_time:140705ms step_avg:99.16ms +step:1420/1695 train_time:140806ms step_avg:99.16ms +step:1421/1695 train_time:140908ms step_avg:99.16ms +step:1422/1695 train_time:141009ms step_avg:99.16ms +step:1423/1695 train_time:141111ms step_avg:99.16ms +step:1424/1695 train_time:141213ms step_avg:99.17ms +step:1425/1695 train_time:141316ms step_avg:99.17ms +step:1426/1695 train_time:141420ms step_avg:99.17ms +step:1427/1695 train_time:141521ms step_avg:99.17ms +step:1428/1695 train_time:141623ms step_avg:99.18ms +step:1429/1695 train_time:141724ms step_avg:99.18ms +step:1430/1695 train_time:141825ms step_avg:99.18ms +step:1431/1695 train_time:141927ms step_avg:99.18ms +step:1432/1695 train_time:142028ms step_avg:99.18ms +step:1433/1695 train_time:142130ms step_avg:99.18ms +step:1434/1695 train_time:142232ms step_avg:99.19ms +step:1435/1695 train_time:142334ms step_avg:99.19ms +step:1436/1695 train_time:142437ms step_avg:99.19ms +step:1437/1695 train_time:142538ms step_avg:99.19ms +step:1438/1695 train_time:142639ms step_avg:99.19ms +step:1439/1695 train_time:142742ms step_avg:99.20ms +step:1440/1695 train_time:142844ms step_avg:99.20ms +step:1441/1695 train_time:142947ms step_avg:99.20ms +step:1442/1695 
train_time:143048ms step_avg:99.20ms +step:1443/1695 train_time:143148ms step_avg:99.20ms +step:1444/1695 train_time:143251ms step_avg:99.20ms +step:1445/1695 train_time:143352ms step_avg:99.21ms +step:1446/1695 train_time:143454ms step_avg:99.21ms +step:1447/1695 train_time:143557ms step_avg:99.21ms +step:1448/1695 train_time:143662ms step_avg:99.21ms +step:1449/1695 train_time:143762ms step_avg:99.21ms +step:1450/1695 train_time:143864ms step_avg:99.22ms +step:1451/1695 train_time:143965ms step_avg:99.22ms +step:1452/1695 train_time:144067ms step_avg:99.22ms +step:1453/1695 train_time:144169ms step_avg:99.22ms +step:1454/1695 train_time:144272ms step_avg:99.22ms +step:1455/1695 train_time:144374ms step_avg:99.23ms +step:1456/1695 train_time:144476ms step_avg:99.23ms +step:1457/1695 train_time:144578ms step_avg:99.23ms +step:1458/1695 train_time:144680ms step_avg:99.23ms +step:1459/1695 train_time:144782ms step_avg:99.23ms +step:1460/1695 train_time:144882ms step_avg:99.23ms +step:1461/1695 train_time:144985ms step_avg:99.24ms +step:1462/1695 train_time:145086ms step_avg:99.24ms +step:1463/1695 train_time:145187ms step_avg:99.24ms +step:1464/1695 train_time:145288ms step_avg:99.24ms +step:1465/1695 train_time:145390ms step_avg:99.24ms +step:1466/1695 train_time:145494ms step_avg:99.25ms +step:1467/1695 train_time:145596ms step_avg:99.25ms +step:1468/1695 train_time:145699ms step_avg:99.25ms +step:1469/1695 train_time:145802ms step_avg:99.25ms +step:1470/1695 train_time:145902ms step_avg:99.25ms +step:1471/1695 train_time:146003ms step_avg:99.25ms +step:1472/1695 train_time:146104ms step_avg:99.26ms +step:1473/1695 train_time:146204ms step_avg:99.26ms +step:1474/1695 train_time:146306ms step_avg:99.26ms +step:1475/1695 train_time:146408ms step_avg:99.26ms +step:1476/1695 train_time:146510ms step_avg:99.26ms +step:1477/1695 train_time:146613ms step_avg:99.26ms +step:1478/1695 train_time:146716ms step_avg:99.27ms +step:1479/1695 train_time:146818ms step_avg:99.27ms +step:1480/1695 train_time:146919ms step_avg:99.27ms +step:1481/1695 train_time:147021ms step_avg:99.27ms +step:1482/1695 train_time:147122ms step_avg:99.27ms +step:1483/1695 train_time:147224ms step_avg:99.27ms +step:1484/1695 train_time:147326ms step_avg:99.28ms +step:1485/1695 train_time:147429ms step_avg:99.28ms +step:1486/1695 train_time:147530ms step_avg:99.28ms +step:1487/1695 train_time:147631ms step_avg:99.28ms +step:1488/1695 train_time:147734ms step_avg:99.28ms +step:1489/1695 train_time:147837ms step_avg:99.29ms +step:1490/1695 train_time:147941ms step_avg:99.29ms +step:1491/1695 train_time:148042ms step_avg:99.29ms +step:1492/1695 train_time:148143ms step_avg:99.29ms +step:1493/1695 train_time:148244ms step_avg:99.29ms +step:1494/1695 train_time:148345ms step_avg:99.29ms +step:1495/1695 train_time:148447ms step_avg:99.30ms +step:1496/1695 train_time:148548ms step_avg:99.30ms +step:1497/1695 train_time:148649ms step_avg:99.30ms +step:1498/1695 train_time:148752ms step_avg:99.30ms +step:1499/1695 train_time:148855ms step_avg:99.30ms +step:1500/1695 train_time:148957ms step_avg:99.30ms +step:1500/1695 val_loss:3.3207 train_time:149058ms step_avg:99.37ms +step:1501/1695 train_time:149083ms step_avg:99.32ms +step:1502/1695 train_time:149173ms step_avg:99.32ms +step:1503/1695 train_time:149276ms step_avg:99.32ms +step:1504/1695 train_time:149377ms step_avg:99.32ms +step:1505/1695 train_time:149478ms step_avg:99.32ms +step:1506/1695 train_time:149579ms step_avg:99.32ms +step:1507/1695 train_time:149679ms step_avg:99.32ms 
+step:1508/1695 train_time:149779ms step_avg:99.32ms +step:1509/1695 train_time:149881ms step_avg:99.32ms +step:1510/1695 train_time:149982ms step_avg:99.33ms +step:1511/1695 train_time:150086ms step_avg:99.33ms +step:1512/1695 train_time:150190ms step_avg:99.33ms +step:1513/1695 train_time:150292ms step_avg:99.33ms +step:1514/1695 train_time:150394ms step_avg:99.34ms +step:1515/1695 train_time:150499ms step_avg:99.34ms +step:1516/1695 train_time:150600ms step_avg:99.34ms +step:1517/1695 train_time:150700ms step_avg:99.34ms +step:1518/1695 train_time:150801ms step_avg:99.34ms +step:1519/1695 train_time:150904ms step_avg:99.34ms +step:1520/1695 train_time:151006ms step_avg:99.35ms +step:1521/1695 train_time:151108ms step_avg:99.35ms +step:1522/1695 train_time:151210ms step_avg:99.35ms +step:1523/1695 train_time:151313ms step_avg:99.35ms +step:1524/1695 train_time:151417ms step_avg:99.35ms +step:1525/1695 train_time:151520ms step_avg:99.36ms +step:1526/1695 train_time:151622ms step_avg:99.36ms +step:1527/1695 train_time:151724ms step_avg:99.36ms +step:1528/1695 train_time:151830ms step_avg:99.37ms +step:1529/1695 train_time:151931ms step_avg:99.37ms +step:1530/1695 train_time:152034ms step_avg:99.37ms +step:1531/1695 train_time:152135ms step_avg:99.37ms +step:1532/1695 train_time:152237ms step_avg:99.37ms +step:1533/1695 train_time:152338ms step_avg:99.37ms +step:1534/1695 train_time:152440ms step_avg:99.37ms +step:1535/1695 train_time:152543ms step_avg:99.38ms +step:1536/1695 train_time:152644ms step_avg:99.38ms +step:1537/1695 train_time:152746ms step_avg:99.38ms +step:1538/1695 train_time:152848ms step_avg:99.38ms +step:1539/1695 train_time:152950ms step_avg:99.38ms +step:1540/1695 train_time:153052ms step_avg:99.38ms +step:1541/1695 train_time:153156ms step_avg:99.39ms +step:1542/1695 train_time:153259ms step_avg:99.39ms +step:1543/1695 train_time:153362ms step_avg:99.39ms +step:1544/1695 train_time:153464ms step_avg:99.39ms +step:1545/1695 train_time:153566ms step_avg:99.40ms +step:1546/1695 train_time:153667ms step_avg:99.40ms +step:1547/1695 train_time:153769ms step_avg:99.40ms +step:1548/1695 train_time:153871ms step_avg:99.40ms +step:1549/1695 train_time:153974ms step_avg:99.40ms +step:1550/1695 train_time:154075ms step_avg:99.40ms +step:1551/1695 train_time:154178ms step_avg:99.41ms +step:1552/1695 train_time:154280ms step_avg:99.41ms +step:1553/1695 train_time:154382ms step_avg:99.41ms +step:1554/1695 train_time:154483ms step_avg:99.41ms +step:1555/1695 train_time:154585ms step_avg:99.41ms +step:1556/1695 train_time:154686ms step_avg:99.41ms +step:1557/1695 train_time:154790ms step_avg:99.42ms +step:1558/1695 train_time:154894ms step_avg:99.42ms +step:1559/1695 train_time:154997ms step_avg:99.42ms +step:1560/1695 train_time:155098ms step_avg:99.42ms +step:1561/1695 train_time:155199ms step_avg:99.42ms +step:1562/1695 train_time:155301ms step_avg:99.42ms +step:1563/1695 train_time:155406ms step_avg:99.43ms +step:1564/1695 train_time:155508ms step_avg:99.43ms +step:1565/1695 train_time:155609ms step_avg:99.43ms +step:1566/1695 train_time:155710ms step_avg:99.43ms +step:1567/1695 train_time:155812ms step_avg:99.43ms +step:1568/1695 train_time:155913ms step_avg:99.43ms +step:1569/1695 train_time:156014ms step_avg:99.44ms +step:1570/1695 train_time:156118ms step_avg:99.44ms +step:1571/1695 train_time:156219ms step_avg:99.44ms +step:1572/1695 train_time:156320ms step_avg:99.44ms +step:1573/1695 train_time:156422ms step_avg:99.44ms +step:1574/1695 train_time:156523ms step_avg:99.44ms 
+step:1575/1695 train_time:156625ms step_avg:99.44ms +step:1576/1695 train_time:156728ms step_avg:99.45ms +step:1577/1695 train_time:156832ms step_avg:99.45ms +step:1578/1695 train_time:156933ms step_avg:99.45ms +step:1579/1695 train_time:157035ms step_avg:99.45ms +step:1580/1695 train_time:157137ms step_avg:99.45ms +step:1581/1695 train_time:157238ms step_avg:99.45ms +step:1582/1695 train_time:157339ms step_avg:99.46ms +step:1583/1695 train_time:157442ms step_avg:99.46ms +step:1584/1695 train_time:157545ms step_avg:99.46ms +step:1585/1695 train_time:157647ms step_avg:99.46ms +step:1586/1695 train_time:157750ms step_avg:99.46ms +step:1587/1695 train_time:157852ms step_avg:99.47ms +step:1588/1695 train_time:157953ms step_avg:99.47ms +step:1589/1695 train_time:158055ms step_avg:99.47ms +step:1590/1695 train_time:158157ms step_avg:99.47ms +step:1591/1695 train_time:158258ms step_avg:99.47ms +step:1592/1695 train_time:158360ms step_avg:99.47ms +step:1593/1695 train_time:158461ms step_avg:99.47ms +step:1594/1695 train_time:158565ms step_avg:99.48ms +step:1595/1695 train_time:158668ms step_avg:99.48ms +step:1596/1695 train_time:158771ms step_avg:99.48ms +step:1597/1695 train_time:158873ms step_avg:99.48ms +step:1598/1695 train_time:158975ms step_avg:99.48ms +step:1599/1695 train_time:159076ms step_avg:99.48ms +step:1600/1695 train_time:159177ms step_avg:99.49ms +step:1601/1695 train_time:159279ms step_avg:99.49ms +step:1602/1695 train_time:159381ms step_avg:99.49ms +step:1603/1695 train_time:159482ms step_avg:99.49ms +step:1604/1695 train_time:159583ms step_avg:99.49ms +step:1605/1695 train_time:159685ms step_avg:99.49ms +step:1606/1695 train_time:159788ms step_avg:99.49ms +step:1607/1695 train_time:159890ms step_avg:99.50ms +step:1608/1695 train_time:159992ms step_avg:99.50ms +step:1609/1695 train_time:160093ms step_avg:99.50ms +step:1610/1695 train_time:160197ms step_avg:99.50ms +step:1611/1695 train_time:160298ms step_avg:99.50ms +step:1612/1695 train_time:160399ms step_avg:99.50ms +step:1613/1695 train_time:160500ms step_avg:99.50ms +step:1614/1695 train_time:160600ms step_avg:99.50ms +step:1615/1695 train_time:160701ms step_avg:99.51ms +step:1616/1695 train_time:160802ms step_avg:99.51ms +step:1617/1695 train_time:160905ms step_avg:99.51ms +step:1618/1695 train_time:161008ms step_avg:99.51ms +step:1619/1695 train_time:161111ms step_avg:99.51ms +step:1620/1695 train_time:161215ms step_avg:99.52ms +step:1621/1695 train_time:161316ms step_avg:99.52ms +step:1622/1695 train_time:161420ms step_avg:99.52ms +step:1623/1695 train_time:161522ms step_avg:99.52ms +step:1624/1695 train_time:161623ms step_avg:99.52ms +step:1625/1695 train_time:161727ms step_avg:99.52ms +step:1625/1695 val_loss:3.2913 train_time:161827ms step_avg:99.59ms +step:1626/1695 train_time:161853ms step_avg:99.54ms +step:1627/1695 train_time:161939ms step_avg:99.53ms +step:1628/1695 train_time:162041ms step_avg:99.53ms +step:1629/1695 train_time:162144ms step_avg:99.54ms +step:1630/1695 train_time:162245ms step_avg:99.54ms +step:1631/1695 train_time:162346ms step_avg:99.54ms +step:1632/1695 train_time:162448ms step_avg:99.54ms +step:1633/1695 train_time:162548ms step_avg:99.54ms +step:1634/1695 train_time:162651ms step_avg:99.54ms +step:1635/1695 train_time:162753ms step_avg:99.54ms +step:1636/1695 train_time:162856ms step_avg:99.55ms +step:1637/1695 train_time:162960ms step_avg:99.55ms +step:1638/1695 train_time:163064ms step_avg:99.55ms +step:1639/1695 train_time:163167ms step_avg:99.55ms +step:1640/1695 train_time:163268ms 
step_avg:99.55ms +step:1641/1695 train_time:163371ms step_avg:99.56ms +step:1642/1695 train_time:163473ms step_avg:99.56ms +step:1643/1695 train_time:163574ms step_avg:99.56ms +step:1644/1695 train_time:163676ms step_avg:99.56ms +step:1645/1695 train_time:163781ms step_avg:99.56ms +step:1646/1695 train_time:163884ms step_avg:99.57ms +step:1647/1695 train_time:163990ms step_avg:99.57ms +step:1648/1695 train_time:164093ms step_avg:99.57ms +step:1649/1695 train_time:164196ms step_avg:99.57ms +step:1650/1695 train_time:164298ms step_avg:99.57ms +step:1651/1695 train_time:164400ms step_avg:99.58ms +step:1652/1695 train_time:164504ms step_avg:99.58ms +step:1653/1695 train_time:164608ms step_avg:99.58ms +step:1654/1695 train_time:164710ms step_avg:99.58ms +step:1655/1695 train_time:164814ms step_avg:99.59ms +step:1656/1695 train_time:164917ms step_avg:99.59ms +step:1657/1695 train_time:165019ms step_avg:99.59ms +step:1658/1695 train_time:165122ms step_avg:99.59ms +step:1659/1695 train_time:165228ms step_avg:99.60ms +step:1660/1695 train_time:165330ms step_avg:99.60ms +step:1661/1695 train_time:165434ms step_avg:99.60ms +step:1662/1695 train_time:165539ms step_avg:99.60ms +step:1663/1695 train_time:165642ms step_avg:99.60ms +step:1664/1695 train_time:165745ms step_avg:99.61ms +step:1665/1695 train_time:165850ms step_avg:99.61ms +step:1666/1695 train_time:165953ms step_avg:99.61ms +step:1667/1695 train_time:166055ms step_avg:99.61ms +step:1668/1695 train_time:166159ms step_avg:99.62ms +step:1669/1695 train_time:166265ms step_avg:99.62ms +step:1670/1695 train_time:166367ms step_avg:99.62ms +step:1671/1695 train_time:166469ms step_avg:99.62ms +step:1672/1695 train_time:166572ms step_avg:99.62ms +step:1673/1695 train_time:166674ms step_avg:99.63ms +step:1674/1695 train_time:166775ms step_avg:99.63ms +step:1675/1695 train_time:166878ms step_avg:99.63ms +step:1676/1695 train_time:166984ms step_avg:99.63ms +step:1677/1695 train_time:167086ms step_avg:99.63ms +step:1678/1695 train_time:167189ms step_avg:99.64ms +step:1679/1695 train_time:167293ms step_avg:99.64ms +step:1680/1695 train_time:167394ms step_avg:99.64ms +step:1681/1695 train_time:167496ms step_avg:99.64ms +step:1682/1695 train_time:167602ms step_avg:99.64ms +step:1683/1695 train_time:167705ms step_avg:99.65ms +step:1684/1695 train_time:167809ms step_avg:99.65ms +step:1685/1695 train_time:167911ms step_avg:99.65ms +step:1686/1695 train_time:168013ms step_avg:99.65ms +step:1687/1695 train_time:168115ms step_avg:99.65ms +step:1688/1695 train_time:168218ms step_avg:99.66ms +step:1689/1695 train_time:168319ms step_avg:99.66ms +step:1690/1695 train_time:168422ms step_avg:99.66ms +step:1691/1695 train_time:168525ms step_avg:99.66ms +step:1692/1695 train_time:168628ms step_avg:99.66ms +step:1693/1695 train_time:168731ms step_avg:99.66ms +step:1694/1695 train_time:168834ms step_avg:99.67ms +step:1695/1695 train_time:168938ms step_avg:99.67ms +step:1695/1695 val_loss:3.2782 train_time:169037ms step_avg:99.73ms +peak memory allocated: 34005 MiB reserved: 49660 MiB diff --git a/records/082325_SparseAttnGate/c6be54c1-12d0-45a3-83cb-41cad0868d15.txt b/records/082325_SparseAttnGate/c6be54c1-12d0-45a3-83cb-41cad0868d15.txt new file mode 100644 index 000000000..d1010d1e1 --- /dev/null +++ b/records/082325_SparseAttnGate/c6be54c1-12d0-45a3-83cb-41cad0868d15.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
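+
+    Concretely, the Newton-Schulz variant used here (newton_schulz_triton above) first
+    divides the update X by its Frobenius norm, so its spectral norm is at most 1, and
+    then runs five quintic iterations
+
+        X <- a*X + (b*A + c*(A @ A)) @ X,   where A = X @ X.T
+
+    with coefficients (a, b, c) = (3.4445, -4.7750, 2.0315), entirely in bfloat16.
+
+    Illustrative usage sketch (hedged: the variable names below are assumed, not part
+    of this record, and step() must run under an initialized torch.distributed process
+    group on every rank, since it issues reduce_scatter/all_gather collectives):
+
+        hidden_matrix_params = [...]  # 2D hidden weights only; see the Warning above
+        opt = Muon(hidden_matrix_params, lr=0.02, weight_decay=0.01, momentum=0.95)
+        loss.backward()
+        opt.step()  # distributed momentum-SGD step with orthogonalized updates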
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
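+        # per-parameter learning-rate multipliers; the optimizers read these back via getattr(p, "lr_mul", 1.0)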
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from a suggestion by @Grad62304977, following the Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
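+        # each rank computed a mean loss over its own slice of the validation set, so average across ranks before logging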
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:56:00 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 31C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 30C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 323795 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 323796 C /usr/bin/python3 614MiB | +| 0 N/A N/A 323797 C /usr/bin/python3 614MiB | +| 0 N/A N/A 323798 C /usr/bin/python3 614MiB | +| 0 N/A N/A 323799 C /usr/bin/python3 614MiB | +| 0 N/A N/A 323800 C /usr/bin/python3 614MiB | +| 0 N/A N/A 323801 C /usr/bin/python3 614MiB | +| 0 N/A N/A 323802 C /usr/bin/python3 614MiB | +| 1 N/A N/A 323796 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 323797 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 323798 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 323799 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 323800 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 323801 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 323802 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:154ms step_avg:153.64ms +step:2/1695 train_time:180ms step_avg:90.19ms +step:3/1695 train_time:250ms step_avg:83.26ms +step:4/1695 train_time:342ms step_avg:85.41ms +step:5/1695 train_time:434ms step_avg:86.81ms +step:6/1695 train_time:526ms step_avg:87.73ms +step:7/1695 train_time:620ms step_avg:88.53ms +step:8/1695 
train_time:712ms step_avg:89.01ms +step:9/1695 train_time:805ms step_avg:89.45ms +step:10/1695 train_time:898ms step_avg:89.83ms +step:11/1695 train_time:991ms step_avg:90.09ms +step:12/1695 train_time:1087ms step_avg:90.60ms +step:13/1695 train_time:1185ms step_avg:91.12ms +step:14/1695 train_time:1280ms step_avg:91.40ms +step:15/1695 train_time:1373ms step_avg:91.53ms +step:16/1695 train_time:1466ms step_avg:91.65ms +step:17/1695 train_time:1561ms step_avg:91.82ms +step:18/1695 train_time:1653ms step_avg:91.83ms +step:19/1695 train_time:1746ms step_avg:91.87ms +step:20/1695 train_time:1839ms step_avg:91.94ms +step:21/1695 train_time:1932ms step_avg:92.00ms +step:22/1695 train_time:2025ms step_avg:92.06ms +step:23/1695 train_time:2120ms step_avg:92.17ms +step:24/1695 train_time:2215ms step_avg:92.27ms +step:25/1695 train_time:2310ms step_avg:92.40ms +step:26/1695 train_time:2404ms step_avg:92.46ms +step:27/1695 train_time:2498ms step_avg:92.53ms +step:28/1695 train_time:2592ms step_avg:92.57ms +step:29/1695 train_time:2686ms step_avg:92.63ms +step:30/1695 train_time:2779ms step_avg:92.64ms +step:31/1695 train_time:2872ms step_avg:92.65ms +step:32/1695 train_time:2966ms step_avg:92.68ms +step:33/1695 train_time:3061ms step_avg:92.75ms +step:34/1695 train_time:3154ms step_avg:92.77ms +step:35/1695 train_time:3248ms step_avg:92.80ms +step:36/1695 train_time:3342ms step_avg:92.83ms +step:37/1695 train_time:3435ms step_avg:92.85ms +step:38/1695 train_time:3529ms step_avg:92.88ms +step:39/1695 train_time:3624ms step_avg:92.92ms +step:40/1695 train_time:3717ms step_avg:92.93ms +step:41/1695 train_time:3811ms step_avg:92.94ms +step:42/1695 train_time:3905ms step_avg:92.97ms +step:43/1695 train_time:3998ms step_avg:92.98ms +step:44/1695 train_time:4093ms step_avg:93.02ms +step:45/1695 train_time:4186ms step_avg:93.02ms +step:46/1695 train_time:4280ms step_avg:93.04ms +step:47/1695 train_time:4374ms step_avg:93.06ms +step:48/1695 train_time:4468ms step_avg:93.09ms +step:49/1695 train_time:4562ms step_avg:93.10ms +step:50/1695 train_time:4655ms step_avg:93.10ms +step:51/1695 train_time:4749ms step_avg:93.12ms +step:52/1695 train_time:4843ms step_avg:93.14ms +step:53/1695 train_time:4937ms step_avg:93.16ms +step:54/1695 train_time:5031ms step_avg:93.17ms +step:55/1695 train_time:5125ms step_avg:93.19ms +step:56/1695 train_time:5219ms step_avg:93.19ms +step:57/1695 train_time:5312ms step_avg:93.20ms +step:58/1695 train_time:5405ms step_avg:93.20ms +step:59/1695 train_time:5500ms step_avg:93.21ms +step:60/1695 train_time:5593ms step_avg:93.22ms +step:61/1695 train_time:5687ms step_avg:93.24ms +step:62/1695 train_time:5782ms step_avg:93.26ms +step:63/1695 train_time:5876ms step_avg:93.27ms +step:64/1695 train_time:5970ms step_avg:93.29ms +step:65/1695 train_time:6063ms step_avg:93.28ms +step:66/1695 train_time:6157ms step_avg:93.28ms +step:67/1695 train_time:6250ms step_avg:93.29ms +step:68/1695 train_time:6344ms step_avg:93.30ms +step:69/1695 train_time:6438ms step_avg:93.30ms +step:70/1695 train_time:6532ms step_avg:93.31ms +step:71/1695 train_time:6625ms step_avg:93.30ms +step:72/1695 train_time:6718ms step_avg:93.31ms +step:73/1695 train_time:6812ms step_avg:93.32ms +step:74/1695 train_time:6906ms step_avg:93.33ms +step:75/1695 train_time:7000ms step_avg:93.33ms +step:76/1695 train_time:7093ms step_avg:93.33ms +step:77/1695 train_time:7186ms step_avg:93.33ms +step:78/1695 train_time:7280ms step_avg:93.33ms +step:79/1695 train_time:7374ms step_avg:93.34ms +step:80/1695 train_time:7467ms 
step_avg:93.33ms +step:81/1695 train_time:7562ms step_avg:93.35ms +step:82/1695 train_time:7655ms step_avg:93.35ms +step:83/1695 train_time:7749ms step_avg:93.36ms +step:84/1695 train_time:7843ms step_avg:93.36ms +step:85/1695 train_time:7936ms step_avg:93.36ms +step:86/1695 train_time:8029ms step_avg:93.36ms +step:87/1695 train_time:8123ms step_avg:93.37ms +step:88/1695 train_time:8216ms step_avg:93.37ms +step:89/1695 train_time:8309ms step_avg:93.37ms +step:90/1695 train_time:8403ms step_avg:93.37ms +step:91/1695 train_time:8497ms step_avg:93.37ms +step:92/1695 train_time:8590ms step_avg:93.37ms +step:93/1695 train_time:8685ms step_avg:93.38ms +step:94/1695 train_time:8779ms step_avg:93.39ms +step:95/1695 train_time:8872ms step_avg:93.39ms +step:96/1695 train_time:8966ms step_avg:93.40ms +step:97/1695 train_time:9060ms step_avg:93.40ms +step:98/1695 train_time:9152ms step_avg:93.39ms +step:99/1695 train_time:9246ms step_avg:93.39ms +step:100/1695 train_time:9340ms step_avg:93.40ms +step:101/1695 train_time:9433ms step_avg:93.40ms +step:102/1695 train_time:9527ms step_avg:93.40ms +step:103/1695 train_time:9621ms step_avg:93.40ms +step:104/1695 train_time:9714ms step_avg:93.40ms +step:105/1695 train_time:9808ms step_avg:93.41ms +step:106/1695 train_time:9902ms step_avg:93.41ms +step:107/1695 train_time:9996ms step_avg:93.42ms +step:108/1695 train_time:10090ms step_avg:93.43ms +step:109/1695 train_time:10185ms step_avg:93.44ms +step:110/1695 train_time:10278ms step_avg:93.43ms +step:111/1695 train_time:10371ms step_avg:93.43ms +step:112/1695 train_time:10464ms step_avg:93.43ms +step:113/1695 train_time:10557ms step_avg:93.43ms +step:114/1695 train_time:10651ms step_avg:93.43ms +step:115/1695 train_time:10744ms step_avg:93.42ms +step:116/1695 train_time:10837ms step_avg:93.42ms +step:117/1695 train_time:10931ms step_avg:93.43ms +step:118/1695 train_time:11025ms step_avg:93.44ms +step:119/1695 train_time:11119ms step_avg:93.43ms +step:120/1695 train_time:11212ms step_avg:93.44ms +step:121/1695 train_time:11306ms step_avg:93.44ms +step:122/1695 train_time:11400ms step_avg:93.44ms +step:123/1695 train_time:11493ms step_avg:93.44ms +step:124/1695 train_time:11587ms step_avg:93.44ms +step:125/1695 train_time:11680ms step_avg:93.44ms +step:125/1695 val_loss:4.6007 train_time:11772ms step_avg:94.18ms +step:126/1695 train_time:11800ms step_avg:93.65ms +step:127/1695 train_time:11876ms step_avg:93.52ms +step:128/1695 train_time:11979ms step_avg:93.59ms +step:129/1695 train_time:12076ms step_avg:93.61ms +step:130/1695 train_time:12170ms step_avg:93.62ms +step:131/1695 train_time:12263ms step_avg:93.61ms +step:132/1695 train_time:12357ms step_avg:93.61ms +step:133/1695 train_time:12450ms step_avg:93.61ms +step:134/1695 train_time:12543ms step_avg:93.61ms +step:135/1695 train_time:12637ms step_avg:93.60ms +step:136/1695 train_time:12731ms step_avg:93.61ms +step:137/1695 train_time:12827ms step_avg:93.63ms +step:138/1695 train_time:12924ms step_avg:93.65ms +step:139/1695 train_time:13021ms step_avg:93.67ms +step:140/1695 train_time:13116ms step_avg:93.69ms +step:141/1695 train_time:13211ms step_avg:93.70ms +step:142/1695 train_time:13305ms step_avg:93.70ms +step:143/1695 train_time:13399ms step_avg:93.70ms +step:144/1695 train_time:13492ms step_avg:93.70ms +step:145/1695 train_time:13586ms step_avg:93.70ms +step:146/1695 train_time:13679ms step_avg:93.69ms +step:147/1695 train_time:13773ms step_avg:93.70ms +step:148/1695 train_time:13868ms step_avg:93.70ms +step:149/1695 train_time:13962ms 
step_avg:93.71ms +step:150/1695 train_time:14058ms step_avg:93.72ms +step:151/1695 train_time:14153ms step_avg:93.73ms +step:152/1695 train_time:14247ms step_avg:93.73ms +step:153/1695 train_time:14341ms step_avg:93.73ms +step:154/1695 train_time:14435ms step_avg:93.74ms +step:155/1695 train_time:14529ms step_avg:93.74ms +step:156/1695 train_time:14623ms step_avg:93.73ms +step:157/1695 train_time:14717ms step_avg:93.74ms +step:158/1695 train_time:14811ms step_avg:93.74ms +step:159/1695 train_time:14907ms step_avg:93.75ms +step:160/1695 train_time:15000ms step_avg:93.75ms +step:161/1695 train_time:15094ms step_avg:93.75ms +step:162/1695 train_time:15188ms step_avg:93.76ms +step:163/1695 train_time:15283ms step_avg:93.76ms +step:164/1695 train_time:15377ms step_avg:93.76ms +step:165/1695 train_time:15471ms step_avg:93.76ms +step:166/1695 train_time:15565ms step_avg:93.76ms +step:167/1695 train_time:15658ms step_avg:93.76ms +step:168/1695 train_time:15752ms step_avg:93.76ms +step:169/1695 train_time:15847ms step_avg:93.77ms +step:170/1695 train_time:15941ms step_avg:93.77ms +step:171/1695 train_time:16034ms step_avg:93.77ms +step:172/1695 train_time:16129ms step_avg:93.77ms +step:173/1695 train_time:16222ms step_avg:93.77ms +step:174/1695 train_time:16316ms step_avg:93.77ms +step:175/1695 train_time:16411ms step_avg:93.78ms +step:176/1695 train_time:16504ms step_avg:93.77ms +step:177/1695 train_time:16599ms step_avg:93.78ms +step:178/1695 train_time:16693ms step_avg:93.78ms +step:179/1695 train_time:16787ms step_avg:93.78ms +step:180/1695 train_time:16881ms step_avg:93.78ms +step:181/1695 train_time:16974ms step_avg:93.78ms +step:182/1695 train_time:17068ms step_avg:93.78ms +step:183/1695 train_time:17161ms step_avg:93.78ms +step:184/1695 train_time:17255ms step_avg:93.78ms +step:185/1695 train_time:17349ms step_avg:93.78ms +step:186/1695 train_time:17443ms step_avg:93.78ms +step:187/1695 train_time:17538ms step_avg:93.78ms +step:188/1695 train_time:17632ms step_avg:93.78ms +step:189/1695 train_time:17726ms step_avg:93.79ms +step:190/1695 train_time:17820ms step_avg:93.79ms +step:191/1695 train_time:17915ms step_avg:93.79ms +step:192/1695 train_time:18009ms step_avg:93.80ms +step:193/1695 train_time:18102ms step_avg:93.79ms +step:194/1695 train_time:18197ms step_avg:93.80ms +step:195/1695 train_time:18290ms step_avg:93.80ms +step:196/1695 train_time:18384ms step_avg:93.80ms +step:197/1695 train_time:18478ms step_avg:93.80ms +step:198/1695 train_time:18572ms step_avg:93.80ms +step:199/1695 train_time:18667ms step_avg:93.80ms +step:200/1695 train_time:18761ms step_avg:93.80ms +step:201/1695 train_time:18854ms step_avg:93.80ms +step:202/1695 train_time:18949ms step_avg:93.81ms +step:203/1695 train_time:19043ms step_avg:93.81ms +step:204/1695 train_time:19137ms step_avg:93.81ms +step:205/1695 train_time:19231ms step_avg:93.81ms +step:206/1695 train_time:19325ms step_avg:93.81ms +step:207/1695 train_time:19419ms step_avg:93.81ms +step:208/1695 train_time:19514ms step_avg:93.82ms +step:209/1695 train_time:19607ms step_avg:93.81ms +step:210/1695 train_time:19701ms step_avg:93.81ms +step:211/1695 train_time:19796ms step_avg:93.82ms +step:212/1695 train_time:19889ms step_avg:93.82ms +step:213/1695 train_time:19984ms step_avg:93.82ms +step:214/1695 train_time:20078ms step_avg:93.82ms +step:215/1695 train_time:20173ms step_avg:93.83ms +step:216/1695 train_time:20267ms step_avg:93.83ms +step:217/1695 train_time:20361ms step_avg:93.83ms +step:218/1695 train_time:20455ms step_avg:93.83ms +step:219/1695 
train_time:20548ms step_avg:93.83ms +step:220/1695 train_time:20642ms step_avg:93.83ms +step:221/1695 train_time:20737ms step_avg:93.83ms +step:222/1695 train_time:20830ms step_avg:93.83ms +step:223/1695 train_time:20924ms step_avg:93.83ms +step:224/1695 train_time:21019ms step_avg:93.83ms +step:225/1695 train_time:21113ms step_avg:93.83ms +step:226/1695 train_time:21207ms step_avg:93.83ms +step:227/1695 train_time:21301ms step_avg:93.84ms +step:228/1695 train_time:21394ms step_avg:93.83ms +step:229/1695 train_time:21489ms step_avg:93.84ms +step:230/1695 train_time:21583ms step_avg:93.84ms +step:231/1695 train_time:21676ms step_avg:93.84ms +step:232/1695 train_time:21770ms step_avg:93.84ms +step:233/1695 train_time:21864ms step_avg:93.84ms +step:234/1695 train_time:21958ms step_avg:93.84ms +step:235/1695 train_time:22052ms step_avg:93.84ms +step:236/1695 train_time:22147ms step_avg:93.84ms +step:237/1695 train_time:22241ms step_avg:93.84ms +step:238/1695 train_time:22336ms step_avg:93.85ms +step:239/1695 train_time:22430ms step_avg:93.85ms +step:240/1695 train_time:22525ms step_avg:93.85ms +step:241/1695 train_time:22619ms step_avg:93.86ms +step:242/1695 train_time:22714ms step_avg:93.86ms +step:243/1695 train_time:22808ms step_avg:93.86ms +step:244/1695 train_time:22901ms step_avg:93.86ms +step:245/1695 train_time:22995ms step_avg:93.86ms +step:246/1695 train_time:23090ms step_avg:93.86ms +step:247/1695 train_time:23183ms step_avg:93.86ms +step:248/1695 train_time:23278ms step_avg:93.86ms +step:249/1695 train_time:23374ms step_avg:93.87ms +step:250/1695 train_time:23468ms step_avg:93.87ms +step:250/1695 val_loss:4.0786 train_time:23559ms step_avg:94.24ms +step:251/1695 train_time:23587ms step_avg:93.97ms +step:252/1695 train_time:23665ms step_avg:93.91ms +step:253/1695 train_time:23761ms step_avg:93.92ms +step:254/1695 train_time:23855ms step_avg:93.92ms +step:255/1695 train_time:23949ms step_avg:93.92ms +step:256/1695 train_time:24044ms step_avg:93.92ms +step:257/1695 train_time:24137ms step_avg:93.92ms +step:258/1695 train_time:24231ms step_avg:93.92ms +step:259/1695 train_time:24325ms step_avg:93.92ms +step:260/1695 train_time:24418ms step_avg:93.92ms +step:261/1695 train_time:24514ms step_avg:93.92ms +step:262/1695 train_time:24612ms step_avg:93.94ms +step:263/1695 train_time:24709ms step_avg:93.95ms +step:264/1695 train_time:24805ms step_avg:93.96ms +step:265/1695 train_time:24899ms step_avg:93.96ms +step:266/1695 train_time:24994ms step_avg:93.96ms +step:267/1695 train_time:25088ms step_avg:93.96ms +step:268/1695 train_time:25182ms step_avg:93.96ms +step:269/1695 train_time:25275ms step_avg:93.96ms +step:270/1695 train_time:25370ms step_avg:93.96ms +step:271/1695 train_time:25463ms step_avg:93.96ms +step:272/1695 train_time:25558ms step_avg:93.96ms +step:273/1695 train_time:25655ms step_avg:93.98ms +step:274/1695 train_time:25752ms step_avg:93.98ms +step:275/1695 train_time:25847ms step_avg:93.99ms +step:276/1695 train_time:25940ms step_avg:93.99ms +step:277/1695 train_time:26035ms step_avg:93.99ms +step:278/1695 train_time:26131ms step_avg:94.00ms +step:279/1695 train_time:26225ms step_avg:94.00ms +step:280/1695 train_time:26319ms step_avg:94.00ms +step:281/1695 train_time:26412ms step_avg:93.99ms +step:282/1695 train_time:26506ms step_avg:93.99ms +step:283/1695 train_time:26601ms step_avg:94.00ms +step:284/1695 train_time:26697ms step_avg:94.00ms +step:285/1695 train_time:26792ms step_avg:94.01ms +step:286/1695 train_time:26887ms step_avg:94.01ms +step:287/1695 train_time:26980ms 
step_avg:94.01ms +step:288/1695 train_time:27075ms step_avg:94.01ms +step:289/1695 train_time:27170ms step_avg:94.01ms +step:290/1695 train_time:27265ms step_avg:94.02ms +step:291/1695 train_time:27359ms step_avg:94.02ms +step:292/1695 train_time:27453ms step_avg:94.02ms +step:293/1695 train_time:27546ms step_avg:94.01ms +step:294/1695 train_time:27641ms step_avg:94.02ms +step:295/1695 train_time:27736ms step_avg:94.02ms +step:296/1695 train_time:27830ms step_avg:94.02ms +step:297/1695 train_time:27925ms step_avg:94.02ms +step:298/1695 train_time:28019ms step_avg:94.02ms +step:299/1695 train_time:28113ms step_avg:94.02ms +step:300/1695 train_time:28208ms step_avg:94.03ms +step:301/1695 train_time:28303ms step_avg:94.03ms +step:302/1695 train_time:28397ms step_avg:94.03ms +step:303/1695 train_time:28491ms step_avg:94.03ms +step:304/1695 train_time:28585ms step_avg:94.03ms +step:305/1695 train_time:28679ms step_avg:94.03ms +step:306/1695 train_time:28774ms step_avg:94.03ms +step:307/1695 train_time:28869ms step_avg:94.04ms +step:308/1695 train_time:28963ms step_avg:94.04ms +step:309/1695 train_time:29058ms step_avg:94.04ms +step:310/1695 train_time:29152ms step_avg:94.04ms +step:311/1695 train_time:29247ms step_avg:94.04ms +step:312/1695 train_time:29341ms step_avg:94.04ms +step:313/1695 train_time:29435ms step_avg:94.04ms +step:314/1695 train_time:29530ms step_avg:94.04ms +step:315/1695 train_time:29624ms step_avg:94.04ms +step:316/1695 train_time:29718ms step_avg:94.04ms +step:317/1695 train_time:29813ms step_avg:94.05ms +step:318/1695 train_time:29907ms step_avg:94.05ms +step:319/1695 train_time:30001ms step_avg:94.05ms +step:320/1695 train_time:30096ms step_avg:94.05ms +step:321/1695 train_time:30192ms step_avg:94.06ms +step:322/1695 train_time:30286ms step_avg:94.06ms +step:323/1695 train_time:30380ms step_avg:94.06ms +step:324/1695 train_time:30474ms step_avg:94.06ms +step:325/1695 train_time:30569ms step_avg:94.06ms +step:326/1695 train_time:30664ms step_avg:94.06ms +step:327/1695 train_time:30757ms step_avg:94.06ms +step:328/1695 train_time:30853ms step_avg:94.06ms +step:329/1695 train_time:30948ms step_avg:94.07ms +step:330/1695 train_time:31042ms step_avg:94.07ms +step:331/1695 train_time:31136ms step_avg:94.07ms +step:332/1695 train_time:31232ms step_avg:94.07ms +step:333/1695 train_time:31327ms step_avg:94.07ms +step:334/1695 train_time:31421ms step_avg:94.08ms +step:335/1695 train_time:31516ms step_avg:94.08ms +step:336/1695 train_time:31611ms step_avg:94.08ms +step:337/1695 train_time:31706ms step_avg:94.08ms +step:338/1695 train_time:31800ms step_avg:94.08ms +step:339/1695 train_time:31894ms step_avg:94.08ms +step:340/1695 train_time:31989ms step_avg:94.08ms +step:341/1695 train_time:32083ms step_avg:94.08ms +step:342/1695 train_time:32177ms step_avg:94.09ms +step:343/1695 train_time:32271ms step_avg:94.09ms +step:344/1695 train_time:32367ms step_avg:94.09ms +step:345/1695 train_time:32461ms step_avg:94.09ms +step:346/1695 train_time:32555ms step_avg:94.09ms +step:347/1695 train_time:32649ms step_avg:94.09ms +step:348/1695 train_time:32744ms step_avg:94.09ms +step:349/1695 train_time:32838ms step_avg:94.09ms +step:350/1695 train_time:32932ms step_avg:94.09ms +step:351/1695 train_time:33027ms step_avg:94.09ms +step:352/1695 train_time:33121ms step_avg:94.09ms +step:353/1695 train_time:33215ms step_avg:94.09ms +step:354/1695 train_time:33310ms step_avg:94.10ms +step:355/1695 train_time:33406ms step_avg:94.10ms +step:356/1695 train_time:33499ms step_avg:94.10ms +step:357/1695 
train_time:33593ms step_avg:94.10ms +step:358/1695 train_time:33688ms step_avg:94.10ms +step:359/1695 train_time:33781ms step_avg:94.10ms +step:360/1695 train_time:33876ms step_avg:94.10ms +step:361/1695 train_time:33972ms step_avg:94.10ms +step:362/1695 train_time:34065ms step_avg:94.10ms +step:363/1695 train_time:34159ms step_avg:94.10ms +step:364/1695 train_time:34255ms step_avg:94.11ms +step:365/1695 train_time:34349ms step_avg:94.11ms +step:366/1695 train_time:34444ms step_avg:94.11ms +step:367/1695 train_time:34538ms step_avg:94.11ms +step:368/1695 train_time:34632ms step_avg:94.11ms +step:369/1695 train_time:34727ms step_avg:94.11ms +step:370/1695 train_time:34822ms step_avg:94.11ms +step:371/1695 train_time:34916ms step_avg:94.11ms +step:372/1695 train_time:35010ms step_avg:94.11ms +step:373/1695 train_time:35104ms step_avg:94.11ms +step:374/1695 train_time:35199ms step_avg:94.11ms +step:375/1695 train_time:35293ms step_avg:94.12ms +step:375/1695 val_loss:3.8792 train_time:35386ms step_avg:94.36ms +step:376/1695 train_time:35413ms step_avg:94.18ms +step:377/1695 train_time:35491ms step_avg:94.14ms +step:378/1695 train_time:35593ms step_avg:94.16ms +step:379/1695 train_time:35690ms step_avg:94.17ms +step:380/1695 train_time:35785ms step_avg:94.17ms +step:381/1695 train_time:35881ms step_avg:94.17ms +step:382/1695 train_time:35976ms step_avg:94.18ms +step:383/1695 train_time:36071ms step_avg:94.18ms +step:384/1695 train_time:36167ms step_avg:94.18ms +step:385/1695 train_time:36262ms step_avg:94.19ms +step:386/1695 train_time:36357ms step_avg:94.19ms +step:387/1695 train_time:36453ms step_avg:94.20ms +step:388/1695 train_time:36552ms step_avg:94.21ms +step:389/1695 train_time:36650ms step_avg:94.22ms +step:390/1695 train_time:36748ms step_avg:94.22ms +step:391/1695 train_time:36844ms step_avg:94.23ms +step:392/1695 train_time:36940ms step_avg:94.24ms +step:393/1695 train_time:37036ms step_avg:94.24ms +step:394/1695 train_time:37132ms step_avg:94.24ms +step:395/1695 train_time:37228ms step_avg:94.25ms +step:396/1695 train_time:37323ms step_avg:94.25ms +step:397/1695 train_time:37419ms step_avg:94.25ms +step:398/1695 train_time:37515ms step_avg:94.26ms +step:399/1695 train_time:37612ms step_avg:94.27ms +step:400/1695 train_time:37710ms step_avg:94.27ms +step:401/1695 train_time:37807ms step_avg:94.28ms +step:402/1695 train_time:37903ms step_avg:94.29ms +step:403/1695 train_time:37999ms step_avg:94.29ms +step:404/1695 train_time:38095ms step_avg:94.30ms +step:405/1695 train_time:38190ms step_avg:94.30ms +step:406/1695 train_time:38286ms step_avg:94.30ms +step:407/1695 train_time:38382ms step_avg:94.30ms +step:408/1695 train_time:38477ms step_avg:94.31ms +step:409/1695 train_time:38573ms step_avg:94.31ms +step:410/1695 train_time:38670ms step_avg:94.32ms +step:411/1695 train_time:38766ms step_avg:94.32ms +step:412/1695 train_time:38862ms step_avg:94.33ms +step:413/1695 train_time:38958ms step_avg:94.33ms +step:414/1695 train_time:39054ms step_avg:94.33ms +step:415/1695 train_time:39151ms step_avg:94.34ms +step:416/1695 train_time:39246ms step_avg:94.34ms +step:417/1695 train_time:39342ms step_avg:94.34ms +step:418/1695 train_time:39438ms step_avg:94.35ms +step:419/1695 train_time:39534ms step_avg:94.35ms +step:420/1695 train_time:39630ms step_avg:94.36ms +step:421/1695 train_time:39726ms step_avg:94.36ms +step:422/1695 train_time:39823ms step_avg:94.37ms +step:423/1695 train_time:39920ms step_avg:94.37ms +step:424/1695 train_time:40016ms step_avg:94.38ms +step:425/1695 train_time:40111ms 
step_avg:94.38ms +step:426/1695 train_time:40208ms step_avg:94.38ms +step:427/1695 train_time:40304ms step_avg:94.39ms +step:428/1695 train_time:40401ms step_avg:94.39ms +step:429/1695 train_time:40495ms step_avg:94.39ms +step:430/1695 train_time:40591ms step_avg:94.40ms +step:431/1695 train_time:40687ms step_avg:94.40ms +step:432/1695 train_time:40783ms step_avg:94.41ms +step:433/1695 train_time:40878ms step_avg:94.41ms +step:434/1695 train_time:40974ms step_avg:94.41ms +step:435/1695 train_time:41070ms step_avg:94.41ms +step:436/1695 train_time:41166ms step_avg:94.42ms +step:437/1695 train_time:41263ms step_avg:94.42ms +step:438/1695 train_time:41359ms step_avg:94.43ms +step:439/1695 train_time:41455ms step_avg:94.43ms +step:440/1695 train_time:41550ms step_avg:94.43ms +step:441/1695 train_time:41647ms step_avg:94.44ms +step:442/1695 train_time:41743ms step_avg:94.44ms +step:443/1695 train_time:41840ms step_avg:94.45ms +step:444/1695 train_time:41936ms step_avg:94.45ms +step:445/1695 train_time:42033ms step_avg:94.46ms +step:446/1695 train_time:42129ms step_avg:94.46ms +step:447/1695 train_time:42225ms step_avg:94.46ms +step:448/1695 train_time:42321ms step_avg:94.47ms +step:449/1695 train_time:42417ms step_avg:94.47ms +step:450/1695 train_time:42513ms step_avg:94.47ms +step:451/1695 train_time:42610ms step_avg:94.48ms +step:452/1695 train_time:42706ms step_avg:94.48ms +step:453/1695 train_time:42802ms step_avg:94.49ms +step:454/1695 train_time:42898ms step_avg:94.49ms +step:455/1695 train_time:42994ms step_avg:94.49ms +step:456/1695 train_time:43090ms step_avg:94.50ms +step:457/1695 train_time:43187ms step_avg:94.50ms +step:458/1695 train_time:43283ms step_avg:94.50ms +step:459/1695 train_time:43379ms step_avg:94.51ms +step:460/1695 train_time:43475ms step_avg:94.51ms +step:461/1695 train_time:43571ms step_avg:94.51ms +step:462/1695 train_time:43667ms step_avg:94.52ms +step:463/1695 train_time:43764ms step_avg:94.52ms +step:464/1695 train_time:43860ms step_avg:94.53ms +step:465/1695 train_time:43956ms step_avg:94.53ms +step:466/1695 train_time:44052ms step_avg:94.53ms +step:467/1695 train_time:44149ms step_avg:94.54ms +step:468/1695 train_time:44246ms step_avg:94.54ms +step:469/1695 train_time:44343ms step_avg:94.55ms +step:470/1695 train_time:44439ms step_avg:94.55ms +step:471/1695 train_time:44535ms step_avg:94.55ms +step:472/1695 train_time:44631ms step_avg:94.56ms +step:473/1695 train_time:44727ms step_avg:94.56ms +step:474/1695 train_time:44823ms step_avg:94.56ms +step:475/1695 train_time:44920ms step_avg:94.57ms +step:476/1695 train_time:45016ms step_avg:94.57ms +step:477/1695 train_time:45112ms step_avg:94.57ms +step:478/1695 train_time:45208ms step_avg:94.58ms +step:479/1695 train_time:45305ms step_avg:94.58ms +step:480/1695 train_time:45401ms step_avg:94.59ms +step:481/1695 train_time:45497ms step_avg:94.59ms +step:482/1695 train_time:45593ms step_avg:94.59ms +step:483/1695 train_time:45688ms step_avg:94.59ms +step:484/1695 train_time:45785ms step_avg:94.60ms +step:485/1695 train_time:45881ms step_avg:94.60ms +step:486/1695 train_time:45977ms step_avg:94.60ms +step:487/1695 train_time:46073ms step_avg:94.61ms +step:488/1695 train_time:46170ms step_avg:94.61ms +step:489/1695 train_time:46267ms step_avg:94.61ms +step:490/1695 train_time:46363ms step_avg:94.62ms +step:491/1695 train_time:46460ms step_avg:94.62ms +step:492/1695 train_time:46555ms step_avg:94.62ms +step:493/1695 train_time:46650ms step_avg:94.63ms +step:494/1695 train_time:46747ms step_avg:94.63ms +step:495/1695 
train_time:46844ms step_avg:94.63ms +step:496/1695 train_time:46940ms step_avg:94.64ms +step:497/1695 train_time:47036ms step_avg:94.64ms +step:498/1695 train_time:47131ms step_avg:94.64ms +step:499/1695 train_time:47228ms step_avg:94.65ms +step:500/1695 train_time:47325ms step_avg:94.65ms +step:500/1695 val_loss:3.7347 train_time:47420ms step_avg:94.84ms +step:501/1695 train_time:47447ms step_avg:94.70ms +step:502/1695 train_time:47528ms step_avg:94.68ms +step:503/1695 train_time:47629ms step_avg:94.69ms +step:504/1695 train_time:47725ms step_avg:94.69ms +step:505/1695 train_time:47820ms step_avg:94.69ms +step:506/1695 train_time:47916ms step_avg:94.69ms +step:507/1695 train_time:48012ms step_avg:94.70ms +step:508/1695 train_time:48107ms step_avg:94.70ms +step:509/1695 train_time:48203ms step_avg:94.70ms +step:510/1695 train_time:48299ms step_avg:94.70ms +step:511/1695 train_time:48395ms step_avg:94.71ms +step:512/1695 train_time:48499ms step_avg:94.72ms +step:513/1695 train_time:48590ms step_avg:94.72ms +step:514/1695 train_time:48688ms step_avg:94.72ms +step:515/1695 train_time:48785ms step_avg:94.73ms +step:516/1695 train_time:48881ms step_avg:94.73ms +step:517/1695 train_time:48977ms step_avg:94.73ms +step:518/1695 train_time:49074ms step_avg:94.74ms +step:519/1695 train_time:49170ms step_avg:94.74ms +step:520/1695 train_time:49267ms step_avg:94.74ms +step:521/1695 train_time:49362ms step_avg:94.74ms +step:522/1695 train_time:49458ms step_avg:94.75ms +step:523/1695 train_time:49556ms step_avg:94.75ms +step:524/1695 train_time:49653ms step_avg:94.76ms +step:525/1695 train_time:49751ms step_avg:94.76ms +step:526/1695 train_time:49848ms step_avg:94.77ms +step:527/1695 train_time:49944ms step_avg:94.77ms +step:528/1695 train_time:50040ms step_avg:94.77ms +step:529/1695 train_time:50136ms step_avg:94.78ms +step:530/1695 train_time:50233ms step_avg:94.78ms +step:531/1695 train_time:50329ms step_avg:94.78ms +step:532/1695 train_time:50426ms step_avg:94.79ms +step:533/1695 train_time:50522ms step_avg:94.79ms +step:534/1695 train_time:50619ms step_avg:94.79ms +step:535/1695 train_time:50716ms step_avg:94.80ms +step:536/1695 train_time:50813ms step_avg:94.80ms +step:537/1695 train_time:50909ms step_avg:94.80ms +step:538/1695 train_time:51006ms step_avg:94.81ms +step:539/1695 train_time:51102ms step_avg:94.81ms +step:540/1695 train_time:51198ms step_avg:94.81ms +step:541/1695 train_time:51295ms step_avg:94.81ms +step:542/1695 train_time:51392ms step_avg:94.82ms +step:543/1695 train_time:51489ms step_avg:94.82ms +step:544/1695 train_time:51586ms step_avg:94.83ms +step:545/1695 train_time:51682ms step_avg:94.83ms +step:546/1695 train_time:51779ms step_avg:94.83ms +step:547/1695 train_time:51875ms step_avg:94.84ms +step:548/1695 train_time:51972ms step_avg:94.84ms +step:549/1695 train_time:52068ms step_avg:94.84ms +step:550/1695 train_time:52164ms step_avg:94.84ms +step:551/1695 train_time:52260ms step_avg:94.85ms +step:552/1695 train_time:52356ms step_avg:94.85ms +step:553/1695 train_time:52453ms step_avg:94.85ms +step:554/1695 train_time:52549ms step_avg:94.85ms +step:555/1695 train_time:52646ms step_avg:94.86ms +step:556/1695 train_time:52742ms step_avg:94.86ms +step:557/1695 train_time:52839ms step_avg:94.86ms +step:558/1695 train_time:52935ms step_avg:94.87ms +step:559/1695 train_time:53032ms step_avg:94.87ms +step:560/1695 train_time:53128ms step_avg:94.87ms +step:561/1695 train_time:53225ms step_avg:94.87ms +step:562/1695 train_time:53321ms step_avg:94.88ms +step:563/1695 train_time:53417ms 
step_avg:94.88ms +step:564/1695 train_time:53513ms step_avg:94.88ms +step:565/1695 train_time:53609ms step_avg:94.88ms +step:566/1695 train_time:53707ms step_avg:94.89ms +step:567/1695 train_time:53803ms step_avg:94.89ms +step:568/1695 train_time:53899ms step_avg:94.89ms +step:569/1695 train_time:53996ms step_avg:94.90ms +step:570/1695 train_time:54093ms step_avg:94.90ms +step:571/1695 train_time:54189ms step_avg:94.90ms +step:572/1695 train_time:54286ms step_avg:94.91ms +step:573/1695 train_time:54382ms step_avg:94.91ms +step:574/1695 train_time:54479ms step_avg:94.91ms +step:575/1695 train_time:54576ms step_avg:94.91ms +step:576/1695 train_time:54673ms step_avg:94.92ms +step:577/1695 train_time:54769ms step_avg:94.92ms +step:578/1695 train_time:54866ms step_avg:94.92ms +step:579/1695 train_time:54962ms step_avg:94.93ms +step:580/1695 train_time:55059ms step_avg:94.93ms +step:581/1695 train_time:55156ms step_avg:94.93ms +step:582/1695 train_time:55253ms step_avg:94.94ms +step:583/1695 train_time:55351ms step_avg:94.94ms +step:584/1695 train_time:55447ms step_avg:94.94ms +step:585/1695 train_time:55544ms step_avg:94.95ms +step:586/1695 train_time:55642ms step_avg:94.95ms +step:587/1695 train_time:55739ms step_avg:94.96ms +step:588/1695 train_time:55835ms step_avg:94.96ms +step:589/1695 train_time:55931ms step_avg:94.96ms +step:590/1695 train_time:56027ms step_avg:94.96ms +step:591/1695 train_time:56123ms step_avg:94.96ms +step:592/1695 train_time:56219ms step_avg:94.96ms +step:593/1695 train_time:56316ms step_avg:94.97ms +step:594/1695 train_time:56412ms step_avg:94.97ms +step:595/1695 train_time:56509ms step_avg:94.97ms +step:596/1695 train_time:56605ms step_avg:94.97ms +step:597/1695 train_time:56701ms step_avg:94.98ms +step:598/1695 train_time:56797ms step_avg:94.98ms +step:599/1695 train_time:56894ms step_avg:94.98ms +step:600/1695 train_time:56990ms step_avg:94.98ms +step:601/1695 train_time:57087ms step_avg:94.99ms +step:602/1695 train_time:57184ms step_avg:94.99ms +step:603/1695 train_time:57281ms step_avg:94.99ms +step:604/1695 train_time:57377ms step_avg:94.99ms +step:605/1695 train_time:57474ms step_avg:95.00ms +step:606/1695 train_time:57571ms step_avg:95.00ms +step:607/1695 train_time:57667ms step_avg:95.00ms +step:608/1695 train_time:57762ms step_avg:95.00ms +step:609/1695 train_time:57859ms step_avg:95.01ms +step:610/1695 train_time:57955ms step_avg:95.01ms +step:611/1695 train_time:58053ms step_avg:95.01ms +step:612/1695 train_time:58150ms step_avg:95.02ms +step:613/1695 train_time:58245ms step_avg:95.02ms +step:614/1695 train_time:58341ms step_avg:95.02ms +step:615/1695 train_time:58437ms step_avg:95.02ms +step:616/1695 train_time:58533ms step_avg:95.02ms +step:617/1695 train_time:58630ms step_avg:95.02ms +step:618/1695 train_time:58726ms step_avg:95.03ms +step:619/1695 train_time:58823ms step_avg:95.03ms +step:620/1695 train_time:58919ms step_avg:95.03ms +step:621/1695 train_time:59016ms step_avg:95.03ms +step:622/1695 train_time:59113ms step_avg:95.04ms +step:623/1695 train_time:59209ms step_avg:95.04ms +step:624/1695 train_time:59306ms step_avg:95.04ms +step:625/1695 train_time:59403ms step_avg:95.04ms +step:625/1695 val_loss:3.6497 train_time:59497ms step_avg:95.20ms +step:626/1695 train_time:59525ms step_avg:95.09ms +step:627/1695 train_time:59606ms step_avg:95.07ms +step:628/1695 train_time:59706ms step_avg:95.07ms +step:629/1695 train_time:60035ms step_avg:95.44ms +step:630/1695 train_time:60131ms step_avg:95.45ms +step:631/1695 train_time:60227ms step_avg:95.45ms 
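[Annotation] train_time above is cumulative wall-clock milliseconds, so the marginal cost of a step is the difference of consecutive train_time values. Most steps in this stretch take ~96ms, but a handful (e.g. step 629 just above and steps 641, 647, 653 just below) cost ~325-340ms each. The arithmetic below, using values copied from the rows above, is a minimal check of one such spike.
# per-step cost from the cumulative train_time column (ms, copied from the log rows)
t = {628: 59706, 629: 60035, 630: 60131}
print(t[629] - t[628])  # -> 329 (an outlier step)
print(t[630] - t[629])  # -> 96  (back to the ~96ms baseline)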
+step:632/1695 train_time:60325ms step_avg:95.45ms +step:633/1695 train_time:60422ms step_avg:95.45ms +step:634/1695 train_time:60519ms step_avg:95.46ms +step:635/1695 train_time:60616ms step_avg:95.46ms +step:636/1695 train_time:60713ms step_avg:95.46ms +step:637/1695 train_time:60810ms step_avg:95.46ms +step:638/1695 train_time:60907ms step_avg:95.47ms +step:639/1695 train_time:61008ms step_avg:95.47ms +step:640/1695 train_time:61109ms step_avg:95.48ms +step:641/1695 train_time:61433ms step_avg:95.84ms +step:642/1695 train_time:61528ms step_avg:95.84ms +step:643/1695 train_time:61625ms step_avg:95.84ms +step:644/1695 train_time:61722ms step_avg:95.84ms +step:645/1695 train_time:61818ms step_avg:95.84ms +step:646/1695 train_time:61916ms step_avg:95.85ms +step:647/1695 train_time:62253ms step_avg:96.22ms +step:648/1695 train_time:62348ms step_avg:96.22ms +step:649/1695 train_time:62446ms step_avg:96.22ms +step:650/1695 train_time:62543ms step_avg:96.22ms +step:651/1695 train_time:62640ms step_avg:96.22ms +step:652/1695 train_time:62738ms step_avg:96.22ms +step:653/1695 train_time:63076ms step_avg:96.59ms +step:654/1695 train_time:63173ms step_avg:96.59ms +step:655/1695 train_time:63270ms step_avg:96.60ms +step:656/1695 train_time:63367ms step_avg:96.60ms +step:657/1695 train_time:63464ms step_avg:96.60ms +step:658/1695 train_time:63561ms step_avg:96.60ms +step:659/1695 train_time:63658ms step_avg:96.60ms +step:660/1695 train_time:63755ms step_avg:96.60ms +step:661/1695 train_time:63852ms step_avg:96.60ms +step:662/1695 train_time:63950ms step_avg:96.60ms +step:663/1695 train_time:64050ms step_avg:96.61ms +step:664/1695 train_time:64150ms step_avg:96.61ms +step:665/1695 train_time:64248ms step_avg:96.61ms +step:666/1695 train_time:64345ms step_avg:96.61ms +step:667/1695 train_time:64443ms step_avg:96.62ms +step:668/1695 train_time:64540ms step_avg:96.62ms +step:669/1695 train_time:64638ms step_avg:96.62ms +step:670/1695 train_time:64735ms step_avg:96.62ms +step:671/1695 train_time:64833ms step_avg:96.62ms +step:672/1695 train_time:64931ms step_avg:96.62ms +step:673/1695 train_time:65029ms step_avg:96.62ms +step:674/1695 train_time:65127ms step_avg:96.63ms +step:675/1695 train_time:65225ms step_avg:96.63ms +step:676/1695 train_time:65323ms step_avg:96.63ms +step:677/1695 train_time:65422ms step_avg:96.63ms +step:678/1695 train_time:65519ms step_avg:96.64ms +step:679/1695 train_time:65617ms step_avg:96.64ms +step:680/1695 train_time:65715ms step_avg:96.64ms +step:681/1695 train_time:65812ms step_avg:96.64ms +step:682/1695 train_time:65909ms step_avg:96.64ms +step:683/1695 train_time:66007ms step_avg:96.64ms +step:684/1695 train_time:66106ms step_avg:96.65ms +step:685/1695 train_time:66204ms step_avg:96.65ms +step:686/1695 train_time:66302ms step_avg:96.65ms +step:687/1695 train_time:66399ms step_avg:96.65ms +step:688/1695 train_time:66497ms step_avg:96.65ms +step:689/1695 train_time:66595ms step_avg:96.65ms +step:690/1695 train_time:66693ms step_avg:96.66ms +step:691/1695 train_time:66791ms step_avg:96.66ms +step:692/1695 train_time:66888ms step_avg:96.66ms +step:693/1695 train_time:66986ms step_avg:96.66ms +step:694/1695 train_time:67085ms step_avg:96.66ms +step:695/1695 train_time:67184ms step_avg:96.67ms +step:696/1695 train_time:67282ms step_avg:96.67ms +step:697/1695 train_time:67380ms step_avg:96.67ms +step:698/1695 train_time:67478ms step_avg:96.67ms +step:699/1695 train_time:67575ms step_avg:96.67ms +step:700/1695 train_time:67673ms step_avg:96.68ms +step:701/1695 train_time:67770ms 
step_avg:96.68ms +step:702/1695 train_time:67867ms step_avg:96.68ms +step:703/1695 train_time:67964ms step_avg:96.68ms +step:704/1695 train_time:68063ms step_avg:96.68ms +step:705/1695 train_time:68161ms step_avg:96.68ms +step:706/1695 train_time:68260ms step_avg:96.68ms +step:707/1695 train_time:68358ms step_avg:96.69ms +step:708/1695 train_time:68456ms step_avg:96.69ms +step:709/1695 train_time:68553ms step_avg:96.69ms +step:710/1695 train_time:68651ms step_avg:96.69ms +step:711/1695 train_time:68748ms step_avg:96.69ms +step:712/1695 train_time:68846ms step_avg:96.69ms +step:713/1695 train_time:68943ms step_avg:96.69ms +step:714/1695 train_time:69388ms step_avg:97.18ms +step:715/1695 train_time:69437ms step_avg:97.12ms +step:716/1695 train_time:69533ms step_avg:97.11ms +step:717/1695 train_time:69630ms step_avg:97.11ms +step:718/1695 train_time:69727ms step_avg:97.11ms +step:719/1695 train_time:69824ms step_avg:97.11ms +step:720/1695 train_time:69921ms step_avg:97.11ms +step:721/1695 train_time:70018ms step_avg:97.11ms +step:722/1695 train_time:70116ms step_avg:97.11ms +step:723/1695 train_time:70213ms step_avg:97.11ms +step:724/1695 train_time:70312ms step_avg:97.12ms +step:725/1695 train_time:70412ms step_avg:97.12ms +step:726/1695 train_time:70511ms step_avg:97.12ms +step:727/1695 train_time:70609ms step_avg:97.12ms +step:728/1695 train_time:70707ms step_avg:97.12ms +step:729/1695 train_time:70804ms step_avg:97.12ms +step:730/1695 train_time:70901ms step_avg:97.12ms +step:731/1695 train_time:70998ms step_avg:97.12ms +step:732/1695 train_time:71095ms step_avg:97.12ms +step:733/1695 train_time:71192ms step_avg:97.12ms +step:734/1695 train_time:71290ms step_avg:97.13ms +step:735/1695 train_time:71389ms step_avg:97.13ms +step:736/1695 train_time:71488ms step_avg:97.13ms +step:737/1695 train_time:71586ms step_avg:97.13ms +step:738/1695 train_time:71684ms step_avg:97.13ms +step:739/1695 train_time:71783ms step_avg:97.14ms +step:740/1695 train_time:71880ms step_avg:97.14ms +step:741/1695 train_time:71977ms step_avg:97.14ms +step:742/1695 train_time:72075ms step_avg:97.14ms +step:743/1695 train_time:72172ms step_avg:97.14ms +step:744/1695 train_time:72269ms step_avg:97.14ms +step:745/1695 train_time:72367ms step_avg:97.14ms +step:746/1695 train_time:72464ms step_avg:97.14ms +step:747/1695 train_time:72562ms step_avg:97.14ms +step:748/1695 train_time:72661ms step_avg:97.14ms +step:749/1695 train_time:72760ms step_avg:97.14ms +step:750/1695 train_time:72857ms step_avg:97.14ms +step:750/1695 val_loss:3.5863 train_time:72953ms step_avg:97.27ms +step:751/1695 train_time:72981ms step_avg:97.18ms +step:752/1695 train_time:73064ms step_avg:97.16ms +step:753/1695 train_time:73165ms step_avg:97.16ms +step:754/1695 train_time:73265ms step_avg:97.17ms +step:755/1695 train_time:73363ms step_avg:97.17ms +step:756/1695 train_time:73461ms step_avg:97.17ms +step:757/1695 train_time:73560ms step_avg:97.17ms +step:758/1695 train_time:73656ms step_avg:97.17ms +step:759/1695 train_time:73753ms step_avg:97.17ms +step:760/1695 train_time:73851ms step_avg:97.17ms +step:761/1695 train_time:73949ms step_avg:97.17ms +step:762/1695 train_time:74047ms step_avg:97.17ms +step:763/1695 train_time:74146ms step_avg:97.18ms +step:764/1695 train_time:74246ms step_avg:97.18ms +step:765/1695 train_time:74345ms step_avg:97.18ms +step:766/1695 train_time:74444ms step_avg:97.18ms +step:767/1695 train_time:74542ms step_avg:97.19ms +step:768/1695 train_time:74639ms step_avg:97.19ms +step:769/1695 train_time:74737ms step_avg:97.19ms 
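[Annotation] step_avg is the cumulative train_time divided by the step index, and the val_loss rows share the same bookkeeping, which is why step_avg ticks up at each eval and settles again on the next training step. A minimal recomputation from the step-750 rows above:
# step_avg = train_time / step, checked against the printed values
for step, train_time_ms in [(750, 72953), (751, 72981), (769, 74737)]:
    print(step, round(train_time_ms / step, 2))
# -> 97.27, 97.18, 97.19 -- matching step_avg at the val row and on the steps after it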
+step:770/1695 train_time:74834ms step_avg:97.19ms +step:771/1695 train_time:74933ms step_avg:97.19ms +step:772/1695 train_time:75263ms step_avg:97.49ms +step:773/1695 train_time:75359ms step_avg:97.49ms +step:774/1695 train_time:75456ms step_avg:97.49ms +step:775/1695 train_time:75553ms step_avg:97.49ms +step:776/1695 train_time:75651ms step_avg:97.49ms +step:777/1695 train_time:75749ms step_avg:97.49ms +step:778/1695 train_time:75846ms step_avg:97.49ms +step:779/1695 train_time:75944ms step_avg:97.49ms +step:780/1695 train_time:76042ms step_avg:97.49ms +step:781/1695 train_time:76139ms step_avg:97.49ms +step:782/1695 train_time:76240ms step_avg:97.49ms +step:783/1695 train_time:76338ms step_avg:97.49ms +step:784/1695 train_time:76438ms step_avg:97.50ms +step:785/1695 train_time:76537ms step_avg:97.50ms +step:786/1695 train_time:76634ms step_avg:97.50ms +step:787/1695 train_time:76732ms step_avg:97.50ms +step:788/1695 train_time:76829ms step_avg:97.50ms +step:789/1695 train_time:76927ms step_avg:97.50ms +step:790/1695 train_time:77249ms step_avg:97.78ms +step:791/1695 train_time:77345ms step_avg:97.78ms +step:792/1695 train_time:77443ms step_avg:97.78ms +step:793/1695 train_time:77540ms step_avg:97.78ms +step:794/1695 train_time:77637ms step_avg:97.78ms +step:795/1695 train_time:77734ms step_avg:97.78ms +step:796/1695 train_time:78123ms step_avg:98.14ms +step:797/1695 train_time:78219ms step_avg:98.14ms +step:798/1695 train_time:78315ms step_avg:98.14ms +step:799/1695 train_time:78412ms step_avg:98.14ms +step:800/1695 train_time:78510ms step_avg:98.14ms +step:801/1695 train_time:78607ms step_avg:98.14ms +step:802/1695 train_time:78705ms step_avg:98.14ms +step:803/1695 train_time:78802ms step_avg:98.13ms +step:804/1695 train_time:78899ms step_avg:98.13ms +step:805/1695 train_time:78998ms step_avg:98.13ms +step:806/1695 train_time:79097ms step_avg:98.14ms +step:807/1695 train_time:79196ms step_avg:98.14ms +step:808/1695 train_time:79295ms step_avg:98.14ms +step:809/1695 train_time:79394ms step_avg:98.14ms +step:810/1695 train_time:79492ms step_avg:98.14ms +step:811/1695 train_time:79589ms step_avg:98.14ms +step:812/1695 train_time:79687ms step_avg:98.14ms +step:813/1695 train_time:79785ms step_avg:98.14ms +step:814/1695 train_time:79882ms step_avg:98.13ms +step:815/1695 train_time:79980ms step_avg:98.13ms +step:816/1695 train_time:80078ms step_avg:98.13ms +step:817/1695 train_time:80176ms step_avg:98.13ms +step:818/1695 train_time:80274ms step_avg:98.13ms +step:819/1695 train_time:80372ms step_avg:98.13ms +step:820/1695 train_time:80470ms step_avg:98.13ms +step:821/1695 train_time:80568ms step_avg:98.13ms +step:822/1695 train_time:80667ms step_avg:98.13ms +step:823/1695 train_time:80764ms step_avg:98.13ms +step:824/1695 train_time:80862ms step_avg:98.13ms +step:825/1695 train_time:80960ms step_avg:98.13ms +step:826/1695 train_time:81058ms step_avg:98.13ms +step:827/1695 train_time:81157ms step_avg:98.13ms +step:828/1695 train_time:81255ms step_avg:98.13ms +step:829/1695 train_time:81353ms step_avg:98.13ms +step:830/1695 train_time:81451ms step_avg:98.13ms +step:831/1695 train_time:81549ms step_avg:98.13ms +step:832/1695 train_time:81647ms step_avg:98.13ms +step:833/1695 train_time:81745ms step_avg:98.13ms +step:834/1695 train_time:81842ms step_avg:98.13ms +step:835/1695 train_time:81940ms step_avg:98.13ms +step:836/1695 train_time:82038ms step_avg:98.13ms +step:837/1695 train_time:82137ms step_avg:98.13ms +step:838/1695 train_time:82235ms step_avg:98.13ms +step:839/1695 train_time:82334ms 
step_avg:98.13ms +step:840/1695 train_time:82432ms step_avg:98.13ms +step:841/1695 train_time:82530ms step_avg:98.13ms +step:842/1695 train_time:82628ms step_avg:98.13ms +step:843/1695 train_time:82726ms step_avg:98.13ms +step:844/1695 train_time:82825ms step_avg:98.13ms +step:845/1695 train_time:82923ms step_avg:98.13ms +step:846/1695 train_time:83021ms step_avg:98.13ms +step:847/1695 train_time:83119ms step_avg:98.13ms +step:848/1695 train_time:83217ms step_avg:98.13ms +step:849/1695 train_time:83315ms step_avg:98.13ms +step:850/1695 train_time:83414ms step_avg:98.13ms +step:851/1695 train_time:83512ms step_avg:98.13ms +step:852/1695 train_time:83609ms step_avg:98.13ms +step:853/1695 train_time:83707ms step_avg:98.13ms +step:854/1695 train_time:83805ms step_avg:98.13ms +step:855/1695 train_time:83903ms step_avg:98.13ms +step:856/1695 train_time:84001ms step_avg:98.13ms +step:857/1695 train_time:84099ms step_avg:98.13ms +step:858/1695 train_time:84196ms step_avg:98.13ms +step:859/1695 train_time:84295ms step_avg:98.13ms +step:860/1695 train_time:84393ms step_avg:98.13ms +step:861/1695 train_time:84492ms step_avg:98.13ms +step:862/1695 train_time:84590ms step_avg:98.13ms +step:863/1695 train_time:84688ms step_avg:98.13ms +step:864/1695 train_time:84785ms step_avg:98.13ms +step:865/1695 train_time:84883ms step_avg:98.13ms +step:866/1695 train_time:84981ms step_avg:98.13ms +step:867/1695 train_time:85079ms step_avg:98.13ms +step:868/1695 train_time:85177ms step_avg:98.13ms +step:869/1695 train_time:85275ms step_avg:98.13ms +step:870/1695 train_time:85373ms step_avg:98.13ms +step:871/1695 train_time:85471ms step_avg:98.13ms +step:872/1695 train_time:85570ms step_avg:98.13ms +step:873/1695 train_time:85667ms step_avg:98.13ms +step:874/1695 train_time:85765ms step_avg:98.13ms +step:875/1695 train_time:85863ms step_avg:98.13ms +step:875/1695 val_loss:3.5358 train_time:85959ms step_avg:98.24ms +step:876/1695 train_time:85987ms step_avg:98.16ms +step:877/1695 train_time:86069ms step_avg:98.14ms +step:878/1695 train_time:86171ms step_avg:98.14ms +step:879/1695 train_time:86269ms step_avg:98.14ms +step:880/1695 train_time:86366ms step_avg:98.14ms +step:881/1695 train_time:86464ms step_avg:98.14ms +step:882/1695 train_time:86563ms step_avg:98.14ms +step:883/1695 train_time:86662ms step_avg:98.15ms +step:884/1695 train_time:86761ms step_avg:98.15ms +step:885/1695 train_time:86859ms step_avg:98.15ms +step:886/1695 train_time:86958ms step_avg:98.15ms +step:887/1695 train_time:87058ms step_avg:98.15ms +step:888/1695 train_time:87161ms step_avg:98.15ms +step:889/1695 train_time:87262ms step_avg:98.16ms +step:890/1695 train_time:87363ms step_avg:98.16ms +step:891/1695 train_time:87462ms step_avg:98.16ms +step:892/1695 train_time:87562ms step_avg:98.16ms +step:893/1695 train_time:87660ms step_avg:98.16ms +step:894/1695 train_time:87759ms step_avg:98.16ms +step:895/1695 train_time:87857ms step_avg:98.16ms +step:896/1695 train_time:87956ms step_avg:98.16ms +step:897/1695 train_time:88055ms step_avg:98.17ms +step:898/1695 train_time:88155ms step_avg:98.17ms +step:899/1695 train_time:88255ms step_avg:98.17ms +step:900/1695 train_time:88357ms step_avg:98.17ms +step:901/1695 train_time:88457ms step_avg:98.18ms +step:902/1695 train_time:88558ms step_avg:98.18ms +step:903/1695 train_time:88657ms step_avg:98.18ms +step:904/1695 train_time:88757ms step_avg:98.18ms +step:905/1695 train_time:88856ms step_avg:98.18ms +step:906/1695 train_time:88954ms step_avg:98.18ms +step:907/1695 train_time:89053ms step_avg:98.18ms 
+step:908/1695 train_time:89153ms step_avg:98.19ms +step:909/1695 train_time:89252ms step_avg:98.19ms +step:910/1695 train_time:89353ms step_avg:98.19ms +step:911/1695 train_time:89454ms step_avg:98.19ms +step:912/1695 train_time:89555ms step_avg:98.20ms +step:913/1695 train_time:89654ms step_avg:98.20ms +step:914/1695 train_time:89754ms step_avg:98.20ms +step:915/1695 train_time:89853ms step_avg:98.20ms +step:916/1695 train_time:89953ms step_avg:98.20ms +step:917/1695 train_time:90052ms step_avg:98.20ms +step:918/1695 train_time:90151ms step_avg:98.20ms +step:919/1695 train_time:90250ms step_avg:98.20ms +step:920/1695 train_time:90349ms step_avg:98.21ms +step:921/1695 train_time:90450ms step_avg:98.21ms +step:922/1695 train_time:90550ms step_avg:98.21ms +step:923/1695 train_time:90649ms step_avg:98.21ms +step:924/1695 train_time:90748ms step_avg:98.21ms +step:925/1695 train_time:90846ms step_avg:98.21ms +step:926/1695 train_time:90946ms step_avg:98.21ms +step:927/1695 train_time:91045ms step_avg:98.21ms +step:928/1695 train_time:91144ms step_avg:98.22ms +step:929/1695 train_time:91243ms step_avg:98.22ms +step:930/1695 train_time:91343ms step_avg:98.22ms +step:931/1695 train_time:91444ms step_avg:98.22ms +step:932/1695 train_time:91543ms step_avg:98.22ms +step:933/1695 train_time:91643ms step_avg:98.22ms +step:934/1695 train_time:91742ms step_avg:98.23ms +step:935/1695 train_time:91842ms step_avg:98.23ms +step:936/1695 train_time:91941ms step_avg:98.23ms +step:937/1695 train_time:92040ms step_avg:98.23ms +step:938/1695 train_time:92140ms step_avg:98.23ms +step:939/1695 train_time:92241ms step_avg:98.23ms +step:940/1695 train_time:92340ms step_avg:98.23ms +step:941/1695 train_time:92440ms step_avg:98.24ms +step:942/1695 train_time:92540ms step_avg:98.24ms +step:943/1695 train_time:92640ms step_avg:98.24ms +step:944/1695 train_time:92740ms step_avg:98.24ms +step:945/1695 train_time:92841ms step_avg:98.24ms +step:946/1695 train_time:92941ms step_avg:98.25ms +step:947/1695 train_time:93040ms step_avg:98.25ms +step:948/1695 train_time:93139ms step_avg:98.25ms +step:949/1695 train_time:93239ms step_avg:98.25ms +step:950/1695 train_time:93338ms step_avg:98.25ms +step:951/1695 train_time:93438ms step_avg:98.25ms +step:952/1695 train_time:93538ms step_avg:98.25ms +step:953/1695 train_time:93639ms step_avg:98.26ms +step:954/1695 train_time:93738ms step_avg:98.26ms +step:955/1695 train_time:93838ms step_avg:98.26ms +step:956/1695 train_time:93937ms step_avg:98.26ms +step:957/1695 train_time:94036ms step_avg:98.26ms +step:958/1695 train_time:94136ms step_avg:98.26ms +step:959/1695 train_time:94235ms step_avg:98.26ms +step:960/1695 train_time:94335ms step_avg:98.27ms +step:961/1695 train_time:94435ms step_avg:98.27ms +step:962/1695 train_time:94535ms step_avg:98.27ms +step:963/1695 train_time:94635ms step_avg:98.27ms +step:964/1695 train_time:94736ms step_avg:98.27ms +step:965/1695 train_time:94837ms step_avg:98.28ms +step:966/1695 train_time:94937ms step_avg:98.28ms +step:967/1695 train_time:95037ms step_avg:98.28ms +step:968/1695 train_time:95135ms step_avg:98.28ms +step:969/1695 train_time:95236ms step_avg:98.28ms +step:970/1695 train_time:95336ms step_avg:98.28ms +step:971/1695 train_time:95435ms step_avg:98.29ms +step:972/1695 train_time:95535ms step_avg:98.29ms +step:973/1695 train_time:95635ms step_avg:98.29ms +step:974/1695 train_time:95736ms step_avg:98.29ms +step:975/1695 train_time:95836ms step_avg:98.29ms +step:976/1695 train_time:95936ms step_avg:98.30ms +step:977/1695 train_time:96036ms 
step_avg:98.30ms +step:978/1695 train_time:96136ms step_avg:98.30ms +step:979/1695 train_time:96236ms step_avg:98.30ms +step:980/1695 train_time:96336ms step_avg:98.30ms +step:981/1695 train_time:96436ms step_avg:98.30ms +step:982/1695 train_time:96536ms step_avg:98.31ms +step:983/1695 train_time:96637ms step_avg:98.31ms +step:984/1695 train_time:96736ms step_avg:98.31ms +step:985/1695 train_time:96836ms step_avg:98.31ms +step:986/1695 train_time:96936ms step_avg:98.31ms +step:987/1695 train_time:97037ms step_avg:98.31ms +step:988/1695 train_time:97136ms step_avg:98.32ms +step:989/1695 train_time:97236ms step_avg:98.32ms +step:990/1695 train_time:97337ms step_avg:98.32ms +step:991/1695 train_time:97437ms step_avg:98.32ms +step:992/1695 train_time:97537ms step_avg:98.32ms +step:993/1695 train_time:97636ms step_avg:98.32ms +step:994/1695 train_time:97736ms step_avg:98.33ms +step:995/1695 train_time:97837ms step_avg:98.33ms +step:996/1695 train_time:97936ms step_avg:98.33ms +step:997/1695 train_time:98036ms step_avg:98.33ms +step:998/1695 train_time:98135ms step_avg:98.33ms +step:999/1695 train_time:98236ms step_avg:98.33ms +step:1000/1695 train_time:98335ms step_avg:98.33ms +step:1000/1695 val_loss:3.4922 train_time:98433ms step_avg:98.43ms +step:1001/1695 train_time:98469ms step_avg:98.37ms +step:1002/1695 train_time:98546ms step_avg:98.35ms +step:1003/1695 train_time:98643ms step_avg:98.35ms +step:1004/1695 train_time:98742ms step_avg:98.35ms +step:1005/1695 train_time:98841ms step_avg:98.35ms +step:1006/1695 train_time:98940ms step_avg:98.35ms +step:1007/1695 train_time:99039ms step_avg:98.35ms +step:1008/1695 train_time:99138ms step_avg:98.35ms +step:1009/1695 train_time:99237ms step_avg:98.35ms +step:1010/1695 train_time:99336ms step_avg:98.35ms +step:1011/1695 train_time:99439ms step_avg:98.36ms +step:1012/1695 train_time:99540ms step_avg:98.36ms +step:1013/1695 train_time:99640ms step_avg:98.36ms +step:1014/1695 train_time:99739ms step_avg:98.36ms +step:1015/1695 train_time:99838ms step_avg:98.36ms +step:1016/1695 train_time:99938ms step_avg:98.36ms +step:1017/1695 train_time:100038ms step_avg:98.37ms +step:1018/1695 train_time:100137ms step_avg:98.37ms +step:1019/1695 train_time:100236ms step_avg:98.37ms +step:1020/1695 train_time:100336ms step_avg:98.37ms +step:1021/1695 train_time:100437ms step_avg:98.37ms +step:1022/1695 train_time:100537ms step_avg:98.37ms +step:1023/1695 train_time:100638ms step_avg:98.38ms +step:1024/1695 train_time:100739ms step_avg:98.38ms +step:1025/1695 train_time:100839ms step_avg:98.38ms +step:1026/1695 train_time:100939ms step_avg:98.38ms +step:1027/1695 train_time:101038ms step_avg:98.38ms +step:1028/1695 train_time:101137ms step_avg:98.38ms +step:1029/1695 train_time:101237ms step_avg:98.38ms +step:1030/1695 train_time:101337ms step_avg:98.39ms +step:1031/1695 train_time:101438ms step_avg:98.39ms +step:1032/1695 train_time:101539ms step_avg:98.39ms +step:1033/1695 train_time:101639ms step_avg:98.39ms +step:1034/1695 train_time:101738ms step_avg:98.39ms +step:1035/1695 train_time:101838ms step_avg:98.39ms +step:1036/1695 train_time:101937ms step_avg:98.39ms +step:1037/1695 train_time:102036ms step_avg:98.40ms +step:1038/1695 train_time:102136ms step_avg:98.40ms +step:1039/1695 train_time:102234ms step_avg:98.40ms +step:1040/1695 train_time:102333ms step_avg:98.40ms +step:1041/1695 train_time:102434ms step_avg:98.40ms +step:1042/1695 train_time:102535ms step_avg:98.40ms +step:1043/1695 train_time:102636ms step_avg:98.40ms +step:1044/1695 
train_time:102736ms step_avg:98.41ms +step:1045/1695 train_time:102837ms step_avg:98.41ms +step:1046/1695 train_time:102937ms step_avg:98.41ms +step:1047/1695 train_time:103036ms step_avg:98.41ms +step:1048/1695 train_time:103135ms step_avg:98.41ms +step:1049/1695 train_time:103234ms step_avg:98.41ms +step:1050/1695 train_time:103333ms step_avg:98.41ms +step:1051/1695 train_time:103434ms step_avg:98.41ms +step:1052/1695 train_time:103534ms step_avg:98.42ms +step:1053/1695 train_time:103633ms step_avg:98.42ms +step:1054/1695 train_time:103733ms step_avg:98.42ms +step:1055/1695 train_time:103832ms step_avg:98.42ms +step:1056/1695 train_time:103932ms step_avg:98.42ms +step:1057/1695 train_time:104032ms step_avg:98.42ms +step:1058/1695 train_time:104132ms step_avg:98.42ms +step:1059/1695 train_time:104231ms step_avg:98.42ms +step:1060/1695 train_time:104330ms step_avg:98.42ms +step:1061/1695 train_time:104430ms step_avg:98.43ms +step:1062/1695 train_time:104530ms step_avg:98.43ms +step:1063/1695 train_time:104630ms step_avg:98.43ms +step:1064/1695 train_time:104730ms step_avg:98.43ms +step:1065/1695 train_time:104829ms step_avg:98.43ms +step:1066/1695 train_time:104929ms step_avg:98.43ms +step:1067/1695 train_time:105030ms step_avg:98.44ms +step:1068/1695 train_time:105131ms step_avg:98.44ms +step:1069/1695 train_time:105231ms step_avg:98.44ms +step:1070/1695 train_time:105333ms step_avg:98.44ms +step:1071/1695 train_time:105433ms step_avg:98.44ms +step:1072/1695 train_time:105532ms step_avg:98.44ms +step:1073/1695 train_time:105632ms step_avg:98.45ms +step:1074/1695 train_time:105732ms step_avg:98.45ms +step:1075/1695 train_time:105832ms step_avg:98.45ms +step:1076/1695 train_time:105932ms step_avg:98.45ms +step:1077/1695 train_time:106033ms step_avg:98.45ms +step:1078/1695 train_time:106133ms step_avg:98.45ms +step:1079/1695 train_time:106233ms step_avg:98.45ms +step:1080/1695 train_time:106332ms step_avg:98.46ms +step:1081/1695 train_time:106433ms step_avg:98.46ms +step:1082/1695 train_time:106533ms step_avg:98.46ms +step:1083/1695 train_time:106633ms step_avg:98.46ms +step:1084/1695 train_time:106732ms step_avg:98.46ms +step:1085/1695 train_time:106832ms step_avg:98.46ms +step:1086/1695 train_time:106932ms step_avg:98.46ms +step:1087/1695 train_time:107032ms step_avg:98.47ms +step:1088/1695 train_time:107132ms step_avg:98.47ms +step:1089/1695 train_time:107231ms step_avg:98.47ms +step:1090/1695 train_time:107332ms step_avg:98.47ms +step:1091/1695 train_time:107432ms step_avg:98.47ms +step:1092/1695 train_time:107532ms step_avg:98.47ms +step:1093/1695 train_time:107633ms step_avg:98.47ms +step:1094/1695 train_time:107733ms step_avg:98.48ms +step:1095/1695 train_time:107833ms step_avg:98.48ms +step:1096/1695 train_time:107932ms step_avg:98.48ms +step:1097/1695 train_time:108032ms step_avg:98.48ms +step:1098/1695 train_time:108132ms step_avg:98.48ms +step:1099/1695 train_time:108232ms step_avg:98.48ms +step:1100/1695 train_time:108332ms step_avg:98.48ms +step:1101/1695 train_time:108432ms step_avg:98.49ms +step:1102/1695 train_time:108533ms step_avg:98.49ms +step:1103/1695 train_time:108633ms step_avg:98.49ms +step:1104/1695 train_time:108732ms step_avg:98.49ms +step:1105/1695 train_time:108832ms step_avg:98.49ms +step:1106/1695 train_time:108932ms step_avg:98.49ms +step:1107/1695 train_time:109033ms step_avg:98.49ms +step:1108/1695 train_time:109133ms step_avg:98.50ms +step:1109/1695 train_time:109233ms step_avg:98.50ms +step:1110/1695 train_time:109333ms step_avg:98.50ms +step:1111/1695 
train_time:109432ms step_avg:98.50ms +step:1112/1695 train_time:109533ms step_avg:98.50ms +step:1113/1695 train_time:109632ms step_avg:98.50ms +step:1114/1695 train_time:109733ms step_avg:98.50ms +step:1115/1695 train_time:109832ms step_avg:98.50ms +step:1116/1695 train_time:109933ms step_avg:98.51ms +step:1117/1695 train_time:110033ms step_avg:98.51ms +step:1118/1695 train_time:110133ms step_avg:98.51ms +step:1119/1695 train_time:110233ms step_avg:98.51ms +step:1120/1695 train_time:110333ms step_avg:98.51ms +step:1121/1695 train_time:110434ms step_avg:98.51ms +step:1122/1695 train_time:110533ms step_avg:98.51ms +step:1123/1695 train_time:110634ms step_avg:98.52ms +step:1124/1695 train_time:110733ms step_avg:98.52ms +step:1125/1695 train_time:110834ms step_avg:98.52ms +step:1125/1695 val_loss:3.4399 train_time:110931ms step_avg:98.61ms +step:1126/1695 train_time:110957ms step_avg:98.54ms +step:1127/1695 train_time:111046ms step_avg:98.53ms +step:1128/1695 train_time:111150ms step_avg:98.54ms +step:1129/1695 train_time:111251ms step_avg:98.54ms +step:1130/1695 train_time:111349ms step_avg:98.54ms +step:1131/1695 train_time:111448ms step_avg:98.54ms +step:1132/1695 train_time:111548ms step_avg:98.54ms +step:1133/1695 train_time:111647ms step_avg:98.54ms +step:1134/1695 train_time:111747ms step_avg:98.54ms +step:1135/1695 train_time:111847ms step_avg:98.54ms +step:1136/1695 train_time:111949ms step_avg:98.55ms +step:1137/1695 train_time:112053ms step_avg:98.55ms +step:1138/1695 train_time:112156ms step_avg:98.56ms +step:1139/1695 train_time:112256ms step_avg:98.56ms +step:1140/1695 train_time:112356ms step_avg:98.56ms +step:1141/1695 train_time:112455ms step_avg:98.56ms +step:1142/1695 train_time:112556ms step_avg:98.56ms +step:1143/1695 train_time:112656ms step_avg:98.56ms +step:1144/1695 train_time:112756ms step_avg:98.56ms +step:1145/1695 train_time:112858ms step_avg:98.57ms +step:1146/1695 train_time:112959ms step_avg:98.57ms +step:1147/1695 train_time:113060ms step_avg:98.57ms +step:1148/1695 train_time:113161ms step_avg:98.57ms +step:1149/1695 train_time:113261ms step_avg:98.57ms +step:1150/1695 train_time:113362ms step_avg:98.58ms +step:1151/1695 train_time:113462ms step_avg:98.58ms +step:1152/1695 train_time:113563ms step_avg:98.58ms +step:1153/1695 train_time:113663ms step_avg:98.58ms +step:1154/1695 train_time:113763ms step_avg:98.58ms +step:1155/1695 train_time:113863ms step_avg:98.58ms +step:1156/1695 train_time:113963ms step_avg:98.58ms +step:1157/1695 train_time:114065ms step_avg:98.59ms +step:1158/1695 train_time:114165ms step_avg:98.59ms +step:1159/1695 train_time:114266ms step_avg:98.59ms +step:1160/1695 train_time:114367ms step_avg:98.59ms +step:1161/1695 train_time:114468ms step_avg:98.59ms +step:1162/1695 train_time:114569ms step_avg:98.60ms +step:1163/1695 train_time:114673ms step_avg:98.60ms +step:1164/1695 train_time:114775ms step_avg:98.60ms +step:1165/1695 train_time:114875ms step_avg:98.60ms +step:1166/1695 train_time:114976ms step_avg:98.61ms +step:1167/1695 train_time:115076ms step_avg:98.61ms +step:1168/1695 train_time:115177ms step_avg:98.61ms +step:1169/1695 train_time:115278ms step_avg:98.61ms +step:1170/1695 train_time:115378ms step_avg:98.61ms +step:1171/1695 train_time:115480ms step_avg:98.62ms +step:1172/1695 train_time:115581ms step_avg:98.62ms +step:1173/1695 train_time:115682ms step_avg:98.62ms +step:1174/1695 train_time:115782ms step_avg:98.62ms +step:1175/1695 train_time:115882ms step_avg:98.62ms +step:1176/1695 train_time:115983ms step_avg:98.63ms 
+step:1177/1695 train_time:116083ms step_avg:98.63ms +step:1178/1695 train_time:116183ms step_avg:98.63ms +step:1179/1695 train_time:116287ms step_avg:98.63ms +step:1180/1695 train_time:116390ms step_avg:98.64ms +step:1181/1695 train_time:116491ms step_avg:98.64ms +step:1182/1695 train_time:116592ms step_avg:98.64ms +step:1183/1695 train_time:116691ms step_avg:98.64ms +step:1184/1695 train_time:116794ms step_avg:98.64ms +step:1185/1695 train_time:116895ms step_avg:98.65ms +step:1186/1695 train_time:116996ms step_avg:98.65ms +step:1187/1695 train_time:117098ms step_avg:98.65ms +step:1188/1695 train_time:117200ms step_avg:98.65ms +step:1189/1695 train_time:117299ms step_avg:98.65ms +step:1190/1695 train_time:117400ms step_avg:98.66ms +step:1191/1695 train_time:117501ms step_avg:98.66ms +step:1192/1695 train_time:117601ms step_avg:98.66ms +step:1193/1695 train_time:117702ms step_avg:98.66ms +step:1194/1695 train_time:117804ms step_avg:98.66ms +step:1195/1695 train_time:117903ms step_avg:98.66ms +step:1196/1695 train_time:118004ms step_avg:98.67ms +step:1197/1695 train_time:118106ms step_avg:98.67ms +step:1198/1695 train_time:118207ms step_avg:98.67ms +step:1199/1695 train_time:118309ms step_avg:98.67ms +step:1200/1695 train_time:118409ms step_avg:98.67ms +step:1201/1695 train_time:118511ms step_avg:98.68ms +step:1202/1695 train_time:118611ms step_avg:98.68ms +step:1203/1695 train_time:118712ms step_avg:98.68ms +step:1204/1695 train_time:118813ms step_avg:98.68ms +step:1205/1695 train_time:118914ms step_avg:98.68ms +step:1206/1695 train_time:119015ms step_avg:98.69ms +step:1207/1695 train_time:119117ms step_avg:98.69ms +step:1208/1695 train_time:119218ms step_avg:98.69ms +step:1209/1695 train_time:119318ms step_avg:98.69ms +step:1210/1695 train_time:119420ms step_avg:98.69ms +step:1211/1695 train_time:119520ms step_avg:98.70ms +step:1212/1695 train_time:119620ms step_avg:98.70ms +step:1213/1695 train_time:119720ms step_avg:98.70ms +step:1214/1695 train_time:119820ms step_avg:98.70ms +step:1215/1695 train_time:119921ms step_avg:98.70ms +step:1216/1695 train_time:120023ms step_avg:98.70ms +step:1217/1695 train_time:120124ms step_avg:98.70ms +step:1218/1695 train_time:120224ms step_avg:98.71ms +step:1219/1695 train_time:120326ms step_avg:98.71ms +step:1220/1695 train_time:120429ms step_avg:98.71ms +step:1221/1695 train_time:120530ms step_avg:98.71ms +step:1222/1695 train_time:120630ms step_avg:98.72ms +step:1223/1695 train_time:120732ms step_avg:98.72ms +step:1224/1695 train_time:120832ms step_avg:98.72ms +step:1225/1695 train_time:120934ms step_avg:98.72ms +step:1226/1695 train_time:121034ms step_avg:98.72ms +step:1227/1695 train_time:121137ms step_avg:98.73ms +step:1228/1695 train_time:121238ms step_avg:98.73ms +step:1229/1695 train_time:121339ms step_avg:98.73ms +step:1230/1695 train_time:121440ms step_avg:98.73ms +step:1231/1695 train_time:121540ms step_avg:98.73ms +step:1232/1695 train_time:121641ms step_avg:98.73ms +step:1233/1695 train_time:121741ms step_avg:98.74ms +step:1234/1695 train_time:121842ms step_avg:98.74ms +step:1235/1695 train_time:121942ms step_avg:98.74ms +step:1236/1695 train_time:122043ms step_avg:98.74ms +step:1237/1695 train_time:122144ms step_avg:98.74ms +step:1238/1695 train_time:122245ms step_avg:98.74ms +step:1239/1695 train_time:122346ms step_avg:98.75ms +step:1240/1695 train_time:122448ms step_avg:98.75ms +step:1241/1695 train_time:122549ms step_avg:98.75ms +step:1242/1695 train_time:122650ms step_avg:98.75ms +step:1243/1695 train_time:122751ms step_avg:98.75ms 
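[Annotation] val_loss rows land every 125 steps (500, 625, 750, ...), matching val_loss_every = 125 in the Hyperparameters of the record that follows; assuming this run used the same settings, cooldown_frac = 0.45 of num_iterations = 1695 puts the start of the learning-rate cooldown near step 932, so the steps in this stretch sit inside the decay phase. The sketch below shows one common way such a flag is consumed (a trapezoidal schedule); it is an assumption for illustration, not code quoted from the record, whose actual schedule appears later in the file.
# hypothetical trapezoidal LR multiplier driven by cooldown_frac
num_iterations, cooldown_frac = 1695, 0.45
def lr_scale(step: int) -> float:
    x = step / num_iterations       # fraction of training elapsed
    if x < 1 - cooldown_frac:
        return 1.0                  # constant-LR phase
    return (1 - x) / cooldown_frac  # linear decay to 0 over the final 45%
print(lr_scale(931), round(lr_scale(1243), 2), lr_scale(1695))  # 1.0 0.59 0.0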
+step:1244/1695 train_time:122851ms step_avg:98.76ms +step:1245/1695 train_time:122952ms step_avg:98.76ms +step:1246/1695 train_time:123054ms step_avg:98.76ms +step:1247/1695 train_time:123154ms step_avg:98.76ms +step:1248/1695 train_time:123256ms step_avg:98.76ms +step:1249/1695 train_time:123357ms step_avg:98.77ms +step:1250/1695 train_time:123458ms step_avg:98.77ms +step:1250/1695 val_loss:3.3944 train_time:123558ms step_avg:98.85ms +step:1251/1695 train_time:123586ms step_avg:98.79ms +step:1252/1695 train_time:123668ms step_avg:98.78ms +step:1253/1695 train_time:123769ms step_avg:98.78ms +step:1254/1695 train_time:123871ms step_avg:98.78ms +step:1255/1695 train_time:123971ms step_avg:98.78ms +step:1256/1695 train_time:124071ms step_avg:98.78ms +step:1257/1695 train_time:124170ms step_avg:98.78ms +step:1258/1695 train_time:124270ms step_avg:98.78ms +step:1259/1695 train_time:124370ms step_avg:98.78ms +step:1260/1695 train_time:124470ms step_avg:98.79ms +step:1261/1695 train_time:124574ms step_avg:98.79ms +step:1262/1695 train_time:124677ms step_avg:98.79ms +step:1263/1695 train_time:124778ms step_avg:98.80ms +step:1264/1695 train_time:124879ms step_avg:98.80ms +step:1265/1695 train_time:124979ms step_avg:98.80ms +step:1266/1695 train_time:125080ms step_avg:98.80ms +step:1267/1695 train_time:125181ms step_avg:98.80ms +step:1268/1695 train_time:125283ms step_avg:98.80ms +step:1269/1695 train_time:125384ms step_avg:98.81ms +step:1270/1695 train_time:125485ms step_avg:98.81ms +step:1271/1695 train_time:125589ms step_avg:98.81ms +step:1272/1695 train_time:125688ms step_avg:98.81ms +step:1273/1695 train_time:125788ms step_avg:98.81ms +step:1274/1695 train_time:125888ms step_avg:98.81ms +step:1275/1695 train_time:125988ms step_avg:98.81ms +step:1276/1695 train_time:126090ms step_avg:98.82ms +step:1277/1695 train_time:126190ms step_avg:98.82ms +step:1278/1695 train_time:126292ms step_avg:98.82ms +step:1279/1695 train_time:126393ms step_avg:98.82ms +step:1280/1695 train_time:126496ms step_avg:98.82ms +step:1281/1695 train_time:126598ms step_avg:98.83ms +step:1282/1695 train_time:126700ms step_avg:98.83ms +step:1283/1695 train_time:126800ms step_avg:98.83ms +step:1284/1695 train_time:126900ms step_avg:98.83ms +step:1285/1695 train_time:127001ms step_avg:98.83ms +step:1286/1695 train_time:127102ms step_avg:98.83ms +step:1287/1695 train_time:127203ms step_avg:98.84ms +step:1288/1695 train_time:127304ms step_avg:98.84ms +step:1289/1695 train_time:127406ms step_avg:98.84ms +step:1290/1695 train_time:127506ms step_avg:98.84ms +step:1291/1695 train_time:127608ms step_avg:98.84ms +step:1292/1695 train_time:127709ms step_avg:98.85ms +step:1293/1695 train_time:127810ms step_avg:98.85ms +step:1294/1695 train_time:127911ms step_avg:98.85ms +step:1295/1695 train_time:128012ms step_avg:98.85ms +step:1296/1695 train_time:128114ms step_avg:98.85ms +step:1297/1695 train_time:128216ms step_avg:98.86ms +step:1298/1695 train_time:128318ms step_avg:98.86ms +step:1299/1695 train_time:128420ms step_avg:98.86ms +step:1300/1695 train_time:128520ms step_avg:98.86ms +step:1301/1695 train_time:128621ms step_avg:98.86ms +step:1302/1695 train_time:128723ms step_avg:98.87ms +step:1303/1695 train_time:128826ms step_avg:98.87ms +step:1304/1695 train_time:128926ms step_avg:98.87ms +step:1305/1695 train_time:129028ms step_avg:98.87ms +step:1306/1695 train_time:129128ms step_avg:98.87ms +step:1307/1695 train_time:129229ms step_avg:98.87ms +step:1308/1695 train_time:129329ms step_avg:98.88ms +step:1309/1695 train_time:129430ms 
step_avg:98.88ms +step:1310/1695 train_time:129530ms step_avg:98.88ms +step:1311/1695 train_time:129631ms step_avg:98.88ms +step:1312/1695 train_time:129733ms step_avg:98.88ms +step:1313/1695 train_time:129835ms step_avg:98.88ms +step:1314/1695 train_time:129938ms step_avg:98.89ms +step:1315/1695 train_time:130039ms step_avg:98.89ms +step:1316/1695 train_time:130140ms step_avg:98.89ms +step:1317/1695 train_time:130241ms step_avg:98.89ms +step:1318/1695 train_time:130342ms step_avg:98.89ms +step:1319/1695 train_time:130443ms step_avg:98.90ms +step:1320/1695 train_time:130546ms step_avg:98.90ms +step:1321/1695 train_time:130646ms step_avg:98.90ms +step:1322/1695 train_time:130747ms step_avg:98.90ms +step:1323/1695 train_time:130847ms step_avg:98.90ms +step:1324/1695 train_time:130948ms step_avg:98.90ms +step:1325/1695 train_time:131049ms step_avg:98.91ms +step:1326/1695 train_time:131150ms step_avg:98.91ms +step:1327/1695 train_time:131252ms step_avg:98.91ms +step:1328/1695 train_time:131353ms step_avg:98.91ms +step:1329/1695 train_time:131455ms step_avg:98.91ms +step:1330/1695 train_time:131555ms step_avg:98.91ms +step:1331/1695 train_time:131658ms step_avg:98.92ms +step:1332/1695 train_time:131758ms step_avg:98.92ms +step:1333/1695 train_time:131859ms step_avg:98.92ms +step:1334/1695 train_time:131960ms step_avg:98.92ms +step:1335/1695 train_time:132061ms step_avg:98.92ms +step:1336/1695 train_time:132162ms step_avg:98.92ms +step:1337/1695 train_time:132266ms step_avg:98.93ms +step:1338/1695 train_time:132366ms step_avg:98.93ms +step:1339/1695 train_time:132467ms step_avg:98.93ms +step:1340/1695 train_time:132567ms step_avg:98.93ms +step:1341/1695 train_time:132667ms step_avg:98.93ms +step:1342/1695 train_time:132768ms step_avg:98.93ms +step:1343/1695 train_time:132868ms step_avg:98.93ms +step:1344/1695 train_time:132968ms step_avg:98.93ms +step:1345/1695 train_time:133069ms step_avg:98.94ms +step:1346/1695 train_time:133171ms step_avg:98.94ms +step:1347/1695 train_time:133272ms step_avg:98.94ms +step:1348/1695 train_time:133372ms step_avg:98.94ms +step:1349/1695 train_time:133473ms step_avg:98.94ms +step:1350/1695 train_time:133575ms step_avg:98.94ms +step:1351/1695 train_time:133675ms step_avg:98.95ms +step:1352/1695 train_time:133776ms step_avg:98.95ms +step:1353/1695 train_time:133877ms step_avg:98.95ms +step:1354/1695 train_time:133979ms step_avg:98.95ms +step:1355/1695 train_time:134079ms step_avg:98.95ms +step:1356/1695 train_time:134182ms step_avg:98.95ms +step:1357/1695 train_time:134283ms step_avg:98.96ms +step:1358/1695 train_time:134385ms step_avg:98.96ms +step:1359/1695 train_time:134486ms step_avg:98.96ms +step:1360/1695 train_time:134586ms step_avg:98.96ms +step:1361/1695 train_time:134687ms step_avg:98.96ms +step:1362/1695 train_time:134788ms step_avg:98.96ms +step:1363/1695 train_time:134889ms step_avg:98.96ms +step:1364/1695 train_time:134990ms step_avg:98.97ms +step:1365/1695 train_time:135091ms step_avg:98.97ms +step:1366/1695 train_time:135192ms step_avg:98.97ms +step:1367/1695 train_time:135295ms step_avg:98.97ms +step:1368/1695 train_time:135397ms step_avg:98.97ms +step:1369/1695 train_time:135497ms step_avg:98.98ms +step:1370/1695 train_time:135599ms step_avg:98.98ms +step:1371/1695 train_time:135700ms step_avg:98.98ms +step:1372/1695 train_time:135800ms step_avg:98.98ms +step:1373/1695 train_time:135901ms step_avg:98.98ms +step:1374/1695 train_time:136003ms step_avg:98.98ms +step:1375/1695 train_time:136106ms step_avg:98.99ms +step:1375/1695 val_loss:3.3545 
train_time:136205ms step_avg:99.06ms +step:1376/1695 train_time:136232ms step_avg:99.01ms +step:1377/1695 train_time:136316ms step_avg:98.99ms +step:1378/1695 train_time:136420ms step_avg:99.00ms +step:1379/1695 train_time:136521ms step_avg:99.00ms +step:1380/1695 train_time:136623ms step_avg:99.00ms +step:1381/1695 train_time:136723ms step_avg:99.00ms +step:1382/1695 train_time:136822ms step_avg:99.00ms +step:1383/1695 train_time:136922ms step_avg:99.00ms +step:1384/1695 train_time:137023ms step_avg:99.00ms +step:1385/1695 train_time:137124ms step_avg:99.01ms +step:1386/1695 train_time:137227ms step_avg:99.01ms +step:1387/1695 train_time:137332ms step_avg:99.01ms +step:1388/1695 train_time:137433ms step_avg:99.02ms +step:1389/1695 train_time:137536ms step_avg:99.02ms +step:1390/1695 train_time:137637ms step_avg:99.02ms +step:1391/1695 train_time:137739ms step_avg:99.02ms +step:1392/1695 train_time:137841ms step_avg:99.02ms +step:1393/1695 train_time:137943ms step_avg:99.03ms +step:1394/1695 train_time:138043ms step_avg:99.03ms +step:1395/1695 train_time:138145ms step_avg:99.03ms +step:1396/1695 train_time:138248ms step_avg:99.03ms +step:1397/1695 train_time:138350ms step_avg:99.03ms +step:1398/1695 train_time:138452ms step_avg:99.04ms +step:1399/1695 train_time:138554ms step_avg:99.04ms +step:1400/1695 train_time:138656ms step_avg:99.04ms +step:1401/1695 train_time:138756ms step_avg:99.04ms +step:1402/1695 train_time:138859ms step_avg:99.04ms +step:1403/1695 train_time:138961ms step_avg:99.05ms +step:1404/1695 train_time:139063ms step_avg:99.05ms +step:1405/1695 train_time:139165ms step_avg:99.05ms +step:1406/1695 train_time:139267ms step_avg:99.05ms +step:1407/1695 train_time:139370ms step_avg:99.05ms +step:1408/1695 train_time:139471ms step_avg:99.06ms +step:1409/1695 train_time:139576ms step_avg:99.06ms +step:1410/1695 train_time:139678ms step_avg:99.06ms +step:1411/1695 train_time:139779ms step_avg:99.06ms +step:1412/1695 train_time:139882ms step_avg:99.07ms +step:1413/1695 train_time:139983ms step_avg:99.07ms +step:1414/1695 train_time:140085ms step_avg:99.07ms +step:1415/1695 train_time:140187ms step_avg:99.07ms +step:1416/1695 train_time:140287ms step_avg:99.07ms +step:1417/1695 train_time:140389ms step_avg:99.07ms +step:1418/1695 train_time:140490ms step_avg:99.08ms +step:1419/1695 train_time:140592ms step_avg:99.08ms +step:1420/1695 train_time:140694ms step_avg:99.08ms +step:1421/1695 train_time:140795ms step_avg:99.08ms +step:1422/1695 train_time:140897ms step_avg:99.08ms +step:1423/1695 train_time:140999ms step_avg:99.09ms +step:1424/1695 train_time:141100ms step_avg:99.09ms +step:1425/1695 train_time:141203ms step_avg:99.09ms +step:1426/1695 train_time:141306ms step_avg:99.09ms +step:1427/1695 train_time:141409ms step_avg:99.10ms +step:1428/1695 train_time:141511ms step_avg:99.10ms +step:1429/1695 train_time:141612ms step_avg:99.10ms +step:1430/1695 train_time:141713ms step_avg:99.10ms +step:1431/1695 train_time:141815ms step_avg:99.10ms +step:1432/1695 train_time:141915ms step_avg:99.10ms +step:1433/1695 train_time:142017ms step_avg:99.11ms +step:1434/1695 train_time:142120ms step_avg:99.11ms +step:1435/1695 train_time:142223ms step_avg:99.11ms +step:1436/1695 train_time:142326ms step_avg:99.11ms +step:1437/1695 train_time:142427ms step_avg:99.11ms +step:1438/1695 train_time:142529ms step_avg:99.12ms +step:1439/1695 train_time:142631ms step_avg:99.12ms +step:1440/1695 train_time:142734ms step_avg:99.12ms +step:1441/1695 train_time:142836ms step_avg:99.12ms +step:1442/1695 
train_time:142936ms step_avg:99.12ms +step:1443/1695 train_time:143037ms step_avg:99.12ms +step:1444/1695 train_time:143139ms step_avg:99.13ms +step:1445/1695 train_time:143241ms step_avg:99.13ms +step:1446/1695 train_time:143345ms step_avg:99.13ms +step:1447/1695 train_time:143446ms step_avg:99.13ms +step:1448/1695 train_time:143550ms step_avg:99.14ms +step:1449/1695 train_time:143650ms step_avg:99.14ms +step:1450/1695 train_time:143751ms step_avg:99.14ms +step:1451/1695 train_time:143852ms step_avg:99.14ms +step:1452/1695 train_time:143954ms step_avg:99.14ms +step:1453/1695 train_time:144055ms step_avg:99.14ms +step:1454/1695 train_time:144158ms step_avg:99.15ms +step:1455/1695 train_time:144260ms step_avg:99.15ms +step:1456/1695 train_time:144363ms step_avg:99.15ms +step:1457/1695 train_time:144466ms step_avg:99.15ms +step:1458/1695 train_time:144567ms step_avg:99.15ms +step:1459/1695 train_time:144669ms step_avg:99.16ms +step:1460/1695 train_time:144770ms step_avg:99.16ms +step:1461/1695 train_time:144872ms step_avg:99.16ms +step:1462/1695 train_time:144973ms step_avg:99.16ms +step:1463/1695 train_time:145074ms step_avg:99.16ms +step:1464/1695 train_time:145176ms step_avg:99.16ms +step:1465/1695 train_time:145277ms step_avg:99.17ms +step:1466/1695 train_time:145380ms step_avg:99.17ms +step:1467/1695 train_time:145483ms step_avg:99.17ms +step:1468/1695 train_time:145585ms step_avg:99.17ms +step:1469/1695 train_time:145687ms step_avg:99.17ms +step:1470/1695 train_time:145788ms step_avg:99.18ms +step:1471/1695 train_time:145890ms step_avg:99.18ms +step:1472/1695 train_time:145991ms step_avg:99.18ms +step:1473/1695 train_time:146092ms step_avg:99.18ms +step:1474/1695 train_time:146193ms step_avg:99.18ms +step:1475/1695 train_time:146294ms step_avg:99.18ms +step:1476/1695 train_time:146397ms step_avg:99.18ms +step:1477/1695 train_time:146500ms step_avg:99.19ms +step:1478/1695 train_time:146601ms step_avg:99.19ms +step:1479/1695 train_time:146703ms step_avg:99.19ms +step:1480/1695 train_time:146805ms step_avg:99.19ms +step:1481/1695 train_time:146906ms step_avg:99.19ms +step:1482/1695 train_time:147008ms step_avg:99.20ms +step:1483/1695 train_time:147110ms step_avg:99.20ms +step:1484/1695 train_time:147213ms step_avg:99.20ms +step:1485/1695 train_time:147315ms step_avg:99.20ms +step:1486/1695 train_time:147416ms step_avg:99.20ms +step:1487/1695 train_time:147517ms step_avg:99.20ms +step:1488/1695 train_time:147619ms step_avg:99.21ms +step:1489/1695 train_time:147722ms step_avg:99.21ms +step:1490/1695 train_time:147824ms step_avg:99.21ms +step:1491/1695 train_time:147926ms step_avg:99.21ms +step:1492/1695 train_time:148027ms step_avg:99.21ms +step:1493/1695 train_time:148128ms step_avg:99.22ms +step:1494/1695 train_time:148230ms step_avg:99.22ms +step:1495/1695 train_time:148331ms step_avg:99.22ms +step:1496/1695 train_time:148432ms step_avg:99.22ms +step:1497/1695 train_time:148533ms step_avg:99.22ms +step:1498/1695 train_time:148635ms step_avg:99.22ms +step:1499/1695 train_time:148736ms step_avg:99.22ms +step:1500/1695 train_time:148838ms step_avg:99.23ms +step:1500/1695 val_loss:3.3204 train_time:148940ms step_avg:99.29ms +step:1501/1695 train_time:148971ms step_avg:99.25ms +step:1502/1695 train_time:149057ms step_avg:99.24ms +step:1503/1695 train_time:149159ms step_avg:99.24ms +step:1504/1695 train_time:149260ms step_avg:99.24ms +step:1505/1695 train_time:149362ms step_avg:99.24ms +step:1506/1695 train_time:149462ms step_avg:99.24ms +step:1507/1695 train_time:149564ms step_avg:99.25ms 
+step:1508/1695 train_time:149665ms step_avg:99.25ms +step:1509/1695 train_time:149767ms step_avg:99.25ms +step:1510/1695 train_time:149869ms step_avg:99.25ms +step:1511/1695 train_time:149972ms step_avg:99.25ms +step:1512/1695 train_time:150074ms step_avg:99.26ms +step:1513/1695 train_time:150175ms step_avg:99.26ms +step:1514/1695 train_time:150277ms step_avg:99.26ms +step:1515/1695 train_time:150383ms step_avg:99.26ms +step:1516/1695 train_time:150484ms step_avg:99.26ms +step:1517/1695 train_time:150584ms step_avg:99.26ms +step:1518/1695 train_time:150685ms step_avg:99.27ms +step:1519/1695 train_time:150789ms step_avg:99.27ms +step:1520/1695 train_time:150890ms step_avg:99.27ms +step:1521/1695 train_time:150992ms step_avg:99.27ms +step:1522/1695 train_time:151093ms step_avg:99.27ms +step:1523/1695 train_time:151194ms step_avg:99.27ms +step:1524/1695 train_time:151297ms step_avg:99.28ms +step:1525/1695 train_time:151401ms step_avg:99.28ms +step:1526/1695 train_time:151504ms step_avg:99.28ms +step:1527/1695 train_time:151606ms step_avg:99.28ms +step:1528/1695 train_time:151711ms step_avg:99.29ms +step:1529/1695 train_time:151812ms step_avg:99.29ms +step:1530/1695 train_time:151916ms step_avg:99.29ms +step:1531/1695 train_time:152019ms step_avg:99.29ms +step:1532/1695 train_time:152121ms step_avg:99.30ms +step:1533/1695 train_time:152223ms step_avg:99.30ms +step:1534/1695 train_time:152325ms step_avg:99.30ms +step:1535/1695 train_time:152427ms step_avg:99.30ms +step:1536/1695 train_time:152528ms step_avg:99.30ms +step:1537/1695 train_time:152629ms step_avg:99.30ms +step:1538/1695 train_time:152730ms step_avg:99.30ms +step:1539/1695 train_time:152831ms step_avg:99.31ms +step:1540/1695 train_time:152933ms step_avg:99.31ms +step:1541/1695 train_time:153037ms step_avg:99.31ms +step:1542/1695 train_time:153143ms step_avg:99.31ms +step:1543/1695 train_time:153245ms step_avg:99.32ms +step:1544/1695 train_time:153347ms step_avg:99.32ms +step:1545/1695 train_time:153449ms step_avg:99.32ms +step:1546/1695 train_time:153550ms step_avg:99.32ms +step:1547/1695 train_time:153653ms step_avg:99.32ms +step:1548/1695 train_time:153755ms step_avg:99.32ms +step:1549/1695 train_time:153856ms step_avg:99.33ms +step:1550/1695 train_time:153958ms step_avg:99.33ms +step:1551/1695 train_time:154060ms step_avg:99.33ms +step:1552/1695 train_time:154163ms step_avg:99.33ms +step:1553/1695 train_time:154267ms step_avg:99.33ms +step:1554/1695 train_time:154368ms step_avg:99.34ms +step:1555/1695 train_time:154469ms step_avg:99.34ms +step:1556/1695 train_time:154571ms step_avg:99.34ms +step:1557/1695 train_time:154675ms step_avg:99.34ms +step:1558/1695 train_time:154779ms step_avg:99.34ms +step:1559/1695 train_time:154881ms step_avg:99.35ms +step:1560/1695 train_time:154983ms step_avg:99.35ms +step:1561/1695 train_time:155084ms step_avg:99.35ms +step:1562/1695 train_time:155186ms step_avg:99.35ms +step:1563/1695 train_time:155289ms step_avg:99.35ms +step:1564/1695 train_time:155391ms step_avg:99.35ms +step:1565/1695 train_time:155491ms step_avg:99.36ms +step:1566/1695 train_time:155593ms step_avg:99.36ms +step:1567/1695 train_time:155693ms step_avg:99.36ms +step:1568/1695 train_time:155794ms step_avg:99.36ms +step:1569/1695 train_time:155895ms step_avg:99.36ms +step:1570/1695 train_time:155997ms step_avg:99.36ms +step:1571/1695 train_time:156099ms step_avg:99.36ms +step:1572/1695 train_time:156202ms step_avg:99.36ms +step:1573/1695 train_time:156304ms step_avg:99.37ms +step:1574/1695 train_time:156405ms step_avg:99.37ms 
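[Annotation] the validation trajectory is easiest to read by pulling the val_loss rows out of the record; the snippet below parses exactly the line format used above (nothing assumed beyond the printed field names).
import re
# extract (step, val_loss) pairs from a record like this one
pat = re.compile(r"step:(\d+)/\d+ val_loss:([\d.]+)")
def val_curve(text: str) -> list[tuple[int, float]]:
    return [(int(s), float(v)) for s, v in pat.findall(text)]
# on this section it recovers a steadily falling curve:
# 500:3.7347  625:3.6497  750:3.5863  875:3.5358  1000:3.4922  1125:3.4399
# 1250:3.3944 1375:3.3545 1500:3.3204 1625:3.2911 1695:3.2784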
+step:1575/1695 train_time:156506ms step_avg:99.37ms +step:1576/1695 train_time:156609ms step_avg:99.37ms +step:1577/1695 train_time:156712ms step_avg:99.37ms +step:1578/1695 train_time:156812ms step_avg:99.37ms +step:1579/1695 train_time:156914ms step_avg:99.38ms +step:1580/1695 train_time:157015ms step_avg:99.38ms +step:1581/1695 train_time:157118ms step_avg:99.38ms +step:1582/1695 train_time:157221ms step_avg:99.38ms +step:1583/1695 train_time:157324ms step_avg:99.38ms +step:1584/1695 train_time:157426ms step_avg:99.39ms +step:1585/1695 train_time:157527ms step_avg:99.39ms +step:1586/1695 train_time:157629ms step_avg:99.39ms +step:1587/1695 train_time:157732ms step_avg:99.39ms +step:1588/1695 train_time:157833ms step_avg:99.39ms +step:1589/1695 train_time:157934ms step_avg:99.39ms +step:1590/1695 train_time:158035ms step_avg:99.39ms +step:1591/1695 train_time:158137ms step_avg:99.39ms +step:1592/1695 train_time:158240ms step_avg:99.40ms +step:1593/1695 train_time:158342ms step_avg:99.40ms +step:1594/1695 train_time:158446ms step_avg:99.40ms +step:1595/1695 train_time:158548ms step_avg:99.40ms +step:1596/1695 train_time:158650ms step_avg:99.40ms +step:1597/1695 train_time:158751ms step_avg:99.41ms +step:1598/1695 train_time:158854ms step_avg:99.41ms +step:1599/1695 train_time:158956ms step_avg:99.41ms +step:1600/1695 train_time:159058ms step_avg:99.41ms +step:1601/1695 train_time:159161ms step_avg:99.41ms +step:1602/1695 train_time:159263ms step_avg:99.41ms +step:1603/1695 train_time:159364ms step_avg:99.42ms +step:1604/1695 train_time:159465ms step_avg:99.42ms +step:1605/1695 train_time:159568ms step_avg:99.42ms +step:1606/1695 train_time:159670ms step_avg:99.42ms +step:1607/1695 train_time:159771ms step_avg:99.42ms +step:1608/1695 train_time:159872ms step_avg:99.42ms +step:1609/1695 train_time:159974ms step_avg:99.42ms +step:1610/1695 train_time:160076ms step_avg:99.43ms +step:1611/1695 train_time:160179ms step_avg:99.43ms +step:1612/1695 train_time:160281ms step_avg:99.43ms +step:1613/1695 train_time:160383ms step_avg:99.43ms +step:1614/1695 train_time:160484ms step_avg:99.43ms +step:1615/1695 train_time:160586ms step_avg:99.43ms +step:1616/1695 train_time:160687ms step_avg:99.44ms +step:1617/1695 train_time:160789ms step_avg:99.44ms +step:1618/1695 train_time:160891ms step_avg:99.44ms +step:1619/1695 train_time:160992ms step_avg:99.44ms +step:1620/1695 train_time:161094ms step_avg:99.44ms +step:1621/1695 train_time:161195ms step_avg:99.44ms +step:1622/1695 train_time:161297ms step_avg:99.44ms +step:1623/1695 train_time:161402ms step_avg:99.45ms +step:1624/1695 train_time:161504ms step_avg:99.45ms +step:1625/1695 train_time:161607ms step_avg:99.45ms +step:1625/1695 val_loss:3.2911 train_time:161708ms step_avg:99.51ms +step:1626/1695 train_time:161735ms step_avg:99.47ms +step:1627/1695 train_time:161823ms step_avg:99.46ms +step:1628/1695 train_time:161924ms step_avg:99.46ms +step:1629/1695 train_time:162026ms step_avg:99.46ms +step:1630/1695 train_time:162128ms step_avg:99.46ms +step:1631/1695 train_time:162230ms step_avg:99.47ms +step:1632/1695 train_time:162332ms step_avg:99.47ms +step:1633/1695 train_time:162432ms step_avg:99.47ms +step:1634/1695 train_time:162534ms step_avg:99.47ms +step:1635/1695 train_time:162637ms step_avg:99.47ms +step:1636/1695 train_time:162740ms step_avg:99.47ms +step:1637/1695 train_time:162843ms step_avg:99.48ms +step:1638/1695 train_time:162945ms step_avg:99.48ms +step:1639/1695 train_time:163047ms step_avg:99.48ms +step:1640/1695 train_time:163150ms 
step_avg:99.48ms +step:1641/1695 train_time:163253ms step_avg:99.48ms +step:1642/1695 train_time:163356ms step_avg:99.49ms +step:1643/1695 train_time:163459ms step_avg:99.49ms +step:1644/1695 train_time:163561ms step_avg:99.49ms +step:1645/1695 train_time:163664ms step_avg:99.49ms +step:1646/1695 train_time:163768ms step_avg:99.49ms +step:1647/1695 train_time:163871ms step_avg:99.50ms +step:1648/1695 train_time:163976ms step_avg:99.50ms +step:1649/1695 train_time:164078ms step_avg:99.50ms +step:1650/1695 train_time:164182ms step_avg:99.50ms +step:1651/1695 train_time:164283ms step_avg:99.51ms +step:1652/1695 train_time:164385ms step_avg:99.51ms +step:1653/1695 train_time:164488ms step_avg:99.51ms +step:1654/1695 train_time:164590ms step_avg:99.51ms +step:1655/1695 train_time:164692ms step_avg:99.51ms +step:1656/1695 train_time:164794ms step_avg:99.51ms +step:1657/1695 train_time:164896ms step_avg:99.51ms +step:1658/1695 train_time:165000ms step_avg:99.52ms +step:1659/1695 train_time:165107ms step_avg:99.52ms +step:1660/1695 train_time:165209ms step_avg:99.52ms +step:1661/1695 train_time:165313ms step_avg:99.53ms +step:1662/1695 train_time:165418ms step_avg:99.53ms +step:1663/1695 train_time:165522ms step_avg:99.53ms +step:1664/1695 train_time:165624ms step_avg:99.53ms +step:1665/1695 train_time:165731ms step_avg:99.54ms +step:1666/1695 train_time:165833ms step_avg:99.54ms +step:1667/1695 train_time:165935ms step_avg:99.54ms +step:1668/1695 train_time:166039ms step_avg:99.54ms +step:1669/1695 train_time:166143ms step_avg:99.55ms +step:1670/1695 train_time:166245ms step_avg:99.55ms +step:1671/1695 train_time:166347ms step_avg:99.55ms +step:1672/1695 train_time:166450ms step_avg:99.55ms +step:1673/1695 train_time:166552ms step_avg:99.55ms +step:1674/1695 train_time:166654ms step_avg:99.55ms +step:1675/1695 train_time:166757ms step_avg:99.56ms +step:1676/1695 train_time:166861ms step_avg:99.56ms +step:1677/1695 train_time:166963ms step_avg:99.56ms +step:1678/1695 train_time:167066ms step_avg:99.56ms +step:1679/1695 train_time:167169ms step_avg:99.56ms +step:1680/1695 train_time:167272ms step_avg:99.57ms +step:1681/1695 train_time:167375ms step_avg:99.57ms +step:1682/1695 train_time:167483ms step_avg:99.57ms +step:1683/1695 train_time:167585ms step_avg:99.58ms +step:1684/1695 train_time:167687ms step_avg:99.58ms +step:1685/1695 train_time:167790ms step_avg:99.58ms +step:1686/1695 train_time:167893ms step_avg:99.58ms +step:1687/1695 train_time:167995ms step_avg:99.58ms +step:1688/1695 train_time:168097ms step_avg:99.58ms +step:1689/1695 train_time:168199ms step_avg:99.59ms +step:1690/1695 train_time:168301ms step_avg:99.59ms +step:1691/1695 train_time:168403ms step_avg:99.59ms +step:1692/1695 train_time:168505ms step_avg:99.59ms +step:1693/1695 train_time:168610ms step_avg:99.59ms +step:1694/1695 train_time:168714ms step_avg:99.60ms +step:1695/1695 train_time:168817ms step_avg:99.60ms +step:1695/1695 val_loss:3.2784 train_time:168917ms step_avg:99.66ms +peak memory allocated: 34004 MiB reserved: 49660 MiB diff --git a/records/082325_SparseAttnGate/ca042caf-b232-4a25-b28f-88e39a2009d3.txt b/records/082325_SparseAttnGate/ca042caf-b232-4a25-b28f-88e39a2009d3.txt new file mode 100644 index 000000000..2b5eb6f73 --- /dev/null +++ b/records/082325_SparseAttnGate/ca042caf-b232-4a25-b28f-88e39a2009d3.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
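# A.T tiles are read from the same buffer as A, so their pointers advance along K by the same stride (a_stride_c): +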
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
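+ + For reference, the Newton-Schulz variant used here (see newton_schulz_triton above) is the quintic iteration X <- a*X + b*(X @ X.T) @ X + c*(X @ X.T) @ (X @ X.T) @ X with coefficients (a, b, c) = (3.4445, -4.7750, 2.0315), run for 5 steps in bfloat16 after normalizing the input so its spectral norm is at most 1.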
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
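# reduce-scatter averages this parameter's gradient across ranks and leaves each rank holding only its own 1/world_size shard; + # issued async so communication overlaps with queueing the remaining scatters and the Adam update below: +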
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
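+ # lr_mul (and wd_mul) are per-parameter multipliers; both optimizers read them via getattr(p, "lr_mul", 1.0) / getattr(p, "wd_mul", 1.0), so the effective lr is group lr * lr_mul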
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
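# each rank evaluated a different shard of the validation set, so average the loss across ranks; every process then logs the identical number (print0 writes from rank 0 only): +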
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:20:14 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 32C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 124W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 31C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 121W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 303577 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 303578 C /usr/bin/python3 614MiB | +| 0 N/A N/A 303579 C /usr/bin/python3 614MiB | +| 0 N/A N/A 303580 C /usr/bin/python3 614MiB | +| 0 N/A N/A 303581 C /usr/bin/python3 614MiB | +| 0 N/A N/A 303582 C /usr/bin/python3 614MiB | +| 0 N/A N/A 303583 C /usr/bin/python3 614MiB | +| 0 N/A N/A 303584 C /usr/bin/python3 614MiB | +| 1 N/A N/A 303578 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 303579 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 303580 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 303581 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 303582 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 303583 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 303584 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:154ms step_avg:154.29ms +step:2/1695 train_time:181ms step_avg:90.43ms +step:3/1695 train_time:250ms step_avg:83.32ms +step:4/1695 train_time:342ms step_avg:85.40ms +step:5/1695 train_time:434ms step_avg:86.90ms +step:6/1695 train_time:527ms step_avg:87.85ms +step:7/1695 train_time:620ms step_avg:88.57ms +step:8/1695 
train_time:713ms step_avg:89.11ms +step:9/1695 train_time:806ms step_avg:89.52ms +step:10/1695 train_time:899ms step_avg:89.85ms +step:11/1695 train_time:993ms step_avg:90.24ms +step:12/1695 train_time:1089ms step_avg:90.71ms +step:13/1695 train_time:1183ms step_avg:91.00ms +step:14/1695 train_time:1278ms step_avg:91.25ms +step:15/1695 train_time:1372ms step_avg:91.45ms +step:16/1695 train_time:1465ms step_avg:91.59ms +step:17/1695 train_time:1559ms step_avg:91.68ms +step:18/1695 train_time:1652ms step_avg:91.77ms +step:19/1695 train_time:1745ms step_avg:91.83ms +step:20/1695 train_time:1838ms step_avg:91.91ms +step:21/1695 train_time:1933ms step_avg:92.04ms +step:22/1695 train_time:2027ms step_avg:92.14ms +step:23/1695 train_time:2121ms step_avg:92.21ms +step:24/1695 train_time:2216ms step_avg:92.32ms +step:25/1695 train_time:2310ms step_avg:92.40ms +step:26/1695 train_time:2405ms step_avg:92.49ms +step:27/1695 train_time:2499ms step_avg:92.54ms +step:28/1695 train_time:2593ms step_avg:92.60ms +step:29/1695 train_time:2687ms step_avg:92.65ms +step:30/1695 train_time:2780ms step_avg:92.67ms +step:31/1695 train_time:2873ms step_avg:92.69ms +step:32/1695 train_time:2967ms step_avg:92.72ms +step:33/1695 train_time:3060ms step_avg:92.73ms +step:34/1695 train_time:3155ms step_avg:92.79ms +step:35/1695 train_time:3249ms step_avg:92.84ms +step:36/1695 train_time:3343ms step_avg:92.87ms +step:37/1695 train_time:3437ms step_avg:92.89ms +step:38/1695 train_time:3531ms step_avg:92.93ms +step:39/1695 train_time:3625ms step_avg:92.95ms +step:40/1695 train_time:3718ms step_avg:92.96ms +step:41/1695 train_time:3812ms step_avg:92.97ms +step:42/1695 train_time:3906ms step_avg:92.99ms +step:43/1695 train_time:3999ms step_avg:92.99ms +step:44/1695 train_time:4092ms step_avg:93.00ms +step:45/1695 train_time:4186ms step_avg:93.03ms +step:46/1695 train_time:4280ms step_avg:93.04ms +step:47/1695 train_time:4374ms step_avg:93.06ms +step:48/1695 train_time:4468ms step_avg:93.08ms +step:49/1695 train_time:4561ms step_avg:93.09ms +step:50/1695 train_time:4655ms step_avg:93.10ms +step:51/1695 train_time:4750ms step_avg:93.13ms +step:52/1695 train_time:4843ms step_avg:93.13ms +step:53/1695 train_time:4936ms step_avg:93.14ms +step:54/1695 train_time:5031ms step_avg:93.17ms +step:55/1695 train_time:5126ms step_avg:93.20ms +step:56/1695 train_time:5219ms step_avg:93.20ms +step:57/1695 train_time:5315ms step_avg:93.24ms +step:58/1695 train_time:5409ms step_avg:93.26ms +step:59/1695 train_time:5502ms step_avg:93.26ms +step:60/1695 train_time:5596ms step_avg:93.26ms +step:61/1695 train_time:5690ms step_avg:93.28ms +step:62/1695 train_time:5784ms step_avg:93.29ms +step:63/1695 train_time:5877ms step_avg:93.28ms +step:64/1695 train_time:5971ms step_avg:93.30ms +step:65/1695 train_time:6065ms step_avg:93.31ms +step:66/1695 train_time:6159ms step_avg:93.31ms +step:67/1695 train_time:6253ms step_avg:93.33ms +step:68/1695 train_time:6347ms step_avg:93.34ms +step:69/1695 train_time:6440ms step_avg:93.34ms +step:70/1695 train_time:6535ms step_avg:93.35ms +step:71/1695 train_time:6628ms step_avg:93.35ms +step:72/1695 train_time:6721ms step_avg:93.35ms +step:73/1695 train_time:6815ms step_avg:93.35ms +step:74/1695 train_time:6909ms step_avg:93.36ms +step:75/1695 train_time:7003ms step_avg:93.37ms +step:76/1695 train_time:7096ms step_avg:93.37ms +step:77/1695 train_time:7191ms step_avg:93.39ms +step:78/1695 train_time:7285ms step_avg:93.40ms +step:79/1695 train_time:7379ms step_avg:93.40ms +step:80/1695 train_time:7473ms 
step_avg:93.41ms +step:81/1695 train_time:7566ms step_avg:93.41ms +step:82/1695 train_time:7659ms step_avg:93.41ms +step:83/1695 train_time:7753ms step_avg:93.41ms +step:84/1695 train_time:7846ms step_avg:93.41ms +step:85/1695 train_time:7940ms step_avg:93.42ms +step:86/1695 train_time:8034ms step_avg:93.42ms +step:87/1695 train_time:8127ms step_avg:93.42ms +step:88/1695 train_time:8220ms step_avg:93.41ms +step:89/1695 train_time:8315ms step_avg:93.43ms +step:90/1695 train_time:8410ms step_avg:93.44ms +step:91/1695 train_time:8504ms step_avg:93.45ms +step:92/1695 train_time:8597ms step_avg:93.45ms +step:93/1695 train_time:8691ms step_avg:93.45ms +step:94/1695 train_time:8784ms step_avg:93.44ms +step:95/1695 train_time:8877ms step_avg:93.44ms +step:96/1695 train_time:8971ms step_avg:93.45ms +step:97/1695 train_time:9064ms step_avg:93.44ms +step:98/1695 train_time:9157ms step_avg:93.44ms +step:99/1695 train_time:9251ms step_avg:93.45ms +step:100/1695 train_time:9345ms step_avg:93.45ms +step:101/1695 train_time:9439ms step_avg:93.46ms +step:102/1695 train_time:9534ms step_avg:93.47ms +step:103/1695 train_time:9627ms step_avg:93.47ms +step:104/1695 train_time:9721ms step_avg:93.47ms +step:105/1695 train_time:9814ms step_avg:93.47ms +step:106/1695 train_time:9907ms step_avg:93.47ms +step:107/1695 train_time:10000ms step_avg:93.46ms +step:108/1695 train_time:10094ms step_avg:93.46ms +step:109/1695 train_time:10188ms step_avg:93.47ms +step:110/1695 train_time:10281ms step_avg:93.47ms +step:111/1695 train_time:10375ms step_avg:93.47ms +step:112/1695 train_time:10469ms step_avg:93.47ms +step:113/1695 train_time:10562ms step_avg:93.47ms +step:114/1695 train_time:10656ms step_avg:93.47ms +step:115/1695 train_time:10750ms step_avg:93.48ms +step:116/1695 train_time:10844ms step_avg:93.48ms +step:117/1695 train_time:10937ms step_avg:93.47ms +step:118/1695 train_time:11031ms step_avg:93.48ms +step:119/1695 train_time:11124ms step_avg:93.48ms +step:120/1695 train_time:11218ms step_avg:93.48ms +step:121/1695 train_time:11312ms step_avg:93.48ms +step:122/1695 train_time:11406ms step_avg:93.49ms +step:123/1695 train_time:11499ms step_avg:93.49ms +step:124/1695 train_time:11593ms step_avg:93.49ms +step:125/1695 train_time:11687ms step_avg:93.50ms +step:125/1695 val_loss:4.6033 train_time:11779ms step_avg:94.23ms +step:126/1695 train_time:11806ms step_avg:93.70ms +step:127/1695 train_time:11881ms step_avg:93.55ms +step:128/1695 train_time:11983ms step_avg:93.62ms +step:129/1695 train_time:12081ms step_avg:93.65ms +step:130/1695 train_time:12176ms step_avg:93.66ms +step:131/1695 train_time:12269ms step_avg:93.66ms +step:132/1695 train_time:12362ms step_avg:93.65ms +step:133/1695 train_time:12455ms step_avg:93.65ms +step:134/1695 train_time:12549ms step_avg:93.65ms +step:135/1695 train_time:12642ms step_avg:93.64ms +step:136/1695 train_time:12736ms step_avg:93.64ms +step:137/1695 train_time:12830ms step_avg:93.65ms +step:138/1695 train_time:12925ms step_avg:93.66ms +step:139/1695 train_time:13021ms step_avg:93.68ms +step:140/1695 train_time:13117ms step_avg:93.70ms +step:141/1695 train_time:13212ms step_avg:93.71ms +step:142/1695 train_time:13307ms step_avg:93.71ms +step:143/1695 train_time:13400ms step_avg:93.71ms +step:144/1695 train_time:13494ms step_avg:93.71ms +step:145/1695 train_time:13588ms step_avg:93.71ms +step:146/1695 train_time:13682ms step_avg:93.71ms +step:147/1695 train_time:13775ms step_avg:93.71ms +step:148/1695 train_time:13869ms step_avg:93.71ms +step:149/1695 train_time:13964ms 
step_avg:93.72ms +step:150/1695 train_time:14058ms step_avg:93.72ms +step:151/1695 train_time:14154ms step_avg:93.74ms +step:152/1695 train_time:14248ms step_avg:93.74ms +step:153/1695 train_time:14341ms step_avg:93.73ms +step:154/1695 train_time:14437ms step_avg:93.74ms +step:155/1695 train_time:14532ms step_avg:93.75ms +step:156/1695 train_time:14626ms step_avg:93.75ms +step:157/1695 train_time:14719ms step_avg:93.75ms +step:158/1695 train_time:14814ms step_avg:93.76ms +step:159/1695 train_time:14908ms step_avg:93.76ms +step:160/1695 train_time:15002ms step_avg:93.77ms +step:161/1695 train_time:15097ms step_avg:93.77ms +step:162/1695 train_time:15192ms step_avg:93.78ms +step:163/1695 train_time:15286ms step_avg:93.78ms +step:164/1695 train_time:15380ms step_avg:93.78ms +step:165/1695 train_time:15474ms step_avg:93.78ms +step:166/1695 train_time:15569ms step_avg:93.79ms +step:167/1695 train_time:15663ms step_avg:93.79ms +step:168/1695 train_time:15757ms step_avg:93.79ms +step:169/1695 train_time:15851ms step_avg:93.79ms +step:170/1695 train_time:15945ms step_avg:93.80ms +step:171/1695 train_time:16039ms step_avg:93.79ms +step:172/1695 train_time:16133ms step_avg:93.80ms +step:173/1695 train_time:16227ms step_avg:93.80ms +step:174/1695 train_time:16320ms step_avg:93.79ms +step:175/1695 train_time:16415ms step_avg:93.80ms +step:176/1695 train_time:16509ms step_avg:93.80ms +step:177/1695 train_time:16603ms step_avg:93.80ms +step:178/1695 train_time:16697ms step_avg:93.80ms +step:179/1695 train_time:16791ms step_avg:93.81ms +step:180/1695 train_time:16885ms step_avg:93.81ms +step:181/1695 train_time:16979ms step_avg:93.81ms +step:182/1695 train_time:17074ms step_avg:93.81ms +step:183/1695 train_time:17168ms step_avg:93.82ms +step:184/1695 train_time:17262ms step_avg:93.82ms +step:185/1695 train_time:17356ms step_avg:93.82ms +step:186/1695 train_time:17451ms step_avg:93.82ms +step:187/1695 train_time:17545ms step_avg:93.83ms +step:188/1695 train_time:17639ms step_avg:93.83ms +step:189/1695 train_time:17733ms step_avg:93.83ms +step:190/1695 train_time:17827ms step_avg:93.82ms +step:191/1695 train_time:17920ms step_avg:93.82ms +step:192/1695 train_time:18015ms step_avg:93.83ms +step:193/1695 train_time:18110ms step_avg:93.83ms +step:194/1695 train_time:18203ms step_avg:93.83ms +step:195/1695 train_time:18297ms step_avg:93.83ms +step:196/1695 train_time:18392ms step_avg:93.84ms +step:197/1695 train_time:18486ms step_avg:93.84ms +step:198/1695 train_time:18580ms step_avg:93.84ms +step:199/1695 train_time:18675ms step_avg:93.84ms +step:200/1695 train_time:18769ms step_avg:93.85ms +step:201/1695 train_time:18862ms step_avg:93.84ms +step:202/1695 train_time:18957ms step_avg:93.84ms +step:203/1695 train_time:19052ms step_avg:93.85ms +step:204/1695 train_time:19145ms step_avg:93.85ms +step:205/1695 train_time:19239ms step_avg:93.85ms +step:206/1695 train_time:19333ms step_avg:93.85ms +step:207/1695 train_time:19427ms step_avg:93.85ms +step:208/1695 train_time:19520ms step_avg:93.85ms +step:209/1695 train_time:19615ms step_avg:93.85ms +step:210/1695 train_time:19709ms step_avg:93.85ms +step:211/1695 train_time:19803ms step_avg:93.85ms +step:212/1695 train_time:19897ms step_avg:93.86ms +step:213/1695 train_time:19991ms step_avg:93.86ms +step:214/1695 train_time:20085ms step_avg:93.86ms +step:215/1695 train_time:20179ms step_avg:93.86ms +step:216/1695 train_time:20274ms step_avg:93.86ms +step:217/1695 train_time:20368ms step_avg:93.86ms +step:218/1695 train_time:20461ms step_avg:93.86ms +step:219/1695 
train_time:20555ms step_avg:93.86ms +step:220/1695 train_time:20650ms step_avg:93.86ms +step:221/1695 train_time:20745ms step_avg:93.87ms +step:222/1695 train_time:20839ms step_avg:93.87ms +step:223/1695 train_time:20933ms step_avg:93.87ms +step:224/1695 train_time:21027ms step_avg:93.87ms +step:225/1695 train_time:21121ms step_avg:93.87ms +step:226/1695 train_time:21216ms step_avg:93.88ms +step:227/1695 train_time:21311ms step_avg:93.88ms +step:228/1695 train_time:21405ms step_avg:93.88ms +step:229/1695 train_time:21499ms step_avg:93.88ms +step:230/1695 train_time:21594ms step_avg:93.89ms +step:231/1695 train_time:21688ms step_avg:93.89ms +step:232/1695 train_time:21781ms step_avg:93.88ms +step:233/1695 train_time:21875ms step_avg:93.88ms +step:234/1695 train_time:21969ms step_avg:93.88ms +step:235/1695 train_time:22062ms step_avg:93.88ms +step:236/1695 train_time:22156ms step_avg:93.88ms +step:237/1695 train_time:22250ms step_avg:93.88ms +step:238/1695 train_time:22344ms step_avg:93.88ms +step:239/1695 train_time:22438ms step_avg:93.88ms +step:240/1695 train_time:22532ms step_avg:93.88ms +step:241/1695 train_time:22626ms step_avg:93.88ms +step:242/1695 train_time:22720ms step_avg:93.88ms +step:243/1695 train_time:22814ms step_avg:93.88ms +step:244/1695 train_time:22907ms step_avg:93.88ms +step:245/1695 train_time:23001ms step_avg:93.88ms +step:246/1695 train_time:23096ms step_avg:93.88ms +step:247/1695 train_time:23189ms step_avg:93.88ms +step:248/1695 train_time:23283ms step_avg:93.88ms +step:249/1695 train_time:23377ms step_avg:93.88ms +step:250/1695 train_time:23470ms step_avg:93.88ms +step:250/1695 val_loss:4.0846 train_time:23562ms step_avg:94.25ms +step:251/1695 train_time:23590ms step_avg:93.98ms +step:252/1695 train_time:23665ms step_avg:93.91ms +step:253/1695 train_time:23763ms step_avg:93.92ms +step:254/1695 train_time:23858ms step_avg:93.93ms +step:255/1695 train_time:23952ms step_avg:93.93ms +step:256/1695 train_time:24047ms step_avg:93.93ms +step:257/1695 train_time:24140ms step_avg:93.93ms +step:258/1695 train_time:24234ms step_avg:93.93ms +step:259/1695 train_time:24327ms step_avg:93.93ms +step:260/1695 train_time:24421ms step_avg:93.93ms +step:261/1695 train_time:24515ms step_avg:93.93ms +step:262/1695 train_time:24612ms step_avg:93.94ms +step:263/1695 train_time:24708ms step_avg:93.95ms +step:264/1695 train_time:24803ms step_avg:93.95ms +step:265/1695 train_time:24898ms step_avg:93.95ms +step:266/1695 train_time:24993ms step_avg:93.96ms +step:267/1695 train_time:25087ms step_avg:93.96ms +step:268/1695 train_time:25181ms step_avg:93.96ms +step:269/1695 train_time:25274ms step_avg:93.96ms +step:270/1695 train_time:25369ms step_avg:93.96ms +step:271/1695 train_time:25463ms step_avg:93.96ms +step:272/1695 train_time:25557ms step_avg:93.96ms +step:273/1695 train_time:25653ms step_avg:93.97ms +step:274/1695 train_time:25750ms step_avg:93.98ms +step:275/1695 train_time:25845ms step_avg:93.98ms +step:276/1695 train_time:25939ms step_avg:93.98ms +step:277/1695 train_time:26033ms step_avg:93.98ms +step:278/1695 train_time:26128ms step_avg:93.99ms +step:279/1695 train_time:26222ms step_avg:93.99ms +step:280/1695 train_time:26316ms step_avg:93.99ms +step:281/1695 train_time:26411ms step_avg:93.99ms +step:282/1695 train_time:26506ms step_avg:93.99ms +step:283/1695 train_time:26600ms step_avg:93.99ms +step:284/1695 train_time:26695ms step_avg:94.00ms +step:285/1695 train_time:26791ms step_avg:94.00ms +step:286/1695 train_time:26887ms step_avg:94.01ms +step:287/1695 train_time:26981ms 
step_avg:94.01ms +step:288/1695 train_time:27075ms step_avg:94.01ms +step:289/1695 train_time:27170ms step_avg:94.01ms +step:290/1695 train_time:27264ms step_avg:94.02ms +step:291/1695 train_time:27358ms step_avg:94.01ms +step:292/1695 train_time:27453ms step_avg:94.02ms +step:293/1695 train_time:27548ms step_avg:94.02ms +step:294/1695 train_time:27642ms step_avg:94.02ms +step:295/1695 train_time:27736ms step_avg:94.02ms +step:296/1695 train_time:27832ms step_avg:94.03ms +step:297/1695 train_time:27927ms step_avg:94.03ms +step:298/1695 train_time:28022ms step_avg:94.03ms +step:299/1695 train_time:28116ms step_avg:94.03ms +step:300/1695 train_time:28210ms step_avg:94.03ms +step:301/1695 train_time:28304ms step_avg:94.03ms +step:302/1695 train_time:28398ms step_avg:94.03ms +step:303/1695 train_time:28492ms step_avg:94.03ms +step:304/1695 train_time:28587ms step_avg:94.04ms +step:305/1695 train_time:28681ms step_avg:94.04ms +step:306/1695 train_time:28775ms step_avg:94.03ms +step:307/1695 train_time:28870ms step_avg:94.04ms +step:308/1695 train_time:28965ms step_avg:94.04ms +step:309/1695 train_time:29059ms step_avg:94.04ms +step:310/1695 train_time:29154ms step_avg:94.04ms +step:311/1695 train_time:29248ms step_avg:94.05ms +step:312/1695 train_time:29343ms step_avg:94.05ms +step:313/1695 train_time:29436ms step_avg:94.05ms +step:314/1695 train_time:29532ms step_avg:94.05ms +step:315/1695 train_time:29627ms step_avg:94.05ms +step:316/1695 train_time:29722ms step_avg:94.06ms +step:317/1695 train_time:29816ms step_avg:94.06ms +step:318/1695 train_time:29911ms step_avg:94.06ms +step:319/1695 train_time:30007ms step_avg:94.07ms +step:320/1695 train_time:30101ms step_avg:94.06ms +step:321/1695 train_time:30195ms step_avg:94.07ms +step:322/1695 train_time:30291ms step_avg:94.07ms +step:323/1695 train_time:30385ms step_avg:94.07ms +step:324/1695 train_time:30479ms step_avg:94.07ms +step:325/1695 train_time:30574ms step_avg:94.07ms +step:326/1695 train_time:30668ms step_avg:94.07ms +step:327/1695 train_time:30763ms step_avg:94.08ms +step:328/1695 train_time:30857ms step_avg:94.08ms +step:329/1695 train_time:30952ms step_avg:94.08ms +step:330/1695 train_time:31047ms step_avg:94.08ms +step:331/1695 train_time:31140ms step_avg:94.08ms +step:332/1695 train_time:31235ms step_avg:94.08ms +step:333/1695 train_time:31330ms step_avg:94.08ms +step:334/1695 train_time:31425ms step_avg:94.09ms +step:335/1695 train_time:31519ms step_avg:94.09ms +step:336/1695 train_time:31614ms step_avg:94.09ms +step:337/1695 train_time:31710ms step_avg:94.09ms +step:338/1695 train_time:31803ms step_avg:94.09ms +step:339/1695 train_time:31898ms step_avg:94.09ms +step:340/1695 train_time:31992ms step_avg:94.10ms +step:341/1695 train_time:32087ms step_avg:94.10ms +step:342/1695 train_time:32181ms step_avg:94.10ms +step:343/1695 train_time:32275ms step_avg:94.10ms +step:344/1695 train_time:32370ms step_avg:94.10ms +step:345/1695 train_time:32464ms step_avg:94.10ms +step:346/1695 train_time:32559ms step_avg:94.10ms +step:347/1695 train_time:32654ms step_avg:94.10ms +step:348/1695 train_time:32747ms step_avg:94.10ms +step:349/1695 train_time:32842ms step_avg:94.10ms +step:350/1695 train_time:32937ms step_avg:94.10ms +step:351/1695 train_time:33032ms step_avg:94.11ms +step:352/1695 train_time:33126ms step_avg:94.11ms +step:353/1695 train_time:33220ms step_avg:94.11ms +step:354/1695 train_time:33315ms step_avg:94.11ms +step:355/1695 train_time:33410ms step_avg:94.11ms +step:356/1695 train_time:33504ms step_avg:94.11ms +step:357/1695 
train_time:33598ms step_avg:94.11ms +step:358/1695 train_time:33692ms step_avg:94.11ms +step:359/1695 train_time:33786ms step_avg:94.11ms +step:360/1695 train_time:33880ms step_avg:94.11ms +step:361/1695 train_time:33974ms step_avg:94.11ms +step:362/1695 train_time:34070ms step_avg:94.12ms +step:363/1695 train_time:34163ms step_avg:94.11ms +step:364/1695 train_time:34258ms step_avg:94.11ms +step:365/1695 train_time:34352ms step_avg:94.12ms +step:366/1695 train_time:34447ms step_avg:94.12ms +step:367/1695 train_time:34541ms step_avg:94.12ms +step:368/1695 train_time:34635ms step_avg:94.12ms +step:369/1695 train_time:34730ms step_avg:94.12ms +step:370/1695 train_time:34824ms step_avg:94.12ms +step:371/1695 train_time:34918ms step_avg:94.12ms +step:372/1695 train_time:35013ms step_avg:94.12ms +step:373/1695 train_time:35107ms step_avg:94.12ms +step:374/1695 train_time:35202ms step_avg:94.12ms +step:375/1695 train_time:35296ms step_avg:94.12ms +step:375/1695 val_loss:3.8779 train_time:35389ms step_avg:94.37ms +step:376/1695 train_time:35417ms step_avg:94.19ms +step:377/1695 train_time:35495ms step_avg:94.15ms +step:378/1695 train_time:35595ms step_avg:94.17ms +step:379/1695 train_time:35693ms step_avg:94.18ms +step:380/1695 train_time:35789ms step_avg:94.18ms +step:381/1695 train_time:35884ms step_avg:94.18ms +step:382/1695 train_time:35980ms step_avg:94.19ms +step:383/1695 train_time:36075ms step_avg:94.19ms +step:384/1695 train_time:36170ms step_avg:94.19ms +step:385/1695 train_time:36266ms step_avg:94.20ms +step:386/1695 train_time:36361ms step_avg:94.20ms +step:387/1695 train_time:36459ms step_avg:94.21ms +step:388/1695 train_time:36556ms step_avg:94.22ms +step:389/1695 train_time:36653ms step_avg:94.22ms +step:390/1695 train_time:36750ms step_avg:94.23ms +step:391/1695 train_time:36846ms step_avg:94.24ms +step:392/1695 train_time:36943ms step_avg:94.24ms +step:393/1695 train_time:37038ms step_avg:94.24ms +step:394/1695 train_time:37134ms step_avg:94.25ms +step:395/1695 train_time:37229ms step_avg:94.25ms +step:396/1695 train_time:37325ms step_avg:94.26ms +step:397/1695 train_time:37421ms step_avg:94.26ms +step:398/1695 train_time:37517ms step_avg:94.26ms +step:399/1695 train_time:37613ms step_avg:94.27ms +step:400/1695 train_time:37710ms step_avg:94.27ms +step:401/1695 train_time:37806ms step_avg:94.28ms +step:402/1695 train_time:37903ms step_avg:94.29ms +step:403/1695 train_time:37999ms step_avg:94.29ms +step:404/1695 train_time:38095ms step_avg:94.29ms +step:405/1695 train_time:38190ms step_avg:94.30ms +step:406/1695 train_time:38286ms step_avg:94.30ms +step:407/1695 train_time:38382ms step_avg:94.31ms +step:408/1695 train_time:38478ms step_avg:94.31ms +step:409/1695 train_time:38574ms step_avg:94.31ms +step:410/1695 train_time:38670ms step_avg:94.32ms +step:411/1695 train_time:38766ms step_avg:94.32ms +step:412/1695 train_time:38863ms step_avg:94.33ms +step:413/1695 train_time:38959ms step_avg:94.33ms +step:414/1695 train_time:39054ms step_avg:94.33ms +step:415/1695 train_time:39150ms step_avg:94.34ms +step:416/1695 train_time:39246ms step_avg:94.34ms +step:417/1695 train_time:39343ms step_avg:94.35ms +step:418/1695 train_time:39439ms step_avg:94.35ms +step:419/1695 train_time:39535ms step_avg:94.35ms +step:420/1695 train_time:39630ms step_avg:94.36ms +step:421/1695 train_time:39728ms step_avg:94.36ms +step:422/1695 train_time:39824ms step_avg:94.37ms +step:423/1695 train_time:39920ms step_avg:94.37ms +step:424/1695 train_time:40016ms step_avg:94.38ms +step:425/1695 train_time:40111ms 
step_avg:94.38ms +step:426/1695 train_time:40208ms step_avg:94.38ms +step:427/1695 train_time:40304ms step_avg:94.39ms +step:428/1695 train_time:40400ms step_avg:94.39ms +step:429/1695 train_time:40496ms step_avg:94.40ms +step:430/1695 train_time:40592ms step_avg:94.40ms +step:431/1695 train_time:40689ms step_avg:94.41ms +step:432/1695 train_time:40785ms step_avg:94.41ms +step:433/1695 train_time:40882ms step_avg:94.42ms +step:434/1695 train_time:40978ms step_avg:94.42ms +step:435/1695 train_time:41074ms step_avg:94.42ms +step:436/1695 train_time:41171ms step_avg:94.43ms +step:437/1695 train_time:41267ms step_avg:94.43ms +step:438/1695 train_time:41363ms step_avg:94.44ms +step:439/1695 train_time:41459ms step_avg:94.44ms +step:440/1695 train_time:41555ms step_avg:94.44ms +step:441/1695 train_time:41651ms step_avg:94.45ms +step:442/1695 train_time:41747ms step_avg:94.45ms +step:443/1695 train_time:41844ms step_avg:94.46ms +step:444/1695 train_time:41940ms step_avg:94.46ms +step:445/1695 train_time:42035ms step_avg:94.46ms +step:446/1695 train_time:42131ms step_avg:94.46ms +step:447/1695 train_time:42227ms step_avg:94.47ms +step:448/1695 train_time:42323ms step_avg:94.47ms +step:449/1695 train_time:42419ms step_avg:94.47ms +step:450/1695 train_time:42515ms step_avg:94.48ms +step:451/1695 train_time:42611ms step_avg:94.48ms +step:452/1695 train_time:42707ms step_avg:94.48ms +step:453/1695 train_time:42803ms step_avg:94.49ms +step:454/1695 train_time:42900ms step_avg:94.49ms +step:455/1695 train_time:42995ms step_avg:94.50ms +step:456/1695 train_time:43091ms step_avg:94.50ms +step:457/1695 train_time:43187ms step_avg:94.50ms +step:458/1695 train_time:43283ms step_avg:94.50ms +step:459/1695 train_time:43380ms step_avg:94.51ms +step:460/1695 train_time:43475ms step_avg:94.51ms +step:461/1695 train_time:43571ms step_avg:94.51ms +step:462/1695 train_time:43668ms step_avg:94.52ms +step:463/1695 train_time:43764ms step_avg:94.52ms +step:464/1695 train_time:43861ms step_avg:94.53ms +step:465/1695 train_time:43957ms step_avg:94.53ms +step:466/1695 train_time:44053ms step_avg:94.53ms +step:467/1695 train_time:44149ms step_avg:94.54ms +step:468/1695 train_time:44245ms step_avg:94.54ms +step:469/1695 train_time:44341ms step_avg:94.54ms +step:470/1695 train_time:44437ms step_avg:94.55ms +step:471/1695 train_time:44533ms step_avg:94.55ms +step:472/1695 train_time:44629ms step_avg:94.55ms +step:473/1695 train_time:44725ms step_avg:94.56ms +step:474/1695 train_time:44821ms step_avg:94.56ms +step:475/1695 train_time:44917ms step_avg:94.56ms +step:476/1695 train_time:45014ms step_avg:94.57ms +step:477/1695 train_time:45109ms step_avg:94.57ms +step:478/1695 train_time:45206ms step_avg:94.57ms +step:479/1695 train_time:45303ms step_avg:94.58ms +step:480/1695 train_time:45399ms step_avg:94.58ms +step:481/1695 train_time:45495ms step_avg:94.58ms +step:482/1695 train_time:45591ms step_avg:94.59ms +step:483/1695 train_time:45687ms step_avg:94.59ms +step:484/1695 train_time:45783ms step_avg:94.59ms +step:485/1695 train_time:45880ms step_avg:94.60ms +step:486/1695 train_time:45975ms step_avg:94.60ms +step:487/1695 train_time:46071ms step_avg:94.60ms +step:488/1695 train_time:46167ms step_avg:94.60ms +step:489/1695 train_time:46263ms step_avg:94.61ms +step:490/1695 train_time:46360ms step_avg:94.61ms +step:491/1695 train_time:46457ms step_avg:94.62ms +step:492/1695 train_time:46552ms step_avg:94.62ms +step:493/1695 train_time:46649ms step_avg:94.62ms +step:494/1695 train_time:46746ms step_avg:94.63ms +step:495/1695 
train_time:46843ms step_avg:94.63ms +step:496/1695 train_time:46939ms step_avg:94.63ms +step:497/1695 train_time:47035ms step_avg:94.64ms +step:498/1695 train_time:47130ms step_avg:94.64ms +step:499/1695 train_time:47227ms step_avg:94.64ms +step:500/1695 train_time:47324ms step_avg:94.65ms +step:500/1695 val_loss:3.7325 train_time:47418ms step_avg:94.84ms +step:501/1695 train_time:47445ms step_avg:94.70ms +step:502/1695 train_time:47525ms step_avg:94.67ms +step:503/1695 train_time:47625ms step_avg:94.68ms +step:504/1695 train_time:47722ms step_avg:94.69ms +step:505/1695 train_time:47818ms step_avg:94.69ms +step:506/1695 train_time:47914ms step_avg:94.69ms +step:507/1695 train_time:48010ms step_avg:94.69ms +step:508/1695 train_time:48105ms step_avg:94.70ms +step:509/1695 train_time:48201ms step_avg:94.70ms +step:510/1695 train_time:48297ms step_avg:94.70ms +step:511/1695 train_time:48393ms step_avg:94.70ms +step:512/1695 train_time:48491ms step_avg:94.71ms +step:513/1695 train_time:48590ms step_avg:94.72ms +step:514/1695 train_time:48688ms step_avg:94.72ms +step:515/1695 train_time:48784ms step_avg:94.73ms +step:516/1695 train_time:48881ms step_avg:94.73ms +step:517/1695 train_time:48976ms step_avg:94.73ms +step:518/1695 train_time:49072ms step_avg:94.73ms +step:519/1695 train_time:49169ms step_avg:94.74ms +step:520/1695 train_time:49265ms step_avg:94.74ms +step:521/1695 train_time:49360ms step_avg:94.74ms +step:522/1695 train_time:49456ms step_avg:94.74ms +step:523/1695 train_time:49554ms step_avg:94.75ms +step:524/1695 train_time:49652ms step_avg:94.76ms +step:525/1695 train_time:49750ms step_avg:94.76ms +step:526/1695 train_time:49847ms step_avg:94.77ms +step:527/1695 train_time:49944ms step_avg:94.77ms +step:528/1695 train_time:50040ms step_avg:94.77ms +step:529/1695 train_time:50136ms step_avg:94.77ms +step:530/1695 train_time:50232ms step_avg:94.78ms +step:531/1695 train_time:50328ms step_avg:94.78ms +step:532/1695 train_time:50425ms step_avg:94.78ms +step:533/1695 train_time:50522ms step_avg:94.79ms +step:534/1695 train_time:50619ms step_avg:94.79ms +step:535/1695 train_time:50715ms step_avg:94.80ms +step:536/1695 train_time:50813ms step_avg:94.80ms +step:537/1695 train_time:50910ms step_avg:94.81ms +step:538/1695 train_time:51007ms step_avg:94.81ms +step:539/1695 train_time:51104ms step_avg:94.81ms +step:540/1695 train_time:51200ms step_avg:94.81ms +step:541/1695 train_time:51296ms step_avg:94.82ms +step:542/1695 train_time:51392ms step_avg:94.82ms +step:543/1695 train_time:51490ms step_avg:94.82ms +step:544/1695 train_time:51586ms step_avg:94.83ms +step:545/1695 train_time:51684ms step_avg:94.83ms +step:546/1695 train_time:51780ms step_avg:94.84ms +step:547/1695 train_time:51876ms step_avg:94.84ms +step:548/1695 train_time:51973ms step_avg:94.84ms +step:549/1695 train_time:52070ms step_avg:94.85ms +step:550/1695 train_time:52167ms step_avg:94.85ms +step:551/1695 train_time:52263ms step_avg:94.85ms +step:552/1695 train_time:52359ms step_avg:94.85ms +step:553/1695 train_time:52455ms step_avg:94.86ms +step:554/1695 train_time:52552ms step_avg:94.86ms +step:555/1695 train_time:52649ms step_avg:94.86ms +step:556/1695 train_time:52746ms step_avg:94.87ms +step:557/1695 train_time:52842ms step_avg:94.87ms +step:558/1695 train_time:52938ms step_avg:94.87ms +step:559/1695 train_time:53035ms step_avg:94.88ms +step:560/1695 train_time:53133ms step_avg:94.88ms +step:561/1695 train_time:53230ms step_avg:94.88ms +step:562/1695 train_time:53327ms step_avg:94.89ms +step:563/1695 train_time:53424ms 
step_avg:94.89ms +step:564/1695 train_time:53520ms step_avg:94.89ms +step:565/1695 train_time:53617ms step_avg:94.90ms +step:566/1695 train_time:53715ms step_avg:94.90ms +step:567/1695 train_time:53813ms step_avg:94.91ms +step:568/1695 train_time:53910ms step_avg:94.91ms +step:569/1695 train_time:54006ms step_avg:94.91ms +step:570/1695 train_time:54101ms step_avg:94.91ms +step:571/1695 train_time:54197ms step_avg:94.92ms +step:572/1695 train_time:54293ms step_avg:94.92ms +step:573/1695 train_time:54391ms step_avg:94.92ms +step:574/1695 train_time:54487ms step_avg:94.93ms +step:575/1695 train_time:54584ms step_avg:94.93ms +step:576/1695 train_time:54681ms step_avg:94.93ms +step:577/1695 train_time:54777ms step_avg:94.93ms +step:578/1695 train_time:54873ms step_avg:94.94ms +step:579/1695 train_time:54970ms step_avg:94.94ms +step:580/1695 train_time:55067ms step_avg:94.94ms +step:581/1695 train_time:55163ms step_avg:94.95ms +step:582/1695 train_time:55259ms step_avg:94.95ms +step:583/1695 train_time:55355ms step_avg:94.95ms +step:584/1695 train_time:55452ms step_avg:94.95ms +step:585/1695 train_time:55550ms step_avg:94.96ms +step:586/1695 train_time:55646ms step_avg:94.96ms +step:587/1695 train_time:55743ms step_avg:94.96ms +step:588/1695 train_time:55839ms step_avg:94.96ms +step:589/1695 train_time:55934ms step_avg:94.96ms +step:590/1695 train_time:56031ms step_avg:94.97ms +step:591/1695 train_time:56127ms step_avg:94.97ms +step:592/1695 train_time:56224ms step_avg:94.97ms +step:593/1695 train_time:56320ms step_avg:94.98ms +step:594/1695 train_time:56416ms step_avg:94.98ms +step:595/1695 train_time:56513ms step_avg:94.98ms +step:596/1695 train_time:56609ms step_avg:94.98ms +step:597/1695 train_time:56706ms step_avg:94.98ms +step:598/1695 train_time:56803ms step_avg:94.99ms +step:599/1695 train_time:56899ms step_avg:94.99ms +step:600/1695 train_time:56995ms step_avg:94.99ms +step:601/1695 train_time:57092ms step_avg:95.00ms +step:602/1695 train_time:57189ms step_avg:95.00ms +step:603/1695 train_time:57286ms step_avg:95.00ms +step:604/1695 train_time:57383ms step_avg:95.01ms +step:605/1695 train_time:57479ms step_avg:95.01ms +step:606/1695 train_time:57575ms step_avg:95.01ms +step:607/1695 train_time:57672ms step_avg:95.01ms +step:608/1695 train_time:57769ms step_avg:95.01ms +step:609/1695 train_time:57866ms step_avg:95.02ms +step:610/1695 train_time:57962ms step_avg:95.02ms +step:611/1695 train_time:58059ms step_avg:95.02ms +step:612/1695 train_time:58155ms step_avg:95.02ms +step:613/1695 train_time:58253ms step_avg:95.03ms +step:614/1695 train_time:58350ms step_avg:95.03ms +step:615/1695 train_time:58447ms step_avg:95.04ms +step:616/1695 train_time:58544ms step_avg:95.04ms +step:617/1695 train_time:58640ms step_avg:95.04ms +step:618/1695 train_time:58736ms step_avg:95.04ms +step:619/1695 train_time:58832ms step_avg:95.04ms +step:620/1695 train_time:58929ms step_avg:95.05ms +step:621/1695 train_time:59027ms step_avg:95.05ms +step:622/1695 train_time:59123ms step_avg:95.05ms +step:623/1695 train_time:59219ms step_avg:95.05ms +step:624/1695 train_time:59315ms step_avg:95.06ms +step:625/1695 train_time:59412ms step_avg:95.06ms +step:625/1695 val_loss:3.6469 train_time:59507ms step_avg:95.21ms +step:626/1695 train_time:59535ms step_avg:95.10ms +step:627/1695 train_time:59614ms step_avg:95.08ms +step:628/1695 train_time:59714ms step_avg:95.09ms +step:629/1695 train_time:59812ms step_avg:95.09ms +step:630/1695 train_time:59911ms step_avg:95.10ms +step:631/1695 train_time:60009ms step_avg:95.10ms 
+step:632/1695 train_time:60107ms step_avg:95.11ms +step:633/1695 train_time:60205ms step_avg:95.11ms +step:634/1695 train_time:60302ms step_avg:95.11ms +step:635/1695 train_time:60399ms step_avg:95.12ms +step:636/1695 train_time:60496ms step_avg:95.12ms +step:637/1695 train_time:60594ms step_avg:95.12ms +step:638/1695 train_time:60694ms step_avg:95.13ms +step:639/1695 train_time:60792ms step_avg:95.14ms +step:640/1695 train_time:60890ms step_avg:95.14ms +step:641/1695 train_time:60989ms step_avg:95.15ms +step:642/1695 train_time:61087ms step_avg:95.15ms +step:643/1695 train_time:61185ms step_avg:95.16ms +step:644/1695 train_time:61282ms step_avg:95.16ms +step:645/1695 train_time:61379ms step_avg:95.16ms +step:646/1695 train_time:61477ms step_avg:95.17ms +step:647/1695 train_time:61575ms step_avg:95.17ms +step:648/1695 train_time:61673ms step_avg:95.17ms +step:649/1695 train_time:61772ms step_avg:95.18ms +step:650/1695 train_time:61870ms step_avg:95.18ms +step:651/1695 train_time:61969ms step_avg:95.19ms +step:652/1695 train_time:62067ms step_avg:95.20ms +step:653/1695 train_time:62164ms step_avg:95.20ms +step:654/1695 train_time:62262ms step_avg:95.20ms +step:655/1695 train_time:62359ms step_avg:95.20ms +step:656/1695 train_time:62456ms step_avg:95.21ms +step:657/1695 train_time:62554ms step_avg:95.21ms +step:658/1695 train_time:62652ms step_avg:95.22ms +step:659/1695 train_time:62751ms step_avg:95.22ms +step:660/1695 train_time:62849ms step_avg:95.23ms +step:661/1695 train_time:62947ms step_avg:95.23ms +step:662/1695 train_time:63046ms step_avg:95.23ms +step:663/1695 train_time:63143ms step_avg:95.24ms +step:664/1695 train_time:63241ms step_avg:95.24ms +step:665/1695 train_time:63339ms step_avg:95.25ms +step:666/1695 train_time:63437ms step_avg:95.25ms +step:667/1695 train_time:63534ms step_avg:95.25ms +step:668/1695 train_time:63632ms step_avg:95.26ms +step:669/1695 train_time:63730ms step_avg:95.26ms +step:670/1695 train_time:63828ms step_avg:95.27ms +step:671/1695 train_time:63927ms step_avg:95.27ms +step:672/1695 train_time:64025ms step_avg:95.27ms +step:673/1695 train_time:64122ms step_avg:95.28ms +step:674/1695 train_time:64220ms step_avg:95.28ms +step:675/1695 train_time:64318ms step_avg:95.29ms +step:676/1695 train_time:64416ms step_avg:95.29ms +step:677/1695 train_time:64513ms step_avg:95.29ms +step:678/1695 train_time:64611ms step_avg:95.30ms +step:679/1695 train_time:64709ms step_avg:95.30ms +step:680/1695 train_time:64808ms step_avg:95.31ms +step:681/1695 train_time:64906ms step_avg:95.31ms +step:682/1695 train_time:65004ms step_avg:95.31ms +step:683/1695 train_time:65103ms step_avg:95.32ms +step:684/1695 train_time:65202ms step_avg:95.32ms +step:685/1695 train_time:65300ms step_avg:95.33ms +step:686/1695 train_time:65398ms step_avg:95.33ms +step:687/1695 train_time:65495ms step_avg:95.34ms +step:688/1695 train_time:65592ms step_avg:95.34ms +step:689/1695 train_time:65690ms step_avg:95.34ms +step:690/1695 train_time:65788ms step_avg:95.34ms +step:691/1695 train_time:65886ms step_avg:95.35ms +step:692/1695 train_time:65983ms step_avg:95.35ms +step:693/1695 train_time:66081ms step_avg:95.35ms +step:694/1695 train_time:66179ms step_avg:95.36ms +step:695/1695 train_time:66276ms step_avg:95.36ms +step:696/1695 train_time:66374ms step_avg:95.36ms +step:697/1695 train_time:66473ms step_avg:95.37ms +step:698/1695 train_time:66571ms step_avg:95.37ms +step:699/1695 train_time:66669ms step_avg:95.38ms +step:700/1695 train_time:66767ms step_avg:95.38ms +step:701/1695 train_time:67101ms 
step_avg:95.72ms +step:702/1695 train_time:67198ms step_avg:95.72ms +step:703/1695 train_time:67294ms step_avg:95.72ms +step:704/1695 train_time:67392ms step_avg:95.73ms +step:705/1695 train_time:67489ms step_avg:95.73ms +step:706/1695 train_time:67587ms step_avg:95.73ms +step:707/1695 train_time:67683ms step_avg:95.73ms +step:708/1695 train_time:67781ms step_avg:95.74ms +step:709/1695 train_time:67878ms step_avg:95.74ms +step:710/1695 train_time:67977ms step_avg:95.74ms +step:711/1695 train_time:68076ms step_avg:95.75ms +step:712/1695 train_time:68175ms step_avg:95.75ms +step:713/1695 train_time:68526ms step_avg:96.11ms +step:714/1695 train_time:68622ms step_avg:96.11ms +step:715/1695 train_time:68718ms step_avg:96.11ms +step:716/1695 train_time:68815ms step_avg:96.11ms +step:717/1695 train_time:68912ms step_avg:96.11ms +step:718/1695 train_time:69009ms step_avg:96.11ms +step:719/1695 train_time:69106ms step_avg:96.11ms +step:720/1695 train_time:69204ms step_avg:96.12ms +step:721/1695 train_time:69301ms step_avg:96.12ms +step:722/1695 train_time:69398ms step_avg:96.12ms +step:723/1695 train_time:69498ms step_avg:96.12ms +step:724/1695 train_time:69595ms step_avg:96.13ms +step:725/1695 train_time:69693ms step_avg:96.13ms +step:726/1695 train_time:69791ms step_avg:96.13ms +step:727/1695 train_time:69888ms step_avg:96.13ms +step:728/1695 train_time:69986ms step_avg:96.13ms +step:729/1695 train_time:70083ms step_avg:96.14ms +step:730/1695 train_time:70180ms step_avg:96.14ms +step:731/1695 train_time:70277ms step_avg:96.14ms +step:732/1695 train_time:70375ms step_avg:96.14ms +step:733/1695 train_time:70474ms step_avg:96.14ms +step:734/1695 train_time:70572ms step_avg:96.15ms +step:735/1695 train_time:70670ms step_avg:96.15ms +step:736/1695 train_time:70769ms step_avg:96.15ms +step:737/1695 train_time:70866ms step_avg:96.16ms +step:738/1695 train_time:70964ms step_avg:96.16ms +step:739/1695 train_time:71061ms step_avg:96.16ms +step:740/1695 train_time:71158ms step_avg:96.16ms +step:741/1695 train_time:71255ms step_avg:96.16ms +step:742/1695 train_time:71352ms step_avg:96.16ms +step:743/1695 train_time:71452ms step_avg:96.17ms +step:744/1695 train_time:71551ms step_avg:96.17ms +step:745/1695 train_time:71650ms step_avg:96.17ms +step:746/1695 train_time:71748ms step_avg:96.18ms +step:747/1695 train_time:71846ms step_avg:96.18ms +step:748/1695 train_time:71944ms step_avg:96.18ms +step:749/1695 train_time:72043ms step_avg:96.19ms +step:750/1695 train_time:72141ms step_avg:96.19ms +step:750/1695 val_loss:3.5857 train_time:72236ms step_avg:96.32ms +step:751/1695 train_time:72264ms step_avg:96.22ms +step:752/1695 train_time:72348ms step_avg:96.21ms +step:753/1695 train_time:72450ms step_avg:96.22ms +step:754/1695 train_time:72547ms step_avg:96.22ms +step:755/1695 train_time:72645ms step_avg:96.22ms +step:756/1695 train_time:72743ms step_avg:96.22ms +step:757/1695 train_time:72840ms step_avg:96.22ms +step:758/1695 train_time:72938ms step_avg:96.22ms +step:759/1695 train_time:73035ms step_avg:96.23ms +step:760/1695 train_time:73133ms step_avg:96.23ms +step:761/1695 train_time:73230ms step_avg:96.23ms +step:762/1695 train_time:73330ms step_avg:96.23ms +step:763/1695 train_time:73430ms step_avg:96.24ms +step:764/1695 train_time:73528ms step_avg:96.24ms +step:765/1695 train_time:73626ms step_avg:96.24ms +step:766/1695 train_time:73724ms step_avg:96.24ms +step:767/1695 train_time:73822ms step_avg:96.25ms +step:768/1695 train_time:73920ms step_avg:96.25ms +step:769/1695 train_time:74018ms step_avg:96.25ms 
+step:770/1695 train_time:74115ms step_avg:96.25ms +step:771/1695 train_time:74214ms step_avg:96.26ms +step:772/1695 train_time:74608ms step_avg:96.64ms +step:773/1695 train_time:74705ms step_avg:96.64ms +step:774/1695 train_time:74802ms step_avg:96.64ms +step:775/1695 train_time:74900ms step_avg:96.64ms +step:776/1695 train_time:74997ms step_avg:96.65ms +step:777/1695 train_time:75095ms step_avg:96.65ms +step:778/1695 train_time:75421ms step_avg:96.94ms +step:779/1695 train_time:75517ms step_avg:96.94ms +step:780/1695 train_time:75614ms step_avg:96.94ms +step:781/1695 train_time:75712ms step_avg:96.94ms +step:782/1695 train_time:75811ms step_avg:96.94ms +step:783/1695 train_time:75907ms step_avg:96.94ms +step:784/1695 train_time:76004ms step_avg:96.94ms +step:785/1695 train_time:76101ms step_avg:96.94ms +step:786/1695 train_time:76199ms step_avg:96.95ms +step:787/1695 train_time:76298ms step_avg:96.95ms +step:788/1695 train_time:76402ms step_avg:96.96ms +step:789/1695 train_time:76501ms step_avg:96.96ms +step:790/1695 train_time:76842ms step_avg:97.27ms +step:791/1695 train_time:76938ms step_avg:97.27ms +step:792/1695 train_time:77036ms step_avg:97.27ms +step:793/1695 train_time:77133ms step_avg:97.27ms +step:794/1695 train_time:77229ms step_avg:97.27ms +step:795/1695 train_time:77327ms step_avg:97.27ms +step:796/1695 train_time:77664ms step_avg:97.57ms +step:797/1695 train_time:78096ms step_avg:97.99ms +step:798/1695 train_time:78145ms step_avg:97.93ms +step:799/1695 train_time:78241ms step_avg:97.92ms +step:800/1695 train_time:78338ms step_avg:97.92ms +step:801/1695 train_time:78435ms step_avg:97.92ms +step:802/1695 train_time:78533ms step_avg:97.92ms +step:803/1695 train_time:78630ms step_avg:97.92ms +step:804/1695 train_time:78727ms step_avg:97.92ms +step:805/1695 train_time:78825ms step_avg:97.92ms +step:806/1695 train_time:78922ms step_avg:97.92ms +step:807/1695 train_time:79020ms step_avg:97.92ms +step:808/1695 train_time:79122ms step_avg:97.92ms +step:809/1695 train_time:79222ms step_avg:97.93ms +step:810/1695 train_time:79321ms step_avg:97.93ms +step:811/1695 train_time:79420ms step_avg:97.93ms +step:812/1695 train_time:79519ms step_avg:97.93ms +step:813/1695 train_time:79618ms step_avg:97.93ms +step:814/1695 train_time:79716ms step_avg:97.93ms +step:815/1695 train_time:79813ms step_avg:97.93ms +step:816/1695 train_time:79910ms step_avg:97.93ms +step:817/1695 train_time:80008ms step_avg:97.93ms +step:818/1695 train_time:80105ms step_avg:97.93ms +step:819/1695 train_time:80204ms step_avg:97.93ms +step:820/1695 train_time:80302ms step_avg:97.93ms +step:821/1695 train_time:80400ms step_avg:97.93ms +step:822/1695 train_time:80499ms step_avg:97.93ms +step:823/1695 train_time:80598ms step_avg:97.93ms +step:824/1695 train_time:80696ms step_avg:97.93ms +step:825/1695 train_time:80795ms step_avg:97.93ms +step:826/1695 train_time:80893ms step_avg:97.93ms +step:827/1695 train_time:80991ms step_avg:97.93ms +step:828/1695 train_time:81089ms step_avg:97.93ms +step:829/1695 train_time:81187ms step_avg:97.93ms +step:830/1695 train_time:81285ms step_avg:97.93ms +step:831/1695 train_time:81382ms step_avg:97.93ms +step:832/1695 train_time:81481ms step_avg:97.93ms +step:833/1695 train_time:81579ms step_avg:97.93ms +step:834/1695 train_time:81678ms step_avg:97.94ms +step:835/1695 train_time:81775ms step_avg:97.93ms +step:836/1695 train_time:81874ms step_avg:97.94ms +step:837/1695 train_time:81974ms step_avg:97.94ms +step:838/1695 train_time:82072ms step_avg:97.94ms +step:839/1695 train_time:82170ms 
step_avg:97.94ms +step:840/1695 train_time:82268ms step_avg:97.94ms +step:841/1695 train_time:82366ms step_avg:97.94ms +step:842/1695 train_time:82464ms step_avg:97.94ms +step:843/1695 train_time:82562ms step_avg:97.94ms +step:844/1695 train_time:82661ms step_avg:97.94ms +step:845/1695 train_time:82759ms step_avg:97.94ms +step:846/1695 train_time:82858ms step_avg:97.94ms +step:847/1695 train_time:82957ms step_avg:97.94ms +step:848/1695 train_time:83056ms step_avg:97.94ms +step:849/1695 train_time:83154ms step_avg:97.94ms +step:850/1695 train_time:83252ms step_avg:97.94ms +step:851/1695 train_time:83350ms step_avg:97.94ms +step:852/1695 train_time:83448ms step_avg:97.94ms +step:853/1695 train_time:83547ms step_avg:97.94ms +step:854/1695 train_time:83645ms step_avg:97.94ms +step:855/1695 train_time:83742ms step_avg:97.94ms +step:856/1695 train_time:83840ms step_avg:97.94ms +step:857/1695 train_time:83938ms step_avg:97.94ms +step:858/1695 train_time:84036ms step_avg:97.94ms +step:859/1695 train_time:84135ms step_avg:97.95ms +step:860/1695 train_time:84233ms step_avg:97.95ms +step:861/1695 train_time:84332ms step_avg:97.95ms +step:862/1695 train_time:84430ms step_avg:97.95ms +step:863/1695 train_time:84529ms step_avg:97.95ms +step:864/1695 train_time:84627ms step_avg:97.95ms +step:865/1695 train_time:84725ms step_avg:97.95ms +step:866/1695 train_time:84823ms step_avg:97.95ms +step:867/1695 train_time:84920ms step_avg:97.95ms +step:868/1695 train_time:85018ms step_avg:97.95ms +step:869/1695 train_time:85117ms step_avg:97.95ms +step:870/1695 train_time:85216ms step_avg:97.95ms +step:871/1695 train_time:85316ms step_avg:97.95ms +step:872/1695 train_time:85416ms step_avg:97.95ms +step:873/1695 train_time:85515ms step_avg:97.96ms +step:874/1695 train_time:85614ms step_avg:97.96ms +step:875/1695 train_time:85712ms step_avg:97.96ms +step:875/1695 val_loss:3.5376 train_time:85809ms step_avg:98.07ms +step:876/1695 train_time:85837ms step_avg:97.99ms +step:877/1695 train_time:85918ms step_avg:97.97ms +step:878/1695 train_time:86019ms step_avg:97.97ms +step:879/1695 train_time:86118ms step_avg:97.97ms +step:880/1695 train_time:86216ms step_avg:97.97ms +step:881/1695 train_time:86313ms step_avg:97.97ms +step:882/1695 train_time:86412ms step_avg:97.97ms +step:883/1695 train_time:86511ms step_avg:97.97ms +step:884/1695 train_time:86609ms step_avg:97.97ms +step:885/1695 train_time:86708ms step_avg:97.98ms +step:886/1695 train_time:86808ms step_avg:97.98ms +step:887/1695 train_time:86910ms step_avg:97.98ms +step:888/1695 train_time:87011ms step_avg:97.99ms +step:889/1695 train_time:87112ms step_avg:97.99ms +step:890/1695 train_time:87212ms step_avg:97.99ms +step:891/1695 train_time:87311ms step_avg:97.99ms +step:892/1695 train_time:87410ms step_avg:97.99ms +step:893/1695 train_time:87510ms step_avg:98.00ms +step:894/1695 train_time:87608ms step_avg:98.00ms +step:895/1695 train_time:87707ms step_avg:98.00ms +step:896/1695 train_time:87807ms step_avg:98.00ms +step:897/1695 train_time:87907ms step_avg:98.00ms +step:898/1695 train_time:88008ms step_avg:98.00ms +step:899/1695 train_time:88109ms step_avg:98.01ms +step:900/1695 train_time:88210ms step_avg:98.01ms +step:901/1695 train_time:88310ms step_avg:98.01ms +step:902/1695 train_time:88410ms step_avg:98.02ms +step:903/1695 train_time:88510ms step_avg:98.02ms +step:904/1695 train_time:88609ms step_avg:98.02ms +step:905/1695 train_time:88708ms step_avg:98.02ms +step:906/1695 train_time:88807ms step_avg:98.02ms +step:907/1695 train_time:88906ms step_avg:98.02ms 
+step:908/1695 train_time:89007ms step_avg:98.03ms +step:909/1695 train_time:89108ms step_avg:98.03ms +step:910/1695 train_time:89209ms step_avg:98.03ms +step:911/1695 train_time:89309ms step_avg:98.03ms +step:912/1695 train_time:89409ms step_avg:98.04ms +step:913/1695 train_time:89509ms step_avg:98.04ms +step:914/1695 train_time:89608ms step_avg:98.04ms +step:915/1695 train_time:89707ms step_avg:98.04ms +step:916/1695 train_time:89806ms step_avg:98.04ms +step:917/1695 train_time:89906ms step_avg:98.04ms +step:918/1695 train_time:90007ms step_avg:98.05ms +step:919/1695 train_time:90107ms step_avg:98.05ms +step:920/1695 train_time:90208ms step_avg:98.05ms +step:921/1695 train_time:90309ms step_avg:98.06ms +step:922/1695 train_time:90409ms step_avg:98.06ms +step:923/1695 train_time:90509ms step_avg:98.06ms +step:924/1695 train_time:90609ms step_avg:98.06ms +step:925/1695 train_time:90708ms step_avg:98.06ms +step:926/1695 train_time:90809ms step_avg:98.07ms +step:927/1695 train_time:90908ms step_avg:98.07ms +step:928/1695 train_time:91007ms step_avg:98.07ms +step:929/1695 train_time:91106ms step_avg:98.07ms +step:930/1695 train_time:91207ms step_avg:98.07ms +step:931/1695 train_time:91308ms step_avg:98.07ms +step:932/1695 train_time:91408ms step_avg:98.08ms +step:933/1695 train_time:91509ms step_avg:98.08ms +step:934/1695 train_time:91608ms step_avg:98.08ms +step:935/1695 train_time:91708ms step_avg:98.08ms +step:936/1695 train_time:91807ms step_avg:98.08ms +step:937/1695 train_time:91907ms step_avg:98.09ms +step:938/1695 train_time:92007ms step_avg:98.09ms +step:939/1695 train_time:92108ms step_avg:98.09ms +step:940/1695 train_time:92209ms step_avg:98.09ms +step:941/1695 train_time:92309ms step_avg:98.10ms +step:942/1695 train_time:92409ms step_avg:98.10ms +step:943/1695 train_time:92509ms step_avg:98.10ms +step:944/1695 train_time:92609ms step_avg:98.10ms +step:945/1695 train_time:92709ms step_avg:98.10ms +step:946/1695 train_time:92808ms step_avg:98.11ms +step:947/1695 train_time:92908ms step_avg:98.11ms +step:948/1695 train_time:93008ms step_avg:98.11ms +step:949/1695 train_time:93107ms step_avg:98.11ms +step:950/1695 train_time:93207ms step_avg:98.11ms +step:951/1695 train_time:93307ms step_avg:98.11ms +step:952/1695 train_time:93408ms step_avg:98.12ms +step:953/1695 train_time:93509ms step_avg:98.12ms +step:954/1695 train_time:93609ms step_avg:98.12ms +step:955/1695 train_time:93709ms step_avg:98.12ms +step:956/1695 train_time:93808ms step_avg:98.13ms +step:957/1695 train_time:93908ms step_avg:98.13ms +step:958/1695 train_time:94008ms step_avg:98.13ms +step:959/1695 train_time:94107ms step_avg:98.13ms +step:960/1695 train_time:94208ms step_avg:98.13ms +step:961/1695 train_time:94309ms step_avg:98.14ms +step:962/1695 train_time:94409ms step_avg:98.14ms +step:963/1695 train_time:94509ms step_avg:98.14ms +step:964/1695 train_time:94608ms step_avg:98.14ms +step:965/1695 train_time:94708ms step_avg:98.14ms +step:966/1695 train_time:94808ms step_avg:98.14ms +step:967/1695 train_time:94907ms step_avg:98.15ms +step:968/1695 train_time:95007ms step_avg:98.15ms +step:969/1695 train_time:95108ms step_avg:98.15ms +step:970/1695 train_time:95208ms step_avg:98.15ms +step:971/1695 train_time:95308ms step_avg:98.15ms +step:972/1695 train_time:95409ms step_avg:98.16ms +step:973/1695 train_time:95509ms step_avg:98.16ms +step:974/1695 train_time:95608ms step_avg:98.16ms +step:975/1695 train_time:95708ms step_avg:98.16ms +step:976/1695 train_time:95808ms step_avg:98.16ms +step:977/1695 train_time:95908ms 
step_avg:98.17ms +step:978/1695 train_time:96007ms step_avg:98.17ms +step:979/1695 train_time:96107ms step_avg:98.17ms +step:980/1695 train_time:96207ms step_avg:98.17ms +step:981/1695 train_time:96306ms step_avg:98.17ms +step:982/1695 train_time:96408ms step_avg:98.17ms +step:983/1695 train_time:96508ms step_avg:98.18ms +step:984/1695 train_time:96608ms step_avg:98.18ms +step:985/1695 train_time:96708ms step_avg:98.18ms +step:986/1695 train_time:96808ms step_avg:98.18ms +step:987/1695 train_time:96908ms step_avg:98.18ms +step:988/1695 train_time:97008ms step_avg:98.19ms +step:989/1695 train_time:97108ms step_avg:98.19ms +step:990/1695 train_time:97208ms step_avg:98.19ms +step:991/1695 train_time:97308ms step_avg:98.19ms +step:992/1695 train_time:97407ms step_avg:98.19ms +step:993/1695 train_time:97507ms step_avg:98.19ms +step:994/1695 train_time:97607ms step_avg:98.20ms +step:995/1695 train_time:97707ms step_avg:98.20ms +step:996/1695 train_time:97807ms step_avg:98.20ms +step:997/1695 train_time:97907ms step_avg:98.20ms +step:998/1695 train_time:98006ms step_avg:98.20ms +step:999/1695 train_time:98106ms step_avg:98.20ms +step:1000/1695 train_time:98206ms step_avg:98.21ms +step:1000/1695 val_loss:3.4945 train_time:98304ms step_avg:98.30ms +step:1001/1695 train_time:98332ms step_avg:98.23ms +step:1002/1695 train_time:98415ms step_avg:98.22ms +step:1003/1695 train_time:98515ms step_avg:98.22ms +step:1004/1695 train_time:98614ms step_avg:98.22ms +step:1005/1695 train_time:98713ms step_avg:98.22ms +step:1006/1695 train_time:98811ms step_avg:98.22ms +step:1007/1695 train_time:98910ms step_avg:98.22ms +step:1008/1695 train_time:99008ms step_avg:98.22ms +step:1009/1695 train_time:99107ms step_avg:98.22ms +step:1010/1695 train_time:99205ms step_avg:98.22ms +step:1011/1695 train_time:99305ms step_avg:98.22ms +step:1012/1695 train_time:99406ms step_avg:98.23ms +step:1013/1695 train_time:99507ms step_avg:98.23ms +step:1014/1695 train_time:99607ms step_avg:98.23ms +step:1015/1695 train_time:99708ms step_avg:98.23ms +step:1016/1695 train_time:99807ms step_avg:98.23ms +step:1017/1695 train_time:99907ms step_avg:98.24ms +step:1018/1695 train_time:100006ms step_avg:98.24ms +step:1019/1695 train_time:100104ms step_avg:98.24ms +step:1020/1695 train_time:100204ms step_avg:98.24ms +step:1021/1695 train_time:100305ms step_avg:98.24ms +step:1022/1695 train_time:100405ms step_avg:98.24ms +step:1023/1695 train_time:100505ms step_avg:98.25ms +step:1024/1695 train_time:100608ms step_avg:98.25ms +step:1025/1695 train_time:100707ms step_avg:98.25ms +step:1026/1695 train_time:100807ms step_avg:98.25ms +step:1027/1695 train_time:100907ms step_avg:98.25ms +step:1028/1695 train_time:101006ms step_avg:98.26ms +step:1029/1695 train_time:101107ms step_avg:98.26ms +step:1030/1695 train_time:101205ms step_avg:98.26ms +step:1031/1695 train_time:101305ms step_avg:98.26ms +step:1032/1695 train_time:101405ms step_avg:98.26ms +step:1033/1695 train_time:101505ms step_avg:98.26ms +step:1034/1695 train_time:101605ms step_avg:98.26ms +step:1035/1695 train_time:101705ms step_avg:98.27ms +step:1036/1695 train_time:101804ms step_avg:98.27ms +step:1037/1695 train_time:101906ms step_avg:98.27ms +step:1038/1695 train_time:102006ms step_avg:98.27ms +step:1039/1695 train_time:102105ms step_avg:98.27ms +step:1040/1695 train_time:102204ms step_avg:98.27ms +step:1041/1695 train_time:102304ms step_avg:98.27ms +step:1042/1695 train_time:102404ms step_avg:98.28ms +step:1043/1695 train_time:102504ms step_avg:98.28ms +step:1044/1695 
train_time:102603ms step_avg:98.28ms +step:1045/1695 train_time:102704ms step_avg:98.28ms +step:1046/1695 train_time:102805ms step_avg:98.28ms +step:1047/1695 train_time:102905ms step_avg:98.29ms +step:1048/1695 train_time:103005ms step_avg:98.29ms +step:1049/1695 train_time:103104ms step_avg:98.29ms +step:1050/1695 train_time:103204ms step_avg:98.29ms +step:1051/1695 train_time:103305ms step_avg:98.29ms +step:1052/1695 train_time:103404ms step_avg:98.29ms +step:1053/1695 train_time:103504ms step_avg:98.29ms +step:1054/1695 train_time:103604ms step_avg:98.30ms +step:1055/1695 train_time:103704ms step_avg:98.30ms +step:1056/1695 train_time:103804ms step_avg:98.30ms +step:1057/1695 train_time:103904ms step_avg:98.30ms +step:1058/1695 train_time:104003ms step_avg:98.30ms +step:1059/1695 train_time:104103ms step_avg:98.30ms +step:1060/1695 train_time:104203ms step_avg:98.30ms +step:1061/1695 train_time:104304ms step_avg:98.31ms +step:1062/1695 train_time:104405ms step_avg:98.31ms +step:1063/1695 train_time:104505ms step_avg:98.31ms +step:1064/1695 train_time:104604ms step_avg:98.31ms +step:1065/1695 train_time:104705ms step_avg:98.31ms +step:1066/1695 train_time:104805ms step_avg:98.32ms +step:1067/1695 train_time:104906ms step_avg:98.32ms +step:1068/1695 train_time:105005ms step_avg:98.32ms +step:1069/1695 train_time:105105ms step_avg:98.32ms +step:1070/1695 train_time:105206ms step_avg:98.32ms +step:1071/1695 train_time:105305ms step_avg:98.32ms +step:1072/1695 train_time:105405ms step_avg:98.33ms +step:1073/1695 train_time:105504ms step_avg:98.33ms +step:1074/1695 train_time:105605ms step_avg:98.33ms +step:1075/1695 train_time:105705ms step_avg:98.33ms +step:1076/1695 train_time:105805ms step_avg:98.33ms +step:1077/1695 train_time:105906ms step_avg:98.33ms +step:1078/1695 train_time:106006ms step_avg:98.34ms +step:1079/1695 train_time:106106ms step_avg:98.34ms +step:1080/1695 train_time:106205ms step_avg:98.34ms +step:1081/1695 train_time:106305ms step_avg:98.34ms +step:1082/1695 train_time:106405ms step_avg:98.34ms +step:1083/1695 train_time:106504ms step_avg:98.34ms +step:1084/1695 train_time:106604ms step_avg:98.34ms +step:1085/1695 train_time:106704ms step_avg:98.35ms +step:1086/1695 train_time:106804ms step_avg:98.35ms +step:1087/1695 train_time:106905ms step_avg:98.35ms +step:1088/1695 train_time:107005ms step_avg:98.35ms +step:1089/1695 train_time:107105ms step_avg:98.35ms +step:1090/1695 train_time:107205ms step_avg:98.35ms +step:1091/1695 train_time:107305ms step_avg:98.36ms +step:1092/1695 train_time:107406ms step_avg:98.36ms +step:1093/1695 train_time:107506ms step_avg:98.36ms +step:1094/1695 train_time:107606ms step_avg:98.36ms +step:1095/1695 train_time:107706ms step_avg:98.36ms +step:1096/1695 train_time:107806ms step_avg:98.36ms +step:1097/1695 train_time:107906ms step_avg:98.36ms +step:1098/1695 train_time:108005ms step_avg:98.37ms +step:1099/1695 train_time:108104ms step_avg:98.37ms +step:1100/1695 train_time:108205ms step_avg:98.37ms +step:1101/1695 train_time:108304ms step_avg:98.37ms +step:1102/1695 train_time:108404ms step_avg:98.37ms +step:1103/1695 train_time:108504ms step_avg:98.37ms +step:1104/1695 train_time:108603ms step_avg:98.37ms +step:1105/1695 train_time:108703ms step_avg:98.37ms +step:1106/1695 train_time:108803ms step_avg:98.38ms +step:1107/1695 train_time:108903ms step_avg:98.38ms +step:1108/1695 train_time:109004ms step_avg:98.38ms +step:1109/1695 train_time:109103ms step_avg:98.38ms +step:1110/1695 train_time:109204ms step_avg:98.38ms +step:1111/1695 
train_time:109304ms step_avg:98.38ms +step:1112/1695 train_time:109404ms step_avg:98.39ms +step:1113/1695 train_time:109504ms step_avg:98.39ms +step:1114/1695 train_time:109604ms step_avg:98.39ms +step:1115/1695 train_time:109704ms step_avg:98.39ms +step:1116/1695 train_time:109803ms step_avg:98.39ms +step:1117/1695 train_time:109904ms step_avg:98.39ms +step:1118/1695 train_time:110004ms step_avg:98.39ms +step:1119/1695 train_time:110104ms step_avg:98.40ms +step:1120/1695 train_time:110205ms step_avg:98.40ms +step:1121/1695 train_time:110304ms step_avg:98.40ms +step:1122/1695 train_time:110404ms step_avg:98.40ms +step:1123/1695 train_time:110504ms step_avg:98.40ms +step:1124/1695 train_time:110603ms step_avg:98.40ms +step:1125/1695 train_time:110705ms step_avg:98.40ms +step:1125/1695 val_loss:3.4411 train_time:110802ms step_avg:98.49ms +step:1126/1695 train_time:110830ms step_avg:98.43ms +step:1127/1695 train_time:110915ms step_avg:98.42ms +step:1128/1695 train_time:111018ms step_avg:98.42ms +step:1129/1695 train_time:111117ms step_avg:98.42ms +step:1130/1695 train_time:111216ms step_avg:98.42ms +step:1131/1695 train_time:111316ms step_avg:98.42ms +step:1132/1695 train_time:111415ms step_avg:98.42ms +step:1133/1695 train_time:111515ms step_avg:98.42ms +step:1134/1695 train_time:111616ms step_avg:98.43ms +step:1135/1695 train_time:111715ms step_avg:98.43ms +step:1136/1695 train_time:111817ms step_avg:98.43ms +step:1137/1695 train_time:111921ms step_avg:98.44ms +step:1138/1695 train_time:112022ms step_avg:98.44ms +step:1139/1695 train_time:112123ms step_avg:98.44ms +step:1140/1695 train_time:112224ms step_avg:98.44ms +step:1141/1695 train_time:112323ms step_avg:98.44ms +step:1142/1695 train_time:112423ms step_avg:98.44ms +step:1143/1695 train_time:112523ms step_avg:98.45ms +step:1144/1695 train_time:112623ms step_avg:98.45ms +step:1145/1695 train_time:112724ms step_avg:98.45ms +step:1146/1695 train_time:112824ms step_avg:98.45ms +step:1147/1695 train_time:112924ms step_avg:98.45ms +step:1148/1695 train_time:113026ms step_avg:98.45ms +step:1149/1695 train_time:113126ms step_avg:98.46ms +step:1150/1695 train_time:113227ms step_avg:98.46ms +step:1151/1695 train_time:113327ms step_avg:98.46ms +step:1152/1695 train_time:113427ms step_avg:98.46ms +step:1153/1695 train_time:113527ms step_avg:98.46ms +step:1154/1695 train_time:113629ms step_avg:98.47ms +step:1155/1695 train_time:113729ms step_avg:98.47ms +step:1156/1695 train_time:113830ms step_avg:98.47ms +step:1157/1695 train_time:113931ms step_avg:98.47ms +step:1158/1695 train_time:114033ms step_avg:98.47ms +step:1159/1695 train_time:114134ms step_avg:98.48ms +step:1160/1695 train_time:114236ms step_avg:98.48ms +step:1161/1695 train_time:114338ms step_avg:98.48ms +step:1162/1695 train_time:114438ms step_avg:98.48ms +step:1163/1695 train_time:114542ms step_avg:98.49ms +step:1164/1695 train_time:114642ms step_avg:98.49ms +step:1165/1695 train_time:114742ms step_avg:98.49ms +step:1166/1695 train_time:114843ms step_avg:98.49ms +step:1167/1695 train_time:114942ms step_avg:98.49ms +step:1168/1695 train_time:115043ms step_avg:98.50ms +step:1169/1695 train_time:115142ms step_avg:98.50ms +step:1170/1695 train_time:115243ms step_avg:98.50ms +step:1171/1695 train_time:115343ms step_avg:98.50ms +step:1172/1695 train_time:115446ms step_avg:98.50ms +step:1173/1695 train_time:115547ms step_avg:98.51ms +step:1174/1695 train_time:115648ms step_avg:98.51ms +step:1175/1695 train_time:115748ms step_avg:98.51ms +step:1176/1695 train_time:115849ms step_avg:98.51ms 
+step:1177/1695 train_time:115949ms step_avg:98.51ms +step:1178/1695 train_time:116050ms step_avg:98.51ms +step:1179/1695 train_time:116152ms step_avg:98.52ms +step:1180/1695 train_time:116253ms step_avg:98.52ms +step:1181/1695 train_time:116355ms step_avg:98.52ms +step:1182/1695 train_time:116457ms step_avg:98.53ms +step:1183/1695 train_time:116558ms step_avg:98.53ms +step:1184/1695 train_time:116660ms step_avg:98.53ms +step:1185/1695 train_time:116761ms step_avg:98.53ms +step:1186/1695 train_time:116861ms step_avg:98.53ms +step:1187/1695 train_time:116961ms step_avg:98.53ms +step:1188/1695 train_time:117061ms step_avg:98.54ms +step:1189/1695 train_time:117162ms step_avg:98.54ms +step:1190/1695 train_time:117264ms step_avg:98.54ms +step:1191/1695 train_time:117364ms step_avg:98.54ms +step:1192/1695 train_time:117463ms step_avg:98.54ms +step:1193/1695 train_time:117563ms step_avg:98.54ms +step:1194/1695 train_time:117663ms step_avg:98.55ms +step:1195/1695 train_time:117763ms step_avg:98.55ms +step:1196/1695 train_time:117864ms step_avg:98.55ms +step:1197/1695 train_time:117964ms step_avg:98.55ms +step:1198/1695 train_time:118065ms step_avg:98.55ms +step:1199/1695 train_time:118165ms step_avg:98.55ms +step:1200/1695 train_time:118265ms step_avg:98.55ms +step:1201/1695 train_time:118366ms step_avg:98.56ms +step:1202/1695 train_time:118466ms step_avg:98.56ms +step:1203/1695 train_time:118568ms step_avg:98.56ms +step:1204/1695 train_time:118669ms step_avg:98.56ms +step:1205/1695 train_time:118771ms step_avg:98.56ms +step:1206/1695 train_time:118872ms step_avg:98.57ms +step:1207/1695 train_time:118973ms step_avg:98.57ms +step:1208/1695 train_time:119074ms step_avg:98.57ms +step:1209/1695 train_time:119175ms step_avg:98.57ms +step:1210/1695 train_time:119277ms step_avg:98.58ms +step:1211/1695 train_time:119378ms step_avg:98.58ms +step:1212/1695 train_time:119479ms step_avg:98.58ms +step:1213/1695 train_time:119579ms step_avg:98.58ms +step:1214/1695 train_time:119679ms step_avg:98.58ms +step:1215/1695 train_time:119780ms step_avg:98.58ms +step:1216/1695 train_time:119881ms step_avg:98.59ms +step:1217/1695 train_time:119982ms step_avg:98.59ms +step:1218/1695 train_time:120082ms step_avg:98.59ms +step:1219/1695 train_time:120182ms step_avg:98.59ms +step:1220/1695 train_time:120283ms step_avg:98.59ms +step:1221/1695 train_time:120383ms step_avg:98.59ms +step:1222/1695 train_time:120482ms step_avg:98.59ms +step:1223/1695 train_time:120582ms step_avg:98.60ms +step:1224/1695 train_time:120682ms step_avg:98.60ms +step:1225/1695 train_time:120782ms step_avg:98.60ms +step:1226/1695 train_time:120882ms step_avg:98.60ms +step:1227/1695 train_time:120982ms step_avg:98.60ms +step:1228/1695 train_time:121081ms step_avg:98.60ms +step:1229/1695 train_time:121181ms step_avg:98.60ms +step:1230/1695 train_time:121282ms step_avg:98.60ms +step:1231/1695 train_time:121381ms step_avg:98.60ms +step:1232/1695 train_time:121482ms step_avg:98.61ms +step:1233/1695 train_time:121582ms step_avg:98.61ms +step:1234/1695 train_time:121683ms step_avg:98.61ms +step:1235/1695 train_time:121782ms step_avg:98.61ms +step:1236/1695 train_time:121883ms step_avg:98.61ms +step:1237/1695 train_time:121983ms step_avg:98.61ms +step:1238/1695 train_time:122082ms step_avg:98.61ms +step:1239/1695 train_time:122183ms step_avg:98.61ms +step:1240/1695 train_time:122283ms step_avg:98.62ms +step:1241/1695 train_time:122385ms step_avg:98.62ms +step:1242/1695 train_time:122485ms step_avg:98.62ms +step:1243/1695 train_time:122586ms step_avg:98.62ms 
+step:1244/1695 train_time:122686ms step_avg:98.62ms +step:1245/1695 train_time:122786ms step_avg:98.62ms +step:1246/1695 train_time:122886ms step_avg:98.62ms +step:1247/1695 train_time:122987ms step_avg:98.63ms +step:1248/1695 train_time:123087ms step_avg:98.63ms +step:1249/1695 train_time:123188ms step_avg:98.63ms +step:1250/1695 train_time:123288ms step_avg:98.63ms +step:1250/1695 val_loss:3.3962 train_time:123387ms step_avg:98.71ms +step:1251/1695 train_time:123415ms step_avg:98.65ms +step:1252/1695 train_time:123497ms step_avg:98.64ms +step:1253/1695 train_time:123601ms step_avg:98.64ms +step:1254/1695 train_time:123703ms step_avg:98.65ms +step:1255/1695 train_time:123805ms step_avg:98.65ms +step:1256/1695 train_time:123904ms step_avg:98.65ms +step:1257/1695 train_time:124004ms step_avg:98.65ms +step:1258/1695 train_time:124104ms step_avg:98.65ms +step:1259/1695 train_time:124203ms step_avg:98.65ms +step:1260/1695 train_time:124304ms step_avg:98.65ms +step:1261/1695 train_time:124406ms step_avg:98.66ms +step:1262/1695 train_time:124508ms step_avg:98.66ms +step:1263/1695 train_time:124609ms step_avg:98.66ms +step:1264/1695 train_time:124709ms step_avg:98.66ms +step:1265/1695 train_time:124809ms step_avg:98.66ms +step:1266/1695 train_time:124909ms step_avg:98.66ms +step:1267/1695 train_time:125009ms step_avg:98.67ms +step:1268/1695 train_time:125109ms step_avg:98.67ms +step:1269/1695 train_time:125210ms step_avg:98.67ms +step:1270/1695 train_time:125310ms step_avg:98.67ms +step:1271/1695 train_time:125411ms step_avg:98.67ms +step:1272/1695 train_time:125512ms step_avg:98.67ms +step:1273/1695 train_time:125613ms step_avg:98.68ms +step:1274/1695 train_time:125714ms step_avg:98.68ms +step:1275/1695 train_time:125815ms step_avg:98.68ms +step:1276/1695 train_time:125917ms step_avg:98.68ms +step:1277/1695 train_time:126018ms step_avg:98.68ms +step:1278/1695 train_time:126120ms step_avg:98.69ms +step:1279/1695 train_time:126222ms step_avg:98.69ms +step:1280/1695 train_time:126322ms step_avg:98.69ms +step:1281/1695 train_time:126424ms step_avg:98.69ms +step:1282/1695 train_time:126525ms step_avg:98.69ms +step:1283/1695 train_time:126625ms step_avg:98.69ms +step:1284/1695 train_time:126725ms step_avg:98.70ms +step:1285/1695 train_time:126825ms step_avg:98.70ms +step:1286/1695 train_time:126926ms step_avg:98.70ms +step:1287/1695 train_time:127026ms step_avg:98.70ms +step:1288/1695 train_time:127126ms step_avg:98.70ms +step:1289/1695 train_time:127226ms step_avg:98.70ms +step:1290/1695 train_time:127327ms step_avg:98.70ms +step:1291/1695 train_time:127427ms step_avg:98.70ms +step:1292/1695 train_time:127527ms step_avg:98.71ms +step:1293/1695 train_time:127628ms step_avg:98.71ms +step:1294/1695 train_time:127729ms step_avg:98.71ms +step:1295/1695 train_time:127830ms step_avg:98.71ms +step:1296/1695 train_time:127930ms step_avg:98.71ms +step:1297/1695 train_time:128030ms step_avg:98.71ms +step:1298/1695 train_time:128130ms step_avg:98.71ms +step:1299/1695 train_time:128231ms step_avg:98.72ms +step:1300/1695 train_time:128332ms step_avg:98.72ms +step:1301/1695 train_time:128433ms step_avg:98.72ms +step:1302/1695 train_time:128535ms step_avg:98.72ms +step:1303/1695 train_time:128636ms step_avg:98.72ms +step:1304/1695 train_time:128738ms step_avg:98.73ms +step:1305/1695 train_time:128841ms step_avg:98.73ms +step:1306/1695 train_time:128942ms step_avg:98.73ms +step:1307/1695 train_time:129043ms step_avg:98.73ms +step:1308/1695 train_time:129144ms step_avg:98.73ms +step:1309/1695 train_time:129244ms 
step_avg:98.73ms +step:1310/1695 train_time:129346ms step_avg:98.74ms +step:1311/1695 train_time:129446ms step_avg:98.74ms +step:1312/1695 train_time:129546ms step_avg:98.74ms +step:1313/1695 train_time:129647ms step_avg:98.74ms +step:1314/1695 train_time:129747ms step_avg:98.74ms +step:1315/1695 train_time:129848ms step_avg:98.74ms +step:1316/1695 train_time:129948ms step_avg:98.74ms +step:1317/1695 train_time:130049ms step_avg:98.75ms +step:1318/1695 train_time:130149ms step_avg:98.75ms +step:1319/1695 train_time:130250ms step_avg:98.75ms +step:1320/1695 train_time:130351ms step_avg:98.75ms +step:1321/1695 train_time:130454ms step_avg:98.75ms +step:1322/1695 train_time:130554ms step_avg:98.76ms +step:1323/1695 train_time:130656ms step_avg:98.76ms +step:1324/1695 train_time:130757ms step_avg:98.76ms +step:1325/1695 train_time:130859ms step_avg:98.76ms +step:1326/1695 train_time:130961ms step_avg:98.76ms +step:1327/1695 train_time:131064ms step_avg:98.77ms +step:1328/1695 train_time:131164ms step_avg:98.77ms +step:1329/1695 train_time:131264ms step_avg:98.77ms +step:1330/1695 train_time:131365ms step_avg:98.77ms +step:1331/1695 train_time:131465ms step_avg:98.77ms +step:1332/1695 train_time:131567ms step_avg:98.77ms +step:1333/1695 train_time:131668ms step_avg:98.78ms +step:1334/1695 train_time:131768ms step_avg:98.78ms +step:1335/1695 train_time:131869ms step_avg:98.78ms +step:1336/1695 train_time:131970ms step_avg:98.78ms +step:1337/1695 train_time:132070ms step_avg:98.78ms +step:1338/1695 train_time:132170ms step_avg:98.78ms +step:1339/1695 train_time:132272ms step_avg:98.78ms +step:1340/1695 train_time:132374ms step_avg:98.79ms +step:1341/1695 train_time:132476ms step_avg:98.79ms +step:1342/1695 train_time:132576ms step_avg:98.79ms +step:1343/1695 train_time:132676ms step_avg:98.79ms +step:1344/1695 train_time:132777ms step_avg:98.79ms +step:1345/1695 train_time:132879ms step_avg:98.79ms +step:1346/1695 train_time:132982ms step_avg:98.80ms +step:1347/1695 train_time:133084ms step_avg:98.80ms +step:1348/1695 train_time:133184ms step_avg:98.80ms +step:1349/1695 train_time:133285ms step_avg:98.80ms +step:1350/1695 train_time:133386ms step_avg:98.80ms +step:1351/1695 train_time:133487ms step_avg:98.81ms +step:1352/1695 train_time:133587ms step_avg:98.81ms +step:1353/1695 train_time:133687ms step_avg:98.81ms +step:1354/1695 train_time:133786ms step_avg:98.81ms +step:1355/1695 train_time:133886ms step_avg:98.81ms +step:1356/1695 train_time:133986ms step_avg:98.81ms +step:1357/1695 train_time:134086ms step_avg:98.81ms +step:1358/1695 train_time:134187ms step_avg:98.81ms +step:1359/1695 train_time:134286ms step_avg:98.81ms +step:1360/1695 train_time:134386ms step_avg:98.81ms +step:1361/1695 train_time:134487ms step_avg:98.81ms +step:1362/1695 train_time:134587ms step_avg:98.82ms +step:1363/1695 train_time:134687ms step_avg:98.82ms +step:1364/1695 train_time:134788ms step_avg:98.82ms +step:1365/1695 train_time:134888ms step_avg:98.82ms +step:1366/1695 train_time:134990ms step_avg:98.82ms +step:1367/1695 train_time:135090ms step_avg:98.82ms +step:1368/1695 train_time:135192ms step_avg:98.82ms +step:1369/1695 train_time:135293ms step_avg:98.83ms +step:1370/1695 train_time:135394ms step_avg:98.83ms +step:1371/1695 train_time:135495ms step_avg:98.83ms +step:1372/1695 train_time:135595ms step_avg:98.83ms +step:1373/1695 train_time:135696ms step_avg:98.83ms +step:1374/1695 train_time:135796ms step_avg:98.83ms +step:1375/1695 train_time:135901ms step_avg:98.84ms +step:1375/1695 val_loss:3.3563 
train_time:135999ms step_avg:98.91ms +step:1376/1695 train_time:136030ms step_avg:98.86ms +step:1377/1695 train_time:136115ms step_avg:98.85ms +step:1378/1695 train_time:136216ms step_avg:98.85ms +step:1379/1695 train_time:136316ms step_avg:98.85ms +step:1380/1695 train_time:136417ms step_avg:98.85ms +step:1381/1695 train_time:136516ms step_avg:98.85ms +step:1382/1695 train_time:136616ms step_avg:98.85ms +step:1383/1695 train_time:136716ms step_avg:98.85ms +step:1384/1695 train_time:136816ms step_avg:98.86ms +step:1385/1695 train_time:136919ms step_avg:98.86ms +step:1386/1695 train_time:137023ms step_avg:98.86ms +step:1387/1695 train_time:137125ms step_avg:98.86ms +step:1388/1695 train_time:137228ms step_avg:98.87ms +step:1389/1695 train_time:137330ms step_avg:98.87ms +step:1390/1695 train_time:137431ms step_avg:98.87ms +step:1391/1695 train_time:137533ms step_avg:98.87ms +step:1392/1695 train_time:137634ms step_avg:98.88ms +step:1393/1695 train_time:137736ms step_avg:98.88ms +step:1394/1695 train_time:137837ms step_avg:98.88ms +step:1395/1695 train_time:137938ms step_avg:98.88ms +step:1396/1695 train_time:138041ms step_avg:98.88ms +step:1397/1695 train_time:138143ms step_avg:98.89ms +step:1398/1695 train_time:138246ms step_avg:98.89ms +step:1399/1695 train_time:138349ms step_avg:98.89ms +step:1400/1695 train_time:138451ms step_avg:98.89ms +step:1401/1695 train_time:138552ms step_avg:98.90ms +step:1402/1695 train_time:138654ms step_avg:98.90ms +step:1403/1695 train_time:138756ms step_avg:98.90ms +step:1404/1695 train_time:138858ms step_avg:98.90ms +step:1405/1695 train_time:138960ms step_avg:98.90ms +step:1406/1695 train_time:139062ms step_avg:98.91ms +step:1407/1695 train_time:139163ms step_avg:98.91ms +step:1408/1695 train_time:139265ms step_avg:98.91ms +step:1409/1695 train_time:139370ms step_avg:98.91ms +step:1410/1695 train_time:139472ms step_avg:98.92ms +step:1411/1695 train_time:139574ms step_avg:98.92ms +step:1412/1695 train_time:139678ms step_avg:98.92ms +step:1413/1695 train_time:139778ms step_avg:98.92ms +step:1414/1695 train_time:139879ms step_avg:98.92ms +step:1415/1695 train_time:139981ms step_avg:98.93ms +step:1416/1695 train_time:140082ms step_avg:98.93ms +step:1417/1695 train_time:140183ms step_avg:98.93ms +step:1418/1695 train_time:140284ms step_avg:98.93ms +step:1419/1695 train_time:140387ms step_avg:98.93ms +step:1420/1695 train_time:140491ms step_avg:98.94ms +step:1421/1695 train_time:140593ms step_avg:98.94ms +step:1422/1695 train_time:140695ms step_avg:98.94ms +step:1423/1695 train_time:140796ms step_avg:98.94ms +step:1424/1695 train_time:140898ms step_avg:98.95ms +step:1425/1695 train_time:141000ms step_avg:98.95ms +step:1426/1695 train_time:141102ms step_avg:98.95ms +step:1427/1695 train_time:141203ms step_avg:98.95ms +step:1428/1695 train_time:141305ms step_avg:98.95ms +step:1429/1695 train_time:141409ms step_avg:98.96ms +step:1430/1695 train_time:141510ms step_avg:98.96ms +step:1431/1695 train_time:141612ms step_avg:98.96ms +step:1432/1695 train_time:141713ms step_avg:98.96ms +step:1433/1695 train_time:141814ms step_avg:98.96ms +step:1434/1695 train_time:141914ms step_avg:98.96ms +step:1435/1695 train_time:142016ms step_avg:98.97ms +step:1436/1695 train_time:142118ms step_avg:98.97ms +step:1437/1695 train_time:142220ms step_avg:98.97ms +step:1438/1695 train_time:142323ms step_avg:98.97ms +step:1439/1695 train_time:142425ms step_avg:98.98ms +step:1440/1695 train_time:142528ms step_avg:98.98ms +step:1441/1695 train_time:142631ms step_avg:98.98ms +step:1442/1695 
train_time:142731ms step_avg:98.98ms +step:1443/1695 train_time:142832ms step_avg:98.98ms +step:1444/1695 train_time:142933ms step_avg:98.98ms +step:1445/1695 train_time:143035ms step_avg:98.99ms +step:1446/1695 train_time:143135ms step_avg:98.99ms +step:1447/1695 train_time:143236ms step_avg:98.99ms +step:1448/1695 train_time:143339ms step_avg:98.99ms +step:1449/1695 train_time:143440ms step_avg:98.99ms +step:1450/1695 train_time:143542ms step_avg:98.99ms +step:1451/1695 train_time:143644ms step_avg:99.00ms +step:1452/1695 train_time:143746ms step_avg:99.00ms +step:1453/1695 train_time:143850ms step_avg:99.00ms +step:1454/1695 train_time:143953ms step_avg:99.00ms +step:1455/1695 train_time:144055ms step_avg:99.01ms +step:1456/1695 train_time:144156ms step_avg:99.01ms +step:1457/1695 train_time:144258ms step_avg:99.01ms +step:1458/1695 train_time:144360ms step_avg:99.01ms +step:1459/1695 train_time:144461ms step_avg:99.01ms +step:1460/1695 train_time:144562ms step_avg:99.01ms +step:1461/1695 train_time:144664ms step_avg:99.02ms +step:1462/1695 train_time:144766ms step_avg:99.02ms +step:1463/1695 train_time:144868ms step_avg:99.02ms +step:1464/1695 train_time:144971ms step_avg:99.02ms +step:1465/1695 train_time:145072ms step_avg:99.03ms +step:1466/1695 train_time:145173ms step_avg:99.03ms +step:1467/1695 train_time:145273ms step_avg:99.03ms +step:1468/1695 train_time:145376ms step_avg:99.03ms +step:1469/1695 train_time:145478ms step_avg:99.03ms +step:1470/1695 train_time:145579ms step_avg:99.03ms +step:1471/1695 train_time:145683ms step_avg:99.04ms +step:1472/1695 train_time:145785ms step_avg:99.04ms +step:1473/1695 train_time:145887ms step_avg:99.04ms +step:1474/1695 train_time:145989ms step_avg:99.04ms +step:1475/1695 train_time:146091ms step_avg:99.04ms +step:1476/1695 train_time:146193ms step_avg:99.05ms +step:1477/1695 train_time:146294ms step_avg:99.05ms +step:1478/1695 train_time:146395ms step_avg:99.05ms +step:1479/1695 train_time:146496ms step_avg:99.05ms +step:1480/1695 train_time:146598ms step_avg:99.05ms +step:1481/1695 train_time:146702ms step_avg:99.06ms +step:1482/1695 train_time:146803ms step_avg:99.06ms +step:1483/1695 train_time:146906ms step_avg:99.06ms +step:1484/1695 train_time:147009ms step_avg:99.06ms +step:1485/1695 train_time:147111ms step_avg:99.06ms +step:1486/1695 train_time:147211ms step_avg:99.07ms +step:1487/1695 train_time:147312ms step_avg:99.07ms +step:1488/1695 train_time:147415ms step_avg:99.07ms +step:1489/1695 train_time:147516ms step_avg:99.07ms +step:1490/1695 train_time:147618ms step_avg:99.07ms +step:1491/1695 train_time:147720ms step_avg:99.07ms +step:1492/1695 train_time:147822ms step_avg:99.08ms +step:1493/1695 train_time:147924ms step_avg:99.08ms +step:1494/1695 train_time:148027ms step_avg:99.08ms +step:1495/1695 train_time:148129ms step_avg:99.08ms +step:1496/1695 train_time:148231ms step_avg:99.08ms +step:1497/1695 train_time:148331ms step_avg:99.09ms +step:1498/1695 train_time:148433ms step_avg:99.09ms +step:1499/1695 train_time:148534ms step_avg:99.09ms +step:1500/1695 train_time:148635ms step_avg:99.09ms +step:1500/1695 val_loss:3.3218 train_time:148735ms step_avg:99.16ms +step:1501/1695 train_time:148763ms step_avg:99.11ms +step:1502/1695 train_time:148848ms step_avg:99.10ms +step:1503/1695 train_time:148949ms step_avg:99.10ms +step:1504/1695 train_time:149049ms step_avg:99.10ms +step:1505/1695 train_time:149150ms step_avg:99.10ms +step:1506/1695 train_time:149251ms step_avg:99.10ms +step:1507/1695 train_time:149352ms step_avg:99.11ms 
+step:1508/1695 train_time:149452ms step_avg:99.11ms +step:1509/1695 train_time:149554ms step_avg:99.11ms +step:1510/1695 train_time:149656ms step_avg:99.11ms +step:1511/1695 train_time:149759ms step_avg:99.11ms +step:1512/1695 train_time:149863ms step_avg:99.12ms +step:1513/1695 train_time:149965ms step_avg:99.12ms +step:1514/1695 train_time:150067ms step_avg:99.12ms +step:1515/1695 train_time:150173ms step_avg:99.12ms +step:1516/1695 train_time:150274ms step_avg:99.13ms +step:1517/1695 train_time:150374ms step_avg:99.13ms +step:1518/1695 train_time:150475ms step_avg:99.13ms +step:1519/1695 train_time:150579ms step_avg:99.13ms +step:1520/1695 train_time:150680ms step_avg:99.13ms +step:1521/1695 train_time:150782ms step_avg:99.13ms +step:1522/1695 train_time:150884ms step_avg:99.14ms +step:1523/1695 train_time:150986ms step_avg:99.14ms +step:1524/1695 train_time:151090ms step_avg:99.14ms +step:1525/1695 train_time:151194ms step_avg:99.14ms +step:1526/1695 train_time:151295ms step_avg:99.15ms +step:1527/1695 train_time:151397ms step_avg:99.15ms +step:1528/1695 train_time:151503ms step_avg:99.15ms +step:1529/1695 train_time:151603ms step_avg:99.15ms +step:1530/1695 train_time:151706ms step_avg:99.15ms +step:1531/1695 train_time:151808ms step_avg:99.16ms +step:1532/1695 train_time:151910ms step_avg:99.16ms +step:1533/1695 train_time:152013ms step_avg:99.16ms +step:1534/1695 train_time:152115ms step_avg:99.16ms +step:1535/1695 train_time:152217ms step_avg:99.16ms +step:1536/1695 train_time:152319ms step_avg:99.17ms +step:1537/1695 train_time:152420ms step_avg:99.17ms +step:1538/1695 train_time:152522ms step_avg:99.17ms +step:1539/1695 train_time:152623ms step_avg:99.17ms +step:1540/1695 train_time:152725ms step_avg:99.17ms +step:1541/1695 train_time:152828ms step_avg:99.17ms +step:1542/1695 train_time:152932ms step_avg:99.18ms +step:1543/1695 train_time:153035ms step_avg:99.18ms +step:1544/1695 train_time:153136ms step_avg:99.18ms +step:1545/1695 train_time:153238ms step_avg:99.18ms +step:1546/1695 train_time:153340ms step_avg:99.18ms +step:1547/1695 train_time:153443ms step_avg:99.19ms +step:1548/1695 train_time:153545ms step_avg:99.19ms +step:1549/1695 train_time:153647ms step_avg:99.19ms +step:1550/1695 train_time:153748ms step_avg:99.19ms +step:1551/1695 train_time:153850ms step_avg:99.19ms +step:1552/1695 train_time:153953ms step_avg:99.20ms +step:1553/1695 train_time:154056ms step_avg:99.20ms +step:1554/1695 train_time:154158ms step_avg:99.20ms +step:1555/1695 train_time:154259ms step_avg:99.20ms +step:1556/1695 train_time:154361ms step_avg:99.20ms +step:1557/1695 train_time:154464ms step_avg:99.21ms +step:1558/1695 train_time:154567ms step_avg:99.21ms +step:1559/1695 train_time:154669ms step_avg:99.21ms +step:1560/1695 train_time:154771ms step_avg:99.21ms +step:1561/1695 train_time:154873ms step_avg:99.21ms +step:1562/1695 train_time:154977ms step_avg:99.22ms +step:1563/1695 train_time:155081ms step_avg:99.22ms +step:1564/1695 train_time:155183ms step_avg:99.22ms +step:1565/1695 train_time:155283ms step_avg:99.22ms +step:1566/1695 train_time:155384ms step_avg:99.22ms +step:1567/1695 train_time:155485ms step_avg:99.22ms +step:1568/1695 train_time:155587ms step_avg:99.23ms +step:1569/1695 train_time:155688ms step_avg:99.23ms +step:1570/1695 train_time:155792ms step_avg:99.23ms +step:1571/1695 train_time:155893ms step_avg:99.23ms +step:1572/1695 train_time:155994ms step_avg:99.23ms +step:1573/1695 train_time:156096ms step_avg:99.23ms +step:1574/1695 train_time:156198ms step_avg:99.24ms 
+step:1575/1695 train_time:156299ms step_avg:99.24ms +step:1576/1695 train_time:156401ms step_avg:99.24ms +step:1577/1695 train_time:156505ms step_avg:99.24ms +step:1578/1695 train_time:156606ms step_avg:99.24ms +step:1579/1695 train_time:156707ms step_avg:99.24ms +step:1580/1695 train_time:156810ms step_avg:99.25ms +step:1581/1695 train_time:156911ms step_avg:99.25ms +step:1582/1695 train_time:157012ms step_avg:99.25ms +step:1583/1695 train_time:157115ms step_avg:99.25ms +step:1584/1695 train_time:157217ms step_avg:99.25ms +step:1585/1695 train_time:157319ms step_avg:99.25ms +step:1586/1695 train_time:157423ms step_avg:99.26ms +step:1587/1695 train_time:157525ms step_avg:99.26ms +step:1588/1695 train_time:157626ms step_avg:99.26ms +step:1589/1695 train_time:157728ms step_avg:99.26ms +step:1590/1695 train_time:157830ms step_avg:99.26ms +step:1591/1695 train_time:157931ms step_avg:99.27ms +step:1592/1695 train_time:158033ms step_avg:99.27ms +step:1593/1695 train_time:158134ms step_avg:99.27ms +step:1594/1695 train_time:158238ms step_avg:99.27ms +step:1595/1695 train_time:158340ms step_avg:99.27ms +step:1596/1695 train_time:158442ms step_avg:99.27ms +step:1597/1695 train_time:158544ms step_avg:99.28ms +step:1598/1695 train_time:158648ms step_avg:99.28ms +step:1599/1695 train_time:158750ms step_avg:99.28ms +step:1600/1695 train_time:158852ms step_avg:99.28ms +step:1601/1695 train_time:158954ms step_avg:99.28ms +step:1602/1695 train_time:159056ms step_avg:99.29ms +step:1603/1695 train_time:159157ms step_avg:99.29ms +step:1604/1695 train_time:159259ms step_avg:99.29ms +step:1605/1695 train_time:159362ms step_avg:99.29ms +step:1606/1695 train_time:159465ms step_avg:99.29ms +step:1607/1695 train_time:159566ms step_avg:99.29ms +step:1608/1695 train_time:159666ms step_avg:99.29ms +step:1609/1695 train_time:159767ms step_avg:99.30ms +step:1610/1695 train_time:159870ms step_avg:99.30ms +step:1611/1695 train_time:159972ms step_avg:99.30ms +step:1612/1695 train_time:160073ms step_avg:99.30ms +step:1613/1695 train_time:160174ms step_avg:99.30ms +step:1614/1695 train_time:160276ms step_avg:99.30ms +step:1615/1695 train_time:160378ms step_avg:99.31ms +step:1616/1695 train_time:160480ms step_avg:99.31ms +step:1617/1695 train_time:160583ms step_avg:99.31ms +step:1618/1695 train_time:160686ms step_avg:99.31ms +step:1619/1695 train_time:160788ms step_avg:99.31ms +step:1620/1695 train_time:160890ms step_avg:99.32ms +step:1621/1695 train_time:160991ms step_avg:99.32ms +step:1622/1695 train_time:161092ms step_avg:99.32ms +step:1623/1695 train_time:161195ms step_avg:99.32ms +step:1624/1695 train_time:161297ms step_avg:99.32ms +step:1625/1695 train_time:161401ms step_avg:99.32ms +step:1625/1695 val_loss:3.2929 train_time:161502ms step_avg:99.39ms +step:1626/1695 train_time:161530ms step_avg:99.34ms +step:1627/1695 train_time:161615ms step_avg:99.33ms +step:1628/1695 train_time:161716ms step_avg:99.33ms +step:1629/1695 train_time:161818ms step_avg:99.34ms +step:1630/1695 train_time:161920ms step_avg:99.34ms +step:1631/1695 train_time:162021ms step_avg:99.34ms +step:1632/1695 train_time:162121ms step_avg:99.34ms +step:1633/1695 train_time:162222ms step_avg:99.34ms +step:1634/1695 train_time:162325ms step_avg:99.34ms +step:1635/1695 train_time:162427ms step_avg:99.34ms +step:1636/1695 train_time:162529ms step_avg:99.35ms +step:1637/1695 train_time:162634ms step_avg:99.35ms +step:1638/1695 train_time:162736ms step_avg:99.35ms +step:1639/1695 train_time:162838ms step_avg:99.35ms +step:1640/1695 train_time:162941ms 
step_avg:99.35ms +step:1641/1695 train_time:163043ms step_avg:99.36ms +step:1642/1695 train_time:163145ms step_avg:99.36ms +step:1643/1695 train_time:163247ms step_avg:99.36ms +step:1644/1695 train_time:163349ms step_avg:99.36ms +step:1645/1695 train_time:163452ms step_avg:99.36ms +step:1646/1695 train_time:163556ms step_avg:99.37ms +step:1647/1695 train_time:163661ms step_avg:99.37ms +step:1648/1695 train_time:163764ms step_avg:99.37ms +step:1649/1695 train_time:163867ms step_avg:99.37ms +step:1650/1695 train_time:163971ms step_avg:99.38ms +step:1651/1695 train_time:164074ms step_avg:99.38ms +step:1652/1695 train_time:164178ms step_avg:99.38ms +step:1653/1695 train_time:164281ms step_avg:99.38ms +step:1654/1695 train_time:164383ms step_avg:99.38ms +step:1655/1695 train_time:164484ms step_avg:99.39ms +step:1656/1695 train_time:164587ms step_avg:99.39ms +step:1657/1695 train_time:164690ms step_avg:99.39ms +step:1658/1695 train_time:164794ms step_avg:99.39ms +step:1659/1695 train_time:164899ms step_avg:99.40ms +step:1660/1695 train_time:165001ms step_avg:99.40ms +step:1661/1695 train_time:165105ms step_avg:99.40ms +step:1662/1695 train_time:165213ms step_avg:99.41ms +step:1663/1695 train_time:165316ms step_avg:99.41ms +step:1664/1695 train_time:165417ms step_avg:99.41ms +step:1665/1695 train_time:165523ms step_avg:99.41ms +step:1666/1695 train_time:165626ms step_avg:99.42ms +step:1667/1695 train_time:165727ms step_avg:99.42ms +step:1668/1695 train_time:165833ms step_avg:99.42ms +step:1669/1695 train_time:165938ms step_avg:99.42ms +step:1670/1695 train_time:166041ms step_avg:99.43ms +step:1671/1695 train_time:166143ms step_avg:99.43ms +step:1672/1695 train_time:166247ms step_avg:99.43ms +step:1673/1695 train_time:166349ms step_avg:99.43ms +step:1674/1695 train_time:166451ms step_avg:99.43ms +step:1675/1695 train_time:166555ms step_avg:99.44ms +step:1676/1695 train_time:166659ms step_avg:99.44ms +step:1677/1695 train_time:166760ms step_avg:99.44ms +step:1678/1695 train_time:166864ms step_avg:99.44ms +step:1679/1695 train_time:166968ms step_avg:99.45ms +step:1680/1695 train_time:167071ms step_avg:99.45ms +step:1681/1695 train_time:167175ms step_avg:99.45ms +step:1682/1695 train_time:167282ms step_avg:99.45ms +step:1683/1695 train_time:167383ms step_avg:99.46ms +step:1684/1695 train_time:167486ms step_avg:99.46ms +step:1685/1695 train_time:167590ms step_avg:99.46ms +step:1686/1695 train_time:167693ms step_avg:99.46ms +step:1687/1695 train_time:167796ms step_avg:99.46ms +step:1688/1695 train_time:167899ms step_avg:99.47ms +step:1689/1695 train_time:168000ms step_avg:99.47ms +step:1690/1695 train_time:168103ms step_avg:99.47ms +step:1691/1695 train_time:168206ms step_avg:99.47ms +step:1692/1695 train_time:168309ms step_avg:99.47ms +step:1693/1695 train_time:168412ms step_avg:99.48ms +step:1694/1695 train_time:168516ms step_avg:99.48ms +step:1695/1695 train_time:168619ms step_avg:99.48ms +step:1695/1695 val_loss:3.2800 train_time:168718ms step_avg:99.54ms +peak memory allocated: 34221 MiB reserved: 49440 MiB diff --git a/records/082325_SparseAttnGate/d3e1ea3c-521c-4abd-a549-950c698d6cbf.txt b/records/082325_SparseAttnGate/d3e1ea3c-521c-4abd-a549-950c698d6cbf.txt new file mode 100644 index 000000000..050c5f3ef --- /dev/null +++ b/records/082325_SparseAttnGate/d3e1ea3c-521c-4abd-a549-950c698d6cbf.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
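# advance the A and A.T pointer grids to the next K block (both step along A's column stride) +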
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
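+ + Schematically, the update that step() below applies to each 2D parameter p is (a single-device sketch that ignores the lr_mul/wd_mul multipliers and the cross-GPU sharding; newton_schulz_triton is the orthogonalizer defined above): + + momentum_buffer.lerp_(grad, 1 - momentum) # running momentum buffer + g = grad.lerp_(momentum_buffer, momentum) # Nesterov-style blend + p.mul_(1 - lr * weight_decay) # decoupled weight decay + p.add_(newton_schulz_triton(g), alpha=-lr * max(1, p.size(-2) / p.size(-1)) ** 0.5)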
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
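# average each grad across ranks; reduce_scatter_tensor leaves this rank's 1/world_size slice in grad_slice +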
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
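+ # these per-tensor multipliers are read by both DistAdam and Muon via getattr(p, "lr_mul", 1.0)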
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ...
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks) + block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(block_masks) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap +def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int): + boundary_mask = tokens[pos : pos + token_window] == 50256 + boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos + start = boundary_positions[0].item() + starts = [] + for i in range(1, len(boundary_positions)): + end = boundary_positions[i].item() + if end - start >= seq_len: + starts.append(start) # append start once end pos is confirmed + if len(starts) == dist.get_world_size(): + return starts, end - pos + start = end + assert False # increase token_window if necessary + +def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool): + rank = dist.get_rank() + world_size = dist.get_world_size() + batch_size = seq_len * world_size + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
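# val_loss now holds the average over all ranks' validation shards +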
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:16:15 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 31C P0 118W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 123W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 301329 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 301330 C /usr/bin/python3 614MiB | +| 0 N/A N/A 301331 C /usr/bin/python3 614MiB | +| 0 N/A N/A 301332 C /usr/bin/python3 614MiB | +| 0 N/A N/A 301333 C /usr/bin/python3 614MiB | +| 0 N/A N/A 301334 C /usr/bin/python3 614MiB | +| 0 N/A N/A 301335 C /usr/bin/python3 614MiB | +| 0 N/A N/A 301336 C /usr/bin/python3 614MiB | +| 1 N/A N/A 301330 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 301331 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 301332 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 301333 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 301334 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 301335 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 301336 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:159ms step_avg:159.07ms +step:2/1695 train_time:185ms step_avg:92.35ms +step:3/1695 train_time:255ms step_avg:84.99ms +step:4/1695 train_time:347ms step_avg:86.81ms +step:5/1695 train_time:439ms step_avg:87.81ms +step:6/1695 train_time:532ms step_avg:88.59ms +step:7/1695 train_time:624ms step_avg:89.20ms +step:8/1695 
train_time:717ms step_avg:89.59ms +step:9/1695 train_time:810ms step_avg:89.98ms +step:10/1695 train_time:903ms step_avg:90.31ms +step:11/1695 train_time:996ms step_avg:90.52ms +step:12/1695 train_time:1091ms step_avg:90.88ms +step:13/1695 train_time:1189ms step_avg:91.46ms +step:14/1695 train_time:1285ms step_avg:91.78ms +step:15/1695 train_time:1379ms step_avg:91.96ms +step:16/1695 train_time:1473ms step_avg:92.04ms +step:17/1695 train_time:1567ms step_avg:92.17ms +step:18/1695 train_time:1660ms step_avg:92.24ms +step:19/1695 train_time:1753ms step_avg:92.28ms +step:20/1695 train_time:1847ms step_avg:92.36ms +step:21/1695 train_time:1940ms step_avg:92.36ms +step:22/1695 train_time:2032ms step_avg:92.38ms +step:23/1695 train_time:2127ms step_avg:92.46ms +step:24/1695 train_time:2222ms step_avg:92.58ms +step:25/1695 train_time:2316ms step_avg:92.62ms +step:26/1695 train_time:2410ms step_avg:92.69ms +step:27/1695 train_time:2504ms step_avg:92.74ms +step:28/1695 train_time:2599ms step_avg:92.81ms +step:29/1695 train_time:2692ms step_avg:92.83ms +step:30/1695 train_time:2786ms step_avg:92.87ms +step:31/1695 train_time:2880ms step_avg:92.89ms +step:32/1695 train_time:2973ms step_avg:92.90ms +step:33/1695 train_time:3067ms step_avg:92.93ms +step:34/1695 train_time:3161ms step_avg:92.97ms +step:35/1695 train_time:3254ms step_avg:92.98ms +step:36/1695 train_time:3349ms step_avg:93.03ms +step:37/1695 train_time:3443ms step_avg:93.06ms +step:38/1695 train_time:3538ms step_avg:93.10ms +step:39/1695 train_time:3632ms step_avg:93.12ms +step:40/1695 train_time:3725ms step_avg:93.14ms +step:41/1695 train_time:3819ms step_avg:93.14ms +step:42/1695 train_time:3912ms step_avg:93.14ms +step:43/1695 train_time:4006ms step_avg:93.17ms +step:44/1695 train_time:4100ms step_avg:93.18ms +step:45/1695 train_time:4194ms step_avg:93.20ms +step:46/1695 train_time:4288ms step_avg:93.22ms +step:47/1695 train_time:4385ms step_avg:93.29ms +step:48/1695 train_time:4476ms step_avg:93.26ms +step:49/1695 train_time:4570ms step_avg:93.28ms +step:50/1695 train_time:4664ms step_avg:93.29ms +step:51/1695 train_time:4758ms step_avg:93.29ms +step:52/1695 train_time:4851ms step_avg:93.30ms +step:53/1695 train_time:4944ms step_avg:93.29ms +step:54/1695 train_time:5038ms step_avg:93.30ms +step:55/1695 train_time:5131ms step_avg:93.29ms +step:56/1695 train_time:5225ms step_avg:93.30ms +step:57/1695 train_time:5319ms step_avg:93.31ms +step:58/1695 train_time:5412ms step_avg:93.31ms +step:59/1695 train_time:5506ms step_avg:93.32ms +step:60/1695 train_time:5600ms step_avg:93.33ms +step:61/1695 train_time:5693ms step_avg:93.33ms +step:62/1695 train_time:5787ms step_avg:93.34ms +step:63/1695 train_time:5881ms step_avg:93.35ms +step:64/1695 train_time:5974ms step_avg:93.34ms +step:65/1695 train_time:6067ms step_avg:93.34ms +step:66/1695 train_time:6160ms step_avg:93.34ms +step:67/1695 train_time:6255ms step_avg:93.36ms +step:68/1695 train_time:6349ms step_avg:93.37ms +step:69/1695 train_time:6443ms step_avg:93.38ms +step:70/1695 train_time:6537ms step_avg:93.39ms +step:71/1695 train_time:6630ms step_avg:93.38ms +step:72/1695 train_time:6724ms step_avg:93.39ms +step:73/1695 train_time:6817ms step_avg:93.39ms +step:74/1695 train_time:6910ms step_avg:93.38ms +step:75/1695 train_time:7004ms step_avg:93.39ms +step:76/1695 train_time:7098ms step_avg:93.40ms +step:77/1695 train_time:7191ms step_avg:93.39ms +step:78/1695 train_time:7285ms step_avg:93.40ms +step:79/1695 train_time:7379ms step_avg:93.41ms +step:80/1695 train_time:7472ms 
step_avg:93.41ms +step:81/1695 train_time:7567ms step_avg:93.42ms +step:82/1695 train_time:7660ms step_avg:93.42ms +step:83/1695 train_time:7754ms step_avg:93.42ms +step:84/1695 train_time:7848ms step_avg:93.43ms +step:85/1695 train_time:7942ms step_avg:93.43ms +step:86/1695 train_time:8035ms step_avg:93.43ms +step:87/1695 train_time:8129ms step_avg:93.44ms +step:88/1695 train_time:8223ms step_avg:93.45ms +step:89/1695 train_time:8317ms step_avg:93.45ms +step:90/1695 train_time:8411ms step_avg:93.46ms +step:91/1695 train_time:8506ms step_avg:93.47ms +step:92/1695 train_time:8600ms step_avg:93.48ms +step:93/1695 train_time:8694ms step_avg:93.48ms +step:94/1695 train_time:8788ms step_avg:93.49ms +step:95/1695 train_time:8881ms step_avg:93.49ms +step:96/1695 train_time:8975ms step_avg:93.49ms +step:97/1695 train_time:9068ms step_avg:93.49ms +step:98/1695 train_time:9162ms step_avg:93.49ms +step:99/1695 train_time:9255ms step_avg:93.49ms +step:100/1695 train_time:9349ms step_avg:93.49ms +step:101/1695 train_time:9442ms step_avg:93.49ms +step:102/1695 train_time:9536ms step_avg:93.49ms +step:103/1695 train_time:9630ms step_avg:93.50ms +step:104/1695 train_time:9724ms step_avg:93.50ms +step:105/1695 train_time:9818ms step_avg:93.50ms +step:106/1695 train_time:9911ms step_avg:93.50ms +step:107/1695 train_time:10005ms step_avg:93.50ms +step:108/1695 train_time:10099ms step_avg:93.51ms +step:109/1695 train_time:10192ms step_avg:93.51ms +step:110/1695 train_time:10286ms step_avg:93.51ms +step:111/1695 train_time:10380ms step_avg:93.51ms +step:112/1695 train_time:10473ms step_avg:93.51ms +step:113/1695 train_time:10567ms step_avg:93.52ms +step:114/1695 train_time:10661ms step_avg:93.52ms +step:115/1695 train_time:10755ms step_avg:93.52ms +step:116/1695 train_time:10849ms step_avg:93.53ms +step:117/1695 train_time:10943ms step_avg:93.53ms +step:118/1695 train_time:11037ms step_avg:93.53ms +step:119/1695 train_time:11130ms step_avg:93.53ms +step:120/1695 train_time:11224ms step_avg:93.53ms +step:121/1695 train_time:11317ms step_avg:93.53ms +step:122/1695 train_time:11411ms step_avg:93.53ms +step:123/1695 train_time:11505ms step_avg:93.54ms +step:124/1695 train_time:11599ms step_avg:93.54ms +step:125/1695 train_time:11692ms step_avg:93.54ms +step:125/1695 val_loss:4.6040 train_time:11785ms step_avg:94.28ms +step:126/1695 train_time:11811ms step_avg:93.74ms +step:127/1695 train_time:11889ms step_avg:93.61ms +step:128/1695 train_time:11992ms step_avg:93.69ms +step:129/1695 train_time:12088ms step_avg:93.71ms +step:130/1695 train_time:12183ms step_avg:93.71ms +step:131/1695 train_time:12277ms step_avg:93.71ms +step:132/1695 train_time:12370ms step_avg:93.71ms +step:133/1695 train_time:12463ms step_avg:93.71ms +step:134/1695 train_time:12557ms step_avg:93.71ms +step:135/1695 train_time:12651ms step_avg:93.71ms +step:136/1695 train_time:12744ms step_avg:93.71ms +step:137/1695 train_time:12839ms step_avg:93.71ms +step:138/1695 train_time:12935ms step_avg:93.73ms +step:139/1695 train_time:13030ms step_avg:93.74ms +step:140/1695 train_time:13126ms step_avg:93.76ms +step:141/1695 train_time:13221ms step_avg:93.76ms +step:142/1695 train_time:13315ms step_avg:93.77ms +step:143/1695 train_time:13408ms step_avg:93.76ms +step:144/1695 train_time:13501ms step_avg:93.76ms +step:145/1695 train_time:13595ms step_avg:93.76ms +step:146/1695 train_time:13689ms step_avg:93.76ms +step:147/1695 train_time:13782ms step_avg:93.75ms +step:148/1695 train_time:13878ms step_avg:93.77ms +step:149/1695 train_time:13972ms 
step_avg:93.77ms +step:150/1695 train_time:14067ms step_avg:93.78ms +step:151/1695 train_time:14161ms step_avg:93.78ms +step:152/1695 train_time:14256ms step_avg:93.79ms +step:153/1695 train_time:14351ms step_avg:93.79ms +step:154/1695 train_time:14445ms step_avg:93.80ms +step:155/1695 train_time:14540ms step_avg:93.81ms +step:156/1695 train_time:14634ms step_avg:93.80ms +step:157/1695 train_time:14727ms step_avg:93.80ms +step:158/1695 train_time:14822ms step_avg:93.81ms +step:159/1695 train_time:14917ms step_avg:93.82ms +step:160/1695 train_time:15011ms step_avg:93.82ms +step:161/1695 train_time:15105ms step_avg:93.82ms +step:162/1695 train_time:15200ms step_avg:93.83ms +step:163/1695 train_time:15294ms step_avg:93.83ms +step:164/1695 train_time:15389ms step_avg:93.84ms +step:165/1695 train_time:15483ms step_avg:93.84ms +step:166/1695 train_time:15577ms step_avg:93.84ms +step:167/1695 train_time:15671ms step_avg:93.84ms +step:168/1695 train_time:15764ms step_avg:93.83ms +step:169/1695 train_time:15859ms step_avg:93.84ms +step:170/1695 train_time:15952ms step_avg:93.84ms +step:171/1695 train_time:16046ms step_avg:93.84ms +step:172/1695 train_time:16143ms step_avg:93.85ms +step:173/1695 train_time:16237ms step_avg:93.86ms +step:174/1695 train_time:16331ms step_avg:93.86ms +step:175/1695 train_time:16424ms step_avg:93.85ms +step:176/1695 train_time:16519ms step_avg:93.86ms +step:177/1695 train_time:16612ms step_avg:93.86ms +step:178/1695 train_time:16707ms step_avg:93.86ms +step:179/1695 train_time:16801ms step_avg:93.86ms +step:180/1695 train_time:16896ms step_avg:93.87ms +step:181/1695 train_time:16990ms step_avg:93.87ms +step:182/1695 train_time:17084ms step_avg:93.87ms +step:183/1695 train_time:17179ms step_avg:93.88ms +step:184/1695 train_time:17273ms step_avg:93.87ms +step:185/1695 train_time:17367ms step_avg:93.87ms +step:186/1695 train_time:17461ms step_avg:93.87ms +step:187/1695 train_time:17555ms step_avg:93.87ms +step:188/1695 train_time:17648ms step_avg:93.87ms +step:189/1695 train_time:17743ms step_avg:93.88ms +step:190/1695 train_time:17838ms step_avg:93.88ms +step:191/1695 train_time:17932ms step_avg:93.88ms +step:192/1695 train_time:18025ms step_avg:93.88ms +step:193/1695 train_time:18120ms step_avg:93.88ms +step:194/1695 train_time:18214ms step_avg:93.89ms +step:195/1695 train_time:18307ms step_avg:93.88ms +step:196/1695 train_time:18403ms step_avg:93.89ms +step:197/1695 train_time:18498ms step_avg:93.90ms +step:198/1695 train_time:18592ms step_avg:93.90ms +step:199/1695 train_time:18686ms step_avg:93.90ms +step:200/1695 train_time:18780ms step_avg:93.90ms +step:201/1695 train_time:18874ms step_avg:93.90ms +step:202/1695 train_time:18968ms step_avg:93.90ms +step:203/1695 train_time:19062ms step_avg:93.90ms +step:204/1695 train_time:19156ms step_avg:93.90ms +step:205/1695 train_time:19251ms step_avg:93.91ms +step:206/1695 train_time:19345ms step_avg:93.91ms +step:207/1695 train_time:19439ms step_avg:93.91ms +step:208/1695 train_time:19533ms step_avg:93.91ms +step:209/1695 train_time:19626ms step_avg:93.90ms +step:210/1695 train_time:19720ms step_avg:93.90ms +step:211/1695 train_time:19814ms step_avg:93.90ms +step:212/1695 train_time:19908ms step_avg:93.90ms +step:213/1695 train_time:20003ms step_avg:93.91ms +step:214/1695 train_time:20098ms step_avg:93.91ms +step:215/1695 train_time:20191ms step_avg:93.91ms +step:216/1695 train_time:20286ms step_avg:93.91ms +step:217/1695 train_time:20380ms step_avg:93.92ms +step:218/1695 train_time:20475ms step_avg:93.92ms +step:219/1695 
train_time:20569ms step_avg:93.92ms +step:220/1695 train_time:20663ms step_avg:93.92ms +step:221/1695 train_time:20757ms step_avg:93.92ms +step:222/1695 train_time:20851ms step_avg:93.92ms +step:223/1695 train_time:20945ms step_avg:93.92ms +step:224/1695 train_time:21039ms step_avg:93.92ms +step:225/1695 train_time:21133ms step_avg:93.92ms +step:226/1695 train_time:21227ms step_avg:93.92ms +step:227/1695 train_time:21322ms step_avg:93.93ms +step:228/1695 train_time:21416ms step_avg:93.93ms +step:229/1695 train_time:21510ms step_avg:93.93ms +step:230/1695 train_time:21605ms step_avg:93.94ms +step:231/1695 train_time:21699ms step_avg:93.94ms +step:232/1695 train_time:21794ms step_avg:93.94ms +step:233/1695 train_time:21887ms step_avg:93.94ms +step:234/1695 train_time:21981ms step_avg:93.94ms +step:235/1695 train_time:22076ms step_avg:93.94ms +step:236/1695 train_time:22170ms step_avg:93.94ms +step:237/1695 train_time:22263ms step_avg:93.94ms +step:238/1695 train_time:22358ms step_avg:93.94ms +step:239/1695 train_time:22452ms step_avg:93.94ms +step:240/1695 train_time:22546ms step_avg:93.94ms +step:241/1695 train_time:22640ms step_avg:93.94ms +step:242/1695 train_time:22734ms step_avg:93.94ms +step:243/1695 train_time:22828ms step_avg:93.94ms +step:244/1695 train_time:22921ms step_avg:93.94ms +step:245/1695 train_time:23016ms step_avg:93.94ms +step:246/1695 train_time:23110ms step_avg:93.94ms +step:247/1695 train_time:23204ms step_avg:93.94ms +step:248/1695 train_time:23298ms step_avg:93.94ms +step:249/1695 train_time:23392ms step_avg:93.94ms +step:250/1695 train_time:23486ms step_avg:93.94ms +step:250/1695 val_loss:4.0758 train_time:23579ms step_avg:94.31ms +step:251/1695 train_time:23605ms step_avg:94.04ms +step:252/1695 train_time:23686ms step_avg:93.99ms +step:253/1695 train_time:23789ms step_avg:94.03ms +step:254/1695 train_time:23885ms step_avg:94.04ms +step:255/1695 train_time:23980ms step_avg:94.04ms +step:256/1695 train_time:24074ms step_avg:94.04ms +step:257/1695 train_time:24168ms step_avg:94.04ms +step:258/1695 train_time:24261ms step_avg:94.04ms +step:259/1695 train_time:24355ms step_avg:94.03ms +step:260/1695 train_time:24449ms step_avg:94.03ms +step:261/1695 train_time:24542ms step_avg:94.03ms +step:262/1695 train_time:24638ms step_avg:94.04ms +step:263/1695 train_time:24734ms step_avg:94.05ms +step:264/1695 train_time:24830ms step_avg:94.05ms +step:265/1695 train_time:24927ms step_avg:94.06ms +step:266/1695 train_time:25022ms step_avg:94.07ms +step:267/1695 train_time:25117ms step_avg:94.07ms +step:268/1695 train_time:25211ms step_avg:94.07ms +step:269/1695 train_time:25304ms step_avg:94.07ms +step:270/1695 train_time:25398ms step_avg:94.07ms +step:271/1695 train_time:25492ms step_avg:94.06ms +step:272/1695 train_time:25586ms step_avg:94.07ms +step:273/1695 train_time:25682ms step_avg:94.07ms +step:274/1695 train_time:25777ms step_avg:94.08ms +step:275/1695 train_time:25872ms step_avg:94.08ms +step:276/1695 train_time:25967ms step_avg:94.08ms +step:277/1695 train_time:26062ms step_avg:94.09ms +step:278/1695 train_time:26156ms step_avg:94.09ms +step:279/1695 train_time:26249ms step_avg:94.08ms +step:280/1695 train_time:26344ms step_avg:94.09ms +step:281/1695 train_time:26438ms step_avg:94.09ms +step:282/1695 train_time:26532ms step_avg:94.09ms +step:283/1695 train_time:26626ms step_avg:94.09ms +step:284/1695 train_time:26722ms step_avg:94.09ms +step:285/1695 train_time:26817ms step_avg:94.09ms +step:286/1695 train_time:26911ms step_avg:94.10ms +step:287/1695 train_time:27006ms 
step_avg:94.10ms +step:288/1695 train_time:27101ms step_avg:94.10ms +step:289/1695 train_time:27195ms step_avg:94.10ms +step:290/1695 train_time:27289ms step_avg:94.10ms +step:291/1695 train_time:27383ms step_avg:94.10ms +step:292/1695 train_time:27477ms step_avg:94.10ms +step:293/1695 train_time:27571ms step_avg:94.10ms +step:294/1695 train_time:27666ms step_avg:94.10ms +step:295/1695 train_time:27760ms step_avg:94.10ms +step:296/1695 train_time:27855ms step_avg:94.10ms +step:297/1695 train_time:27949ms step_avg:94.10ms +step:298/1695 train_time:28044ms step_avg:94.11ms +step:299/1695 train_time:28139ms step_avg:94.11ms +step:300/1695 train_time:28232ms step_avg:94.11ms +step:301/1695 train_time:28327ms step_avg:94.11ms +step:302/1695 train_time:28421ms step_avg:94.11ms +step:303/1695 train_time:28515ms step_avg:94.11ms +step:304/1695 train_time:28609ms step_avg:94.11ms +step:305/1695 train_time:28705ms step_avg:94.11ms +step:306/1695 train_time:28799ms step_avg:94.11ms +step:307/1695 train_time:28893ms step_avg:94.11ms +step:308/1695 train_time:28987ms step_avg:94.11ms +step:309/1695 train_time:29081ms step_avg:94.11ms +step:310/1695 train_time:29175ms step_avg:94.11ms +step:311/1695 train_time:29269ms step_avg:94.11ms +step:312/1695 train_time:29364ms step_avg:94.12ms +step:313/1695 train_time:29459ms step_avg:94.12ms +step:314/1695 train_time:29553ms step_avg:94.12ms +step:315/1695 train_time:29647ms step_avg:94.12ms +step:316/1695 train_time:29742ms step_avg:94.12ms +step:317/1695 train_time:29836ms step_avg:94.12ms +step:318/1695 train_time:29930ms step_avg:94.12ms +step:319/1695 train_time:30025ms step_avg:94.12ms +step:320/1695 train_time:30120ms step_avg:94.13ms +step:321/1695 train_time:30214ms step_avg:94.12ms +step:322/1695 train_time:30308ms step_avg:94.12ms +step:323/1695 train_time:30402ms step_avg:94.12ms +step:324/1695 train_time:30495ms step_avg:94.12ms +step:325/1695 train_time:30589ms step_avg:94.12ms +step:326/1695 train_time:30684ms step_avg:94.12ms +step:327/1695 train_time:30779ms step_avg:94.13ms +step:328/1695 train_time:30874ms step_avg:94.13ms +step:329/1695 train_time:30968ms step_avg:94.13ms +step:330/1695 train_time:31062ms step_avg:94.13ms +step:331/1695 train_time:31156ms step_avg:94.13ms +step:332/1695 train_time:31251ms step_avg:94.13ms +step:333/1695 train_time:31346ms step_avg:94.13ms +step:334/1695 train_time:31441ms step_avg:94.13ms +step:335/1695 train_time:31535ms step_avg:94.13ms +step:336/1695 train_time:31629ms step_avg:94.13ms +step:337/1695 train_time:31724ms step_avg:94.14ms +step:338/1695 train_time:31819ms step_avg:94.14ms +step:339/1695 train_time:31913ms step_avg:94.14ms +step:340/1695 train_time:32007ms step_avg:94.14ms +step:341/1695 train_time:32102ms step_avg:94.14ms +step:342/1695 train_time:32196ms step_avg:94.14ms +step:343/1695 train_time:32290ms step_avg:94.14ms +step:344/1695 train_time:32385ms step_avg:94.14ms +step:345/1695 train_time:32480ms step_avg:94.15ms +step:346/1695 train_time:32574ms step_avg:94.14ms +step:347/1695 train_time:32668ms step_avg:94.14ms +step:348/1695 train_time:32763ms step_avg:94.15ms +step:349/1695 train_time:32857ms step_avg:94.15ms +step:350/1695 train_time:32952ms step_avg:94.15ms +step:351/1695 train_time:33047ms step_avg:94.15ms +step:352/1695 train_time:33140ms step_avg:94.15ms +step:353/1695 train_time:33234ms step_avg:94.15ms +step:354/1695 train_time:33328ms step_avg:94.15ms +step:355/1695 train_time:33423ms step_avg:94.15ms +step:356/1695 train_time:33517ms step_avg:94.15ms +step:357/1695 
train_time:33611ms step_avg:94.15ms +step:358/1695 train_time:33706ms step_avg:94.15ms +step:359/1695 train_time:33800ms step_avg:94.15ms +step:360/1695 train_time:33894ms step_avg:94.15ms +step:361/1695 train_time:33988ms step_avg:94.15ms +step:362/1695 train_time:34083ms step_avg:94.15ms +step:363/1695 train_time:34177ms step_avg:94.15ms +step:364/1695 train_time:34272ms step_avg:94.15ms +step:365/1695 train_time:34366ms step_avg:94.15ms +step:366/1695 train_time:34460ms step_avg:94.15ms +step:367/1695 train_time:34554ms step_avg:94.15ms +step:368/1695 train_time:34650ms step_avg:94.16ms +step:369/1695 train_time:34745ms step_avg:94.16ms +step:370/1695 train_time:34840ms step_avg:94.16ms +step:371/1695 train_time:34934ms step_avg:94.16ms +step:372/1695 train_time:35028ms step_avg:94.16ms +step:373/1695 train_time:35123ms step_avg:94.16ms +step:374/1695 train_time:35217ms step_avg:94.16ms +step:375/1695 train_time:35311ms step_avg:94.16ms +step:375/1695 val_loss:3.8846 train_time:35404ms step_avg:94.41ms +step:376/1695 train_time:35430ms step_avg:94.23ms +step:377/1695 train_time:35510ms step_avg:94.19ms +step:378/1695 train_time:35609ms step_avg:94.20ms +step:379/1695 train_time:35707ms step_avg:94.21ms +step:380/1695 train_time:35803ms step_avg:94.22ms +step:381/1695 train_time:35899ms step_avg:94.22ms +step:382/1695 train_time:35995ms step_avg:94.23ms +step:383/1695 train_time:36091ms step_avg:94.23ms +step:384/1695 train_time:36186ms step_avg:94.23ms +step:385/1695 train_time:36281ms step_avg:94.24ms +step:386/1695 train_time:36377ms step_avg:94.24ms +step:387/1695 train_time:36475ms step_avg:94.25ms +step:388/1695 train_time:36574ms step_avg:94.26ms +step:389/1695 train_time:36672ms step_avg:94.27ms +step:390/1695 train_time:36768ms step_avg:94.28ms +step:391/1695 train_time:36863ms step_avg:94.28ms +step:392/1695 train_time:36959ms step_avg:94.28ms +step:393/1695 train_time:37055ms step_avg:94.29ms +step:394/1695 train_time:37151ms step_avg:94.29ms +step:395/1695 train_time:37246ms step_avg:94.29ms +step:396/1695 train_time:37342ms step_avg:94.30ms +step:397/1695 train_time:37438ms step_avg:94.30ms +step:398/1695 train_time:37536ms step_avg:94.31ms +step:399/1695 train_time:37634ms step_avg:94.32ms +step:400/1695 train_time:37731ms step_avg:94.33ms +step:401/1695 train_time:37828ms step_avg:94.33ms +step:402/1695 train_time:37923ms step_avg:94.34ms +step:403/1695 train_time:38020ms step_avg:94.34ms +step:404/1695 train_time:38116ms step_avg:94.35ms +step:405/1695 train_time:38212ms step_avg:94.35ms +step:406/1695 train_time:38307ms step_avg:94.35ms +step:407/1695 train_time:38403ms step_avg:94.36ms +step:408/1695 train_time:38499ms step_avg:94.36ms +step:409/1695 train_time:38597ms step_avg:94.37ms +step:410/1695 train_time:38693ms step_avg:94.37ms +step:411/1695 train_time:38790ms step_avg:94.38ms +step:412/1695 train_time:38886ms step_avg:94.38ms +step:413/1695 train_time:38982ms step_avg:94.39ms +step:414/1695 train_time:39078ms step_avg:94.39ms +step:415/1695 train_time:39175ms step_avg:94.40ms +step:416/1695 train_time:39271ms step_avg:94.40ms +step:417/1695 train_time:39367ms step_avg:94.41ms +step:418/1695 train_time:39463ms step_avg:94.41ms +step:419/1695 train_time:39560ms step_avg:94.41ms +step:420/1695 train_time:39656ms step_avg:94.42ms +step:421/1695 train_time:39753ms step_avg:94.42ms +step:422/1695 train_time:39849ms step_avg:94.43ms +step:423/1695 train_time:39945ms step_avg:94.43ms +step:424/1695 train_time:40041ms step_avg:94.44ms +step:425/1695 train_time:40138ms 
step_avg:94.44ms +step:426/1695 train_time:40235ms step_avg:94.45ms +step:427/1695 train_time:40331ms step_avg:94.45ms +step:428/1695 train_time:40427ms step_avg:94.46ms +step:429/1695 train_time:40523ms step_avg:94.46ms +step:430/1695 train_time:40620ms step_avg:94.47ms +step:431/1695 train_time:40717ms step_avg:94.47ms +step:432/1695 train_time:40815ms step_avg:94.48ms +step:433/1695 train_time:40912ms step_avg:94.49ms +step:434/1695 train_time:41009ms step_avg:94.49ms +step:435/1695 train_time:41104ms step_avg:94.49ms +step:436/1695 train_time:41200ms step_avg:94.49ms +step:437/1695 train_time:41296ms step_avg:94.50ms +step:438/1695 train_time:41392ms step_avg:94.50ms +step:439/1695 train_time:41488ms step_avg:94.51ms +step:440/1695 train_time:41583ms step_avg:94.51ms +step:441/1695 train_time:41680ms step_avg:94.51ms +step:442/1695 train_time:41778ms step_avg:94.52ms +step:443/1695 train_time:41874ms step_avg:94.52ms +step:444/1695 train_time:41971ms step_avg:94.53ms +step:445/1695 train_time:42067ms step_avg:94.53ms +step:446/1695 train_time:42163ms step_avg:94.53ms +step:447/1695 train_time:42259ms step_avg:94.54ms +step:448/1695 train_time:42356ms step_avg:94.54ms +step:449/1695 train_time:42452ms step_avg:94.55ms +step:450/1695 train_time:42548ms step_avg:94.55ms +step:451/1695 train_time:42643ms step_avg:94.55ms +step:452/1695 train_time:42739ms step_avg:94.56ms +step:453/1695 train_time:42836ms step_avg:94.56ms +step:454/1695 train_time:42933ms step_avg:94.57ms +step:455/1695 train_time:43029ms step_avg:94.57ms +step:456/1695 train_time:43125ms step_avg:94.57ms +step:457/1695 train_time:43220ms step_avg:94.57ms +step:458/1695 train_time:43316ms step_avg:94.58ms +step:459/1695 train_time:43413ms step_avg:94.58ms +step:460/1695 train_time:43509ms step_avg:94.59ms +step:461/1695 train_time:43605ms step_avg:94.59ms +step:462/1695 train_time:43700ms step_avg:94.59ms +step:463/1695 train_time:43796ms step_avg:94.59ms +step:464/1695 train_time:43893ms step_avg:94.60ms +step:465/1695 train_time:43989ms step_avg:94.60ms +step:466/1695 train_time:44085ms step_avg:94.60ms +step:467/1695 train_time:44180ms step_avg:94.60ms +step:468/1695 train_time:44276ms step_avg:94.61ms +step:469/1695 train_time:44372ms step_avg:94.61ms +step:470/1695 train_time:44469ms step_avg:94.61ms +step:471/1695 train_time:44564ms step_avg:94.62ms +step:472/1695 train_time:44661ms step_avg:94.62ms +step:473/1695 train_time:44757ms step_avg:94.62ms +step:474/1695 train_time:44854ms step_avg:94.63ms +step:475/1695 train_time:44951ms step_avg:94.63ms +step:476/1695 train_time:45047ms step_avg:94.64ms +step:477/1695 train_time:45142ms step_avg:94.64ms +step:478/1695 train_time:45238ms step_avg:94.64ms +step:479/1695 train_time:45334ms step_avg:94.64ms +step:480/1695 train_time:45430ms step_avg:94.65ms +step:481/1695 train_time:45527ms step_avg:94.65ms +step:482/1695 train_time:45622ms step_avg:94.65ms +step:483/1695 train_time:45718ms step_avg:94.66ms +step:484/1695 train_time:45816ms step_avg:94.66ms +step:485/1695 train_time:45913ms step_avg:94.67ms +step:486/1695 train_time:46009ms step_avg:94.67ms +step:487/1695 train_time:46105ms step_avg:94.67ms +step:488/1695 train_time:46200ms step_avg:94.67ms +step:489/1695 train_time:46296ms step_avg:94.68ms +step:490/1695 train_time:46393ms step_avg:94.68ms +step:491/1695 train_time:46490ms step_avg:94.68ms +step:492/1695 train_time:46586ms step_avg:94.69ms +step:493/1695 train_time:46681ms step_avg:94.69ms +step:494/1695 train_time:46777ms step_avg:94.69ms +step:495/1695 
train_time:46874ms step_avg:94.69ms +step:496/1695 train_time:46970ms step_avg:94.70ms +step:497/1695 train_time:47066ms step_avg:94.70ms +step:498/1695 train_time:47161ms step_avg:94.70ms +step:499/1695 train_time:47258ms step_avg:94.71ms +step:500/1695 train_time:47356ms step_avg:94.71ms +step:500/1695 val_loss:3.7382 train_time:47450ms step_avg:94.90ms +step:501/1695 train_time:47475ms step_avg:94.76ms +step:502/1695 train_time:47558ms step_avg:94.74ms +step:503/1695 train_time:47660ms step_avg:94.75ms +step:504/1695 train_time:47758ms step_avg:94.76ms +step:505/1695 train_time:47854ms step_avg:94.76ms +step:506/1695 train_time:47950ms step_avg:94.76ms +step:507/1695 train_time:48046ms step_avg:94.76ms +step:508/1695 train_time:48142ms step_avg:94.77ms +step:509/1695 train_time:48238ms step_avg:94.77ms +step:510/1695 train_time:48334ms step_avg:94.77ms +step:511/1695 train_time:48429ms step_avg:94.77ms +step:512/1695 train_time:48526ms step_avg:94.78ms +step:513/1695 train_time:48624ms step_avg:94.78ms +step:514/1695 train_time:48723ms step_avg:94.79ms +step:515/1695 train_time:48821ms step_avg:94.80ms +step:516/1695 train_time:48919ms step_avg:94.80ms +step:517/1695 train_time:49016ms step_avg:94.81ms +step:518/1695 train_time:49112ms step_avg:94.81ms +step:519/1695 train_time:49208ms step_avg:94.81ms +step:520/1695 train_time:49303ms step_avg:94.81ms +step:521/1695 train_time:49399ms step_avg:94.82ms +step:522/1695 train_time:49497ms step_avg:94.82ms +step:523/1695 train_time:49595ms step_avg:94.83ms +step:524/1695 train_time:49692ms step_avg:94.83ms +step:525/1695 train_time:49789ms step_avg:94.84ms +step:526/1695 train_time:49884ms step_avg:94.84ms +step:527/1695 train_time:49981ms step_avg:94.84ms +step:528/1695 train_time:50078ms step_avg:94.85ms +step:529/1695 train_time:50177ms step_avg:94.85ms +step:530/1695 train_time:50272ms step_avg:94.85ms +step:531/1695 train_time:50367ms step_avg:94.85ms +step:532/1695 train_time:50463ms step_avg:94.86ms +step:533/1695 train_time:50560ms step_avg:94.86ms +step:534/1695 train_time:50657ms step_avg:94.86ms +step:535/1695 train_time:50755ms step_avg:94.87ms +step:536/1695 train_time:50851ms step_avg:94.87ms +step:537/1695 train_time:50948ms step_avg:94.87ms +step:538/1695 train_time:51044ms step_avg:94.88ms +step:539/1695 train_time:51140ms step_avg:94.88ms +step:540/1695 train_time:51237ms step_avg:94.88ms +step:541/1695 train_time:51333ms step_avg:94.89ms +step:542/1695 train_time:51429ms step_avg:94.89ms +step:543/1695 train_time:51525ms step_avg:94.89ms +step:544/1695 train_time:51621ms step_avg:94.89ms +step:545/1695 train_time:51718ms step_avg:94.90ms +step:546/1695 train_time:51815ms step_avg:94.90ms +step:547/1695 train_time:51911ms step_avg:94.90ms +step:548/1695 train_time:52007ms step_avg:94.90ms +step:549/1695 train_time:52103ms step_avg:94.91ms +step:550/1695 train_time:52201ms step_avg:94.91ms +step:551/1695 train_time:52297ms step_avg:94.91ms +step:552/1695 train_time:52394ms step_avg:94.92ms +step:553/1695 train_time:52490ms step_avg:94.92ms +step:554/1695 train_time:52586ms step_avg:94.92ms +step:555/1695 train_time:52683ms step_avg:94.92ms +step:556/1695 train_time:52780ms step_avg:94.93ms +step:557/1695 train_time:52878ms step_avg:94.93ms +step:558/1695 train_time:52975ms step_avg:94.94ms +step:559/1695 train_time:53071ms step_avg:94.94ms +step:560/1695 train_time:53167ms step_avg:94.94ms +step:561/1695 train_time:53263ms step_avg:94.94ms +step:562/1695 train_time:53361ms step_avg:94.95ms +step:563/1695 train_time:53458ms 
step_avg:94.95ms +step:564/1695 train_time:53555ms step_avg:94.96ms +step:565/1695 train_time:53651ms step_avg:94.96ms +step:566/1695 train_time:53747ms step_avg:94.96ms +step:567/1695 train_time:53844ms step_avg:94.96ms +step:568/1695 train_time:53942ms step_avg:94.97ms +step:569/1695 train_time:54039ms step_avg:94.97ms +step:570/1695 train_time:54136ms step_avg:94.98ms +step:571/1695 train_time:54233ms step_avg:94.98ms +step:572/1695 train_time:54330ms step_avg:94.98ms +step:573/1695 train_time:54425ms step_avg:94.98ms +step:574/1695 train_time:54522ms step_avg:94.99ms +step:575/1695 train_time:54619ms step_avg:94.99ms +step:576/1695 train_time:54716ms step_avg:94.99ms +step:577/1695 train_time:54812ms step_avg:95.00ms +step:578/1695 train_time:54909ms step_avg:95.00ms +step:579/1695 train_time:55005ms step_avg:95.00ms +step:580/1695 train_time:55101ms step_avg:95.00ms +step:581/1695 train_time:55198ms step_avg:95.01ms +step:582/1695 train_time:55295ms step_avg:95.01ms +step:583/1695 train_time:55391ms step_avg:95.01ms +step:584/1695 train_time:55487ms step_avg:95.01ms +step:585/1695 train_time:55584ms step_avg:95.01ms +step:586/1695 train_time:55681ms step_avg:95.02ms +step:587/1695 train_time:55777ms step_avg:95.02ms +step:588/1695 train_time:55875ms step_avg:95.03ms +step:589/1695 train_time:55972ms step_avg:95.03ms +step:590/1695 train_time:56068ms step_avg:95.03ms +step:591/1695 train_time:56165ms step_avg:95.03ms +step:592/1695 train_time:56262ms step_avg:95.04ms +step:593/1695 train_time:56360ms step_avg:95.04ms +step:594/1695 train_time:56457ms step_avg:95.05ms +step:595/1695 train_time:56554ms step_avg:95.05ms +step:596/1695 train_time:56650ms step_avg:95.05ms +step:597/1695 train_time:56746ms step_avg:95.05ms +step:598/1695 train_time:56842ms step_avg:95.05ms +step:599/1695 train_time:56939ms step_avg:95.06ms +step:600/1695 train_time:57036ms step_avg:95.06ms +step:601/1695 train_time:57133ms step_avg:95.06ms +step:602/1695 train_time:57229ms step_avg:95.06ms +step:603/1695 train_time:57325ms step_avg:95.07ms +step:604/1695 train_time:57421ms step_avg:95.07ms +step:605/1695 train_time:57518ms step_avg:95.07ms +step:606/1695 train_time:57615ms step_avg:95.07ms +step:607/1695 train_time:57712ms step_avg:95.08ms +step:608/1695 train_time:57808ms step_avg:95.08ms +step:609/1695 train_time:57904ms step_avg:95.08ms +step:610/1695 train_time:58000ms step_avg:95.08ms +step:611/1695 train_time:58096ms step_avg:95.08ms +step:612/1695 train_time:58193ms step_avg:95.09ms +step:613/1695 train_time:58289ms step_avg:95.09ms +step:614/1695 train_time:58385ms step_avg:95.09ms +step:615/1695 train_time:58481ms step_avg:95.09ms +step:616/1695 train_time:58578ms step_avg:95.09ms +step:617/1695 train_time:58675ms step_avg:95.10ms +step:618/1695 train_time:58772ms step_avg:95.10ms +step:619/1695 train_time:58868ms step_avg:95.10ms +step:620/1695 train_time:58964ms step_avg:95.10ms +step:621/1695 train_time:59062ms step_avg:95.11ms +step:622/1695 train_time:59158ms step_avg:95.11ms +step:623/1695 train_time:59255ms step_avg:95.11ms +step:624/1695 train_time:59351ms step_avg:95.11ms +step:625/1695 train_time:59448ms step_avg:95.12ms +step:625/1695 val_loss:3.6536 train_time:59542ms step_avg:95.27ms +step:626/1695 train_time:59568ms step_avg:95.16ms +step:627/1695 train_time:59649ms step_avg:95.13ms +step:628/1695 train_time:59750ms step_avg:95.14ms +step:629/1695 train_time:59847ms step_avg:95.15ms +step:630/1695 train_time:59945ms step_avg:95.15ms +step:631/1695 train_time:60041ms step_avg:95.15ms 
+step:632/1695 train_time:60139ms step_avg:95.16ms +step:633/1695 train_time:60236ms step_avg:95.16ms +step:634/1695 train_time:60333ms step_avg:95.16ms +step:635/1695 train_time:60429ms step_avg:95.16ms +step:636/1695 train_time:60527ms step_avg:95.17ms +step:637/1695 train_time:60625ms step_avg:95.17ms +step:638/1695 train_time:60723ms step_avg:95.18ms +step:639/1695 train_time:60823ms step_avg:95.18ms +step:640/1695 train_time:60922ms step_avg:95.19ms +step:641/1695 train_time:61020ms step_avg:95.19ms +step:642/1695 train_time:61118ms step_avg:95.20ms +step:643/1695 train_time:61216ms step_avg:95.20ms +step:644/1695 train_time:61313ms step_avg:95.21ms +step:645/1695 train_time:61410ms step_avg:95.21ms +step:646/1695 train_time:61507ms step_avg:95.21ms +step:647/1695 train_time:61825ms step_avg:95.56ms +step:648/1695 train_time:61921ms step_avg:95.56ms +step:649/1695 train_time:62019ms step_avg:95.56ms +step:650/1695 train_time:62117ms step_avg:95.56ms +step:651/1695 train_time:62213ms step_avg:95.57ms +step:652/1695 train_time:62310ms step_avg:95.57ms +step:653/1695 train_time:62407ms step_avg:95.57ms +step:654/1695 train_time:62503ms step_avg:95.57ms +step:655/1695 train_time:62600ms step_avg:95.57ms +step:656/1695 train_time:62700ms step_avg:95.58ms +step:657/1695 train_time:62802ms step_avg:95.59ms +step:658/1695 train_time:62902ms step_avg:95.60ms +step:659/1695 train_time:63001ms step_avg:95.60ms +step:660/1695 train_time:63099ms step_avg:95.60ms +step:661/1695 train_time:63197ms step_avg:95.61ms +step:662/1695 train_time:63295ms step_avg:95.61ms +step:663/1695 train_time:63392ms step_avg:95.61ms +step:664/1695 train_time:63490ms step_avg:95.62ms +step:665/1695 train_time:63586ms step_avg:95.62ms +step:666/1695 train_time:63684ms step_avg:95.62ms +step:667/1695 train_time:63782ms step_avg:95.63ms +step:668/1695 train_time:63881ms step_avg:95.63ms +step:669/1695 train_time:63979ms step_avg:95.63ms +step:670/1695 train_time:64077ms step_avg:95.64ms +step:671/1695 train_time:64175ms step_avg:95.64ms +step:672/1695 train_time:64273ms step_avg:95.64ms +step:673/1695 train_time:64370ms step_avg:95.65ms +step:674/1695 train_time:64467ms step_avg:95.65ms +step:675/1695 train_time:64563ms step_avg:95.65ms +step:676/1695 train_time:64661ms step_avg:95.65ms +step:677/1695 train_time:64759ms step_avg:95.66ms +step:678/1695 train_time:64858ms step_avg:95.66ms +step:679/1695 train_time:64957ms step_avg:95.67ms +step:680/1695 train_time:65055ms step_avg:95.67ms +step:681/1695 train_time:65153ms step_avg:95.67ms +step:682/1695 train_time:65251ms step_avg:95.68ms +step:683/1695 train_time:65349ms step_avg:95.68ms +step:684/1695 train_time:65447ms step_avg:95.68ms +step:685/1695 train_time:65544ms step_avg:95.68ms +step:686/1695 train_time:65641ms step_avg:95.69ms +step:687/1695 train_time:65739ms step_avg:95.69ms +step:688/1695 train_time:65837ms step_avg:95.69ms +step:689/1695 train_time:65936ms step_avg:95.70ms +step:690/1695 train_time:66035ms step_avg:95.70ms +step:691/1695 train_time:66133ms step_avg:95.71ms +step:692/1695 train_time:66231ms step_avg:95.71ms +step:693/1695 train_time:66329ms step_avg:95.71ms +step:694/1695 train_time:66427ms step_avg:95.72ms +step:695/1695 train_time:66525ms step_avg:95.72ms +step:696/1695 train_time:66622ms step_avg:95.72ms +step:697/1695 train_time:66720ms step_avg:95.72ms +step:698/1695 train_time:66818ms step_avg:95.73ms +step:699/1695 train_time:66917ms step_avg:95.73ms +step:700/1695 train_time:67015ms step_avg:95.74ms +step:701/1695 train_time:67358ms 
step_avg:96.09ms +step:702/1695 train_time:67454ms step_avg:96.09ms +step:703/1695 train_time:67784ms step_avg:96.42ms +step:704/1695 train_time:67879ms step_avg:96.42ms +step:705/1695 train_time:67976ms step_avg:96.42ms +step:706/1695 train_time:68073ms step_avg:96.42ms +step:707/1695 train_time:68170ms step_avg:96.42ms +step:708/1695 train_time:68267ms step_avg:96.42ms +step:709/1695 train_time:68364ms step_avg:96.42ms +step:710/1695 train_time:68461ms step_avg:96.42ms +step:711/1695 train_time:68558ms step_avg:96.42ms +step:712/1695 train_time:68655ms step_avg:96.43ms +step:713/1695 train_time:68757ms step_avg:96.43ms +step:714/1695 train_time:68860ms step_avg:96.44ms +step:715/1695 train_time:68958ms step_avg:96.45ms +step:716/1695 train_time:69056ms step_avg:96.45ms +step:717/1695 train_time:69153ms step_avg:96.45ms +step:718/1695 train_time:69251ms step_avg:96.45ms +step:719/1695 train_time:69348ms step_avg:96.45ms +step:720/1695 train_time:69445ms step_avg:96.45ms +step:721/1695 train_time:69541ms step_avg:96.45ms +step:722/1695 train_time:69639ms step_avg:96.45ms +step:723/1695 train_time:69738ms step_avg:96.46ms +step:724/1695 train_time:69837ms step_avg:96.46ms +step:725/1695 train_time:69936ms step_avg:96.46ms +step:726/1695 train_time:70035ms step_avg:96.47ms +step:727/1695 train_time:70133ms step_avg:96.47ms +step:728/1695 train_time:70231ms step_avg:96.47ms +step:729/1695 train_time:70329ms step_avg:96.47ms +step:730/1695 train_time:70426ms step_avg:96.47ms +step:731/1695 train_time:70523ms step_avg:96.48ms +step:732/1695 train_time:70620ms step_avg:96.48ms +step:733/1695 train_time:70718ms step_avg:96.48ms +step:734/1695 train_time:70816ms step_avg:96.48ms +step:735/1695 train_time:70914ms step_avg:96.48ms +step:736/1695 train_time:71012ms step_avg:96.48ms +step:737/1695 train_time:71110ms step_avg:96.49ms +step:738/1695 train_time:71208ms step_avg:96.49ms +step:739/1695 train_time:71306ms step_avg:96.49ms +step:740/1695 train_time:71402ms step_avg:96.49ms +step:741/1695 train_time:71500ms step_avg:96.49ms +step:742/1695 train_time:71598ms step_avg:96.49ms +step:743/1695 train_time:71696ms step_avg:96.50ms +step:744/1695 train_time:71794ms step_avg:96.50ms +step:745/1695 train_time:71893ms step_avg:96.50ms +step:746/1695 train_time:71990ms step_avg:96.50ms +step:747/1695 train_time:72088ms step_avg:96.50ms +step:748/1695 train_time:72187ms step_avg:96.51ms +step:749/1695 train_time:72285ms step_avg:96.51ms +step:750/1695 train_time:72382ms step_avg:96.51ms +step:750/1695 val_loss:3.5872 train_time:72478ms step_avg:96.64ms +step:751/1695 train_time:72504ms step_avg:96.54ms +step:752/1695 train_time:72586ms step_avg:96.52ms +step:753/1695 train_time:72687ms step_avg:96.53ms +step:754/1695 train_time:72784ms step_avg:96.53ms +step:755/1695 train_time:72882ms step_avg:96.53ms +step:756/1695 train_time:72981ms step_avg:96.54ms +step:757/1695 train_time:73079ms step_avg:96.54ms +step:758/1695 train_time:73177ms step_avg:96.54ms +step:759/1695 train_time:73275ms step_avg:96.54ms +step:760/1695 train_time:73372ms step_avg:96.54ms +step:761/1695 train_time:73471ms step_avg:96.54ms +step:762/1695 train_time:73569ms step_avg:96.55ms +step:763/1695 train_time:73668ms step_avg:96.55ms +step:764/1695 train_time:73766ms step_avg:96.55ms +step:765/1695 train_time:73864ms step_avg:96.55ms +step:766/1695 train_time:73961ms step_avg:96.56ms +step:767/1695 train_time:74060ms step_avg:96.56ms +step:768/1695 train_time:74158ms step_avg:96.56ms +step:769/1695 train_time:74255ms step_avg:96.56ms 
+step:770/1695 train_time:74354ms step_avg:96.56ms +step:771/1695 train_time:74452ms step_avg:96.57ms +step:772/1695 train_time:74551ms step_avg:96.57ms +step:773/1695 train_time:74650ms step_avg:96.57ms +step:774/1695 train_time:74748ms step_avg:96.57ms +step:775/1695 train_time:74846ms step_avg:96.58ms +step:776/1695 train_time:74944ms step_avg:96.58ms +step:777/1695 train_time:75042ms step_avg:96.58ms +step:778/1695 train_time:75360ms step_avg:96.86ms +step:779/1695 train_time:75456ms step_avg:96.86ms +step:780/1695 train_time:75554ms step_avg:96.86ms +step:781/1695 train_time:75650ms step_avg:96.86ms +step:782/1695 train_time:75747ms step_avg:96.86ms +step:783/1695 train_time:75844ms step_avg:96.86ms +step:784/1695 train_time:75942ms step_avg:96.86ms +step:785/1695 train_time:76040ms step_avg:96.87ms +step:786/1695 train_time:76138ms step_avg:96.87ms +step:787/1695 train_time:76236ms step_avg:96.87ms +step:788/1695 train_time:76339ms step_avg:96.88ms +step:789/1695 train_time:76728ms step_avg:97.25ms +step:790/1695 train_time:76779ms step_avg:97.19ms +step:791/1695 train_time:76876ms step_avg:97.19ms +step:792/1695 train_time:76973ms step_avg:97.19ms +step:793/1695 train_time:77070ms step_avg:97.19ms +step:794/1695 train_time:77167ms step_avg:97.19ms +step:795/1695 train_time:77264ms step_avg:97.19ms +step:796/1695 train_time:77362ms step_avg:97.19ms +step:797/1695 train_time:77783ms step_avg:97.59ms +step:798/1695 train_time:77879ms step_avg:97.59ms +step:799/1695 train_time:77976ms step_avg:97.59ms +step:800/1695 train_time:78074ms step_avg:97.59ms +step:801/1695 train_time:78170ms step_avg:97.59ms +step:802/1695 train_time:78513ms step_avg:97.90ms +step:803/1695 train_time:78608ms step_avg:97.89ms +step:804/1695 train_time:78706ms step_avg:97.89ms +step:805/1695 train_time:78803ms step_avg:97.89ms +step:806/1695 train_time:78901ms step_avg:97.89ms +step:807/1695 train_time:78999ms step_avg:97.89ms +step:808/1695 train_time:79096ms step_avg:97.89ms +step:809/1695 train_time:79194ms step_avg:97.89ms +step:810/1695 train_time:79291ms step_avg:97.89ms +step:811/1695 train_time:79388ms step_avg:97.89ms +step:812/1695 train_time:79489ms step_avg:97.89ms +step:813/1695 train_time:79589ms step_avg:97.90ms +step:814/1695 train_time:79686ms step_avg:97.89ms +step:815/1695 train_time:79784ms step_avg:97.89ms +step:816/1695 train_time:79881ms step_avg:97.89ms +step:817/1695 train_time:79979ms step_avg:97.89ms +step:818/1695 train_time:80077ms step_avg:97.89ms +step:819/1695 train_time:80173ms step_avg:97.89ms +step:820/1695 train_time:80271ms step_avg:97.89ms +step:821/1695 train_time:80368ms step_avg:97.89ms +step:822/1695 train_time:80467ms step_avg:97.89ms +step:823/1695 train_time:80565ms step_avg:97.89ms +step:824/1695 train_time:80663ms step_avg:97.89ms +step:825/1695 train_time:80762ms step_avg:97.89ms +step:826/1695 train_time:80860ms step_avg:97.89ms +step:827/1695 train_time:80959ms step_avg:97.89ms +step:828/1695 train_time:81057ms step_avg:97.89ms +step:829/1695 train_time:81154ms step_avg:97.89ms +step:830/1695 train_time:81251ms step_avg:97.89ms +step:831/1695 train_time:81350ms step_avg:97.89ms +step:832/1695 train_time:81448ms step_avg:97.89ms +step:833/1695 train_time:81546ms step_avg:97.89ms +step:834/1695 train_time:81644ms step_avg:97.89ms +step:835/1695 train_time:81742ms step_avg:97.89ms +step:836/1695 train_time:81841ms step_avg:97.90ms +step:837/1695 train_time:81939ms step_avg:97.90ms +step:838/1695 train_time:82038ms step_avg:97.90ms +step:839/1695 train_time:82135ms 
step_avg:97.90ms +step:840/1695 train_time:82233ms step_avg:97.90ms +step:841/1695 train_time:82332ms step_avg:97.90ms +step:842/1695 train_time:82431ms step_avg:97.90ms +step:843/1695 train_time:82529ms step_avg:97.90ms +step:844/1695 train_time:82627ms step_avg:97.90ms +step:845/1695 train_time:82724ms step_avg:97.90ms +step:846/1695 train_time:82822ms step_avg:97.90ms +step:847/1695 train_time:82920ms step_avg:97.90ms +step:848/1695 train_time:83018ms step_avg:97.90ms +step:849/1695 train_time:83116ms step_avg:97.90ms +step:850/1695 train_time:83214ms step_avg:97.90ms +step:851/1695 train_time:83312ms step_avg:97.90ms +step:852/1695 train_time:83410ms step_avg:97.90ms +step:853/1695 train_time:83509ms step_avg:97.90ms +step:854/1695 train_time:83606ms step_avg:97.90ms +step:855/1695 train_time:83704ms step_avg:97.90ms +step:856/1695 train_time:83802ms step_avg:97.90ms +step:857/1695 train_time:83899ms step_avg:97.90ms +step:858/1695 train_time:83998ms step_avg:97.90ms +step:859/1695 train_time:84097ms step_avg:97.90ms +step:860/1695 train_time:84196ms step_avg:97.90ms +step:861/1695 train_time:84294ms step_avg:97.90ms +step:862/1695 train_time:84392ms step_avg:97.90ms +step:863/1695 train_time:84490ms step_avg:97.90ms +step:864/1695 train_time:84589ms step_avg:97.90ms +step:865/1695 train_time:84688ms step_avg:97.90ms +step:866/1695 train_time:84786ms step_avg:97.91ms +step:867/1695 train_time:84884ms step_avg:97.90ms +step:868/1695 train_time:84981ms step_avg:97.90ms +step:869/1695 train_time:85078ms step_avg:97.90ms +step:870/1695 train_time:85177ms step_avg:97.90ms +step:871/1695 train_time:85276ms step_avg:97.91ms +step:872/1695 train_time:85375ms step_avg:97.91ms +step:873/1695 train_time:85473ms step_avg:97.91ms +step:874/1695 train_time:85572ms step_avg:97.91ms +step:875/1695 train_time:85671ms step_avg:97.91ms +step:875/1695 val_loss:3.5420 train_time:85767ms step_avg:98.02ms +step:876/1695 train_time:85793ms step_avg:97.94ms +step:877/1695 train_time:85875ms step_avg:97.92ms +step:878/1695 train_time:85975ms step_avg:97.92ms +step:879/1695 train_time:86074ms step_avg:97.92ms +step:880/1695 train_time:86172ms step_avg:97.92ms +step:881/1695 train_time:86270ms step_avg:97.92ms +step:882/1695 train_time:86369ms step_avg:97.92ms +step:883/1695 train_time:86467ms step_avg:97.92ms +step:884/1695 train_time:86566ms step_avg:97.93ms +step:885/1695 train_time:86664ms step_avg:97.93ms +step:886/1695 train_time:86763ms step_avg:97.93ms +step:887/1695 train_time:86863ms step_avg:97.93ms +step:888/1695 train_time:86964ms step_avg:97.93ms +step:889/1695 train_time:87064ms step_avg:97.93ms +step:890/1695 train_time:87163ms step_avg:97.94ms +step:891/1695 train_time:87262ms step_avg:97.94ms +step:892/1695 train_time:87362ms step_avg:97.94ms +step:893/1695 train_time:87462ms step_avg:97.94ms +step:894/1695 train_time:87562ms step_avg:97.94ms +step:895/1695 train_time:87661ms step_avg:97.94ms +step:896/1695 train_time:87761ms step_avg:97.95ms +step:897/1695 train_time:87861ms step_avg:97.95ms +step:898/1695 train_time:87962ms step_avg:97.95ms +step:899/1695 train_time:88062ms step_avg:97.96ms +step:900/1695 train_time:88162ms step_avg:97.96ms +step:901/1695 train_time:88262ms step_avg:97.96ms +step:902/1695 train_time:88361ms step_avg:97.96ms +step:903/1695 train_time:88461ms step_avg:97.96ms +step:904/1695 train_time:88561ms step_avg:97.97ms +step:905/1695 train_time:88660ms step_avg:97.97ms +step:906/1695 train_time:88760ms step_avg:97.97ms +step:907/1695 train_time:88860ms step_avg:97.97ms 
+step:908/1695 train_time:88960ms step_avg:97.97ms +step:909/1695 train_time:89061ms step_avg:97.98ms +step:910/1695 train_time:89161ms step_avg:97.98ms +step:911/1695 train_time:89262ms step_avg:97.98ms +step:912/1695 train_time:89362ms step_avg:97.98ms +step:913/1695 train_time:89461ms step_avg:97.99ms +step:914/1695 train_time:89561ms step_avg:97.99ms +step:915/1695 train_time:89661ms step_avg:97.99ms +step:916/1695 train_time:89760ms step_avg:97.99ms +step:917/1695 train_time:89860ms step_avg:97.99ms +step:918/1695 train_time:89960ms step_avg:98.00ms +step:919/1695 train_time:90059ms step_avg:98.00ms +step:920/1695 train_time:90159ms step_avg:98.00ms +step:921/1695 train_time:90259ms step_avg:98.00ms +step:922/1695 train_time:90359ms step_avg:98.00ms +step:923/1695 train_time:90459ms step_avg:98.01ms +step:924/1695 train_time:90560ms step_avg:98.01ms +step:925/1695 train_time:90661ms step_avg:98.01ms +step:926/1695 train_time:90760ms step_avg:98.01ms +step:927/1695 train_time:90859ms step_avg:98.01ms +step:928/1695 train_time:90959ms step_avg:98.02ms +step:929/1695 train_time:91059ms step_avg:98.02ms +step:930/1695 train_time:91160ms step_avg:98.02ms +step:931/1695 train_time:91260ms step_avg:98.02ms +step:932/1695 train_time:91362ms step_avg:98.03ms +step:933/1695 train_time:91462ms step_avg:98.03ms +step:934/1695 train_time:91561ms step_avg:98.03ms +step:935/1695 train_time:91661ms step_avg:98.03ms +step:936/1695 train_time:91762ms step_avg:98.04ms +step:937/1695 train_time:91862ms step_avg:98.04ms +step:938/1695 train_time:91961ms step_avg:98.04ms +step:939/1695 train_time:92062ms step_avg:98.04ms +step:940/1695 train_time:92162ms step_avg:98.04ms +step:941/1695 train_time:92262ms step_avg:98.05ms +step:942/1695 train_time:92362ms step_avg:98.05ms +step:943/1695 train_time:92462ms step_avg:98.05ms +step:944/1695 train_time:92561ms step_avg:98.05ms +step:945/1695 train_time:92662ms step_avg:98.05ms +step:946/1695 train_time:92761ms step_avg:98.06ms +step:947/1695 train_time:92861ms step_avg:98.06ms +step:948/1695 train_time:92961ms step_avg:98.06ms +step:949/1695 train_time:93060ms step_avg:98.06ms +step:950/1695 train_time:93160ms step_avg:98.06ms +step:951/1695 train_time:93260ms step_avg:98.07ms +step:952/1695 train_time:93360ms step_avg:98.07ms +step:953/1695 train_time:93460ms step_avg:98.07ms +step:954/1695 train_time:93561ms step_avg:98.07ms +step:955/1695 train_time:93662ms step_avg:98.08ms +step:956/1695 train_time:93762ms step_avg:98.08ms +step:957/1695 train_time:93861ms step_avg:98.08ms +step:958/1695 train_time:93961ms step_avg:98.08ms +step:959/1695 train_time:94060ms step_avg:98.08ms +step:960/1695 train_time:94160ms step_avg:98.08ms +step:961/1695 train_time:94261ms step_avg:98.09ms +step:962/1695 train_time:94362ms step_avg:98.09ms +step:963/1695 train_time:94462ms step_avg:98.09ms +step:964/1695 train_time:94563ms step_avg:98.09ms +step:965/1695 train_time:94663ms step_avg:98.10ms +step:966/1695 train_time:94762ms step_avg:98.10ms +step:967/1695 train_time:94862ms step_avg:98.10ms +step:968/1695 train_time:94962ms step_avg:98.10ms +step:969/1695 train_time:95061ms step_avg:98.10ms +step:970/1695 train_time:95160ms step_avg:98.10ms +step:971/1695 train_time:95260ms step_avg:98.10ms +step:972/1695 train_time:95359ms step_avg:98.11ms +step:973/1695 train_time:95459ms step_avg:98.11ms +step:974/1695 train_time:95559ms step_avg:98.11ms +step:975/1695 train_time:95659ms step_avg:98.11ms +step:976/1695 train_time:95760ms step_avg:98.11ms +step:977/1695 train_time:95860ms 
step_avg:98.12ms +step:978/1695 train_time:95960ms step_avg:98.12ms +step:979/1695 train_time:96060ms step_avg:98.12ms +step:980/1695 train_time:96160ms step_avg:98.12ms +step:981/1695 train_time:96259ms step_avg:98.12ms +step:982/1695 train_time:96360ms step_avg:98.13ms +step:983/1695 train_time:96460ms step_avg:98.13ms +step:984/1695 train_time:96561ms step_avg:98.13ms +step:985/1695 train_time:96661ms step_avg:98.13ms +step:986/1695 train_time:96762ms step_avg:98.14ms +step:987/1695 train_time:96862ms step_avg:98.14ms +step:988/1695 train_time:96961ms step_avg:98.14ms +step:989/1695 train_time:97061ms step_avg:98.14ms +step:990/1695 train_time:97161ms step_avg:98.14ms +step:991/1695 train_time:97262ms step_avg:98.14ms +step:992/1695 train_time:97361ms step_avg:98.15ms +step:993/1695 train_time:97460ms step_avg:98.15ms +step:994/1695 train_time:97561ms step_avg:98.15ms +step:995/1695 train_time:97662ms step_avg:98.15ms +step:996/1695 train_time:97762ms step_avg:98.15ms +step:997/1695 train_time:97862ms step_avg:98.16ms +step:998/1695 train_time:97961ms step_avg:98.16ms +step:999/1695 train_time:98061ms step_avg:98.16ms +step:1000/1695 train_time:98160ms step_avg:98.16ms +step:1000/1695 val_loss:3.4970 train_time:98257ms step_avg:98.26ms +step:1001/1695 train_time:98283ms step_avg:98.19ms +step:1002/1695 train_time:98371ms step_avg:98.17ms +step:1003/1695 train_time:98472ms step_avg:98.18ms +step:1004/1695 train_time:98572ms step_avg:98.18ms +step:1005/1695 train_time:98672ms step_avg:98.18ms +step:1006/1695 train_time:98772ms step_avg:98.18ms +step:1007/1695 train_time:98871ms step_avg:98.18ms +step:1008/1695 train_time:98970ms step_avg:98.18ms +step:1009/1695 train_time:99069ms step_avg:98.19ms +step:1010/1695 train_time:99168ms step_avg:98.19ms +step:1011/1695 train_time:99271ms step_avg:98.19ms +step:1012/1695 train_time:99373ms step_avg:98.20ms +step:1013/1695 train_time:99475ms step_avg:98.20ms +step:1014/1695 train_time:99575ms step_avg:98.20ms +step:1015/1695 train_time:99675ms step_avg:98.20ms +step:1016/1695 train_time:99774ms step_avg:98.20ms +step:1017/1695 train_time:99874ms step_avg:98.20ms +step:1018/1695 train_time:99972ms step_avg:98.20ms +step:1019/1695 train_time:100072ms step_avg:98.21ms +step:1020/1695 train_time:100172ms step_avg:98.21ms +step:1021/1695 train_time:100274ms step_avg:98.21ms +step:1022/1695 train_time:100375ms step_avg:98.21ms +step:1023/1695 train_time:100476ms step_avg:98.22ms +step:1024/1695 train_time:100578ms step_avg:98.22ms +step:1025/1695 train_time:100677ms step_avg:98.22ms +step:1026/1695 train_time:100778ms step_avg:98.22ms +step:1027/1695 train_time:100877ms step_avg:98.23ms +step:1028/1695 train_time:100978ms step_avg:98.23ms +step:1029/1695 train_time:101079ms step_avg:98.23ms +step:1030/1695 train_time:101179ms step_avg:98.23ms +step:1031/1695 train_time:101279ms step_avg:98.23ms +step:1032/1695 train_time:101378ms step_avg:98.23ms +step:1033/1695 train_time:101478ms step_avg:98.24ms +step:1034/1695 train_time:101577ms step_avg:98.24ms +step:1035/1695 train_time:101677ms step_avg:98.24ms +step:1036/1695 train_time:101777ms step_avg:98.24ms +step:1037/1695 train_time:101877ms step_avg:98.24ms +step:1038/1695 train_time:101977ms step_avg:98.24ms +step:1039/1695 train_time:102077ms step_avg:98.25ms +step:1040/1695 train_time:102178ms step_avg:98.25ms +step:1041/1695 train_time:102278ms step_avg:98.25ms +step:1042/1695 train_time:102377ms step_avg:98.25ms +step:1043/1695 train_time:102477ms step_avg:98.25ms +step:1044/1695 
train_time:102577ms step_avg:98.25ms +step:1045/1695 train_time:102677ms step_avg:98.26ms +step:1046/1695 train_time:102778ms step_avg:98.26ms +step:1047/1695 train_time:102878ms step_avg:98.26ms +step:1048/1695 train_time:102978ms step_avg:98.26ms +step:1049/1695 train_time:103078ms step_avg:98.26ms +step:1050/1695 train_time:103178ms step_avg:98.26ms +step:1051/1695 train_time:103277ms step_avg:98.27ms +step:1052/1695 train_time:103377ms step_avg:98.27ms +step:1053/1695 train_time:103477ms step_avg:98.27ms +step:1054/1695 train_time:103577ms step_avg:98.27ms +step:1055/1695 train_time:103677ms step_avg:98.27ms +step:1056/1695 train_time:103776ms step_avg:98.27ms +step:1057/1695 train_time:103876ms step_avg:98.27ms +step:1058/1695 train_time:103977ms step_avg:98.28ms +step:1059/1695 train_time:104077ms step_avg:98.28ms +step:1060/1695 train_time:104177ms step_avg:98.28ms +step:1061/1695 train_time:104277ms step_avg:98.28ms +step:1062/1695 train_time:104377ms step_avg:98.28ms +step:1063/1695 train_time:104477ms step_avg:98.28ms +step:1064/1695 train_time:104577ms step_avg:98.29ms +step:1065/1695 train_time:104677ms step_avg:98.29ms +step:1066/1695 train_time:104776ms step_avg:98.29ms +step:1067/1695 train_time:104877ms step_avg:98.29ms +step:1068/1695 train_time:104976ms step_avg:98.29ms +step:1069/1695 train_time:105077ms step_avg:98.29ms +step:1070/1695 train_time:105178ms step_avg:98.30ms +step:1071/1695 train_time:105278ms step_avg:98.30ms +step:1072/1695 train_time:105378ms step_avg:98.30ms +step:1073/1695 train_time:105478ms step_avg:98.30ms +step:1074/1695 train_time:105578ms step_avg:98.30ms +step:1075/1695 train_time:105677ms step_avg:98.30ms +step:1076/1695 train_time:105777ms step_avg:98.31ms +step:1077/1695 train_time:105877ms step_avg:98.31ms +step:1078/1695 train_time:105977ms step_avg:98.31ms +step:1079/1695 train_time:106079ms step_avg:98.31ms +step:1080/1695 train_time:106178ms step_avg:98.31ms +step:1081/1695 train_time:106278ms step_avg:98.31ms +step:1082/1695 train_time:106378ms step_avg:98.32ms +step:1083/1695 train_time:106478ms step_avg:98.32ms +step:1084/1695 train_time:106577ms step_avg:98.32ms +step:1085/1695 train_time:106677ms step_avg:98.32ms +step:1086/1695 train_time:106777ms step_avg:98.32ms +step:1087/1695 train_time:106877ms step_avg:98.32ms +step:1088/1695 train_time:106976ms step_avg:98.32ms +step:1089/1695 train_time:107077ms step_avg:98.33ms +step:1090/1695 train_time:107177ms step_avg:98.33ms +step:1091/1695 train_time:107277ms step_avg:98.33ms +step:1092/1695 train_time:107378ms step_avg:98.33ms +step:1093/1695 train_time:107477ms step_avg:98.33ms +step:1094/1695 train_time:107578ms step_avg:98.33ms +step:1095/1695 train_time:107678ms step_avg:98.34ms +step:1096/1695 train_time:107778ms step_avg:98.34ms +step:1097/1695 train_time:107877ms step_avg:98.34ms +step:1098/1695 train_time:107978ms step_avg:98.34ms +step:1099/1695 train_time:108077ms step_avg:98.34ms +step:1100/1695 train_time:108177ms step_avg:98.34ms +step:1101/1695 train_time:108277ms step_avg:98.34ms +step:1102/1695 train_time:108377ms step_avg:98.35ms +step:1103/1695 train_time:108477ms step_avg:98.35ms +step:1104/1695 train_time:108576ms step_avg:98.35ms +step:1105/1695 train_time:108677ms step_avg:98.35ms +step:1106/1695 train_time:108777ms step_avg:98.35ms +step:1107/1695 train_time:108877ms step_avg:98.35ms +step:1108/1695 train_time:108977ms step_avg:98.36ms +step:1109/1695 train_time:109077ms step_avg:98.36ms +step:1110/1695 train_time:109178ms step_avg:98.36ms +step:1111/1695 
train_time:109279ms step_avg:98.36ms +step:1112/1695 train_time:109379ms step_avg:98.36ms +step:1113/1695 train_time:109479ms step_avg:98.36ms +step:1114/1695 train_time:109579ms step_avg:98.37ms +step:1115/1695 train_time:109678ms step_avg:98.37ms +step:1116/1695 train_time:109778ms step_avg:98.37ms +step:1117/1695 train_time:109878ms step_avg:98.37ms +step:1118/1695 train_time:109977ms step_avg:98.37ms +step:1119/1695 train_time:110077ms step_avg:98.37ms +step:1120/1695 train_time:110177ms step_avg:98.37ms +step:1121/1695 train_time:110277ms step_avg:98.37ms +step:1122/1695 train_time:110378ms step_avg:98.38ms +step:1123/1695 train_time:110479ms step_avg:98.38ms +step:1124/1695 train_time:110578ms step_avg:98.38ms +step:1125/1695 train_time:110679ms step_avg:98.38ms +step:1125/1695 val_loss:3.4440 train_time:110776ms step_avg:98.47ms +step:1126/1695 train_time:110802ms step_avg:98.40ms +step:1127/1695 train_time:110888ms step_avg:98.39ms +step:1128/1695 train_time:110993ms step_avg:98.40ms +step:1129/1695 train_time:111094ms step_avg:98.40ms +step:1130/1695 train_time:111193ms step_avg:98.40ms +step:1131/1695 train_time:111293ms step_avg:98.40ms +step:1132/1695 train_time:111392ms step_avg:98.40ms +step:1133/1695 train_time:111493ms step_avg:98.40ms +step:1134/1695 train_time:111593ms step_avg:98.41ms +step:1135/1695 train_time:111693ms step_avg:98.41ms +step:1136/1695 train_time:111793ms step_avg:98.41ms +step:1137/1695 train_time:111896ms step_avg:98.41ms +step:1138/1695 train_time:111998ms step_avg:98.42ms +step:1139/1695 train_time:112099ms step_avg:98.42ms +step:1140/1695 train_time:112199ms step_avg:98.42ms +step:1141/1695 train_time:112299ms step_avg:98.42ms +step:1142/1695 train_time:112399ms step_avg:98.42ms +step:1143/1695 train_time:112499ms step_avg:98.42ms +step:1144/1695 train_time:112600ms step_avg:98.43ms +step:1145/1695 train_time:112700ms step_avg:98.43ms +step:1146/1695 train_time:112801ms step_avg:98.43ms +step:1147/1695 train_time:112902ms step_avg:98.43ms +step:1148/1695 train_time:113004ms step_avg:98.44ms +step:1149/1695 train_time:113105ms step_avg:98.44ms +step:1150/1695 train_time:113205ms step_avg:98.44ms +step:1151/1695 train_time:113306ms step_avg:98.44ms +step:1152/1695 train_time:113407ms step_avg:98.44ms +step:1153/1695 train_time:113508ms step_avg:98.45ms +step:1154/1695 train_time:113609ms step_avg:98.45ms +step:1155/1695 train_time:113710ms step_avg:98.45ms +step:1156/1695 train_time:113811ms step_avg:98.45ms +step:1157/1695 train_time:113914ms step_avg:98.46ms +step:1158/1695 train_time:114015ms step_avg:98.46ms +step:1159/1695 train_time:114116ms step_avg:98.46ms +step:1160/1695 train_time:114216ms step_avg:98.46ms +step:1161/1695 train_time:114317ms step_avg:98.46ms +step:1162/1695 train_time:114416ms step_avg:98.47ms +step:1163/1695 train_time:114519ms step_avg:98.47ms +step:1164/1695 train_time:114619ms step_avg:98.47ms +step:1165/1695 train_time:114719ms step_avg:98.47ms +step:1166/1695 train_time:114819ms step_avg:98.47ms +step:1167/1695 train_time:114919ms step_avg:98.47ms +step:1168/1695 train_time:115020ms step_avg:98.48ms +step:1169/1695 train_time:115120ms step_avg:98.48ms +step:1170/1695 train_time:115220ms step_avg:98.48ms +step:1171/1695 train_time:115321ms step_avg:98.48ms +step:1172/1695 train_time:115424ms step_avg:98.48ms +step:1173/1695 train_time:115525ms step_avg:98.49ms +step:1174/1695 train_time:115626ms step_avg:98.49ms +step:1175/1695 train_time:115727ms step_avg:98.49ms +step:1176/1695 train_time:115828ms step_avg:98.49ms 
+step:1177/1695 train_time:115930ms step_avg:98.50ms +step:1178/1695 train_time:116031ms step_avg:98.50ms +step:1179/1695 train_time:116135ms step_avg:98.50ms +step:1180/1695 train_time:116235ms step_avg:98.50ms +step:1181/1695 train_time:116335ms step_avg:98.51ms +step:1182/1695 train_time:116437ms step_avg:98.51ms +step:1183/1695 train_time:116536ms step_avg:98.51ms +step:1184/1695 train_time:116637ms step_avg:98.51ms +step:1185/1695 train_time:116738ms step_avg:98.51ms +step:1186/1695 train_time:116838ms step_avg:98.51ms +step:1187/1695 train_time:116938ms step_avg:98.52ms +step:1188/1695 train_time:117039ms step_avg:98.52ms +step:1189/1695 train_time:117140ms step_avg:98.52ms +step:1190/1695 train_time:117240ms step_avg:98.52ms +step:1191/1695 train_time:117342ms step_avg:98.52ms +step:1192/1695 train_time:117442ms step_avg:98.53ms +step:1193/1695 train_time:117543ms step_avg:98.53ms +step:1194/1695 train_time:117643ms step_avg:98.53ms +step:1195/1695 train_time:117744ms step_avg:98.53ms +step:1196/1695 train_time:117845ms step_avg:98.53ms +step:1197/1695 train_time:117946ms step_avg:98.53ms +step:1198/1695 train_time:118048ms step_avg:98.54ms +step:1199/1695 train_time:118150ms step_avg:98.54ms +step:1200/1695 train_time:118252ms step_avg:98.54ms +step:1201/1695 train_time:118354ms step_avg:98.55ms +step:1202/1695 train_time:118456ms step_avg:98.55ms +step:1203/1695 train_time:118556ms step_avg:98.55ms +step:1204/1695 train_time:118656ms step_avg:98.55ms +step:1205/1695 train_time:118756ms step_avg:98.55ms +step:1206/1695 train_time:118856ms step_avg:98.55ms +step:1207/1695 train_time:118957ms step_avg:98.56ms +step:1208/1695 train_time:119057ms step_avg:98.56ms +step:1209/1695 train_time:119158ms step_avg:98.56ms +step:1210/1695 train_time:119258ms step_avg:98.56ms +step:1211/1695 train_time:119359ms step_avg:98.56ms +step:1212/1695 train_time:119459ms step_avg:98.56ms +step:1213/1695 train_time:119560ms step_avg:98.57ms +step:1214/1695 train_time:119659ms step_avg:98.57ms +step:1215/1695 train_time:119760ms step_avg:98.57ms +step:1216/1695 train_time:119862ms step_avg:98.57ms +step:1217/1695 train_time:119962ms step_avg:98.57ms +step:1218/1695 train_time:120063ms step_avg:98.57ms +step:1219/1695 train_time:120163ms step_avg:98.58ms +step:1220/1695 train_time:120265ms step_avg:98.58ms +step:1221/1695 train_time:120366ms step_avg:98.58ms +step:1222/1695 train_time:120467ms step_avg:98.58ms +step:1223/1695 train_time:120570ms step_avg:98.59ms +step:1224/1695 train_time:120671ms step_avg:98.59ms +step:1225/1695 train_time:120773ms step_avg:98.59ms +step:1226/1695 train_time:120874ms step_avg:98.59ms +step:1227/1695 train_time:120975ms step_avg:98.59ms +step:1228/1695 train_time:121076ms step_avg:98.60ms +step:1229/1695 train_time:121176ms step_avg:98.60ms +step:1230/1695 train_time:121277ms step_avg:98.60ms +step:1231/1695 train_time:121377ms step_avg:98.60ms +step:1232/1695 train_time:121478ms step_avg:98.60ms +step:1233/1695 train_time:121578ms step_avg:98.60ms +step:1234/1695 train_time:121680ms step_avg:98.61ms +step:1235/1695 train_time:121779ms step_avg:98.61ms +step:1236/1695 train_time:121880ms step_avg:98.61ms +step:1237/1695 train_time:121980ms step_avg:98.61ms +step:1238/1695 train_time:122080ms step_avg:98.61ms +step:1239/1695 train_time:122180ms step_avg:98.61ms +step:1240/1695 train_time:122282ms step_avg:98.61ms +step:1241/1695 train_time:122382ms step_avg:98.62ms +step:1242/1695 train_time:122483ms step_avg:98.62ms +step:1243/1695 train_time:122584ms step_avg:98.62ms 
+step:1244/1695 train_time:122685ms step_avg:98.62ms +step:1245/1695 train_time:122786ms step_avg:98.62ms +step:1246/1695 train_time:122888ms step_avg:98.63ms +step:1247/1695 train_time:122988ms step_avg:98.63ms +step:1248/1695 train_time:123090ms step_avg:98.63ms +step:1249/1695 train_time:123192ms step_avg:98.63ms +step:1250/1695 train_time:123293ms step_avg:98.63ms +step:1250/1695 val_loss:3.3986 train_time:123392ms step_avg:98.71ms +step:1251/1695 train_time:123417ms step_avg:98.65ms +step:1252/1695 train_time:123507ms step_avg:98.65ms +step:1253/1695 train_time:123608ms step_avg:98.65ms +step:1254/1695 train_time:123710ms step_avg:98.65ms +step:1255/1695 train_time:123811ms step_avg:98.65ms +step:1256/1695 train_time:123911ms step_avg:98.66ms +step:1257/1695 train_time:124011ms step_avg:98.66ms +step:1258/1695 train_time:124112ms step_avg:98.66ms +step:1259/1695 train_time:124212ms step_avg:98.66ms +step:1260/1695 train_time:124313ms step_avg:98.66ms +step:1261/1695 train_time:124417ms step_avg:98.67ms +step:1262/1695 train_time:124520ms step_avg:98.67ms +step:1263/1695 train_time:124621ms step_avg:98.67ms +step:1264/1695 train_time:124720ms step_avg:98.67ms +step:1265/1695 train_time:124821ms step_avg:98.67ms +step:1266/1695 train_time:124921ms step_avg:98.67ms +step:1267/1695 train_time:125021ms step_avg:98.67ms +step:1268/1695 train_time:125121ms step_avg:98.68ms +step:1269/1695 train_time:125221ms step_avg:98.68ms +step:1270/1695 train_time:125321ms step_avg:98.68ms +step:1271/1695 train_time:125422ms step_avg:98.68ms +step:1272/1695 train_time:125523ms step_avg:98.68ms +step:1273/1695 train_time:125625ms step_avg:98.68ms +step:1274/1695 train_time:125727ms step_avg:98.69ms +step:1275/1695 train_time:125828ms step_avg:98.69ms +step:1276/1695 train_time:125929ms step_avg:98.69ms +step:1277/1695 train_time:126030ms step_avg:98.69ms +step:1278/1695 train_time:126130ms step_avg:98.69ms +step:1279/1695 train_time:126232ms step_avg:98.70ms +step:1280/1695 train_time:126334ms step_avg:98.70ms +step:1281/1695 train_time:126438ms step_avg:98.70ms +step:1282/1695 train_time:126539ms step_avg:98.70ms +step:1283/1695 train_time:126640ms step_avg:98.71ms +step:1284/1695 train_time:126740ms step_avg:98.71ms +step:1285/1695 train_time:126840ms step_avg:98.71ms +step:1286/1695 train_time:126939ms step_avg:98.71ms +step:1287/1695 train_time:127040ms step_avg:98.71ms +step:1288/1695 train_time:127140ms step_avg:98.71ms +step:1289/1695 train_time:127242ms step_avg:98.71ms +step:1290/1695 train_time:127342ms step_avg:98.71ms +step:1291/1695 train_time:127443ms step_avg:98.72ms +step:1292/1695 train_time:127543ms step_avg:98.72ms +step:1293/1695 train_time:127644ms step_avg:98.72ms +step:1294/1695 train_time:127745ms step_avg:98.72ms +step:1295/1695 train_time:127846ms step_avg:98.72ms +step:1296/1695 train_time:127947ms step_avg:98.72ms +step:1297/1695 train_time:128047ms step_avg:98.73ms +step:1298/1695 train_time:128148ms step_avg:98.73ms +step:1299/1695 train_time:128249ms step_avg:98.73ms +step:1300/1695 train_time:128349ms step_avg:98.73ms +step:1301/1695 train_time:128450ms step_avg:98.73ms +step:1302/1695 train_time:128552ms step_avg:98.73ms +step:1303/1695 train_time:128654ms step_avg:98.74ms +step:1304/1695 train_time:128756ms step_avg:98.74ms +step:1305/1695 train_time:128857ms step_avg:98.74ms +step:1306/1695 train_time:128958ms step_avg:98.74ms +step:1307/1695 train_time:129058ms step_avg:98.74ms +step:1308/1695 train_time:129158ms step_avg:98.74ms +step:1309/1695 train_time:129260ms 
step_avg:98.75ms +step:1310/1695 train_time:129360ms step_avg:98.75ms +step:1311/1695 train_time:129461ms step_avg:98.75ms +step:1312/1695 train_time:129561ms step_avg:98.75ms +step:1313/1695 train_time:129662ms step_avg:98.75ms +step:1314/1695 train_time:129762ms step_avg:98.75ms +step:1315/1695 train_time:129863ms step_avg:98.76ms +step:1316/1695 train_time:129963ms step_avg:98.76ms +step:1317/1695 train_time:130064ms step_avg:98.76ms +step:1318/1695 train_time:130164ms step_avg:98.76ms +step:1319/1695 train_time:130265ms step_avg:98.76ms +step:1320/1695 train_time:130366ms step_avg:98.76ms +step:1321/1695 train_time:130468ms step_avg:98.76ms +step:1322/1695 train_time:130568ms step_avg:98.77ms +step:1323/1695 train_time:130669ms step_avg:98.77ms +step:1324/1695 train_time:130771ms step_avg:98.77ms +step:1325/1695 train_time:130872ms step_avg:98.77ms +step:1326/1695 train_time:130973ms step_avg:98.77ms +step:1327/1695 train_time:131076ms step_avg:98.78ms +step:1328/1695 train_time:131177ms step_avg:98.78ms +step:1329/1695 train_time:131278ms step_avg:98.78ms +step:1330/1695 train_time:131378ms step_avg:98.78ms +step:1331/1695 train_time:131479ms step_avg:98.78ms +step:1332/1695 train_time:131579ms step_avg:98.78ms +step:1333/1695 train_time:131681ms step_avg:98.79ms +step:1334/1695 train_time:131780ms step_avg:98.79ms +step:1335/1695 train_time:131880ms step_avg:98.79ms +step:1336/1695 train_time:131982ms step_avg:98.79ms +step:1337/1695 train_time:132082ms step_avg:98.79ms +step:1338/1695 train_time:132182ms step_avg:98.79ms +step:1339/1695 train_time:132282ms step_avg:98.79ms +step:1340/1695 train_time:132383ms step_avg:98.79ms +step:1341/1695 train_time:132485ms step_avg:98.80ms +step:1342/1695 train_time:132585ms step_avg:98.80ms +step:1343/1695 train_time:132684ms step_avg:98.80ms +step:1344/1695 train_time:132784ms step_avg:98.80ms +step:1345/1695 train_time:132884ms step_avg:98.80ms +step:1346/1695 train_time:132986ms step_avg:98.80ms +step:1347/1695 train_time:133087ms step_avg:98.80ms +step:1348/1695 train_time:133188ms step_avg:98.80ms +step:1349/1695 train_time:133288ms step_avg:98.81ms +step:1350/1695 train_time:133390ms step_avg:98.81ms +step:1351/1695 train_time:133491ms step_avg:98.81ms +step:1352/1695 train_time:133593ms step_avg:98.81ms +step:1353/1695 train_time:133695ms step_avg:98.81ms +step:1354/1695 train_time:133796ms step_avg:98.82ms +step:1355/1695 train_time:133897ms step_avg:98.82ms +step:1356/1695 train_time:133998ms step_avg:98.82ms +step:1357/1695 train_time:134098ms step_avg:98.82ms +step:1358/1695 train_time:134200ms step_avg:98.82ms +step:1359/1695 train_time:134301ms step_avg:98.82ms +step:1360/1695 train_time:134402ms step_avg:98.83ms +step:1361/1695 train_time:134502ms step_avg:98.83ms +step:1362/1695 train_time:134601ms step_avg:98.83ms +step:1363/1695 train_time:134702ms step_avg:98.83ms +step:1364/1695 train_time:134803ms step_avg:98.83ms +step:1365/1695 train_time:134905ms step_avg:98.83ms +step:1366/1695 train_time:135006ms step_avg:98.83ms +step:1367/1695 train_time:135108ms step_avg:98.84ms +step:1368/1695 train_time:135209ms step_avg:98.84ms +step:1369/1695 train_time:135310ms step_avg:98.84ms +step:1370/1695 train_time:135411ms step_avg:98.84ms +step:1371/1695 train_time:135513ms step_avg:98.84ms +step:1372/1695 train_time:135616ms step_avg:98.85ms +step:1373/1695 train_time:135717ms step_avg:98.85ms +step:1374/1695 train_time:135819ms step_avg:98.85ms +step:1375/1695 train_time:135919ms step_avg:98.85ms +step:1375/1695 val_loss:3.3581 
train_time:136018ms step_avg:98.92ms +step:1376/1695 train_time:136044ms step_avg:98.87ms +step:1377/1695 train_time:136129ms step_avg:98.86ms +step:1378/1695 train_time:136230ms step_avg:98.86ms +step:1379/1695 train_time:136332ms step_avg:98.86ms +step:1380/1695 train_time:136435ms step_avg:98.87ms +step:1381/1695 train_time:136535ms step_avg:98.87ms +step:1382/1695 train_time:136634ms step_avg:98.87ms +step:1383/1695 train_time:136734ms step_avg:98.87ms +step:1384/1695 train_time:136834ms step_avg:98.87ms +step:1385/1695 train_time:136936ms step_avg:98.87ms +step:1386/1695 train_time:137039ms step_avg:98.87ms +step:1387/1695 train_time:137141ms step_avg:98.88ms +step:1388/1695 train_time:137242ms step_avg:98.88ms +step:1389/1695 train_time:137344ms step_avg:98.88ms +step:1390/1695 train_time:137446ms step_avg:98.88ms +step:1391/1695 train_time:137548ms step_avg:98.88ms +step:1392/1695 train_time:137650ms step_avg:98.89ms +step:1393/1695 train_time:137753ms step_avg:98.89ms +step:1394/1695 train_time:137855ms step_avg:98.89ms +step:1395/1695 train_time:137956ms step_avg:98.89ms +step:1396/1695 train_time:138059ms step_avg:98.90ms +step:1397/1695 train_time:138162ms step_avg:98.90ms +step:1398/1695 train_time:138264ms step_avg:98.90ms +step:1399/1695 train_time:138366ms step_avg:98.90ms +step:1400/1695 train_time:138469ms step_avg:98.91ms +step:1401/1695 train_time:138570ms step_avg:98.91ms +step:1402/1695 train_time:138673ms step_avg:98.91ms +step:1403/1695 train_time:138775ms step_avg:98.91ms +step:1404/1695 train_time:138877ms step_avg:98.92ms +step:1405/1695 train_time:138979ms step_avg:98.92ms +step:1406/1695 train_time:139082ms step_avg:98.92ms +step:1407/1695 train_time:139183ms step_avg:98.92ms +step:1408/1695 train_time:139283ms step_avg:98.92ms +step:1409/1695 train_time:139387ms step_avg:98.93ms +step:1410/1695 train_time:139488ms step_avg:98.93ms +step:1411/1695 train_time:139589ms step_avg:98.93ms +step:1412/1695 train_time:139694ms step_avg:98.93ms +step:1413/1695 train_time:139796ms step_avg:98.94ms +step:1414/1695 train_time:139897ms step_avg:98.94ms +step:1415/1695 train_time:139999ms step_avg:98.94ms +step:1416/1695 train_time:140100ms step_avg:98.94ms +step:1417/1695 train_time:140200ms step_avg:98.94ms +step:1418/1695 train_time:140301ms step_avg:98.94ms +step:1419/1695 train_time:140403ms step_avg:98.94ms +step:1420/1695 train_time:140504ms step_avg:98.95ms +step:1421/1695 train_time:140606ms step_avg:98.95ms +step:1422/1695 train_time:140708ms step_avg:98.95ms +step:1423/1695 train_time:140812ms step_avg:98.95ms +step:1424/1695 train_time:140914ms step_avg:98.96ms +step:1425/1695 train_time:141015ms step_avg:98.96ms +step:1426/1695 train_time:141119ms step_avg:98.96ms +step:1427/1695 train_time:141220ms step_avg:98.96ms +step:1428/1695 train_time:141322ms step_avg:98.97ms +step:1429/1695 train_time:141424ms step_avg:98.97ms +step:1430/1695 train_time:141525ms step_avg:98.97ms +step:1431/1695 train_time:141627ms step_avg:98.97ms +step:1432/1695 train_time:141728ms step_avg:98.97ms +step:1433/1695 train_time:141831ms step_avg:98.98ms +step:1434/1695 train_time:141932ms step_avg:98.98ms +step:1435/1695 train_time:142035ms step_avg:98.98ms +step:1436/1695 train_time:142137ms step_avg:98.98ms +step:1437/1695 train_time:142239ms step_avg:98.98ms +step:1438/1695 train_time:142339ms step_avg:98.98ms +step:1439/1695 train_time:142442ms step_avg:98.99ms +step:1440/1695 train_time:142546ms step_avg:98.99ms +step:1441/1695 train_time:142648ms step_avg:98.99ms +step:1442/1695 
train_time:142749ms step_avg:98.99ms +step:1443/1695 train_time:142850ms step_avg:99.00ms +step:1444/1695 train_time:142952ms step_avg:99.00ms +step:1445/1695 train_time:143054ms step_avg:99.00ms +step:1446/1695 train_time:143155ms step_avg:99.00ms +step:1447/1695 train_time:143256ms step_avg:99.00ms +step:1448/1695 train_time:143361ms step_avg:99.01ms +step:1449/1695 train_time:143461ms step_avg:99.01ms +step:1450/1695 train_time:143563ms step_avg:99.01ms +step:1451/1695 train_time:143665ms step_avg:99.01ms +step:1452/1695 train_time:143768ms step_avg:99.01ms +step:1453/1695 train_time:143869ms step_avg:99.02ms +step:1454/1695 train_time:143972ms step_avg:99.02ms +step:1455/1695 train_time:144075ms step_avg:99.02ms +step:1456/1695 train_time:144177ms step_avg:99.02ms +step:1457/1695 train_time:144279ms step_avg:99.02ms +step:1458/1695 train_time:144382ms step_avg:99.03ms +step:1459/1695 train_time:144484ms step_avg:99.03ms +step:1460/1695 train_time:144585ms step_avg:99.03ms +step:1461/1695 train_time:144688ms step_avg:99.03ms +step:1462/1695 train_time:144788ms step_avg:99.03ms +step:1463/1695 train_time:144890ms step_avg:99.04ms +step:1464/1695 train_time:144992ms step_avg:99.04ms +step:1465/1695 train_time:145093ms step_avg:99.04ms +step:1466/1695 train_time:145195ms step_avg:99.04ms +step:1467/1695 train_time:145296ms step_avg:99.04ms +step:1468/1695 train_time:145399ms step_avg:99.05ms +step:1469/1695 train_time:145501ms step_avg:99.05ms +step:1470/1695 train_time:145601ms step_avg:99.05ms +step:1471/1695 train_time:145704ms step_avg:99.05ms +step:1472/1695 train_time:145807ms step_avg:99.05ms +step:1473/1695 train_time:145908ms step_avg:99.05ms +step:1474/1695 train_time:146009ms step_avg:99.06ms +step:1475/1695 train_time:146111ms step_avg:99.06ms +step:1476/1695 train_time:146214ms step_avg:99.06ms +step:1477/1695 train_time:146316ms step_avg:99.06ms +step:1478/1695 train_time:146418ms step_avg:99.06ms +step:1479/1695 train_time:146518ms step_avg:99.07ms +step:1480/1695 train_time:146620ms step_avg:99.07ms +step:1481/1695 train_time:146721ms step_avg:99.07ms +step:1482/1695 train_time:146823ms step_avg:99.07ms +step:1483/1695 train_time:146926ms step_avg:99.07ms +step:1484/1695 train_time:147028ms step_avg:99.08ms +step:1485/1695 train_time:147129ms step_avg:99.08ms +step:1486/1695 train_time:147231ms step_avg:99.08ms +step:1487/1695 train_time:147332ms step_avg:99.08ms +step:1488/1695 train_time:147435ms step_avg:99.08ms +step:1489/1695 train_time:147538ms step_avg:99.09ms +step:1490/1695 train_time:147640ms step_avg:99.09ms +step:1491/1695 train_time:147741ms step_avg:99.09ms +step:1492/1695 train_time:147842ms step_avg:99.09ms +step:1493/1695 train_time:147944ms step_avg:99.09ms +step:1494/1695 train_time:148046ms step_avg:99.09ms +step:1495/1695 train_time:148148ms step_avg:99.10ms +step:1496/1695 train_time:148250ms step_avg:99.10ms +step:1497/1695 train_time:148351ms step_avg:99.10ms +step:1498/1695 train_time:148454ms step_avg:99.10ms +step:1499/1695 train_time:148556ms step_avg:99.10ms +step:1500/1695 train_time:148658ms step_avg:99.11ms +step:1500/1695 val_loss:3.3231 train_time:148757ms step_avg:99.17ms +step:1501/1695 train_time:148784ms step_avg:99.12ms +step:1502/1695 train_time:148870ms step_avg:99.11ms +step:1503/1695 train_time:148971ms step_avg:99.12ms +step:1504/1695 train_time:149071ms step_avg:99.12ms +step:1505/1695 train_time:149172ms step_avg:99.12ms +step:1506/1695 train_time:149272ms step_avg:99.12ms +step:1507/1695 train_time:149372ms step_avg:99.12ms 
+step:1508/1695 train_time:149473ms step_avg:99.12ms +step:1509/1695 train_time:149575ms step_avg:99.12ms +step:1510/1695 train_time:149677ms step_avg:99.12ms +step:1511/1695 train_time:149781ms step_avg:99.13ms +step:1512/1695 train_time:149884ms step_avg:99.13ms +step:1513/1695 train_time:149986ms step_avg:99.13ms +step:1514/1695 train_time:150088ms step_avg:99.13ms +step:1515/1695 train_time:150193ms step_avg:99.14ms +step:1516/1695 train_time:150294ms step_avg:99.14ms +step:1517/1695 train_time:150394ms step_avg:99.14ms +step:1518/1695 train_time:150495ms step_avg:99.14ms +step:1519/1695 train_time:150598ms step_avg:99.14ms +step:1520/1695 train_time:150700ms step_avg:99.14ms +step:1521/1695 train_time:150802ms step_avg:99.15ms +step:1522/1695 train_time:150903ms step_avg:99.15ms +step:1523/1695 train_time:151006ms step_avg:99.15ms +step:1524/1695 train_time:151111ms step_avg:99.15ms +step:1525/1695 train_time:151213ms step_avg:99.16ms +step:1526/1695 train_time:151315ms step_avg:99.16ms +step:1527/1695 train_time:151416ms step_avg:99.16ms +step:1528/1695 train_time:151522ms step_avg:99.16ms +step:1529/1695 train_time:151624ms step_avg:99.17ms +step:1530/1695 train_time:151728ms step_avg:99.17ms +step:1531/1695 train_time:151829ms step_avg:99.17ms +step:1532/1695 train_time:151930ms step_avg:99.17ms +step:1533/1695 train_time:152032ms step_avg:99.17ms +step:1534/1695 train_time:152133ms step_avg:99.17ms +step:1535/1695 train_time:152236ms step_avg:99.18ms +step:1536/1695 train_time:152336ms step_avg:99.18ms +step:1537/1695 train_time:152438ms step_avg:99.18ms +step:1538/1695 train_time:152540ms step_avg:99.18ms +step:1539/1695 train_time:152641ms step_avg:99.18ms +step:1540/1695 train_time:152745ms step_avg:99.18ms +step:1541/1695 train_time:152848ms step_avg:99.19ms +step:1542/1695 train_time:152953ms step_avg:99.19ms +step:1543/1695 train_time:153056ms step_avg:99.19ms +step:1544/1695 train_time:153159ms step_avg:99.20ms +step:1545/1695 train_time:153260ms step_avg:99.20ms +step:1546/1695 train_time:153361ms step_avg:99.20ms +step:1547/1695 train_time:153464ms step_avg:99.20ms +step:1548/1695 train_time:153566ms step_avg:99.20ms +step:1549/1695 train_time:153668ms step_avg:99.20ms +step:1550/1695 train_time:153769ms step_avg:99.21ms +step:1551/1695 train_time:153872ms step_avg:99.21ms +step:1552/1695 train_time:153974ms step_avg:99.21ms +step:1553/1695 train_time:154077ms step_avg:99.21ms +step:1554/1695 train_time:154177ms step_avg:99.21ms +step:1555/1695 train_time:154280ms step_avg:99.22ms +step:1556/1695 train_time:154381ms step_avg:99.22ms +step:1557/1695 train_time:154484ms step_avg:99.22ms +step:1558/1695 train_time:154586ms step_avg:99.22ms +step:1559/1695 train_time:154689ms step_avg:99.22ms +step:1560/1695 train_time:154791ms step_avg:99.22ms +step:1561/1695 train_time:154892ms step_avg:99.23ms +step:1562/1695 train_time:154995ms step_avg:99.23ms +step:1563/1695 train_time:155099ms step_avg:99.23ms +step:1564/1695 train_time:155201ms step_avg:99.23ms +step:1565/1695 train_time:155302ms step_avg:99.23ms +step:1566/1695 train_time:155403ms step_avg:99.24ms +step:1567/1695 train_time:155505ms step_avg:99.24ms +step:1568/1695 train_time:155607ms step_avg:99.24ms +step:1569/1695 train_time:155708ms step_avg:99.24ms +step:1570/1695 train_time:155811ms step_avg:99.24ms +step:1571/1695 train_time:155913ms step_avg:99.24ms +step:1572/1695 train_time:156014ms step_avg:99.25ms +step:1573/1695 train_time:156115ms step_avg:99.25ms +step:1574/1695 train_time:156216ms step_avg:99.25ms 
+step:1575/1695 train_time:156317ms step_avg:99.25ms +step:1576/1695 train_time:156420ms step_avg:99.25ms +step:1577/1695 train_time:156524ms step_avg:99.25ms +step:1578/1695 train_time:156625ms step_avg:99.26ms +step:1579/1695 train_time:156727ms step_avg:99.26ms +step:1580/1695 train_time:156829ms step_avg:99.26ms +step:1581/1695 train_time:156931ms step_avg:99.26ms +step:1582/1695 train_time:157032ms step_avg:99.26ms +step:1583/1695 train_time:157135ms step_avg:99.26ms +step:1584/1695 train_time:157238ms step_avg:99.27ms +step:1585/1695 train_time:157340ms step_avg:99.27ms +step:1586/1695 train_time:157443ms step_avg:99.27ms +step:1587/1695 train_time:157545ms step_avg:99.27ms +step:1588/1695 train_time:157646ms step_avg:99.27ms +step:1589/1695 train_time:157748ms step_avg:99.27ms +step:1590/1695 train_time:157850ms step_avg:99.28ms +step:1591/1695 train_time:157951ms step_avg:99.28ms +step:1592/1695 train_time:158053ms step_avg:99.28ms +step:1593/1695 train_time:158155ms step_avg:99.28ms +step:1594/1695 train_time:158259ms step_avg:99.28ms +step:1595/1695 train_time:158361ms step_avg:99.29ms +step:1596/1695 train_time:158462ms step_avg:99.29ms +step:1597/1695 train_time:158565ms step_avg:99.29ms +step:1598/1695 train_time:158668ms step_avg:99.29ms +step:1599/1695 train_time:158769ms step_avg:99.29ms +step:1600/1695 train_time:158872ms step_avg:99.29ms +step:1601/1695 train_time:158975ms step_avg:99.30ms +step:1602/1695 train_time:159077ms step_avg:99.30ms +step:1603/1695 train_time:159178ms step_avg:99.30ms +step:1604/1695 train_time:159279ms step_avg:99.30ms +step:1605/1695 train_time:159381ms step_avg:99.30ms +step:1606/1695 train_time:159483ms step_avg:99.30ms +step:1607/1695 train_time:159584ms step_avg:99.31ms +step:1608/1695 train_time:159685ms step_avg:99.31ms +step:1609/1695 train_time:159787ms step_avg:99.31ms +step:1610/1695 train_time:159890ms step_avg:99.31ms +step:1611/1695 train_time:159993ms step_avg:99.31ms +step:1612/1695 train_time:160095ms step_avg:99.31ms +step:1613/1695 train_time:160196ms step_avg:99.32ms +step:1614/1695 train_time:160297ms step_avg:99.32ms +step:1615/1695 train_time:160399ms step_avg:99.32ms +step:1616/1695 train_time:160500ms step_avg:99.32ms +step:1617/1695 train_time:160601ms step_avg:99.32ms +step:1618/1695 train_time:160703ms step_avg:99.32ms +step:1619/1695 train_time:160806ms step_avg:99.32ms +step:1620/1695 train_time:160909ms step_avg:99.33ms +step:1621/1695 train_time:161011ms step_avg:99.33ms +step:1622/1695 train_time:161112ms step_avg:99.33ms +step:1623/1695 train_time:161214ms step_avg:99.33ms +step:1624/1695 train_time:161316ms step_avg:99.33ms +step:1625/1695 train_time:161419ms step_avg:99.33ms +step:1625/1695 val_loss:3.2939 train_time:161518ms step_avg:99.40ms +step:1626/1695 train_time:161544ms step_avg:99.35ms +step:1627/1695 train_time:161636ms step_avg:99.35ms +step:1628/1695 train_time:161739ms step_avg:99.35ms +step:1629/1695 train_time:161842ms step_avg:99.35ms +step:1630/1695 train_time:161943ms step_avg:99.35ms +step:1631/1695 train_time:162045ms step_avg:99.35ms +step:1632/1695 train_time:162147ms step_avg:99.35ms +step:1633/1695 train_time:162248ms step_avg:99.36ms +step:1634/1695 train_time:162350ms step_avg:99.36ms +step:1635/1695 train_time:162452ms step_avg:99.36ms +step:1636/1695 train_time:162555ms step_avg:99.36ms +step:1637/1695 train_time:162658ms step_avg:99.36ms +step:1638/1695 train_time:162761ms step_avg:99.37ms +step:1639/1695 train_time:162864ms step_avg:99.37ms +step:1640/1695 train_time:162966ms 
step_avg:99.37ms +step:1641/1695 train_time:163069ms step_avg:99.37ms +step:1642/1695 train_time:163171ms step_avg:99.37ms +step:1643/1695 train_time:163273ms step_avg:99.38ms +step:1644/1695 train_time:163375ms step_avg:99.38ms +step:1645/1695 train_time:163478ms step_avg:99.38ms +step:1646/1695 train_time:163580ms step_avg:99.38ms +step:1647/1695 train_time:163684ms step_avg:99.38ms +step:1648/1695 train_time:163788ms step_avg:99.39ms +step:1649/1695 train_time:163890ms step_avg:99.39ms +step:1650/1695 train_time:163993ms step_avg:99.39ms +step:1651/1695 train_time:164094ms step_avg:99.39ms +step:1652/1695 train_time:164197ms step_avg:99.39ms +step:1653/1695 train_time:164299ms step_avg:99.39ms +step:1654/1695 train_time:164402ms step_avg:99.40ms +step:1655/1695 train_time:164504ms step_avg:99.40ms +step:1656/1695 train_time:164607ms step_avg:99.40ms +step:1657/1695 train_time:164710ms step_avg:99.40ms +step:1658/1695 train_time:164812ms step_avg:99.40ms +step:1659/1695 train_time:164918ms step_avg:99.41ms +step:1660/1695 train_time:165021ms step_avg:99.41ms +step:1661/1695 train_time:165125ms step_avg:99.41ms +step:1662/1695 train_time:165229ms step_avg:99.42ms +step:1663/1695 train_time:165332ms step_avg:99.42ms +step:1664/1695 train_time:165434ms step_avg:99.42ms +step:1665/1695 train_time:165539ms step_avg:99.42ms +step:1666/1695 train_time:165642ms step_avg:99.43ms +step:1667/1695 train_time:165744ms step_avg:99.43ms +step:1668/1695 train_time:165850ms step_avg:99.43ms +step:1669/1695 train_time:165954ms step_avg:99.43ms +step:1670/1695 train_time:166055ms step_avg:99.43ms +step:1671/1695 train_time:166158ms step_avg:99.44ms +step:1672/1695 train_time:166260ms step_avg:99.44ms +step:1673/1695 train_time:166363ms step_avg:99.44ms +step:1674/1695 train_time:166465ms step_avg:99.44ms +step:1675/1695 train_time:166570ms step_avg:99.45ms +step:1676/1695 train_time:166675ms step_avg:99.45ms +step:1677/1695 train_time:166776ms step_avg:99.45ms +step:1678/1695 train_time:166879ms step_avg:99.45ms +step:1679/1695 train_time:166984ms step_avg:99.45ms +step:1680/1695 train_time:167086ms step_avg:99.46ms +step:1681/1695 train_time:167189ms step_avg:99.46ms +step:1682/1695 train_time:167295ms step_avg:99.46ms +step:1683/1695 train_time:167396ms step_avg:99.46ms +step:1684/1695 train_time:167499ms step_avg:99.46ms +step:1685/1695 train_time:167603ms step_avg:99.47ms +step:1686/1695 train_time:167705ms step_avg:99.47ms +step:1687/1695 train_time:167808ms step_avg:99.47ms +step:1688/1695 train_time:167911ms step_avg:99.47ms +step:1689/1695 train_time:168013ms step_avg:99.47ms +step:1690/1695 train_time:168114ms step_avg:99.48ms +step:1691/1695 train_time:168216ms step_avg:99.48ms +step:1692/1695 train_time:168318ms step_avg:99.48ms +step:1693/1695 train_time:168420ms step_avg:99.48ms +step:1694/1695 train_time:168524ms step_avg:99.48ms +step:1695/1695 train_time:168628ms step_avg:99.49ms +step:1695/1695 val_loss:3.2810 train_time:168728ms step_avg:99.54ms +peak memory allocated: 34004 MiB reserved: 49660 MiB diff --git a/records/082325_SparseAttnGate/e8891a98-8bf2-43cc-bac5-728aa53482ce.txt b/records/082325_SparseAttnGate/e8891a98-8bf2-43cc-bac5-728aa53482ce.txt new file mode 100644 index 000000000..1eed97d0a --- /dev/null +++ b/records/082325_SparseAttnGate/e8891a98-8bf2-43cc-bac5-728aa53482ce.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
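+ + Illustrative usage (a sketch only, not exercised in this file; it assumes `model` is the GPT module defined below with gradients already populated, and that torch.distributed is initialized, since step() reduce-scatters gradients and all-gathers updated parameters across ranks): + + matrix_params = [p for p in model.blocks.parameters() if p.ndim >= 2] # block weights only; no embeddings/head + optimizer = Muon(matrix_params, lr=0.02, weight_decay=0.01, momentum=0.95) + loss = model(input_seq, target_seq, sliding_window_num_blocks) + loss.backward() + optimizer.step()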
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + self.dampen = CastedLinear(dim//args.dampen_factor, num_heads) + self.dampen.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length + assert B == 1, "Must use batch size = 1 for FlexAttention" + dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1) + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=0.12).transpose(1, 2) + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * dampen_factor + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), 
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
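+ # note: lr_mul (and wd_mul) are plain attributes on the parameter tensors; Muon and DistAdam read them back via getattr(p, "lr_mul", 1.0) / getattr(p, "wd_mul", 1.0), so any parameter without the attribute keeps its group's base learning rate and weight decay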
+ self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor): + BLOCK_SIZE = 128 + docs = (input_seq == 50256).cumsum(0) + # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256]) + # docs = increments.cumsum(0) + + def document_causal(b, h, q_idx, kv_idx): + causal_mask = q_idx >= kv_idx + document_mask = docs[q_idx] == docs[kv_idx] + return causal_mask & document_mask + + def dense_to_ordered(dense_blockmask: Tensor): + num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32) + indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32) + return num_blocks[None, None].contiguous(), indices[None, None].contiguous() + + # manual block mask creation by @YouJiacheng + assert len(input_seq) % BLOCK_SIZE == 0 + NUM_BLOCKS = len(input_seq) // BLOCK_SIZE + block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda") + causal_blockmask_any = block_idx[:, None] >= block_idx + causal_blockmask_all = block_idx[:, None] > block_idx + docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous() + docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous() + document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low) + document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low) + blockmask_any = causal_blockmask_any & document_blockmask_any + blockmask_all = causal_blockmask_all & document_blockmask_all + partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all) + full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all) + def build_bm(window_size_blocks: Tensor) -> BlockMask: + return BlockMask.from_kv_blocks( + torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)), + partial_kv_indices, + torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1), + full_kv_indices, + BLOCK_SIZE=BLOCK_SIZE, + mask_mod=document_causal, + ) + # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper + return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2) + + def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor): + assert input_seq.ndim == 1 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks)
+        block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(block_masks) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap
+def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int):
+    boundary_mask = tokens[pos : pos + token_window] == 50256
+    boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos
+    start = boundary_positions[0].item()
+    starts = []
+    for i in range(1, len(boundary_positions)):
+        end = boundary_positions[i].item()
+        if end - start >= seq_len:
+            starts.append(start) # append start once end pos is confirmed
+            if len(starts) == dist.get_world_size():
+                return starts, end - pos
+            start = end
+    assert False # increase token_window if necessary
+
+def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool):
+    rank = dist.get_rank()
+    world_size = dist.get_world_size()
+    batch_size = seq_len * world_size
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length seq_len
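+        # with align_to_bos=True, each local sequence must begin at a token-50256 boundary and
+        # partial documents between boundaries get skipped, so a step can consume up to ~2x the
+        # nominal batch tokens; the 2x factor above gives find_batch_starts that much slack to
+        # locate world_size starts per micro-step.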
+        if pos + token_window + 1 >= len(tokens):
+            tokens = _load_data_shard(next(file_iter))
+            pos = 0
+        for _ in range(grad_accum_steps):
+            if align_to_bos:
+                batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window)
+                start_idx = batch_starts[rank]
+            else:
+                tokens_consumed = batch_size
+                start_idx = pos + rank * seq_len
+            buf = tokens[start_idx:][:seq_len + 1]
+            inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+            targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+            pos += tokens_consumed
+            token_window -= tokens_consumed
+            yield inputs, targets
+
+# -----------------------------------------------------------------------------
+# int main
+
+
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
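+# the partition above sends every >=2D matrix inside the blocks to Muon and everything else
+# (embeddings, the lm_head, and the 1D scalars) to DistAdam. a sanity check along these lines
+# (hypothetical, not part of the recorded run) would assert the four lists cover the model:
+# assert len(hidden_matrix_params) + len(embed_params) + len(scalar_params) + len(head_params) == len(list(model.parameters()))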
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    assert 0 <= x < 1
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        w = (1 - x) / args.cooldown_frac
+        return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule: linearly increase
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    assert 0 <= x <= 1
+    # Linearly increase the block-wise sliding window size over training 128 -> 1792
+    # increase by @fernbear.bsky.social; block-wise by @YouJiacheng
+    window_size = next_multiple_of_n(1728 * x, n=128)
+    return get_window_size_blocks_helper(window_size)
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 10
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True)
+for _ in range(warmup_steps):
+    inputs, targets = next(train_loader)
+    model(inputs, targets, get_window_size_blocks(1)).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+#       Training and validation        #
+########################################
+
+train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True)
+training_time_ms = 0
+# start the clock
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+# begin training
+train_steps = args.num_iterations
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------------- VALIDATION SECTION -----------------
+    if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        # stop the clock
+        torch.cuda.synchronize()
+        training_time_ms += 1000 * (time.perf_counter() - t0)
+        model.eval()
+        val_batch_size = world_size * args.val_seq_len
+        assert args.val_tokens % val_batch_size == 0
+        val_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False)
+        val_loss = 0
+        with torch.no_grad():
+            for _ in range(val_steps):
+                inputs, targets = next(val_loader)
+                val_loss += model(inputs, targets, get_window_size_blocks(step))
+        val_loss /= val_steps
+        del val_loader
+        dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
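+        # note: in eval mode the forward pass uses reduction="mean", so each rank holds a
+        # per-token mean over its local shard; with equal token counts per rank, ReduceOp.AVG
+        # yields the global mean val_loss.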
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:36:07 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 31C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 122W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 119W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 312571 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 312572 C /usr/bin/python3 614MiB | +| 0 N/A N/A 312573 C /usr/bin/python3 614MiB | +| 0 N/A N/A 312574 C /usr/bin/python3 614MiB | +| 0 N/A N/A 312575 C /usr/bin/python3 614MiB | +| 0 N/A N/A 312576 C /usr/bin/python3 614MiB | +| 0 N/A N/A 312577 C /usr/bin/python3 614MiB | +| 0 N/A N/A 312578 C /usr/bin/python3 614MiB | +| 1 N/A N/A 312572 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 312573 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 312574 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 312575 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 312576 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 312577 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 312578 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:157ms step_avg:157.06ms +step:2/1695 train_time:183ms step_avg:91.36ms +step:3/1695 train_time:253ms step_avg:84.35ms +step:4/1695 train_time:344ms step_avg:86.09ms +step:5/1695 train_time:438ms step_avg:87.63ms +step:6/1695 train_time:531ms step_avg:88.54ms +step:7/1695 train_time:624ms step_avg:89.16ms +step:8/1695 
train_time:718ms step_avg:89.72ms +step:9/1695 train_time:810ms step_avg:90.03ms +step:10/1695 train_time:903ms step_avg:90.28ms +step:11/1695 train_time:996ms step_avg:90.53ms +step:12/1695 train_time:1090ms step_avg:90.82ms +step:13/1695 train_time:1185ms step_avg:91.13ms +step:14/1695 train_time:1280ms step_avg:91.40ms +step:15/1695 train_time:1375ms step_avg:91.65ms +step:16/1695 train_time:1469ms step_avg:91.81ms +step:17/1695 train_time:1562ms step_avg:91.88ms +step:18/1695 train_time:1655ms step_avg:91.96ms +step:19/1695 train_time:1748ms step_avg:92.00ms +step:20/1695 train_time:1841ms step_avg:92.03ms +step:21/1695 train_time:1934ms step_avg:92.12ms +step:22/1695 train_time:2029ms step_avg:92.25ms +step:23/1695 train_time:2123ms step_avg:92.29ms +step:24/1695 train_time:2217ms step_avg:92.36ms +step:25/1695 train_time:2311ms step_avg:92.45ms +step:26/1695 train_time:2406ms step_avg:92.52ms +step:27/1695 train_time:2500ms step_avg:92.59ms +step:28/1695 train_time:2595ms step_avg:92.69ms +step:29/1695 train_time:2689ms step_avg:92.72ms +step:30/1695 train_time:2782ms step_avg:92.72ms +step:31/1695 train_time:2875ms step_avg:92.74ms +step:32/1695 train_time:2969ms step_avg:92.77ms +step:33/1695 train_time:3062ms step_avg:92.80ms +step:34/1695 train_time:3156ms step_avg:92.81ms +step:35/1695 train_time:3250ms step_avg:92.86ms +step:36/1695 train_time:3343ms step_avg:92.87ms +step:37/1695 train_time:3437ms step_avg:92.89ms +step:38/1695 train_time:3531ms step_avg:92.93ms +step:39/1695 train_time:3625ms step_avg:92.96ms +step:40/1695 train_time:3719ms step_avg:92.97ms +step:41/1695 train_time:3812ms step_avg:92.98ms +step:42/1695 train_time:3906ms step_avg:92.99ms +step:43/1695 train_time:3999ms step_avg:93.00ms +step:44/1695 train_time:4093ms step_avg:93.03ms +step:45/1695 train_time:4187ms step_avg:93.04ms +step:46/1695 train_time:4280ms step_avg:93.05ms +step:47/1695 train_time:4374ms step_avg:93.06ms +step:48/1695 train_time:4468ms step_avg:93.08ms +step:49/1695 train_time:4562ms step_avg:93.10ms +step:50/1695 train_time:4656ms step_avg:93.12ms +step:51/1695 train_time:4750ms step_avg:93.13ms +step:52/1695 train_time:4844ms step_avg:93.15ms +step:53/1695 train_time:4937ms step_avg:93.15ms +step:54/1695 train_time:5030ms step_avg:93.15ms +step:55/1695 train_time:5123ms step_avg:93.15ms +step:56/1695 train_time:5217ms step_avg:93.16ms +step:57/1695 train_time:5311ms step_avg:93.17ms +step:58/1695 train_time:5404ms step_avg:93.18ms +step:59/1695 train_time:5498ms step_avg:93.19ms +step:60/1695 train_time:5592ms step_avg:93.20ms +step:61/1695 train_time:5686ms step_avg:93.22ms +step:62/1695 train_time:5779ms step_avg:93.22ms +step:63/1695 train_time:5874ms step_avg:93.23ms +step:64/1695 train_time:5969ms step_avg:93.26ms +step:65/1695 train_time:6062ms step_avg:93.26ms +step:66/1695 train_time:6155ms step_avg:93.26ms +step:67/1695 train_time:6248ms step_avg:93.25ms +step:68/1695 train_time:6342ms step_avg:93.26ms +step:69/1695 train_time:6435ms step_avg:93.27ms +step:70/1695 train_time:6529ms step_avg:93.27ms +step:71/1695 train_time:6623ms step_avg:93.28ms +step:72/1695 train_time:6717ms step_avg:93.30ms +step:73/1695 train_time:6811ms step_avg:93.31ms +step:74/1695 train_time:6906ms step_avg:93.32ms +step:75/1695 train_time:6999ms step_avg:93.32ms +step:76/1695 train_time:7093ms step_avg:93.33ms +step:77/1695 train_time:7186ms step_avg:93.33ms +step:78/1695 train_time:7280ms step_avg:93.33ms +step:79/1695 train_time:7374ms step_avg:93.34ms +step:80/1695 train_time:7468ms 
step_avg:93.35ms +step:81/1695 train_time:7561ms step_avg:93.35ms +step:82/1695 train_time:7656ms step_avg:93.36ms +step:83/1695 train_time:7749ms step_avg:93.36ms +step:84/1695 train_time:7843ms step_avg:93.37ms +step:85/1695 train_time:7937ms step_avg:93.38ms +step:86/1695 train_time:8031ms step_avg:93.38ms +step:87/1695 train_time:8123ms step_avg:93.37ms +step:88/1695 train_time:8217ms step_avg:93.37ms +step:89/1695 train_time:8310ms step_avg:93.37ms +step:90/1695 train_time:8403ms step_avg:93.37ms +step:91/1695 train_time:8497ms step_avg:93.37ms +step:92/1695 train_time:8591ms step_avg:93.38ms +step:93/1695 train_time:8685ms step_avg:93.38ms +step:94/1695 train_time:8778ms step_avg:93.39ms +step:95/1695 train_time:8872ms step_avg:93.39ms +step:96/1695 train_time:8968ms step_avg:93.41ms +step:97/1695 train_time:9060ms step_avg:93.40ms +step:98/1695 train_time:9153ms step_avg:93.40ms +step:99/1695 train_time:9247ms step_avg:93.40ms +step:100/1695 train_time:9340ms step_avg:93.40ms +step:101/1695 train_time:9433ms step_avg:93.40ms +step:102/1695 train_time:9526ms step_avg:93.40ms +step:103/1695 train_time:9619ms step_avg:93.39ms +step:104/1695 train_time:9713ms step_avg:93.40ms +step:105/1695 train_time:9808ms step_avg:93.41ms +step:106/1695 train_time:9901ms step_avg:93.41ms +step:107/1695 train_time:9994ms step_avg:93.41ms +step:108/1695 train_time:10088ms step_avg:93.41ms +step:109/1695 train_time:10181ms step_avg:93.40ms +step:110/1695 train_time:10275ms step_avg:93.41ms +step:111/1695 train_time:10369ms step_avg:93.41ms +step:112/1695 train_time:10462ms step_avg:93.41ms +step:113/1695 train_time:10556ms step_avg:93.41ms +step:114/1695 train_time:10649ms step_avg:93.41ms +step:115/1695 train_time:10742ms step_avg:93.41ms +step:116/1695 train_time:10836ms step_avg:93.41ms +step:117/1695 train_time:10930ms step_avg:93.42ms +step:118/1695 train_time:11024ms step_avg:93.42ms +step:119/1695 train_time:11117ms step_avg:93.42ms +step:120/1695 train_time:11211ms step_avg:93.43ms +step:121/1695 train_time:11305ms step_avg:93.43ms +step:122/1695 train_time:11398ms step_avg:93.43ms +step:123/1695 train_time:11492ms step_avg:93.43ms +step:124/1695 train_time:11586ms step_avg:93.43ms +step:125/1695 train_time:11679ms step_avg:93.43ms +step:125/1695 val_loss:4.6043 train_time:11771ms step_avg:94.17ms +step:126/1695 train_time:11798ms step_avg:93.64ms +step:127/1695 train_time:11873ms step_avg:93.49ms +step:128/1695 train_time:11977ms step_avg:93.57ms +step:129/1695 train_time:12072ms step_avg:93.58ms +step:130/1695 train_time:12166ms step_avg:93.58ms +step:131/1695 train_time:12260ms step_avg:93.59ms +step:132/1695 train_time:12353ms step_avg:93.58ms +step:133/1695 train_time:12447ms step_avg:93.58ms +step:134/1695 train_time:12541ms step_avg:93.59ms +step:135/1695 train_time:12634ms step_avg:93.59ms +step:136/1695 train_time:12727ms step_avg:93.58ms +step:137/1695 train_time:12821ms step_avg:93.59ms +step:138/1695 train_time:12917ms step_avg:93.60ms +step:139/1695 train_time:13011ms step_avg:93.61ms +step:140/1695 train_time:13105ms step_avg:93.61ms +step:141/1695 train_time:13200ms step_avg:93.62ms +step:142/1695 train_time:13294ms step_avg:93.62ms +step:143/1695 train_time:13387ms step_avg:93.62ms +step:144/1695 train_time:13481ms step_avg:93.62ms +step:145/1695 train_time:13575ms step_avg:93.62ms +step:146/1695 train_time:13668ms step_avg:93.62ms +step:147/1695 train_time:13762ms step_avg:93.62ms +step:148/1695 train_time:13856ms step_avg:93.62ms +step:149/1695 train_time:13950ms 
step_avg:93.63ms +step:150/1695 train_time:14045ms step_avg:93.63ms +step:151/1695 train_time:14140ms step_avg:93.64ms +step:152/1695 train_time:14234ms step_avg:93.65ms +step:153/1695 train_time:14328ms step_avg:93.65ms +step:154/1695 train_time:14422ms step_avg:93.65ms +step:155/1695 train_time:14516ms step_avg:93.65ms +step:156/1695 train_time:14611ms step_avg:93.66ms +step:157/1695 train_time:14705ms step_avg:93.66ms +step:158/1695 train_time:14798ms step_avg:93.66ms +step:159/1695 train_time:14892ms step_avg:93.66ms +step:160/1695 train_time:14986ms step_avg:93.66ms +step:161/1695 train_time:15081ms step_avg:93.67ms +step:162/1695 train_time:15175ms step_avg:93.67ms +step:163/1695 train_time:15270ms step_avg:93.68ms +step:164/1695 train_time:15363ms step_avg:93.68ms +step:165/1695 train_time:15457ms step_avg:93.68ms +step:166/1695 train_time:15552ms step_avg:93.69ms +step:167/1695 train_time:15645ms step_avg:93.69ms +step:168/1695 train_time:15739ms step_avg:93.69ms +step:169/1695 train_time:15834ms step_avg:93.69ms +step:170/1695 train_time:15928ms step_avg:93.69ms +step:171/1695 train_time:16021ms step_avg:93.69ms +step:172/1695 train_time:16116ms step_avg:93.70ms +step:173/1695 train_time:16209ms step_avg:93.69ms +step:174/1695 train_time:16303ms step_avg:93.70ms +step:175/1695 train_time:16398ms step_avg:93.70ms +step:176/1695 train_time:16492ms step_avg:93.70ms +step:177/1695 train_time:16586ms step_avg:93.70ms +step:178/1695 train_time:16680ms step_avg:93.71ms +step:179/1695 train_time:16774ms step_avg:93.71ms +step:180/1695 train_time:16868ms step_avg:93.71ms +step:181/1695 train_time:16962ms step_avg:93.71ms +step:182/1695 train_time:17057ms step_avg:93.72ms +step:183/1695 train_time:17151ms step_avg:93.72ms +step:184/1695 train_time:17244ms step_avg:93.72ms +step:185/1695 train_time:17338ms step_avg:93.72ms +step:186/1695 train_time:17433ms step_avg:93.73ms +step:187/1695 train_time:17526ms step_avg:93.72ms +step:188/1695 train_time:17621ms step_avg:93.73ms +step:189/1695 train_time:17715ms step_avg:93.73ms +step:190/1695 train_time:17809ms step_avg:93.73ms +step:191/1695 train_time:17903ms step_avg:93.73ms +step:192/1695 train_time:17996ms step_avg:93.73ms +step:193/1695 train_time:18090ms step_avg:93.73ms +step:194/1695 train_time:18183ms step_avg:93.73ms +step:195/1695 train_time:18278ms step_avg:93.73ms +step:196/1695 train_time:18372ms step_avg:93.74ms +step:197/1695 train_time:18466ms step_avg:93.74ms +step:198/1695 train_time:18560ms step_avg:93.74ms +step:199/1695 train_time:18654ms step_avg:93.74ms +step:200/1695 train_time:18747ms step_avg:93.73ms +step:201/1695 train_time:18840ms step_avg:93.73ms +step:202/1695 train_time:18934ms step_avg:93.73ms +step:203/1695 train_time:19028ms step_avg:93.73ms +step:204/1695 train_time:19121ms step_avg:93.73ms +step:205/1695 train_time:19216ms step_avg:93.74ms +step:206/1695 train_time:19311ms step_avg:93.74ms +step:207/1695 train_time:19404ms step_avg:93.74ms +step:208/1695 train_time:19499ms step_avg:93.74ms +step:209/1695 train_time:19593ms step_avg:93.75ms +step:210/1695 train_time:19687ms step_avg:93.75ms +step:211/1695 train_time:19781ms step_avg:93.75ms +step:212/1695 train_time:19877ms step_avg:93.76ms +step:213/1695 train_time:19971ms step_avg:93.76ms +step:214/1695 train_time:20065ms step_avg:93.76ms +step:215/1695 train_time:20159ms step_avg:93.76ms +step:216/1695 train_time:20253ms step_avg:93.77ms +step:217/1695 train_time:20347ms step_avg:93.77ms +step:218/1695 train_time:20441ms step_avg:93.77ms +step:219/1695 
train_time:20536ms step_avg:93.77ms +step:220/1695 train_time:20630ms step_avg:93.77ms +step:221/1695 train_time:20723ms step_avg:93.77ms +step:222/1695 train_time:20817ms step_avg:93.77ms +step:223/1695 train_time:20912ms step_avg:93.78ms +step:224/1695 train_time:21005ms step_avg:93.77ms +step:225/1695 train_time:21099ms step_avg:93.77ms +step:226/1695 train_time:21192ms step_avg:93.77ms +step:227/1695 train_time:21285ms step_avg:93.77ms +step:228/1695 train_time:21379ms step_avg:93.77ms +step:229/1695 train_time:21473ms step_avg:93.77ms +step:230/1695 train_time:21567ms step_avg:93.77ms +step:231/1695 train_time:21661ms step_avg:93.77ms +step:232/1695 train_time:21755ms step_avg:93.77ms +step:233/1695 train_time:21849ms step_avg:93.77ms +step:234/1695 train_time:21943ms step_avg:93.77ms +step:235/1695 train_time:22037ms step_avg:93.78ms +step:236/1695 train_time:22131ms step_avg:93.78ms +step:237/1695 train_time:22225ms step_avg:93.78ms +step:238/1695 train_time:22320ms step_avg:93.78ms +step:239/1695 train_time:22414ms step_avg:93.78ms +step:240/1695 train_time:22507ms step_avg:93.78ms +step:241/1695 train_time:22602ms step_avg:93.79ms +step:242/1695 train_time:22697ms step_avg:93.79ms +step:243/1695 train_time:22791ms step_avg:93.79ms +step:244/1695 train_time:22884ms step_avg:93.79ms +step:245/1695 train_time:22980ms step_avg:93.80ms +step:246/1695 train_time:23076ms step_avg:93.81ms +step:247/1695 train_time:23169ms step_avg:93.80ms +step:248/1695 train_time:23263ms step_avg:93.80ms +step:249/1695 train_time:23356ms step_avg:93.80ms +step:250/1695 train_time:23450ms step_avg:93.80ms +step:250/1695 val_loss:4.0858 train_time:23541ms step_avg:94.16ms +step:251/1695 train_time:23567ms step_avg:93.89ms +step:252/1695 train_time:23645ms step_avg:93.83ms +step:253/1695 train_time:23744ms step_avg:93.85ms +step:254/1695 train_time:23840ms step_avg:93.86ms +step:255/1695 train_time:23933ms step_avg:93.86ms +step:256/1695 train_time:24027ms step_avg:93.86ms +step:257/1695 train_time:24121ms step_avg:93.85ms +step:258/1695 train_time:24214ms step_avg:93.85ms +step:259/1695 train_time:24308ms step_avg:93.85ms +step:260/1695 train_time:24402ms step_avg:93.85ms +step:261/1695 train_time:24496ms step_avg:93.85ms +step:262/1695 train_time:24593ms step_avg:93.87ms +step:263/1695 train_time:24691ms step_avg:93.88ms +step:264/1695 train_time:24788ms step_avg:93.89ms +step:265/1695 train_time:24885ms step_avg:93.90ms +step:266/1695 train_time:24978ms step_avg:93.90ms +step:267/1695 train_time:25072ms step_avg:93.90ms +step:268/1695 train_time:25166ms step_avg:93.90ms +step:269/1695 train_time:25260ms step_avg:93.90ms +step:270/1695 train_time:25353ms step_avg:93.90ms +step:271/1695 train_time:25448ms step_avg:93.90ms +step:272/1695 train_time:25542ms step_avg:93.90ms +step:273/1695 train_time:25636ms step_avg:93.91ms +step:274/1695 train_time:25733ms step_avg:93.92ms +step:275/1695 train_time:25829ms step_avg:93.92ms +step:276/1695 train_time:25923ms step_avg:93.92ms +step:277/1695 train_time:26017ms step_avg:93.92ms +step:278/1695 train_time:26112ms step_avg:93.93ms +step:279/1695 train_time:26207ms step_avg:93.93ms +step:280/1695 train_time:26300ms step_avg:93.93ms +step:281/1695 train_time:26394ms step_avg:93.93ms +step:282/1695 train_time:26488ms step_avg:93.93ms +step:283/1695 train_time:26583ms step_avg:93.93ms +step:284/1695 train_time:26678ms step_avg:93.93ms +step:285/1695 train_time:26772ms step_avg:93.94ms +step:286/1695 train_time:26867ms step_avg:93.94ms +step:287/1695 train_time:26962ms 
step_avg:93.95ms +step:288/1695 train_time:27057ms step_avg:93.95ms +step:289/1695 train_time:27151ms step_avg:93.95ms +step:290/1695 train_time:27246ms step_avg:93.95ms +step:291/1695 train_time:27340ms step_avg:93.95ms +step:292/1695 train_time:27433ms step_avg:93.95ms +step:293/1695 train_time:27527ms step_avg:93.95ms +step:294/1695 train_time:27622ms step_avg:93.95ms +step:295/1695 train_time:27717ms step_avg:93.96ms +step:296/1695 train_time:27813ms step_avg:93.96ms +step:297/1695 train_time:27908ms step_avg:93.97ms +step:298/1695 train_time:28003ms step_avg:93.97ms +step:299/1695 train_time:28096ms step_avg:93.97ms +step:300/1695 train_time:28193ms step_avg:93.98ms +step:301/1695 train_time:28288ms step_avg:93.98ms +step:302/1695 train_time:28382ms step_avg:93.98ms +step:303/1695 train_time:28476ms step_avg:93.98ms +step:304/1695 train_time:28570ms step_avg:93.98ms +step:305/1695 train_time:28664ms step_avg:93.98ms +step:306/1695 train_time:28758ms step_avg:93.98ms +step:307/1695 train_time:28853ms step_avg:93.98ms +step:308/1695 train_time:28948ms step_avg:93.99ms +step:309/1695 train_time:29043ms step_avg:93.99ms +step:310/1695 train_time:29137ms step_avg:93.99ms +step:311/1695 train_time:29233ms step_avg:94.00ms +step:312/1695 train_time:29328ms step_avg:94.00ms +step:313/1695 train_time:29422ms step_avg:94.00ms +step:314/1695 train_time:29516ms step_avg:94.00ms +step:315/1695 train_time:29611ms step_avg:94.00ms +step:316/1695 train_time:29706ms step_avg:94.00ms +step:317/1695 train_time:29800ms step_avg:94.01ms +step:318/1695 train_time:29894ms step_avg:94.01ms +step:319/1695 train_time:29989ms step_avg:94.01ms +step:320/1695 train_time:30084ms step_avg:94.01ms +step:321/1695 train_time:30178ms step_avg:94.01ms +step:322/1695 train_time:30273ms step_avg:94.01ms +step:323/1695 train_time:30368ms step_avg:94.02ms +step:324/1695 train_time:30463ms step_avg:94.02ms +step:325/1695 train_time:30556ms step_avg:94.02ms +step:326/1695 train_time:30651ms step_avg:94.02ms +step:327/1695 train_time:30746ms step_avg:94.02ms +step:328/1695 train_time:30839ms step_avg:94.02ms +step:329/1695 train_time:30934ms step_avg:94.02ms +step:330/1695 train_time:31029ms step_avg:94.03ms +step:331/1695 train_time:31124ms step_avg:94.03ms +step:332/1695 train_time:31218ms step_avg:94.03ms +step:333/1695 train_time:31313ms step_avg:94.03ms +step:334/1695 train_time:31407ms step_avg:94.03ms +step:335/1695 train_time:31502ms step_avg:94.03ms +step:336/1695 train_time:31595ms step_avg:94.03ms +step:337/1695 train_time:31691ms step_avg:94.04ms +step:338/1695 train_time:31785ms step_avg:94.04ms +step:339/1695 train_time:31879ms step_avg:94.04ms +step:340/1695 train_time:31973ms step_avg:94.04ms +step:341/1695 train_time:32069ms step_avg:94.04ms +step:342/1695 train_time:32163ms step_avg:94.04ms +step:343/1695 train_time:32257ms step_avg:94.04ms +step:344/1695 train_time:32352ms step_avg:94.05ms +step:345/1695 train_time:32446ms step_avg:94.05ms +step:346/1695 train_time:32541ms step_avg:94.05ms +step:347/1695 train_time:32634ms step_avg:94.05ms +step:348/1695 train_time:32728ms step_avg:94.05ms +step:349/1695 train_time:32822ms step_avg:94.05ms +step:350/1695 train_time:32916ms step_avg:94.05ms +step:351/1695 train_time:33012ms step_avg:94.05ms +step:352/1695 train_time:33107ms step_avg:94.05ms +step:353/1695 train_time:33201ms step_avg:94.05ms +step:354/1695 train_time:33295ms step_avg:94.05ms +step:355/1695 train_time:33389ms step_avg:94.05ms +step:356/1695 train_time:33484ms step_avg:94.06ms +step:357/1695 
train_time:33578ms step_avg:94.05ms +step:358/1695 train_time:33672ms step_avg:94.05ms +step:359/1695 train_time:33767ms step_avg:94.06ms +step:360/1695 train_time:33860ms step_avg:94.06ms +step:361/1695 train_time:33954ms step_avg:94.06ms +step:362/1695 train_time:34050ms step_avg:94.06ms +step:363/1695 train_time:34145ms step_avg:94.06ms +step:364/1695 train_time:34239ms step_avg:94.06ms +step:365/1695 train_time:34332ms step_avg:94.06ms +step:366/1695 train_time:34427ms step_avg:94.06ms +step:367/1695 train_time:34521ms step_avg:94.06ms +step:368/1695 train_time:34615ms step_avg:94.06ms +step:369/1695 train_time:34709ms step_avg:94.06ms +step:370/1695 train_time:34805ms step_avg:94.07ms +step:371/1695 train_time:34899ms step_avg:94.07ms +step:372/1695 train_time:34993ms step_avg:94.07ms +step:373/1695 train_time:35088ms step_avg:94.07ms +step:374/1695 train_time:35183ms step_avg:94.07ms +step:375/1695 train_time:35277ms step_avg:94.07ms +step:375/1695 val_loss:3.8837 train_time:35370ms step_avg:94.32ms +step:376/1695 train_time:35397ms step_avg:94.14ms +step:377/1695 train_time:35476ms step_avg:94.10ms +step:378/1695 train_time:35573ms step_avg:94.11ms +step:379/1695 train_time:35670ms step_avg:94.12ms +step:380/1695 train_time:35765ms step_avg:94.12ms +step:381/1695 train_time:35861ms step_avg:94.12ms +step:382/1695 train_time:35956ms step_avg:94.13ms +step:383/1695 train_time:36051ms step_avg:94.13ms +step:384/1695 train_time:36146ms step_avg:94.13ms +step:385/1695 train_time:36242ms step_avg:94.13ms +step:386/1695 train_time:36337ms step_avg:94.14ms +step:387/1695 train_time:36434ms step_avg:94.14ms +step:388/1695 train_time:36531ms step_avg:94.15ms +step:389/1695 train_time:36628ms step_avg:94.16ms +step:390/1695 train_time:36726ms step_avg:94.17ms +step:391/1695 train_time:36822ms step_avg:94.17ms +step:392/1695 train_time:36918ms step_avg:94.18ms +step:393/1695 train_time:37014ms step_avg:94.18ms +step:394/1695 train_time:37109ms step_avg:94.18ms +step:395/1695 train_time:37205ms step_avg:94.19ms +step:396/1695 train_time:37302ms step_avg:94.20ms +step:397/1695 train_time:37398ms step_avg:94.20ms +step:398/1695 train_time:37494ms step_avg:94.21ms +step:399/1695 train_time:37590ms step_avg:94.21ms +step:400/1695 train_time:37687ms step_avg:94.22ms +step:401/1695 train_time:37785ms step_avg:94.23ms +step:402/1695 train_time:37881ms step_avg:94.23ms +step:403/1695 train_time:37978ms step_avg:94.24ms +step:404/1695 train_time:38073ms step_avg:94.24ms +step:405/1695 train_time:38169ms step_avg:94.24ms +step:406/1695 train_time:38265ms step_avg:94.25ms +step:407/1695 train_time:38362ms step_avg:94.25ms +step:408/1695 train_time:38459ms step_avg:94.26ms +step:409/1695 train_time:38555ms step_avg:94.27ms +step:410/1695 train_time:38650ms step_avg:94.27ms +step:411/1695 train_time:38746ms step_avg:94.27ms +step:412/1695 train_time:38842ms step_avg:94.28ms +step:413/1695 train_time:38939ms step_avg:94.28ms +step:414/1695 train_time:39035ms step_avg:94.29ms +step:415/1695 train_time:39131ms step_avg:94.29ms +step:416/1695 train_time:39227ms step_avg:94.30ms +step:417/1695 train_time:39323ms step_avg:94.30ms +step:418/1695 train_time:39420ms step_avg:94.31ms +step:419/1695 train_time:39516ms step_avg:94.31ms +step:420/1695 train_time:39612ms step_avg:94.31ms +step:421/1695 train_time:39708ms step_avg:94.32ms +step:422/1695 train_time:39805ms step_avg:94.32ms +step:423/1695 train_time:39902ms step_avg:94.33ms +step:424/1695 train_time:39998ms step_avg:94.34ms +step:425/1695 train_time:40094ms 
step_avg:94.34ms +step:426/1695 train_time:40190ms step_avg:94.34ms +step:427/1695 train_time:40286ms step_avg:94.35ms +step:428/1695 train_time:40383ms step_avg:94.35ms +step:429/1695 train_time:40479ms step_avg:94.36ms +step:430/1695 train_time:40575ms step_avg:94.36ms +step:431/1695 train_time:40672ms step_avg:94.37ms +step:432/1695 train_time:40768ms step_avg:94.37ms +step:433/1695 train_time:40864ms step_avg:94.37ms +step:434/1695 train_time:40962ms step_avg:94.38ms +step:435/1695 train_time:41059ms step_avg:94.39ms +step:436/1695 train_time:41155ms step_avg:94.39ms +step:437/1695 train_time:41251ms step_avg:94.40ms +step:438/1695 train_time:41347ms step_avg:94.40ms +step:439/1695 train_time:41444ms step_avg:94.40ms +step:440/1695 train_time:41541ms step_avg:94.41ms +step:441/1695 train_time:41637ms step_avg:94.42ms +step:442/1695 train_time:41733ms step_avg:94.42ms +step:443/1695 train_time:41829ms step_avg:94.42ms +step:444/1695 train_time:41925ms step_avg:94.43ms +step:445/1695 train_time:42022ms step_avg:94.43ms +step:446/1695 train_time:42120ms step_avg:94.44ms +step:447/1695 train_time:42216ms step_avg:94.44ms +step:448/1695 train_time:42313ms step_avg:94.45ms +step:449/1695 train_time:42408ms step_avg:94.45ms +step:450/1695 train_time:42505ms step_avg:94.46ms +step:451/1695 train_time:42602ms step_avg:94.46ms +step:452/1695 train_time:42699ms step_avg:94.47ms +step:453/1695 train_time:42795ms step_avg:94.47ms +step:454/1695 train_time:42891ms step_avg:94.47ms +step:455/1695 train_time:42987ms step_avg:94.48ms +step:456/1695 train_time:43084ms step_avg:94.48ms +step:457/1695 train_time:43181ms step_avg:94.49ms +step:458/1695 train_time:43278ms step_avg:94.49ms +step:459/1695 train_time:43374ms step_avg:94.50ms +step:460/1695 train_time:43470ms step_avg:94.50ms +step:461/1695 train_time:43566ms step_avg:94.50ms +step:462/1695 train_time:43663ms step_avg:94.51ms +step:463/1695 train_time:43760ms step_avg:94.51ms +step:464/1695 train_time:43856ms step_avg:94.52ms +step:465/1695 train_time:43952ms step_avg:94.52ms +step:466/1695 train_time:44048ms step_avg:94.52ms +step:467/1695 train_time:44144ms step_avg:94.53ms +step:468/1695 train_time:44241ms step_avg:94.53ms +step:469/1695 train_time:44338ms step_avg:94.54ms +step:470/1695 train_time:44434ms step_avg:94.54ms +step:471/1695 train_time:44529ms step_avg:94.54ms +step:472/1695 train_time:44625ms step_avg:94.54ms +step:473/1695 train_time:44722ms step_avg:94.55ms +step:474/1695 train_time:44819ms step_avg:94.55ms +step:475/1695 train_time:44915ms step_avg:94.56ms +step:476/1695 train_time:45011ms step_avg:94.56ms +step:477/1695 train_time:45107ms step_avg:94.56ms +step:478/1695 train_time:45204ms step_avg:94.57ms +step:479/1695 train_time:45300ms step_avg:94.57ms +step:480/1695 train_time:45397ms step_avg:94.58ms +step:481/1695 train_time:45493ms step_avg:94.58ms +step:482/1695 train_time:45588ms step_avg:94.58ms +step:483/1695 train_time:45685ms step_avg:94.59ms +step:484/1695 train_time:45782ms step_avg:94.59ms +step:485/1695 train_time:45877ms step_avg:94.59ms +step:486/1695 train_time:45973ms step_avg:94.60ms +step:487/1695 train_time:46069ms step_avg:94.60ms +step:488/1695 train_time:46166ms step_avg:94.60ms +step:489/1695 train_time:46262ms step_avg:94.61ms +step:490/1695 train_time:46359ms step_avg:94.61ms +step:491/1695 train_time:46456ms step_avg:94.61ms +step:492/1695 train_time:46550ms step_avg:94.61ms +step:493/1695 train_time:46646ms step_avg:94.62ms +step:494/1695 train_time:46743ms step_avg:94.62ms +step:495/1695 
train_time:46839ms step_avg:94.62ms +step:496/1695 train_time:46936ms step_avg:94.63ms +step:497/1695 train_time:47031ms step_avg:94.63ms +step:498/1695 train_time:47127ms step_avg:94.63ms +step:499/1695 train_time:47223ms step_avg:94.64ms +step:500/1695 train_time:47320ms step_avg:94.64ms +step:500/1695 val_loss:3.7375 train_time:47413ms step_avg:94.83ms +step:501/1695 train_time:47441ms step_avg:94.69ms +step:502/1695 train_time:47522ms step_avg:94.66ms +step:503/1695 train_time:47624ms step_avg:94.68ms +step:504/1695 train_time:47721ms step_avg:94.68ms +step:505/1695 train_time:47817ms step_avg:94.69ms +step:506/1695 train_time:47912ms step_avg:94.69ms +step:507/1695 train_time:48008ms step_avg:94.69ms +step:508/1695 train_time:48104ms step_avg:94.69ms +step:509/1695 train_time:48200ms step_avg:94.69ms +step:510/1695 train_time:48295ms step_avg:94.70ms +step:511/1695 train_time:48391ms step_avg:94.70ms +step:512/1695 train_time:48489ms step_avg:94.70ms +step:513/1695 train_time:48588ms step_avg:94.71ms +step:514/1695 train_time:48686ms step_avg:94.72ms +step:515/1695 train_time:48783ms step_avg:94.72ms +step:516/1695 train_time:48880ms step_avg:94.73ms +step:517/1695 train_time:48976ms step_avg:94.73ms +step:518/1695 train_time:49072ms step_avg:94.73ms +step:519/1695 train_time:49168ms step_avg:94.74ms +step:520/1695 train_time:49264ms step_avg:94.74ms +step:521/1695 train_time:49360ms step_avg:94.74ms +step:522/1695 train_time:49456ms step_avg:94.74ms +step:523/1695 train_time:49553ms step_avg:94.75ms +step:524/1695 train_time:49650ms step_avg:94.75ms +step:525/1695 train_time:49747ms step_avg:94.76ms +step:526/1695 train_time:49845ms step_avg:94.76ms +step:527/1695 train_time:49942ms step_avg:94.77ms +step:528/1695 train_time:50039ms step_avg:94.77ms +step:529/1695 train_time:50136ms step_avg:94.77ms +step:530/1695 train_time:50231ms step_avg:94.78ms +step:531/1695 train_time:50327ms step_avg:94.78ms +step:532/1695 train_time:50424ms step_avg:94.78ms +step:533/1695 train_time:50521ms step_avg:94.79ms +step:534/1695 train_time:50618ms step_avg:94.79ms +step:535/1695 train_time:50713ms step_avg:94.79ms +step:536/1695 train_time:50810ms step_avg:94.80ms +step:537/1695 train_time:50908ms step_avg:94.80ms +step:538/1695 train_time:51006ms step_avg:94.81ms +step:539/1695 train_time:51105ms step_avg:94.81ms +step:540/1695 train_time:51200ms step_avg:94.82ms +step:541/1695 train_time:51296ms step_avg:94.82ms +step:542/1695 train_time:51392ms step_avg:94.82ms +step:543/1695 train_time:51488ms step_avg:94.82ms +step:544/1695 train_time:51585ms step_avg:94.83ms +step:545/1695 train_time:51682ms step_avg:94.83ms +step:546/1695 train_time:51779ms step_avg:94.83ms +step:547/1695 train_time:51875ms step_avg:94.84ms +step:548/1695 train_time:51972ms step_avg:94.84ms +step:549/1695 train_time:52069ms step_avg:94.84ms +step:550/1695 train_time:52166ms step_avg:94.85ms +step:551/1695 train_time:52263ms step_avg:94.85ms +step:552/1695 train_time:52360ms step_avg:94.86ms +step:553/1695 train_time:52456ms step_avg:94.86ms +step:554/1695 train_time:52552ms step_avg:94.86ms +step:555/1695 train_time:52648ms step_avg:94.86ms +step:556/1695 train_time:52746ms step_avg:94.87ms +step:557/1695 train_time:52842ms step_avg:94.87ms +step:558/1695 train_time:52939ms step_avg:94.87ms +step:559/1695 train_time:53036ms step_avg:94.88ms +step:560/1695 train_time:53132ms step_avg:94.88ms +step:561/1695 train_time:53228ms step_avg:94.88ms +step:562/1695 train_time:53326ms step_avg:94.89ms +step:563/1695 train_time:53423ms 
step_avg:94.89ms +step:564/1695 train_time:53520ms step_avg:94.89ms +step:565/1695 train_time:53616ms step_avg:94.90ms +step:566/1695 train_time:53712ms step_avg:94.90ms +step:567/1695 train_time:53809ms step_avg:94.90ms +step:568/1695 train_time:53906ms step_avg:94.91ms +step:569/1695 train_time:54003ms step_avg:94.91ms +step:570/1695 train_time:54100ms step_avg:94.91ms +step:571/1695 train_time:54196ms step_avg:94.91ms +step:572/1695 train_time:54292ms step_avg:94.92ms +step:573/1695 train_time:54389ms step_avg:94.92ms +step:574/1695 train_time:54486ms step_avg:94.92ms +step:575/1695 train_time:54584ms step_avg:94.93ms +step:576/1695 train_time:54681ms step_avg:94.93ms +step:577/1695 train_time:54778ms step_avg:94.94ms +step:578/1695 train_time:54874ms step_avg:94.94ms +step:579/1695 train_time:54972ms step_avg:94.94ms +step:580/1695 train_time:55069ms step_avg:94.95ms +step:581/1695 train_time:55166ms step_avg:94.95ms +step:582/1695 train_time:55263ms step_avg:94.95ms +step:583/1695 train_time:55361ms step_avg:94.96ms +step:584/1695 train_time:55457ms step_avg:94.96ms +step:585/1695 train_time:55553ms step_avg:94.96ms +step:586/1695 train_time:55649ms step_avg:94.96ms +step:587/1695 train_time:55747ms step_avg:94.97ms +step:588/1695 train_time:55844ms step_avg:94.97ms +step:589/1695 train_time:55941ms step_avg:94.98ms +step:590/1695 train_time:56037ms step_avg:94.98ms +step:591/1695 train_time:56133ms step_avg:94.98ms +step:592/1695 train_time:56230ms step_avg:94.98ms +step:593/1695 train_time:56328ms step_avg:94.99ms +step:594/1695 train_time:56426ms step_avg:94.99ms +step:595/1695 train_time:56523ms step_avg:95.00ms +step:596/1695 train_time:56618ms step_avg:95.00ms +step:597/1695 train_time:56714ms step_avg:95.00ms +step:598/1695 train_time:56811ms step_avg:95.00ms +step:599/1695 train_time:56908ms step_avg:95.00ms +step:600/1695 train_time:57006ms step_avg:95.01ms +step:601/1695 train_time:57103ms step_avg:95.01ms +step:602/1695 train_time:57199ms step_avg:95.02ms +step:603/1695 train_time:57295ms step_avg:95.02ms +step:604/1695 train_time:57392ms step_avg:95.02ms +step:605/1695 train_time:57489ms step_avg:95.02ms +step:606/1695 train_time:57587ms step_avg:95.03ms +step:607/1695 train_time:57684ms step_avg:95.03ms +step:608/1695 train_time:57780ms step_avg:95.03ms +step:609/1695 train_time:57877ms step_avg:95.04ms +step:610/1695 train_time:57973ms step_avg:95.04ms +step:611/1695 train_time:58070ms step_avg:95.04ms +step:612/1695 train_time:58167ms step_avg:95.04ms +step:613/1695 train_time:58264ms step_avg:95.05ms +step:614/1695 train_time:58361ms step_avg:95.05ms +step:615/1695 train_time:58457ms step_avg:95.05ms +step:616/1695 train_time:58553ms step_avg:95.05ms +step:617/1695 train_time:58649ms step_avg:95.05ms +step:618/1695 train_time:58745ms step_avg:95.06ms +step:619/1695 train_time:58843ms step_avg:95.06ms +step:620/1695 train_time:58939ms step_avg:95.06ms +step:621/1695 train_time:59035ms step_avg:95.07ms +step:622/1695 train_time:59131ms step_avg:95.07ms +step:623/1695 train_time:59228ms step_avg:95.07ms +step:624/1695 train_time:59325ms step_avg:95.07ms +step:625/1695 train_time:59423ms step_avg:95.08ms +step:625/1695 val_loss:3.6553 train_time:59518ms step_avg:95.23ms +step:626/1695 train_time:59544ms step_avg:95.12ms +step:627/1695 train_time:59627ms step_avg:95.10ms +step:628/1695 train_time:59729ms step_avg:95.11ms +step:629/1695 train_time:59826ms step_avg:95.11ms +step:630/1695 train_time:59924ms step_avg:95.12ms +step:631/1695 train_time:60021ms step_avg:95.12ms 
+step:632/1695 train_time:60119ms step_avg:95.13ms +step:633/1695 train_time:60216ms step_avg:95.13ms +step:634/1695 train_time:60313ms step_avg:95.13ms +step:635/1695 train_time:60642ms step_avg:95.50ms +step:636/1695 train_time:60738ms step_avg:95.50ms +step:637/1695 train_time:60834ms step_avg:95.50ms +step:638/1695 train_time:60931ms step_avg:95.50ms +step:639/1695 train_time:61028ms step_avg:95.51ms +step:640/1695 train_time:61125ms step_avg:95.51ms +step:641/1695 train_time:61222ms step_avg:95.51ms +step:642/1695 train_time:61318ms step_avg:95.51ms +step:643/1695 train_time:61415ms step_avg:95.51ms +step:644/1695 train_time:61512ms step_avg:95.51ms +step:645/1695 train_time:61612ms step_avg:95.52ms +step:646/1695 train_time:61711ms step_avg:95.53ms +step:647/1695 train_time:61810ms step_avg:95.53ms +step:648/1695 train_time:61907ms step_avg:95.54ms +step:649/1695 train_time:62005ms step_avg:95.54ms +step:650/1695 train_time:62103ms step_avg:95.54ms +step:651/1695 train_time:62201ms step_avg:95.55ms +step:652/1695 train_time:62298ms step_avg:95.55ms +step:653/1695 train_time:62395ms step_avg:95.55ms +step:654/1695 train_time:62493ms step_avg:95.55ms +step:655/1695 train_time:62590ms step_avg:95.56ms +step:656/1695 train_time:62689ms step_avg:95.56ms +step:657/1695 train_time:62788ms step_avg:95.57ms +step:658/1695 train_time:62886ms step_avg:95.57ms +step:659/1695 train_time:62984ms step_avg:95.57ms +step:660/1695 train_time:63081ms step_avg:95.58ms +step:661/1695 train_time:63177ms step_avg:95.58ms +step:662/1695 train_time:63274ms step_avg:95.58ms +step:663/1695 train_time:63371ms step_avg:95.58ms +step:664/1695 train_time:63469ms step_avg:95.59ms +step:665/1695 train_time:63567ms step_avg:95.59ms +step:666/1695 train_time:63666ms step_avg:95.59ms +step:667/1695 train_time:63765ms step_avg:95.60ms +step:668/1695 train_time:63864ms step_avg:95.60ms +step:669/1695 train_time:63963ms step_avg:95.61ms +step:670/1695 train_time:64061ms step_avg:95.61ms +step:671/1695 train_time:64158ms step_avg:95.62ms +step:672/1695 train_time:64255ms step_avg:95.62ms +step:673/1695 train_time:64352ms step_avg:95.62ms +step:674/1695 train_time:64449ms step_avg:95.62ms +step:675/1695 train_time:64546ms step_avg:95.62ms +step:676/1695 train_time:64644ms step_avg:95.63ms +step:677/1695 train_time:64742ms step_avg:95.63ms +step:678/1695 train_time:64841ms step_avg:95.64ms +step:679/1695 train_time:64940ms step_avg:95.64ms +step:680/1695 train_time:65039ms step_avg:95.65ms +step:681/1695 train_time:65136ms step_avg:95.65ms +step:682/1695 train_time:65233ms step_avg:95.65ms +step:683/1695 train_time:65330ms step_avg:95.65ms +step:684/1695 train_time:65429ms step_avg:95.66ms +step:685/1695 train_time:65526ms step_avg:95.66ms +step:686/1695 train_time:65625ms step_avg:95.66ms +step:687/1695 train_time:65724ms step_avg:95.67ms +step:688/1695 train_time:65822ms step_avg:95.67ms +step:689/1695 train_time:65920ms step_avg:95.68ms +step:690/1695 train_time:66018ms step_avg:95.68ms +step:691/1695 train_time:66115ms step_avg:95.68ms +step:692/1695 train_time:66213ms step_avg:95.68ms +step:693/1695 train_time:66310ms step_avg:95.69ms +step:694/1695 train_time:66408ms step_avg:95.69ms +step:695/1695 train_time:66506ms step_avg:95.69ms +step:696/1695 train_time:66892ms step_avg:96.11ms +step:697/1695 train_time:66988ms step_avg:96.11ms +step:698/1695 train_time:67085ms step_avg:96.11ms +step:699/1695 train_time:67182ms step_avg:96.11ms +step:700/1695 train_time:67280ms step_avg:96.11ms +step:701/1695 train_time:67377ms 
step_avg:96.12ms +step:702/1695 train_time:67473ms step_avg:96.12ms +step:703/1695 train_time:67570ms step_avg:96.12ms +step:704/1695 train_time:67668ms step_avg:96.12ms +step:705/1695 train_time:67765ms step_avg:96.12ms +step:706/1695 train_time:67869ms step_avg:96.13ms +step:707/1695 train_time:67968ms step_avg:96.14ms +step:708/1695 train_time:68067ms step_avg:96.14ms +step:709/1695 train_time:68165ms step_avg:96.14ms +step:710/1695 train_time:68263ms step_avg:96.14ms +step:711/1695 train_time:68361ms step_avg:96.15ms +step:712/1695 train_time:68459ms step_avg:96.15ms +step:713/1695 train_time:68557ms step_avg:96.15ms +step:714/1695 train_time:68656ms step_avg:96.16ms +step:715/1695 train_time:68753ms step_avg:96.16ms +step:716/1695 train_time:68851ms step_avg:96.16ms +step:717/1695 train_time:68950ms step_avg:96.16ms +step:718/1695 train_time:69047ms step_avg:96.17ms +step:719/1695 train_time:69389ms step_avg:96.51ms +step:720/1695 train_time:69486ms step_avg:96.51ms +step:721/1695 train_time:69583ms step_avg:96.51ms +step:722/1695 train_time:69681ms step_avg:96.51ms +step:723/1695 train_time:69777ms step_avg:96.51ms +step:724/1695 train_time:69874ms step_avg:96.51ms +step:725/1695 train_time:69971ms step_avg:96.51ms +step:726/1695 train_time:70068ms step_avg:96.51ms +step:727/1695 train_time:70164ms step_avg:96.51ms +step:728/1695 train_time:70265ms step_avg:96.52ms +step:729/1695 train_time:70367ms step_avg:96.53ms +step:730/1695 train_time:70466ms step_avg:96.53ms +step:731/1695 train_time:70564ms step_avg:96.53ms +step:732/1695 train_time:70662ms step_avg:96.53ms +step:733/1695 train_time:70760ms step_avg:96.53ms +step:734/1695 train_time:70858ms step_avg:96.54ms +step:735/1695 train_time:70956ms step_avg:96.54ms +step:736/1695 train_time:71053ms step_avg:96.54ms +step:737/1695 train_time:71149ms step_avg:96.54ms +step:738/1695 train_time:71246ms step_avg:96.54ms +step:739/1695 train_time:71344ms step_avg:96.54ms +step:740/1695 train_time:71442ms step_avg:96.54ms +step:741/1695 train_time:71540ms step_avg:96.55ms +step:742/1695 train_time:71638ms step_avg:96.55ms +step:743/1695 train_time:71736ms step_avg:96.55ms +step:744/1695 train_time:71833ms step_avg:96.55ms +step:745/1695 train_time:71930ms step_avg:96.55ms +step:746/1695 train_time:72029ms step_avg:96.55ms +step:747/1695 train_time:72127ms step_avg:96.55ms +step:748/1695 train_time:72224ms step_avg:96.56ms +step:749/1695 train_time:72322ms step_avg:96.56ms +step:750/1695 train_time:72420ms step_avg:96.56ms +step:750/1695 val_loss:3.5918 train_time:72516ms step_avg:96.69ms +step:751/1695 train_time:72543ms step_avg:96.59ms +step:752/1695 train_time:72623ms step_avg:96.57ms +step:753/1695 train_time:72722ms step_avg:96.58ms +step:754/1695 train_time:72821ms step_avg:96.58ms +step:755/1695 train_time:72918ms step_avg:96.58ms +step:756/1695 train_time:73015ms step_avg:96.58ms +step:757/1695 train_time:73113ms step_avg:96.58ms +step:758/1695 train_time:73210ms step_avg:96.58ms +step:759/1695 train_time:73308ms step_avg:96.58ms +step:760/1695 train_time:73405ms step_avg:96.59ms +step:761/1695 train_time:73503ms step_avg:96.59ms +step:762/1695 train_time:73602ms step_avg:96.59ms +step:763/1695 train_time:73701ms step_avg:96.59ms +step:764/1695 train_time:73800ms step_avg:96.60ms +step:765/1695 train_time:73898ms step_avg:96.60ms +step:766/1695 train_time:73995ms step_avg:96.60ms +step:767/1695 train_time:74093ms step_avg:96.60ms +step:768/1695 train_time:74190ms step_avg:96.60ms +step:769/1695 train_time:74288ms step_avg:96.60ms 
+step:770/1695 train_time:74386ms step_avg:96.60ms +step:771/1695 train_time:74484ms step_avg:96.61ms +step:772/1695 train_time:74867ms step_avg:96.98ms +step:773/1695 train_time:74963ms step_avg:96.98ms +step:774/1695 train_time:75060ms step_avg:96.98ms +step:775/1695 train_time:75158ms step_avg:96.98ms +step:776/1695 train_time:75255ms step_avg:96.98ms +step:777/1695 train_time:75353ms step_avg:96.98ms +step:778/1695 train_time:75683ms step_avg:97.28ms +step:779/1695 train_time:75780ms step_avg:97.28ms +step:780/1695 train_time:75877ms step_avg:97.28ms +step:781/1695 train_time:75974ms step_avg:97.28ms +step:782/1695 train_time:76072ms step_avg:97.28ms +step:783/1695 train_time:76170ms step_avg:97.28ms +step:784/1695 train_time:76268ms step_avg:97.28ms +step:785/1695 train_time:76365ms step_avg:97.28ms +step:786/1695 train_time:76462ms step_avg:97.28ms +step:787/1695 train_time:76562ms step_avg:97.28ms +step:788/1695 train_time:76663ms step_avg:97.29ms +step:789/1695 train_time:77083ms step_avg:97.70ms +step:790/1695 train_time:77133ms step_avg:97.64ms +step:791/1695 train_time:77239ms step_avg:97.65ms +step:792/1695 train_time:77336ms step_avg:97.65ms +step:793/1695 train_time:77433ms step_avg:97.65ms +step:794/1695 train_time:77530ms step_avg:97.65ms +step:795/1695 train_time:77628ms step_avg:97.65ms +step:796/1695 train_time:77726ms step_avg:97.65ms +step:797/1695 train_time:77823ms step_avg:97.65ms +step:798/1695 train_time:78155ms step_avg:97.94ms +step:799/1695 train_time:78249ms step_avg:97.93ms +step:800/1695 train_time:78346ms step_avg:97.93ms +step:801/1695 train_time:78444ms step_avg:97.93ms +step:802/1695 train_time:78541ms step_avg:97.93ms +step:803/1695 train_time:78638ms step_avg:97.93ms +step:804/1695 train_time:78735ms step_avg:97.93ms +step:805/1695 train_time:78832ms step_avg:97.93ms +step:806/1695 train_time:78930ms step_avg:97.93ms +step:807/1695 train_time:79030ms step_avg:97.93ms +step:808/1695 train_time:79132ms step_avg:97.94ms +step:809/1695 train_time:79232ms step_avg:97.94ms +step:810/1695 train_time:79331ms step_avg:97.94ms +step:811/1695 train_time:79430ms step_avg:97.94ms +step:812/1695 train_time:79529ms step_avg:97.94ms +step:813/1695 train_time:79627ms step_avg:97.94ms +step:814/1695 train_time:79724ms step_avg:97.94ms +step:815/1695 train_time:79822ms step_avg:97.94ms +step:816/1695 train_time:79918ms step_avg:97.94ms +step:817/1695 train_time:80016ms step_avg:97.94ms +step:818/1695 train_time:80114ms step_avg:97.94ms +step:819/1695 train_time:80213ms step_avg:97.94ms +step:820/1695 train_time:80311ms step_avg:97.94ms +step:821/1695 train_time:80410ms step_avg:97.94ms +step:822/1695 train_time:80509ms step_avg:97.94ms +step:823/1695 train_time:80607ms step_avg:97.94ms +step:824/1695 train_time:80705ms step_avg:97.94ms +step:825/1695 train_time:80802ms step_avg:97.94ms +step:826/1695 train_time:80900ms step_avg:97.94ms +step:827/1695 train_time:80998ms step_avg:97.94ms +step:828/1695 train_time:81096ms step_avg:97.94ms +step:829/1695 train_time:81194ms step_avg:97.94ms +step:830/1695 train_time:81292ms step_avg:97.94ms +step:831/1695 train_time:81391ms step_avg:97.94ms +step:832/1695 train_time:81490ms step_avg:97.94ms +step:833/1695 train_time:81588ms step_avg:97.94ms +step:834/1695 train_time:81686ms step_avg:97.94ms +step:835/1695 train_time:81783ms step_avg:97.94ms +step:836/1695 train_time:81881ms step_avg:97.94ms +step:837/1695 train_time:81978ms step_avg:97.94ms +step:838/1695 train_time:82076ms step_avg:97.94ms +step:839/1695 train_time:82174ms 
step_avg:97.94ms +step:840/1695 train_time:82272ms step_avg:97.94ms +step:841/1695 train_time:82370ms step_avg:97.94ms +step:842/1695 train_time:82469ms step_avg:97.94ms +step:843/1695 train_time:82567ms step_avg:97.94ms +step:844/1695 train_time:82666ms step_avg:97.95ms +step:845/1695 train_time:82763ms step_avg:97.94ms +step:846/1695 train_time:82861ms step_avg:97.94ms +step:847/1695 train_time:82959ms step_avg:97.94ms +step:848/1695 train_time:83058ms step_avg:97.95ms +step:849/1695 train_time:83156ms step_avg:97.95ms +step:850/1695 train_time:83254ms step_avg:97.95ms +step:851/1695 train_time:83352ms step_avg:97.95ms +step:852/1695 train_time:83451ms step_avg:97.95ms +step:853/1695 train_time:83549ms step_avg:97.95ms +step:854/1695 train_time:83647ms step_avg:97.95ms +step:855/1695 train_time:83745ms step_avg:97.95ms +step:856/1695 train_time:83843ms step_avg:97.95ms +step:857/1695 train_time:83941ms step_avg:97.95ms +step:858/1695 train_time:84039ms step_avg:97.95ms +step:859/1695 train_time:84138ms step_avg:97.95ms +step:860/1695 train_time:84235ms step_avg:97.95ms +step:861/1695 train_time:84334ms step_avg:97.95ms +step:862/1695 train_time:84431ms step_avg:97.95ms +step:863/1695 train_time:84529ms step_avg:97.95ms +step:864/1695 train_time:84626ms step_avg:97.95ms +step:865/1695 train_time:84725ms step_avg:97.95ms +step:866/1695 train_time:84823ms step_avg:97.95ms +step:867/1695 train_time:84922ms step_avg:97.95ms +step:868/1695 train_time:85020ms step_avg:97.95ms +step:869/1695 train_time:85119ms step_avg:97.95ms +step:870/1695 train_time:85216ms step_avg:97.95ms +step:871/1695 train_time:85315ms step_avg:97.95ms +step:872/1695 train_time:85413ms step_avg:97.95ms +step:873/1695 train_time:85511ms step_avg:97.95ms +step:874/1695 train_time:85610ms step_avg:97.95ms +step:875/1695 train_time:85709ms step_avg:97.95ms +step:875/1695 val_loss:3.5440 train_time:85805ms step_avg:98.06ms +step:876/1695 train_time:85831ms step_avg:97.98ms +step:877/1695 train_time:85916ms step_avg:97.97ms +step:878/1695 train_time:86017ms step_avg:97.97ms +step:879/1695 train_time:86116ms step_avg:97.97ms +step:880/1695 train_time:86214ms step_avg:97.97ms +step:881/1695 train_time:86313ms step_avg:97.97ms +step:882/1695 train_time:86413ms step_avg:97.97ms +step:883/1695 train_time:86512ms step_avg:97.98ms +step:884/1695 train_time:86612ms step_avg:97.98ms +step:885/1695 train_time:86711ms step_avg:97.98ms +step:886/1695 train_time:86813ms step_avg:97.98ms +step:887/1695 train_time:86915ms step_avg:97.99ms +step:888/1695 train_time:87016ms step_avg:97.99ms +step:889/1695 train_time:87116ms step_avg:97.99ms +step:890/1695 train_time:87215ms step_avg:97.99ms +step:891/1695 train_time:87314ms step_avg:98.00ms +step:892/1695 train_time:87413ms step_avg:98.00ms +step:893/1695 train_time:87512ms step_avg:98.00ms +step:894/1695 train_time:87612ms step_avg:98.00ms +step:895/1695 train_time:87711ms step_avg:98.00ms +step:896/1695 train_time:87813ms step_avg:98.01ms +step:897/1695 train_time:87915ms step_avg:98.01ms +step:898/1695 train_time:88015ms step_avg:98.01ms +step:899/1695 train_time:88115ms step_avg:98.01ms +step:900/1695 train_time:88215ms step_avg:98.02ms +step:901/1695 train_time:88314ms step_avg:98.02ms +step:902/1695 train_time:88413ms step_avg:98.02ms +step:903/1695 train_time:88512ms step_avg:98.02ms +step:904/1695 train_time:88612ms step_avg:98.02ms +step:905/1695 train_time:88712ms step_avg:98.02ms +step:906/1695 train_time:88812ms step_avg:98.03ms +step:907/1695 train_time:88913ms step_avg:98.03ms 
+step:908/1695 train_time:89016ms step_avg:98.03ms +step:909/1695 train_time:89116ms step_avg:98.04ms +step:910/1695 train_time:89215ms step_avg:98.04ms +step:911/1695 train_time:89315ms step_avg:98.04ms +step:912/1695 train_time:89414ms step_avg:98.04ms +step:913/1695 train_time:89513ms step_avg:98.04ms +step:914/1695 train_time:89613ms step_avg:98.04ms +step:915/1695 train_time:89712ms step_avg:98.05ms +step:916/1695 train_time:89812ms step_avg:98.05ms +step:917/1695 train_time:89913ms step_avg:98.05ms +step:918/1695 train_time:90014ms step_avg:98.05ms +step:919/1695 train_time:90114ms step_avg:98.06ms +step:920/1695 train_time:90215ms step_avg:98.06ms +step:921/1695 train_time:90314ms step_avg:98.06ms +step:922/1695 train_time:90414ms step_avg:98.06ms +step:923/1695 train_time:90514ms step_avg:98.06ms +step:924/1695 train_time:90613ms step_avg:98.07ms +step:925/1695 train_time:90713ms step_avg:98.07ms +step:926/1695 train_time:90813ms step_avg:98.07ms +step:927/1695 train_time:90914ms step_avg:98.07ms +step:928/1695 train_time:91015ms step_avg:98.08ms +step:929/1695 train_time:91115ms step_avg:98.08ms +step:930/1695 train_time:91214ms step_avg:98.08ms +step:931/1695 train_time:91315ms step_avg:98.08ms +step:932/1695 train_time:91415ms step_avg:98.08ms +step:933/1695 train_time:91515ms step_avg:98.09ms +step:934/1695 train_time:91614ms step_avg:98.09ms +step:935/1695 train_time:91713ms step_avg:98.09ms +step:936/1695 train_time:91813ms step_avg:98.09ms +step:937/1695 train_time:91914ms step_avg:98.09ms +step:938/1695 train_time:92014ms step_avg:98.10ms +step:939/1695 train_time:92116ms step_avg:98.10ms +step:940/1695 train_time:92215ms step_avg:98.10ms +step:941/1695 train_time:92314ms step_avg:98.10ms +step:942/1695 train_time:92414ms step_avg:98.10ms +step:943/1695 train_time:92514ms step_avg:98.11ms +step:944/1695 train_time:92613ms step_avg:98.11ms +step:945/1695 train_time:92715ms step_avg:98.11ms +step:946/1695 train_time:92814ms step_avg:98.11ms +step:947/1695 train_time:92913ms step_avg:98.11ms +step:948/1695 train_time:93013ms step_avg:98.11ms +step:949/1695 train_time:93113ms step_avg:98.12ms +step:950/1695 train_time:93213ms step_avg:98.12ms +step:951/1695 train_time:93312ms step_avg:98.12ms +step:952/1695 train_time:93413ms step_avg:98.12ms +step:953/1695 train_time:93513ms step_avg:98.12ms +step:954/1695 train_time:93613ms step_avg:98.13ms +step:955/1695 train_time:93712ms step_avg:98.13ms +step:956/1695 train_time:93812ms step_avg:98.13ms +step:957/1695 train_time:93912ms step_avg:98.13ms +step:958/1695 train_time:94012ms step_avg:98.13ms +step:959/1695 train_time:94112ms step_avg:98.14ms +step:960/1695 train_time:94213ms step_avg:98.14ms +step:961/1695 train_time:94313ms step_avg:98.14ms +step:962/1695 train_time:94413ms step_avg:98.14ms +step:963/1695 train_time:94513ms step_avg:98.14ms +step:964/1695 train_time:94613ms step_avg:98.15ms +step:965/1695 train_time:94713ms step_avg:98.15ms +step:966/1695 train_time:94814ms step_avg:98.15ms +step:967/1695 train_time:94914ms step_avg:98.15ms +step:968/1695 train_time:95014ms step_avg:98.15ms +step:969/1695 train_time:95114ms step_avg:98.16ms +step:970/1695 train_time:95214ms step_avg:98.16ms +step:971/1695 train_time:95313ms step_avg:98.16ms +step:972/1695 train_time:95413ms step_avg:98.16ms +step:973/1695 train_time:95513ms step_avg:98.16ms +step:974/1695 train_time:95613ms step_avg:98.17ms +step:975/1695 train_time:95713ms step_avg:98.17ms +step:976/1695 train_time:95813ms step_avg:98.17ms +step:977/1695 train_time:95913ms 
step_avg:98.17ms +step:978/1695 train_time:96014ms step_avg:98.17ms +step:979/1695 train_time:96115ms step_avg:98.18ms +step:980/1695 train_time:96214ms step_avg:98.18ms +step:981/1695 train_time:96314ms step_avg:98.18ms +step:982/1695 train_time:96415ms step_avg:98.18ms +step:983/1695 train_time:96514ms step_avg:98.18ms +step:984/1695 train_time:96614ms step_avg:98.19ms +step:985/1695 train_time:96713ms step_avg:98.19ms +step:986/1695 train_time:96813ms step_avg:98.19ms +step:987/1695 train_time:96914ms step_avg:98.19ms +step:988/1695 train_time:97014ms step_avg:98.19ms +step:989/1695 train_time:97114ms step_avg:98.19ms +step:990/1695 train_time:97214ms step_avg:98.20ms +step:991/1695 train_time:97314ms step_avg:98.20ms +step:992/1695 train_time:97414ms step_avg:98.20ms +step:993/1695 train_time:97515ms step_avg:98.20ms +step:994/1695 train_time:97613ms step_avg:98.20ms +step:995/1695 train_time:97713ms step_avg:98.20ms +step:996/1695 train_time:97814ms step_avg:98.21ms +step:997/1695 train_time:97914ms step_avg:98.21ms +step:998/1695 train_time:98014ms step_avg:98.21ms +step:999/1695 train_time:98114ms step_avg:98.21ms +step:1000/1695 train_time:98215ms step_avg:98.21ms +step:1000/1695 val_loss:3.4977 train_time:98313ms step_avg:98.31ms +step:1001/1695 train_time:98340ms step_avg:98.24ms +step:1002/1695 train_time:98426ms step_avg:98.23ms +step:1003/1695 train_time:98528ms step_avg:98.23ms +step:1004/1695 train_time:98628ms step_avg:98.24ms +step:1005/1695 train_time:98728ms step_avg:98.24ms +step:1006/1695 train_time:98827ms step_avg:98.24ms +step:1007/1695 train_time:98928ms step_avg:98.24ms +step:1008/1695 train_time:99027ms step_avg:98.24ms +step:1009/1695 train_time:99126ms step_avg:98.24ms +step:1010/1695 train_time:99225ms step_avg:98.24ms +step:1011/1695 train_time:99328ms step_avg:98.25ms +step:1012/1695 train_time:99430ms step_avg:98.25ms +step:1013/1695 train_time:99531ms step_avg:98.25ms +step:1014/1695 train_time:99631ms step_avg:98.26ms +step:1015/1695 train_time:99731ms step_avg:98.26ms +step:1016/1695 train_time:99830ms step_avg:98.26ms +step:1017/1695 train_time:99929ms step_avg:98.26ms +step:1018/1695 train_time:100028ms step_avg:98.26ms +step:1019/1695 train_time:100126ms step_avg:98.26ms +step:1020/1695 train_time:100226ms step_avg:98.26ms +step:1021/1695 train_time:100329ms step_avg:98.27ms +step:1022/1695 train_time:100430ms step_avg:98.27ms +step:1023/1695 train_time:100530ms step_avg:98.27ms +step:1024/1695 train_time:100631ms step_avg:98.27ms +step:1025/1695 train_time:100731ms step_avg:98.27ms +step:1026/1695 train_time:100830ms step_avg:98.28ms +step:1027/1695 train_time:100929ms step_avg:98.28ms +step:1028/1695 train_time:101029ms step_avg:98.28ms +step:1029/1695 train_time:101129ms step_avg:98.28ms +step:1030/1695 train_time:101228ms step_avg:98.28ms +step:1031/1695 train_time:101329ms step_avg:98.28ms +step:1032/1695 train_time:101429ms step_avg:98.28ms +step:1033/1695 train_time:101529ms step_avg:98.29ms +step:1034/1695 train_time:101630ms step_avg:98.29ms +step:1035/1695 train_time:101730ms step_avg:98.29ms +step:1036/1695 train_time:101830ms step_avg:98.29ms +step:1037/1695 train_time:101930ms step_avg:98.29ms +step:1038/1695 train_time:102029ms step_avg:98.29ms +step:1039/1695 train_time:102128ms step_avg:98.29ms +step:1040/1695 train_time:102228ms step_avg:98.30ms +step:1041/1695 train_time:102328ms step_avg:98.30ms +step:1042/1695 train_time:102427ms step_avg:98.30ms +step:1043/1695 train_time:102528ms step_avg:98.30ms +step:1044/1695 
train_time:102628ms step_avg:98.30ms +step:1045/1695 train_time:102728ms step_avg:98.30ms +step:1046/1695 train_time:102829ms step_avg:98.31ms +step:1047/1695 train_time:102929ms step_avg:98.31ms +step:1048/1695 train_time:103028ms step_avg:98.31ms +step:1049/1695 train_time:103127ms step_avg:98.31ms +step:1050/1695 train_time:103227ms step_avg:98.31ms +step:1051/1695 train_time:103328ms step_avg:98.31ms +step:1052/1695 train_time:103428ms step_avg:98.32ms +step:1053/1695 train_time:103528ms step_avg:98.32ms +step:1054/1695 train_time:103628ms step_avg:98.32ms +step:1055/1695 train_time:103728ms step_avg:98.32ms +step:1056/1695 train_time:103828ms step_avg:98.32ms +step:1057/1695 train_time:103928ms step_avg:98.32ms +step:1058/1695 train_time:104028ms step_avg:98.33ms +step:1059/1695 train_time:104127ms step_avg:98.33ms +step:1060/1695 train_time:104227ms step_avg:98.33ms +step:1061/1695 train_time:104327ms step_avg:98.33ms +step:1062/1695 train_time:104427ms step_avg:98.33ms +step:1063/1695 train_time:104528ms step_avg:98.33ms +step:1064/1695 train_time:104628ms step_avg:98.33ms +step:1065/1695 train_time:104728ms step_avg:98.34ms +step:1066/1695 train_time:104828ms step_avg:98.34ms +step:1067/1695 train_time:104928ms step_avg:98.34ms +step:1068/1695 train_time:105028ms step_avg:98.34ms +step:1069/1695 train_time:105128ms step_avg:98.34ms +step:1070/1695 train_time:105228ms step_avg:98.34ms +step:1071/1695 train_time:105328ms step_avg:98.35ms +step:1072/1695 train_time:105428ms step_avg:98.35ms +step:1073/1695 train_time:105527ms step_avg:98.35ms +step:1074/1695 train_time:105627ms step_avg:98.35ms +step:1075/1695 train_time:105728ms step_avg:98.35ms +step:1076/1695 train_time:105827ms step_avg:98.35ms +step:1077/1695 train_time:105928ms step_avg:98.36ms +step:1078/1695 train_time:106028ms step_avg:98.36ms +step:1079/1695 train_time:106128ms step_avg:98.36ms +step:1080/1695 train_time:106228ms step_avg:98.36ms +step:1081/1695 train_time:106328ms step_avg:98.36ms +step:1082/1695 train_time:106428ms step_avg:98.36ms +step:1083/1695 train_time:106527ms step_avg:98.36ms +step:1084/1695 train_time:106628ms step_avg:98.36ms +step:1085/1695 train_time:106728ms step_avg:98.37ms +step:1086/1695 train_time:106828ms step_avg:98.37ms +step:1087/1695 train_time:106928ms step_avg:98.37ms +step:1088/1695 train_time:107028ms step_avg:98.37ms +step:1089/1695 train_time:107128ms step_avg:98.37ms +step:1090/1695 train_time:107228ms step_avg:98.37ms +step:1091/1695 train_time:107328ms step_avg:98.38ms +step:1092/1695 train_time:107428ms step_avg:98.38ms +step:1093/1695 train_time:107528ms step_avg:98.38ms +step:1094/1695 train_time:107629ms step_avg:98.38ms +step:1095/1695 train_time:107727ms step_avg:98.38ms +step:1096/1695 train_time:107827ms step_avg:98.38ms +step:1097/1695 train_time:107927ms step_avg:98.38ms +step:1098/1695 train_time:108027ms step_avg:98.39ms +step:1099/1695 train_time:108127ms step_avg:98.39ms +step:1100/1695 train_time:108227ms step_avg:98.39ms +step:1101/1695 train_time:108326ms step_avg:98.39ms +step:1102/1695 train_time:108427ms step_avg:98.39ms +step:1103/1695 train_time:108526ms step_avg:98.39ms +step:1104/1695 train_time:108627ms step_avg:98.39ms +step:1105/1695 train_time:108726ms step_avg:98.39ms +step:1106/1695 train_time:108827ms step_avg:98.40ms +step:1107/1695 train_time:108927ms step_avg:98.40ms +step:1108/1695 train_time:109027ms step_avg:98.40ms +step:1109/1695 train_time:109127ms step_avg:98.40ms +step:1110/1695 train_time:109227ms step_avg:98.40ms +step:1111/1695 
train_time:109328ms step_avg:98.40ms +step:1112/1695 train_time:109428ms step_avg:98.41ms +step:1113/1695 train_time:109528ms step_avg:98.41ms +step:1114/1695 train_time:109628ms step_avg:98.41ms +step:1115/1695 train_time:109728ms step_avg:98.41ms +step:1116/1695 train_time:109827ms step_avg:98.41ms +step:1117/1695 train_time:109928ms step_avg:98.41ms +step:1118/1695 train_time:110028ms step_avg:98.41ms +step:1119/1695 train_time:110127ms step_avg:98.42ms +step:1120/1695 train_time:110228ms step_avg:98.42ms +step:1121/1695 train_time:110327ms step_avg:98.42ms +step:1122/1695 train_time:110428ms step_avg:98.42ms +step:1123/1695 train_time:110528ms step_avg:98.42ms +step:1124/1695 train_time:110628ms step_avg:98.42ms +step:1125/1695 train_time:110729ms step_avg:98.43ms +step:1125/1695 val_loss:3.4459 train_time:110827ms step_avg:98.51ms +step:1126/1695 train_time:110853ms step_avg:98.45ms +step:1127/1695 train_time:110940ms step_avg:98.44ms +step:1128/1695 train_time:111042ms step_avg:98.44ms +step:1129/1695 train_time:111143ms step_avg:98.44ms +step:1130/1695 train_time:111243ms step_avg:98.44ms +step:1131/1695 train_time:111342ms step_avg:98.45ms +step:1132/1695 train_time:111442ms step_avg:98.45ms +step:1133/1695 train_time:111543ms step_avg:98.45ms +step:1134/1695 train_time:111642ms step_avg:98.45ms +step:1135/1695 train_time:111741ms step_avg:98.45ms +step:1136/1695 train_time:111844ms step_avg:98.45ms +step:1137/1695 train_time:111947ms step_avg:98.46ms +step:1138/1695 train_time:112048ms step_avg:98.46ms +step:1139/1695 train_time:112148ms step_avg:98.46ms +step:1140/1695 train_time:112248ms step_avg:98.46ms +step:1141/1695 train_time:112347ms step_avg:98.46ms +step:1142/1695 train_time:112447ms step_avg:98.46ms +step:1143/1695 train_time:112547ms step_avg:98.47ms +step:1144/1695 train_time:112648ms step_avg:98.47ms +step:1145/1695 train_time:112749ms step_avg:98.47ms +step:1146/1695 train_time:112849ms step_avg:98.47ms +step:1147/1695 train_time:112949ms step_avg:98.47ms +step:1148/1695 train_time:113050ms step_avg:98.48ms +step:1149/1695 train_time:113151ms step_avg:98.48ms +step:1150/1695 train_time:113252ms step_avg:98.48ms +step:1151/1695 train_time:113352ms step_avg:98.48ms +step:1152/1695 train_time:113453ms step_avg:98.48ms +step:1153/1695 train_time:113553ms step_avg:98.48ms +step:1154/1695 train_time:113655ms step_avg:98.49ms +step:1155/1695 train_time:113756ms step_avg:98.49ms +step:1156/1695 train_time:113857ms step_avg:98.49ms +step:1157/1695 train_time:113958ms step_avg:98.49ms +step:1158/1695 train_time:114059ms step_avg:98.50ms +step:1159/1695 train_time:114161ms step_avg:98.50ms +step:1160/1695 train_time:114263ms step_avg:98.50ms +step:1161/1695 train_time:114364ms step_avg:98.50ms +step:1162/1695 train_time:114464ms step_avg:98.51ms +step:1163/1695 train_time:114567ms step_avg:98.51ms +step:1164/1695 train_time:114668ms step_avg:98.51ms +step:1165/1695 train_time:114768ms step_avg:98.51ms +step:1166/1695 train_time:114868ms step_avg:98.51ms +step:1167/1695 train_time:114967ms step_avg:98.52ms +step:1168/1695 train_time:115068ms step_avg:98.52ms +step:1169/1695 train_time:115167ms step_avg:98.52ms +step:1170/1695 train_time:115268ms step_avg:98.52ms +step:1171/1695 train_time:115367ms step_avg:98.52ms +step:1172/1695 train_time:115469ms step_avg:98.52ms +step:1173/1695 train_time:115569ms step_avg:98.52ms +step:1174/1695 train_time:115670ms step_avg:98.53ms +step:1175/1695 train_time:115769ms step_avg:98.53ms +step:1176/1695 train_time:115869ms step_avg:98.53ms 
+step:1177/1695 train_time:115970ms step_avg:98.53ms +step:1178/1695 train_time:116069ms step_avg:98.53ms +step:1179/1695 train_time:116172ms step_avg:98.53ms +step:1180/1695 train_time:116273ms step_avg:98.54ms +step:1181/1695 train_time:116375ms step_avg:98.54ms +step:1182/1695 train_time:116475ms step_avg:98.54ms +step:1183/1695 train_time:116577ms step_avg:98.54ms +step:1184/1695 train_time:116679ms step_avg:98.55ms +step:1185/1695 train_time:116780ms step_avg:98.55ms +step:1186/1695 train_time:116881ms step_avg:98.55ms +step:1187/1695 train_time:116982ms step_avg:98.55ms +step:1188/1695 train_time:117084ms step_avg:98.56ms +step:1189/1695 train_time:117184ms step_avg:98.56ms +step:1190/1695 train_time:117284ms step_avg:98.56ms +step:1191/1695 train_time:117385ms step_avg:98.56ms +step:1192/1695 train_time:117486ms step_avg:98.56ms +step:1193/1695 train_time:117587ms step_avg:98.56ms +step:1194/1695 train_time:117688ms step_avg:98.57ms +step:1195/1695 train_time:117788ms step_avg:98.57ms +step:1196/1695 train_time:117889ms step_avg:98.57ms +step:1197/1695 train_time:117989ms step_avg:98.57ms +step:1198/1695 train_time:118089ms step_avg:98.57ms +step:1199/1695 train_time:118190ms step_avg:98.57ms +step:1200/1695 train_time:118289ms step_avg:98.57ms +step:1201/1695 train_time:118389ms step_avg:98.58ms +step:1202/1695 train_time:118491ms step_avg:98.58ms +step:1203/1695 train_time:118593ms step_avg:98.58ms +step:1204/1695 train_time:118694ms step_avg:98.58ms +step:1205/1695 train_time:118795ms step_avg:98.59ms +step:1206/1695 train_time:118897ms step_avg:98.59ms +step:1207/1695 train_time:118999ms step_avg:98.59ms +step:1208/1695 train_time:119100ms step_avg:98.59ms +step:1209/1695 train_time:119201ms step_avg:98.59ms +step:1210/1695 train_time:119302ms step_avg:98.60ms +step:1211/1695 train_time:119403ms step_avg:98.60ms +step:1212/1695 train_time:119503ms step_avg:98.60ms +step:1213/1695 train_time:119604ms step_avg:98.60ms +step:1214/1695 train_time:119705ms step_avg:98.60ms +step:1215/1695 train_time:119806ms step_avg:98.61ms +step:1216/1695 train_time:119907ms step_avg:98.61ms +step:1217/1695 train_time:120007ms step_avg:98.61ms +step:1218/1695 train_time:120108ms step_avg:98.61ms +step:1219/1695 train_time:120208ms step_avg:98.61ms +step:1220/1695 train_time:120309ms step_avg:98.61ms +step:1221/1695 train_time:120409ms step_avg:98.61ms +step:1222/1695 train_time:120508ms step_avg:98.62ms +step:1223/1695 train_time:120609ms step_avg:98.62ms +step:1224/1695 train_time:120709ms step_avg:98.62ms +step:1225/1695 train_time:120811ms step_avg:98.62ms +step:1226/1695 train_time:120912ms step_avg:98.62ms +step:1227/1695 train_time:121013ms step_avg:98.63ms +step:1228/1695 train_time:121113ms step_avg:98.63ms +step:1229/1695 train_time:121214ms step_avg:98.63ms +step:1230/1695 train_time:121315ms step_avg:98.63ms +step:1231/1695 train_time:121416ms step_avg:98.63ms +step:1232/1695 train_time:121518ms step_avg:98.63ms +step:1233/1695 train_time:121619ms step_avg:98.64ms +step:1234/1695 train_time:121723ms step_avg:98.64ms +step:1235/1695 train_time:121823ms step_avg:98.64ms +step:1236/1695 train_time:121925ms step_avg:98.64ms +step:1237/1695 train_time:122026ms step_avg:98.65ms +step:1238/1695 train_time:122127ms step_avg:98.65ms +step:1239/1695 train_time:122227ms step_avg:98.65ms +step:1240/1695 train_time:122327ms step_avg:98.65ms +step:1241/1695 train_time:122430ms step_avg:98.65ms +step:1242/1695 train_time:122530ms step_avg:98.66ms +step:1243/1695 train_time:122629ms step_avg:98.66ms 
+step:1244/1695 train_time:122729ms step_avg:98.66ms +step:1245/1695 train_time:122829ms step_avg:98.66ms +step:1246/1695 train_time:122931ms step_avg:98.66ms +step:1247/1695 train_time:123031ms step_avg:98.66ms +step:1248/1695 train_time:123133ms step_avg:98.66ms +step:1249/1695 train_time:123235ms step_avg:98.67ms +step:1250/1695 train_time:123336ms step_avg:98.67ms +step:1250/1695 val_loss:3.3987 train_time:123435ms step_avg:98.75ms +step:1251/1695 train_time:123462ms step_avg:98.69ms +step:1252/1695 train_time:123549ms step_avg:98.68ms +step:1253/1695 train_time:123649ms step_avg:98.68ms +step:1254/1695 train_time:123749ms step_avg:98.68ms +step:1255/1695 train_time:123849ms step_avg:98.68ms +step:1256/1695 train_time:123948ms step_avg:98.68ms +step:1257/1695 train_time:124048ms step_avg:98.69ms +step:1258/1695 train_time:124148ms step_avg:98.69ms +step:1259/1695 train_time:124247ms step_avg:98.69ms +step:1260/1695 train_time:124347ms step_avg:98.69ms +step:1261/1695 train_time:124449ms step_avg:98.69ms +step:1262/1695 train_time:124552ms step_avg:98.69ms +step:1263/1695 train_time:124652ms step_avg:98.70ms +step:1264/1695 train_time:124753ms step_avg:98.70ms +step:1265/1695 train_time:124853ms step_avg:98.70ms +step:1266/1695 train_time:124953ms step_avg:98.70ms +step:1267/1695 train_time:125054ms step_avg:98.70ms +step:1268/1695 train_time:125153ms step_avg:98.70ms +step:1269/1695 train_time:125254ms step_avg:98.70ms +step:1270/1695 train_time:125356ms step_avg:98.71ms +step:1271/1695 train_time:125458ms step_avg:98.71ms +step:1272/1695 train_time:125559ms step_avg:98.71ms +step:1273/1695 train_time:125661ms step_avg:98.71ms +step:1274/1695 train_time:125761ms step_avg:98.71ms +step:1275/1695 train_time:125861ms step_avg:98.71ms +step:1276/1695 train_time:125964ms step_avg:98.72ms +step:1277/1695 train_time:126065ms step_avg:98.72ms +step:1278/1695 train_time:126165ms step_avg:98.72ms +step:1279/1695 train_time:126267ms step_avg:98.72ms +step:1280/1695 train_time:126368ms step_avg:98.72ms +step:1281/1695 train_time:126468ms step_avg:98.73ms +step:1282/1695 train_time:126568ms step_avg:98.73ms +step:1283/1695 train_time:126668ms step_avg:98.73ms +step:1284/1695 train_time:126768ms step_avg:98.73ms +step:1285/1695 train_time:126868ms step_avg:98.73ms +step:1286/1695 train_time:126968ms step_avg:98.73ms +step:1287/1695 train_time:127068ms step_avg:98.73ms +step:1288/1695 train_time:127168ms step_avg:98.73ms +step:1289/1695 train_time:127268ms step_avg:98.73ms +step:1290/1695 train_time:127368ms step_avg:98.73ms +step:1291/1695 train_time:127468ms step_avg:98.74ms +step:1292/1695 train_time:127568ms step_avg:98.74ms +step:1293/1695 train_time:127669ms step_avg:98.74ms +step:1294/1695 train_time:127770ms step_avg:98.74ms +step:1295/1695 train_time:127871ms step_avg:98.74ms +step:1296/1695 train_time:127971ms step_avg:98.74ms +step:1297/1695 train_time:128073ms step_avg:98.75ms +step:1298/1695 train_time:128174ms step_avg:98.75ms +step:1299/1695 train_time:128274ms step_avg:98.75ms +step:1300/1695 train_time:128375ms step_avg:98.75ms +step:1301/1695 train_time:128477ms step_avg:98.75ms +step:1302/1695 train_time:128578ms step_avg:98.75ms +step:1303/1695 train_time:128681ms step_avg:98.76ms +step:1304/1695 train_time:128783ms step_avg:98.76ms +step:1305/1695 train_time:128885ms step_avg:98.76ms +step:1306/1695 train_time:128986ms step_avg:98.76ms +step:1307/1695 train_time:129087ms step_avg:98.77ms +step:1308/1695 train_time:129187ms step_avg:98.77ms +step:1309/1695 train_time:129287ms 
step_avg:98.77ms +step:1310/1695 train_time:129387ms step_avg:98.77ms +step:1311/1695 train_time:129488ms step_avg:98.77ms +step:1312/1695 train_time:129587ms step_avg:98.77ms +step:1313/1695 train_time:129688ms step_avg:98.77ms +step:1314/1695 train_time:129789ms step_avg:98.77ms +step:1315/1695 train_time:129890ms step_avg:98.78ms +step:1316/1695 train_time:129991ms step_avg:98.78ms +step:1317/1695 train_time:130092ms step_avg:98.78ms +step:1318/1695 train_time:130193ms step_avg:98.78ms +step:1319/1695 train_time:130295ms step_avg:98.78ms +step:1320/1695 train_time:130394ms step_avg:98.78ms +step:1321/1695 train_time:130496ms step_avg:98.79ms +step:1322/1695 train_time:130599ms step_avg:98.79ms +step:1323/1695 train_time:130699ms step_avg:98.79ms +step:1324/1695 train_time:130801ms step_avg:98.79ms +step:1325/1695 train_time:130902ms step_avg:98.79ms +step:1326/1695 train_time:131004ms step_avg:98.80ms +step:1327/1695 train_time:131105ms step_avg:98.80ms +step:1328/1695 train_time:131205ms step_avg:98.80ms +step:1329/1695 train_time:131306ms step_avg:98.80ms +step:1330/1695 train_time:131407ms step_avg:98.80ms +step:1331/1695 train_time:131508ms step_avg:98.80ms +step:1332/1695 train_time:131609ms step_avg:98.81ms +step:1333/1695 train_time:131710ms step_avg:98.81ms +step:1334/1695 train_time:131810ms step_avg:98.81ms +step:1335/1695 train_time:131910ms step_avg:98.81ms +step:1336/1695 train_time:132011ms step_avg:98.81ms +step:1337/1695 train_time:132113ms step_avg:98.81ms +step:1338/1695 train_time:132214ms step_avg:98.81ms +step:1339/1695 train_time:132315ms step_avg:98.82ms +step:1340/1695 train_time:132417ms step_avg:98.82ms +step:1341/1695 train_time:132518ms step_avg:98.82ms +step:1342/1695 train_time:132621ms step_avg:98.82ms +step:1343/1695 train_time:132723ms step_avg:98.83ms +step:1344/1695 train_time:132823ms step_avg:98.83ms +step:1345/1695 train_time:132923ms step_avg:98.83ms +step:1346/1695 train_time:133024ms step_avg:98.83ms +step:1347/1695 train_time:133125ms step_avg:98.83ms +step:1348/1695 train_time:133225ms step_avg:98.83ms +step:1349/1695 train_time:133326ms step_avg:98.83ms +step:1350/1695 train_time:133427ms step_avg:98.84ms +step:1351/1695 train_time:133529ms step_avg:98.84ms +step:1352/1695 train_time:133629ms step_avg:98.84ms +step:1353/1695 train_time:133729ms step_avg:98.84ms +step:1354/1695 train_time:133828ms step_avg:98.84ms +step:1355/1695 train_time:133928ms step_avg:98.84ms +step:1356/1695 train_time:134028ms step_avg:98.84ms +step:1357/1695 train_time:134128ms step_avg:98.84ms +step:1358/1695 train_time:134228ms step_avg:98.84ms +step:1359/1695 train_time:134328ms step_avg:98.84ms +step:1360/1695 train_time:134429ms step_avg:98.84ms +step:1361/1695 train_time:134529ms step_avg:98.85ms +step:1362/1695 train_time:134629ms step_avg:98.85ms +step:1363/1695 train_time:134731ms step_avg:98.85ms +step:1364/1695 train_time:134831ms step_avg:98.85ms +step:1365/1695 train_time:134933ms step_avg:98.85ms +step:1366/1695 train_time:135035ms step_avg:98.85ms +step:1367/1695 train_time:135135ms step_avg:98.86ms +step:1368/1695 train_time:135237ms step_avg:98.86ms +step:1369/1695 train_time:135338ms step_avg:98.86ms +step:1370/1695 train_time:135439ms step_avg:98.86ms +step:1371/1695 train_time:135540ms step_avg:98.86ms +step:1372/1695 train_time:135641ms step_avg:98.86ms +step:1373/1695 train_time:135743ms step_avg:98.87ms +step:1374/1695 train_time:135844ms step_avg:98.87ms +step:1375/1695 train_time:135945ms step_avg:98.87ms +step:1375/1695 val_loss:3.3591 
train_time:136044ms step_avg:98.94ms +step:1376/1695 train_time:136071ms step_avg:98.89ms +step:1377/1695 train_time:136154ms step_avg:98.88ms +step:1378/1695 train_time:136255ms step_avg:98.88ms +step:1379/1695 train_time:136357ms step_avg:98.88ms +step:1380/1695 train_time:136459ms step_avg:98.88ms +step:1381/1695 train_time:136559ms step_avg:98.88ms +step:1382/1695 train_time:136660ms step_avg:98.89ms +step:1383/1695 train_time:136760ms step_avg:98.89ms +step:1384/1695 train_time:136861ms step_avg:98.89ms +step:1385/1695 train_time:136962ms step_avg:98.89ms +step:1386/1695 train_time:137067ms step_avg:98.89ms +step:1387/1695 train_time:137169ms step_avg:98.90ms +step:1388/1695 train_time:137271ms step_avg:98.90ms +step:1389/1695 train_time:137372ms step_avg:98.90ms +step:1390/1695 train_time:137473ms step_avg:98.90ms +step:1391/1695 train_time:137574ms step_avg:98.90ms +step:1392/1695 train_time:137675ms step_avg:98.90ms +step:1393/1695 train_time:137775ms step_avg:98.91ms +step:1394/1695 train_time:137877ms step_avg:98.91ms +step:1395/1695 train_time:137979ms step_avg:98.91ms +step:1396/1695 train_time:138082ms step_avg:98.91ms +step:1397/1695 train_time:138186ms step_avg:98.92ms +step:1398/1695 train_time:138289ms step_avg:98.92ms +step:1399/1695 train_time:138391ms step_avg:98.92ms +step:1400/1695 train_time:138493ms step_avg:98.92ms +step:1401/1695 train_time:138593ms step_avg:98.92ms +step:1402/1695 train_time:138695ms step_avg:98.93ms +step:1403/1695 train_time:138797ms step_avg:98.93ms +step:1404/1695 train_time:138898ms step_avg:98.93ms +step:1405/1695 train_time:138999ms step_avg:98.93ms +step:1406/1695 train_time:139102ms step_avg:98.93ms +step:1407/1695 train_time:139204ms step_avg:98.94ms +step:1408/1695 train_time:139306ms step_avg:98.94ms +step:1409/1695 train_time:139411ms step_avg:98.94ms +step:1410/1695 train_time:139512ms step_avg:98.94ms +step:1411/1695 train_time:139613ms step_avg:98.95ms +step:1412/1695 train_time:139716ms step_avg:98.95ms +step:1413/1695 train_time:139816ms step_avg:98.95ms +step:1414/1695 train_time:139918ms step_avg:98.95ms +step:1415/1695 train_time:140020ms step_avg:98.95ms +step:1416/1695 train_time:140121ms step_avg:98.96ms +step:1417/1695 train_time:140223ms step_avg:98.96ms +step:1418/1695 train_time:140325ms step_avg:98.96ms +step:1419/1695 train_time:140428ms step_avg:98.96ms +step:1420/1695 train_time:140530ms step_avg:98.96ms +step:1421/1695 train_time:140632ms step_avg:98.97ms +step:1422/1695 train_time:140733ms step_avg:98.97ms +step:1423/1695 train_time:140833ms step_avg:98.97ms +step:1424/1695 train_time:140935ms step_avg:98.97ms +step:1425/1695 train_time:141036ms step_avg:98.97ms +step:1426/1695 train_time:141138ms step_avg:98.97ms +step:1427/1695 train_time:141241ms step_avg:98.98ms +step:1428/1695 train_time:141343ms step_avg:98.98ms +step:1429/1695 train_time:141446ms step_avg:98.98ms +step:1430/1695 train_time:141548ms step_avg:98.98ms +step:1431/1695 train_time:141649ms step_avg:98.99ms +step:1432/1695 train_time:141750ms step_avg:98.99ms +step:1433/1695 train_time:141851ms step_avg:98.99ms +step:1434/1695 train_time:141951ms step_avg:98.99ms +step:1435/1695 train_time:142053ms step_avg:98.99ms +step:1436/1695 train_time:142156ms step_avg:98.99ms +step:1437/1695 train_time:142257ms step_avg:99.00ms +step:1438/1695 train_time:142360ms step_avg:99.00ms +step:1439/1695 train_time:142463ms step_avg:99.00ms +step:1440/1695 train_time:142565ms step_avg:99.00ms +step:1441/1695 train_time:142667ms step_avg:99.01ms +step:1442/1695 
train_time:142768ms step_avg:99.01ms +step:1443/1695 train_time:142869ms step_avg:99.01ms +step:1444/1695 train_time:142970ms step_avg:99.01ms +step:1445/1695 train_time:143072ms step_avg:99.01ms +step:1446/1695 train_time:143172ms step_avg:99.01ms +step:1447/1695 train_time:143275ms step_avg:99.02ms +step:1448/1695 train_time:143377ms step_avg:99.02ms +step:1449/1695 train_time:143479ms step_avg:99.02ms +step:1450/1695 train_time:143582ms step_avg:99.02ms +step:1451/1695 train_time:143682ms step_avg:99.02ms +step:1452/1695 train_time:143785ms step_avg:99.03ms +step:1453/1695 train_time:143888ms step_avg:99.03ms +step:1454/1695 train_time:143990ms step_avg:99.03ms +step:1455/1695 train_time:144091ms step_avg:99.03ms +step:1456/1695 train_time:144192ms step_avg:99.03ms +step:1457/1695 train_time:144293ms step_avg:99.03ms +step:1458/1695 train_time:144395ms step_avg:99.04ms +step:1459/1695 train_time:144496ms step_avg:99.04ms +step:1460/1695 train_time:144597ms step_avg:99.04ms +step:1461/1695 train_time:144699ms step_avg:99.04ms +step:1462/1695 train_time:144801ms step_avg:99.04ms +step:1463/1695 train_time:144904ms step_avg:99.05ms +step:1464/1695 train_time:145007ms step_avg:99.05ms +step:1465/1695 train_time:145109ms step_avg:99.05ms +step:1466/1695 train_time:145210ms step_avg:99.05ms +step:1467/1695 train_time:145311ms step_avg:99.05ms +step:1468/1695 train_time:145412ms step_avg:99.05ms +step:1469/1695 train_time:145515ms step_avg:99.06ms +step:1470/1695 train_time:145615ms step_avg:99.06ms +step:1471/1695 train_time:145717ms step_avg:99.06ms +step:1472/1695 train_time:145820ms step_avg:99.06ms +step:1473/1695 train_time:145922ms step_avg:99.06ms +step:1474/1695 train_time:146024ms step_avg:99.07ms +step:1475/1695 train_time:146125ms step_avg:99.07ms +step:1476/1695 train_time:146227ms step_avg:99.07ms +step:1477/1695 train_time:146329ms step_avg:99.07ms +step:1478/1695 train_time:146431ms step_avg:99.07ms +step:1479/1695 train_time:146532ms step_avg:99.08ms +step:1480/1695 train_time:146633ms step_avg:99.08ms +step:1481/1695 train_time:146735ms step_avg:99.08ms +step:1482/1695 train_time:146837ms step_avg:99.08ms +step:1483/1695 train_time:146940ms step_avg:99.08ms +step:1484/1695 train_time:147042ms step_avg:99.08ms +step:1485/1695 train_time:147143ms step_avg:99.09ms +step:1486/1695 train_time:147244ms step_avg:99.09ms +step:1487/1695 train_time:147346ms step_avg:99.09ms +step:1488/1695 train_time:147450ms step_avg:99.09ms +step:1489/1695 train_time:147551ms step_avg:99.09ms +step:1490/1695 train_time:147652ms step_avg:99.10ms +step:1491/1695 train_time:147753ms step_avg:99.10ms +step:1492/1695 train_time:147853ms step_avg:99.10ms +step:1493/1695 train_time:147955ms step_avg:99.10ms +step:1494/1695 train_time:148056ms step_avg:99.10ms +step:1495/1695 train_time:148160ms step_avg:99.10ms +step:1496/1695 train_time:148262ms step_avg:99.11ms +step:1497/1695 train_time:148365ms step_avg:99.11ms +step:1498/1695 train_time:148468ms step_avg:99.11ms +step:1499/1695 train_time:148570ms step_avg:99.11ms +step:1500/1695 train_time:148672ms step_avg:99.11ms +step:1500/1695 val_loss:3.3236 train_time:148770ms step_avg:99.18ms +step:1501/1695 train_time:148796ms step_avg:99.13ms +step:1502/1695 train_time:148882ms step_avg:99.12ms +step:1503/1695 train_time:148986ms step_avg:99.13ms +step:1504/1695 train_time:149088ms step_avg:99.13ms +step:1505/1695 train_time:149189ms step_avg:99.13ms +step:1506/1695 train_time:149290ms step_avg:99.13ms +step:1507/1695 train_time:149391ms step_avg:99.13ms 
+step:1508/1695 train_time:149491ms step_avg:99.13ms +step:1509/1695 train_time:149593ms step_avg:99.13ms +step:1510/1695 train_time:149694ms step_avg:99.14ms +step:1511/1695 train_time:149798ms step_avg:99.14ms +step:1512/1695 train_time:149900ms step_avg:99.14ms +step:1513/1695 train_time:150002ms step_avg:99.14ms +step:1514/1695 train_time:150105ms step_avg:99.14ms +step:1515/1695 train_time:150210ms step_avg:99.15ms +step:1516/1695 train_time:150312ms step_avg:99.15ms +step:1517/1695 train_time:150412ms step_avg:99.15ms +step:1518/1695 train_time:150513ms step_avg:99.15ms +step:1519/1695 train_time:150616ms step_avg:99.15ms +step:1520/1695 train_time:150718ms step_avg:99.16ms +step:1521/1695 train_time:150819ms step_avg:99.16ms +step:1522/1695 train_time:150920ms step_avg:99.16ms +step:1523/1695 train_time:151022ms step_avg:99.16ms +step:1524/1695 train_time:151126ms step_avg:99.16ms +step:1525/1695 train_time:151230ms step_avg:99.17ms +step:1526/1695 train_time:151332ms step_avg:99.17ms +step:1527/1695 train_time:151434ms step_avg:99.17ms +step:1528/1695 train_time:151539ms step_avg:99.17ms +step:1529/1695 train_time:151641ms step_avg:99.18ms +step:1530/1695 train_time:151744ms step_avg:99.18ms +step:1531/1695 train_time:151845ms step_avg:99.18ms +step:1532/1695 train_time:151947ms step_avg:99.18ms +step:1533/1695 train_time:152049ms step_avg:99.18ms +step:1534/1695 train_time:152150ms step_avg:99.19ms +step:1535/1695 train_time:152252ms step_avg:99.19ms +step:1536/1695 train_time:152354ms step_avg:99.19ms +step:1537/1695 train_time:152455ms step_avg:99.19ms +step:1538/1695 train_time:152557ms step_avg:99.19ms +step:1539/1695 train_time:152658ms step_avg:99.19ms +step:1540/1695 train_time:152760ms step_avg:99.20ms +step:1541/1695 train_time:152866ms step_avg:99.20ms +step:1542/1695 train_time:152969ms step_avg:99.20ms +step:1543/1695 train_time:153071ms step_avg:99.20ms +step:1544/1695 train_time:153172ms step_avg:99.20ms +step:1545/1695 train_time:153273ms step_avg:99.21ms +step:1546/1695 train_time:153374ms step_avg:99.21ms +step:1547/1695 train_time:153476ms step_avg:99.21ms +step:1548/1695 train_time:153579ms step_avg:99.21ms +step:1549/1695 train_time:153681ms step_avg:99.21ms +step:1550/1695 train_time:153782ms step_avg:99.21ms +step:1551/1695 train_time:153885ms step_avg:99.22ms +step:1552/1695 train_time:153989ms step_avg:99.22ms +step:1553/1695 train_time:154092ms step_avg:99.22ms +step:1554/1695 train_time:154193ms step_avg:99.22ms +step:1555/1695 train_time:154293ms step_avg:99.22ms +step:1556/1695 train_time:154395ms step_avg:99.23ms +step:1557/1695 train_time:154498ms step_avg:99.23ms +step:1558/1695 train_time:154601ms step_avg:99.23ms +step:1559/1695 train_time:154703ms step_avg:99.23ms +step:1560/1695 train_time:154805ms step_avg:99.23ms +step:1561/1695 train_time:154906ms step_avg:99.24ms +step:1562/1695 train_time:155010ms step_avg:99.24ms +step:1563/1695 train_time:155114ms step_avg:99.24ms +step:1564/1695 train_time:155215ms step_avg:99.24ms +step:1565/1695 train_time:155316ms step_avg:99.24ms +step:1566/1695 train_time:155417ms step_avg:99.24ms +step:1567/1695 train_time:155517ms step_avg:99.25ms +step:1568/1695 train_time:155618ms step_avg:99.25ms +step:1569/1695 train_time:155718ms step_avg:99.25ms +step:1570/1695 train_time:155821ms step_avg:99.25ms +step:1571/1695 train_time:155923ms step_avg:99.25ms +step:1572/1695 train_time:156026ms step_avg:99.25ms +step:1573/1695 train_time:156129ms step_avg:99.26ms +step:1574/1695 train_time:156230ms step_avg:99.26ms 
+step:1575/1695 train_time:156332ms step_avg:99.26ms +step:1576/1695 train_time:156434ms step_avg:99.26ms +step:1577/1695 train_time:156538ms step_avg:99.26ms +step:1578/1695 train_time:156638ms step_avg:99.26ms +step:1579/1695 train_time:156739ms step_avg:99.26ms +step:1580/1695 train_time:156840ms step_avg:99.27ms +step:1581/1695 train_time:156944ms step_avg:99.27ms +step:1582/1695 train_time:157046ms step_avg:99.27ms +step:1583/1695 train_time:157148ms step_avg:99.27ms +step:1584/1695 train_time:157251ms step_avg:99.27ms +step:1585/1695 train_time:157352ms step_avg:99.28ms +step:1586/1695 train_time:157456ms step_avg:99.28ms +step:1587/1695 train_time:157557ms step_avg:99.28ms +step:1588/1695 train_time:157657ms step_avg:99.28ms +step:1589/1695 train_time:157758ms step_avg:99.28ms +step:1590/1695 train_time:157859ms step_avg:99.28ms +step:1591/1695 train_time:157961ms step_avg:99.28ms +step:1592/1695 train_time:158064ms step_avg:99.29ms +step:1593/1695 train_time:158165ms step_avg:99.29ms +step:1594/1695 train_time:158269ms step_avg:99.29ms +step:1595/1695 train_time:158371ms step_avg:99.29ms +step:1596/1695 train_time:158473ms step_avg:99.29ms +step:1597/1695 train_time:158575ms step_avg:99.30ms +step:1598/1695 train_time:158678ms step_avg:99.30ms +step:1599/1695 train_time:158779ms step_avg:99.30ms +step:1600/1695 train_time:158881ms step_avg:99.30ms +step:1601/1695 train_time:158983ms step_avg:99.30ms +step:1602/1695 train_time:159086ms step_avg:99.30ms +step:1603/1695 train_time:159187ms step_avg:99.31ms +step:1604/1695 train_time:159288ms step_avg:99.31ms +step:1605/1695 train_time:159391ms step_avg:99.31ms +step:1606/1695 train_time:159493ms step_avg:99.31ms +step:1607/1695 train_time:159595ms step_avg:99.31ms +step:1608/1695 train_time:159695ms step_avg:99.31ms +step:1609/1695 train_time:159796ms step_avg:99.31ms +step:1610/1695 train_time:159900ms step_avg:99.32ms +step:1611/1695 train_time:160002ms step_avg:99.32ms +step:1612/1695 train_time:160104ms step_avg:99.32ms +step:1613/1695 train_time:160205ms step_avg:99.32ms +step:1614/1695 train_time:160306ms step_avg:99.32ms +step:1615/1695 train_time:160409ms step_avg:99.32ms +step:1616/1695 train_time:160511ms step_avg:99.33ms +step:1617/1695 train_time:160614ms step_avg:99.33ms +step:1618/1695 train_time:160715ms step_avg:99.33ms +step:1619/1695 train_time:160816ms step_avg:99.33ms +step:1620/1695 train_time:160917ms step_avg:99.33ms +step:1621/1695 train_time:161018ms step_avg:99.33ms +step:1622/1695 train_time:161119ms step_avg:99.33ms +step:1623/1695 train_time:161221ms step_avg:99.34ms +step:1624/1695 train_time:161324ms step_avg:99.34ms +step:1625/1695 train_time:161428ms step_avg:99.34ms +step:1625/1695 val_loss:3.2946 train_time:161529ms step_avg:99.40ms +step:1626/1695 train_time:161558ms step_avg:99.36ms +step:1627/1695 train_time:161640ms step_avg:99.35ms +step:1628/1695 train_time:161743ms step_avg:99.35ms +step:1629/1695 train_time:161845ms step_avg:99.35ms +step:1630/1695 train_time:161946ms step_avg:99.35ms +step:1631/1695 train_time:162047ms step_avg:99.35ms +step:1632/1695 train_time:162148ms step_avg:99.36ms +step:1633/1695 train_time:162249ms step_avg:99.36ms +step:1634/1695 train_time:162351ms step_avg:99.36ms +step:1635/1695 train_time:162453ms step_avg:99.36ms +step:1636/1695 train_time:162557ms step_avg:99.36ms +step:1637/1695 train_time:162661ms step_avg:99.37ms +step:1638/1695 train_time:162763ms step_avg:99.37ms +step:1639/1695 train_time:162865ms step_avg:99.37ms +step:1640/1695 train_time:162968ms 
step_avg:99.37ms +step:1641/1695 train_time:163070ms step_avg:99.37ms +step:1642/1695 train_time:163172ms step_avg:99.37ms +step:1643/1695 train_time:163273ms step_avg:99.37ms +step:1644/1695 train_time:163375ms step_avg:99.38ms +step:1645/1695 train_time:163478ms step_avg:99.38ms +step:1646/1695 train_time:163581ms step_avg:99.38ms +step:1647/1695 train_time:163685ms step_avg:99.38ms +step:1648/1695 train_time:163789ms step_avg:99.39ms +step:1649/1695 train_time:163891ms step_avg:99.39ms +step:1650/1695 train_time:163994ms step_avg:99.39ms +step:1651/1695 train_time:164096ms step_avg:99.39ms +step:1652/1695 train_time:164199ms step_avg:99.39ms +step:1653/1695 train_time:164302ms step_avg:99.40ms +step:1654/1695 train_time:164403ms step_avg:99.40ms +step:1655/1695 train_time:164507ms step_avg:99.40ms +step:1656/1695 train_time:164608ms step_avg:99.40ms +step:1657/1695 train_time:164712ms step_avg:99.40ms +step:1658/1695 train_time:164814ms step_avg:99.41ms +step:1659/1695 train_time:164921ms step_avg:99.41ms +step:1660/1695 train_time:165023ms step_avg:99.41ms +step:1661/1695 train_time:165126ms step_avg:99.41ms +step:1662/1695 train_time:165231ms step_avg:99.42ms +step:1663/1695 train_time:165334ms step_avg:99.42ms +step:1664/1695 train_time:165436ms step_avg:99.42ms +step:1665/1695 train_time:165541ms step_avg:99.42ms +step:1666/1695 train_time:165644ms step_avg:99.43ms +step:1667/1695 train_time:165746ms step_avg:99.43ms +step:1668/1695 train_time:165851ms step_avg:99.43ms +step:1669/1695 train_time:165956ms step_avg:99.43ms +step:1670/1695 train_time:166059ms step_avg:99.44ms +step:1671/1695 train_time:166161ms step_avg:99.44ms +step:1672/1695 train_time:166264ms step_avg:99.44ms +step:1673/1695 train_time:166365ms step_avg:99.44ms +step:1674/1695 train_time:166467ms step_avg:99.44ms +step:1675/1695 train_time:166569ms step_avg:99.44ms +step:1676/1695 train_time:166673ms step_avg:99.45ms +step:1677/1695 train_time:166776ms step_avg:99.45ms +step:1678/1695 train_time:166880ms step_avg:99.45ms +step:1679/1695 train_time:166983ms step_avg:99.45ms +step:1680/1695 train_time:167085ms step_avg:99.46ms +step:1681/1695 train_time:167188ms step_avg:99.46ms +step:1682/1695 train_time:167293ms step_avg:99.46ms +step:1683/1695 train_time:167396ms step_avg:99.46ms +step:1684/1695 train_time:167499ms step_avg:99.47ms +step:1685/1695 train_time:167602ms step_avg:99.47ms +step:1686/1695 train_time:167704ms step_avg:99.47ms +step:1687/1695 train_time:167806ms step_avg:99.47ms +step:1688/1695 train_time:167908ms step_avg:99.47ms +step:1689/1695 train_time:168010ms step_avg:99.47ms +step:1690/1695 train_time:168112ms step_avg:99.47ms +step:1691/1695 train_time:168215ms step_avg:99.48ms +step:1692/1695 train_time:168318ms step_avg:99.48ms +step:1693/1695 train_time:168421ms step_avg:99.48ms +step:1694/1695 train_time:168525ms step_avg:99.48ms +step:1695/1695 train_time:168628ms step_avg:99.49ms +step:1695/1695 val_loss:3.2815 train_time:168727ms step_avg:99.54ms +peak memory allocated: 34004 MiB reserved: 49660 MiB diff --git a/records/082325_SparseAttnGate/eb6d347b-fd4a-4077-a490-436c64f97ce2.txt b/records/082325_SparseAttnGate/eb6d347b-fd4a-4077-a490-436c64f97ce2.txt new file mode 100644 index 000000000..33fd2ea91 --- /dev/null +++ b/records/082325_SparseAttnGate/eb6d347b-fd4a-4077-a490-436c64f97ce2.txt @@ -0,0 +1,2802 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from 
dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import triton +import triton.language as tl + +@dataclass +class Hyperparameters: + # data + dampen_factor = 64 + run_id = f'final/{uuid.uuid4()}' + train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len = 48*1024 # FlexAttention sequence length + val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + # optimization + num_iterations = 1695 # number of iterations to run + cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint = False +args = Hyperparameters() + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: 
Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + 
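# Note: a_ptrs above and at_ptrs below advance by the same K-block stride, +
# since the tiles of A and A.T stream the same underlying buffer along K. +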
at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, 
BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
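+ 
+ Illustrative usage (a sketch; `model`, `batch`, and `compute_loss` are placeholders, 
+ and torch.distributed must already be initialized, since step() uses collectives). 
+ The update for each 2D weight is the momentum buffer orthogonalized by the quintic 
+ Newton-Schulz iteration X <- a*X + b*(X @ X.T) @ X + c*(X @ X.T)^2 @ X, with 
+ (a, b, c) = (3.4445, -4.7750, 2.0315) as in newton_schulz_triton above: 
+ 
+ matrix_params = [p for p in model.parameters() if p.ndim >= 2] 
+ opt = Muon(matrix_params, lr=0.02, weight_decay=0.01, momentum=0.95) 
+ loss = compute_loss(model, batch) 
+ loss.backward() 
+ opt.step() 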
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + 
reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0, bias=False): + super().__init__(in_features, out_features, bias=bias) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, 
num_heads: int, max_seq_len: int, head_dim=128):
+        super().__init__()
+        self.num_heads = num_heads
+        self.head_dim = head_dim
+        hdim = num_heads * head_dim
+        assert hdim == dim, "num_heads * head_dim must equal model_dim"
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng
+        # https://x.com/hi_tysam/status/1879699187107033311
+        self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim))
+        with torch.no_grad():
+            self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights
+            self.qkvo_w[3].zero_() # init output weights to zero
+        self.rotary = Rotary(head_dim, max_seq_len)
+        # scale the attention logits by a given constant, instead of the default head_dim**-0.5, by @leloykun
+        # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
+        self.attn_scale = 0.12
+        self.dampen = CastedLinear(dim//args.dampen_factor, num_heads)
+        self.dampen.weight.detach().zero_()
+
+    def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask):
+        B, T, d_model = x.size(0), x.size(1), x.size(-1) # batch size, sequence length, model dim
+        assert B == 1, "Must use batch size = 1 for FlexAttention"
+        dampen_factor = torch.sigmoid(self.dampen(x[..., :d_model//args.dampen_factor])).view(B, T, self.num_heads, 1)
+        q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2)
+        q, k = norm(q), norm(k) # QK norm @Grad62304977
+        q, k = self.rotary(q), self.rotary(k)
+        if ve is not None:
+            v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977
+        else: # skip mid-layers token value embeddings by @YouJiacheng
+            v = lambdas[0] * v
+        y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=self.attn_scale).transpose(1, 2)
+        y = y.view(B, T, self.num_heads, self.head_dim)
+        y = y * dampen_factor
+        y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side
+        y = F.linear(y, self.qkvo_w[3].type_as(y))
+        return y
+
+class MLP(nn.Module):
+    def __init__(self, dim: int):
+        super().__init__()
+        hdim = 4 * dim
+        # make both matrices have the same shape because optimizer sorts params by shape
+        # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size
+        self.c_fc = nn.Parameter(torch.empty(dim, hdim))
+        self.c_proj = nn.Parameter(torch.empty(dim, hdim))
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        with torch.no_grad():
+            self.c_fc.uniform_(-bound, bound)
+            self.c_proj.zero_() # zero init suggested by @Grad62304977
+
+    def forward(self, x: Tensor):
+        x = F.linear(x, self.c_fc.T.type_as(x))
+        x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
+        x = F.linear(x, self.c_proj.type_as(x))
+        return x
+
+class Block(nn.Module):
+    def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int):
+        super().__init__()
+        # skip attention of blocks.7 (the 8th layer) by @YouJiacheng
+        self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None
+        self.mlp = MLP(dim)
+
+    def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask):
+        x = lambdas[0] * x + lambdas[1] * x0
+        if self.attn is not None:
+            x = x + self.attn(norm(x),
ve, sa_lambdas, block_mask) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. 
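+        # note: lr_mul / wd_mul are plain attributes attached to individual parameters;
+        # Muon and DistAdam read them via getattr(p, "lr_mul", 1.0) / getattr(p, "wd_mul", 1.0),
+        # so any parameter without the attribute falls back to its group's base lr and weight decay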
+        self.lm_head.weight.lr_mul = 1.0
+        self.scalars.lr_mul = 5.0
+
+    def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor):
+        BLOCK_SIZE = 128
+        docs = (input_seq == 50256).cumsum(0)
+        # increments = (input_seq == 50256) | torch.cat([torch.tensor([False], device="cuda"), input_seq[:-1] == 50256])
+        # docs = increments.cumsum(0)
+
+        def document_causal(b, h, q_idx, kv_idx):
+            causal_mask = q_idx >= kv_idx
+            document_mask = docs[q_idx] == docs[kv_idx]
+            return causal_mask & document_mask
+
+        def dense_to_ordered(dense_blockmask: Tensor):
+            num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32)
+            indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32)
+            return num_blocks[None, None].contiguous(), indices[None, None].contiguous()
+
+        # manual block mask creation by @YouJiacheng
+        assert len(input_seq) % BLOCK_SIZE == 0
+        NUM_BLOCKS = len(input_seq) // BLOCK_SIZE
+        block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda")
+        causal_blockmask_any = block_idx[:, None] >= block_idx
+        causal_blockmask_all = block_idx[:, None] > block_idx
+        docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous()
+        docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous()
+        document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low)
+        document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low)
+        blockmask_any = causal_blockmask_any & document_blockmask_any
+        blockmask_all = causal_blockmask_all & document_blockmask_all
+        partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all)
+        full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all)
+        def build_bm(window_size_blocks: Tensor) -> BlockMask:
+            return BlockMask.from_kv_blocks(
+                torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)),
+                partial_kv_indices,
+                torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1),
+                full_kv_indices,
+                BLOCK_SIZE=BLOCK_SIZE,
+                mask_mod=document_causal,
+            )
+        # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from a suggestion by @Grad62304977, following the Gemma 2 paper
+        return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2)
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor):
+        assert input_seq.ndim == 1
+
+        ve = [value_embed(input_seq) for value_embed in self.value_embeds]
+        # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
+        ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
+        assert len(ve) == len(self.blocks)
+
+        long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks)
+        block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(block_masks) == len(self.blocks)
+
+        x = x0 = norm(self.embed(input_seq)[None]) # use of norm here by @Grad62304977
+
+        # U-net design by @brendanh0gan
+        skip_connections = []
+        skip_weights = self.scalars[:(len(self.blocks) // 2)]
+        lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2)
+        sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2)
+
+        n = len(self.blocks) // 2
+
+        for i in range(len(self.blocks)):
+            if i >= n:
+                x = x + skip_weights[i - n] * skip_connections.pop()
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i])
+            if i < n:
+                skip_connections.append(x)
+
+        x = norm(x)
+        logits = self.lm_head(x).float()
+        # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        return loss
+
+# -----------------------------------------------------------------------------
+# Distributed data loader
+
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2]) # number of tokens (claimed)
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng
+    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap
+def find_batch_starts(tokens: Tensor, pos: int, seq_len: int, token_window: int):
+    boundary_mask = tokens[pos : pos + token_window] == 50256
+    boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos
+    start = boundary_positions[0].item()
+    starts = []
+    for i in range(1, len(boundary_positions)):
+        end = boundary_positions[i].item()
+        if end - start >= seq_len:
+            starts.append(start) # append start once end pos is confirmed
+            if len(starts) == dist.get_world_size():
+                return starts, end - pos
+            start = end
+    assert False # increase token_window if necessary
+
+def distributed_data_generator(filename_pattern: str, seq_len: int, grad_accum_steps: int, align_to_bos: bool):
+    rank = dist.get_rank()
+    world_size = dist.get_world_size()
+    batch_size = seq_len * world_size
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        token_window = grad_accum_steps * (2 * batch_size if align_to_bos else batch_size) # provide buffer to handle samples up to length
seq_len + if pos + token_window + 1 >= len(tokens): + tokens = _load_data_shard(next(file_iter)) + pos = 0 + for _ in range(grad_accum_steps): + if align_to_bos: + batch_starts, tokens_consumed = find_batch_starts(tokens, pos, seq_len, token_window) + start_idx = batch_starts[rank] + else: + tokens_consumed = batch_size + start_idx = pos + rank * seq_len + buf = tokens[start_idx:][:seq_len + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += tokens_consumed + token_window -= tokens_consumed + yield inputs, targets + +# ----------------------------------------------------------------------------- +# int main + + + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x < 1 + if x < 1 - args.cooldown_frac: + return 1.0 + else: + w = (1 - x) / args.cooldown_frac + return w * 1.0 + (1 - w) * 0.1 + +# attention window size schedule: linearly increase +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + assert 0 <= x <= 1 + # Linearly increase the block-wise sliding window size over training 128 -> 1792 + # increase by @fernbear.bsky.social; block-wise by @YouJiacheng + window_size = next_multiple_of_n(1728 * x, n=128) + return get_window_size_blocks_helper(window_size) + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 10 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +for _ in range(warmup_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(1)).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_seq_len, grad_accum_steps, align_to_bos=True) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + val_batch_size = world_size * args.val_seq_len + assert args.val_tokens % val_batch_size == 0 + val_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, args.val_seq_len, grad_accum_steps, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, get_window_size_blocks(step)) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + 
print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, get_window_size_blocks(step)).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * get_lr(step) + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250713+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Sat Aug 23 13:32:07 2025 ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 570.148.08 Driver Version: 570.148.08 CUDA Version: 12.8 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:61:00.0 Off | 0 | +| N/A 31C P0 117W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:62:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:63:00.0 Off | 0 | +| N/A 38C P0 124W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:64:00.0 Off | 0 | +| N/A 31C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:6A:00.0 Off | 0 | +| N/A 32C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:6B:00.0 Off | 0 | +| N/A 37C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:6C:00.0 Off | 0 | +| N/A 36C P0 120W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:6D:00.0 Off | 0 | +| N/A 34C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+------------------------+----------------------+ + ++-----------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=========================================================================================| +| 0 N/A N/A 310315 C /usr/bin/python3 1510MiB | +| 0 N/A N/A 310316 C /usr/bin/python3 614MiB | +| 0 N/A N/A 310317 C /usr/bin/python3 614MiB | +| 0 N/A N/A 310318 C /usr/bin/python3 614MiB | +| 0 N/A N/A 310319 C /usr/bin/python3 614MiB | +| 0 N/A N/A 310320 C /usr/bin/python3 614MiB | +| 0 N/A N/A 310321 C /usr/bin/python3 614MiB | +| 0 N/A N/A 310322 C /usr/bin/python3 614MiB | +| 1 N/A N/A 310316 C /usr/bin/python3 1510MiB | +| 2 N/A N/A 310317 C /usr/bin/python3 1510MiB | +| 3 N/A N/A 310318 C /usr/bin/python3 1510MiB | +| 4 N/A N/A 310319 C /usr/bin/python3 1510MiB | +| 5 N/A N/A 310320 C /usr/bin/python3 1510MiB | +| 6 N/A N/A 310321 C /usr/bin/python3 1510MiB | +| 7 N/A N/A 310322 C /usr/bin/python3 1510MiB | ++-----------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:152ms step_avg:152.47ms +step:2/1695 train_time:178ms step_avg:88.89ms +step:3/1695 train_time:255ms step_avg:84.86ms +step:4/1695 train_time:346ms step_avg:86.58ms +step:5/1695 train_time:439ms step_avg:87.77ms +step:6/1695 train_time:532ms step_avg:88.62ms +step:7/1695 train_time:625ms step_avg:89.24ms +step:8/1695 
train_time:719ms step_avg:89.87ms +step:9/1695 train_time:812ms step_avg:90.20ms +step:10/1695 train_time:905ms step_avg:90.49ms +step:11/1695 train_time:998ms step_avg:90.74ms +step:12/1695 train_time:1092ms step_avg:90.98ms +step:13/1695 train_time:1187ms step_avg:91.28ms +step:14/1695 train_time:1281ms step_avg:91.50ms +step:15/1695 train_time:1375ms step_avg:91.69ms +step:16/1695 train_time:1470ms step_avg:91.86ms +step:17/1695 train_time:1564ms step_avg:91.99ms +step:18/1695 train_time:1658ms step_avg:92.08ms +step:19/1695 train_time:1751ms step_avg:92.16ms +step:20/1695 train_time:1844ms step_avg:92.19ms +step:21/1695 train_time:1938ms step_avg:92.27ms +step:22/1695 train_time:2031ms step_avg:92.34ms +step:23/1695 train_time:2126ms step_avg:92.42ms +step:24/1695 train_time:2220ms step_avg:92.49ms +step:25/1695 train_time:2314ms step_avg:92.56ms +step:26/1695 train_time:2407ms step_avg:92.60ms +step:27/1695 train_time:2502ms step_avg:92.66ms +step:28/1695 train_time:2597ms step_avg:92.74ms +step:29/1695 train_time:2691ms step_avg:92.80ms +step:30/1695 train_time:2784ms step_avg:92.80ms +step:31/1695 train_time:2878ms step_avg:92.82ms +step:32/1695 train_time:2971ms step_avg:92.86ms +step:33/1695 train_time:3065ms step_avg:92.88ms +step:34/1695 train_time:3159ms step_avg:92.92ms +step:35/1695 train_time:3253ms step_avg:92.94ms +step:36/1695 train_time:3346ms step_avg:92.95ms +step:37/1695 train_time:3441ms step_avg:92.99ms +step:38/1695 train_time:3535ms step_avg:93.04ms +step:39/1695 train_time:3630ms step_avg:93.07ms +step:40/1695 train_time:3724ms step_avg:93.09ms +step:41/1695 train_time:3818ms step_avg:93.11ms +step:42/1695 train_time:3911ms step_avg:93.13ms +step:43/1695 train_time:4005ms step_avg:93.13ms +step:44/1695 train_time:4098ms step_avg:93.14ms +step:45/1695 train_time:4192ms step_avg:93.15ms +step:46/1695 train_time:4285ms step_avg:93.16ms +step:47/1695 train_time:4379ms step_avg:93.17ms +step:48/1695 train_time:4473ms step_avg:93.19ms +step:49/1695 train_time:4567ms step_avg:93.20ms +step:50/1695 train_time:4660ms step_avg:93.21ms +step:51/1695 train_time:4754ms step_avg:93.21ms +step:52/1695 train_time:4848ms step_avg:93.23ms +step:53/1695 train_time:4942ms step_avg:93.24ms +step:54/1695 train_time:5036ms step_avg:93.27ms +step:55/1695 train_time:5131ms step_avg:93.28ms +step:56/1695 train_time:5224ms step_avg:93.28ms +step:57/1695 train_time:5318ms step_avg:93.29ms +step:58/1695 train_time:5412ms step_avg:93.31ms +step:59/1695 train_time:5505ms step_avg:93.31ms +step:60/1695 train_time:5599ms step_avg:93.32ms +step:61/1695 train_time:5693ms step_avg:93.32ms +step:62/1695 train_time:5786ms step_avg:93.32ms +step:63/1695 train_time:5879ms step_avg:93.32ms +step:64/1695 train_time:5973ms step_avg:93.33ms +step:65/1695 train_time:6067ms step_avg:93.34ms +step:66/1695 train_time:6160ms step_avg:93.34ms +step:67/1695 train_time:6254ms step_avg:93.35ms +step:68/1695 train_time:6349ms step_avg:93.36ms +step:69/1695 train_time:6442ms step_avg:93.37ms +step:70/1695 train_time:6536ms step_avg:93.37ms +step:71/1695 train_time:6630ms step_avg:93.39ms +step:72/1695 train_time:6724ms step_avg:93.39ms +step:73/1695 train_time:6818ms step_avg:93.39ms +step:74/1695 train_time:6912ms step_avg:93.41ms +step:75/1695 train_time:7005ms step_avg:93.41ms +step:76/1695 train_time:7099ms step_avg:93.41ms +step:77/1695 train_time:7193ms step_avg:93.41ms +step:78/1695 train_time:7286ms step_avg:93.41ms +step:79/1695 train_time:7379ms step_avg:93.41ms +step:80/1695 train_time:7473ms 
step_avg:93.41ms +step:81/1695 train_time:7566ms step_avg:93.41ms +step:82/1695 train_time:7660ms step_avg:93.41ms +step:83/1695 train_time:7753ms step_avg:93.42ms +step:84/1695 train_time:7847ms step_avg:93.41ms +step:85/1695 train_time:7940ms step_avg:93.42ms +step:86/1695 train_time:8035ms step_avg:93.42ms +step:87/1695 train_time:8128ms step_avg:93.42ms +step:88/1695 train_time:8221ms step_avg:93.43ms +step:89/1695 train_time:8316ms step_avg:93.44ms +step:90/1695 train_time:8410ms step_avg:93.45ms +step:91/1695 train_time:8504ms step_avg:93.45ms +step:92/1695 train_time:8598ms step_avg:93.46ms +step:93/1695 train_time:8692ms step_avg:93.46ms +step:94/1695 train_time:8785ms step_avg:93.45ms +step:95/1695 train_time:8879ms step_avg:93.47ms +step:96/1695 train_time:8973ms step_avg:93.47ms +step:97/1695 train_time:9067ms step_avg:93.47ms +step:98/1695 train_time:9160ms step_avg:93.47ms +step:99/1695 train_time:9254ms step_avg:93.47ms +step:100/1695 train_time:9347ms step_avg:93.47ms +step:101/1695 train_time:9441ms step_avg:93.48ms +step:102/1695 train_time:9534ms step_avg:93.47ms +step:103/1695 train_time:9627ms step_avg:93.47ms +step:104/1695 train_time:9721ms step_avg:93.47ms +step:105/1695 train_time:9815ms step_avg:93.48ms +step:106/1695 train_time:9909ms step_avg:93.48ms +step:107/1695 train_time:10002ms step_avg:93.48ms +step:108/1695 train_time:10097ms step_avg:93.49ms +step:109/1695 train_time:10191ms step_avg:93.50ms +step:110/1695 train_time:10284ms step_avg:93.49ms +step:111/1695 train_time:10378ms step_avg:93.50ms +step:112/1695 train_time:10472ms step_avg:93.50ms +step:113/1695 train_time:10566ms step_avg:93.51ms +step:114/1695 train_time:10659ms step_avg:93.50ms +step:115/1695 train_time:10753ms step_avg:93.51ms +step:116/1695 train_time:10847ms step_avg:93.51ms +step:117/1695 train_time:10941ms step_avg:93.51ms +step:118/1695 train_time:11035ms step_avg:93.52ms +step:119/1695 train_time:11130ms step_avg:93.53ms +step:120/1695 train_time:11223ms step_avg:93.53ms +step:121/1695 train_time:11316ms step_avg:93.52ms +step:122/1695 train_time:11411ms step_avg:93.53ms +step:123/1695 train_time:11503ms step_avg:93.52ms +step:124/1695 train_time:11598ms step_avg:93.53ms +step:125/1695 train_time:11691ms step_avg:93.53ms +step:125/1695 val_loss:4.6000 train_time:11783ms step_avg:94.26ms +step:126/1695 train_time:11809ms step_avg:93.72ms +step:127/1695 train_time:11885ms step_avg:93.59ms +step:128/1695 train_time:11987ms step_avg:93.65ms +step:129/1695 train_time:12084ms step_avg:93.67ms +step:130/1695 train_time:12177ms step_avg:93.67ms +step:131/1695 train_time:12270ms step_avg:93.67ms +step:132/1695 train_time:12364ms step_avg:93.67ms +step:133/1695 train_time:12458ms step_avg:93.67ms +step:134/1695 train_time:12551ms step_avg:93.67ms +step:135/1695 train_time:12645ms step_avg:93.67ms +step:136/1695 train_time:12738ms step_avg:93.66ms +step:137/1695 train_time:12834ms step_avg:93.68ms +step:138/1695 train_time:12931ms step_avg:93.70ms +step:139/1695 train_time:13027ms step_avg:93.72ms +step:140/1695 train_time:13122ms step_avg:93.73ms +step:141/1695 train_time:13215ms step_avg:93.72ms +step:142/1695 train_time:13309ms step_avg:93.72ms +step:143/1695 train_time:13402ms step_avg:93.72ms +step:144/1695 train_time:13495ms step_avg:93.72ms +step:145/1695 train_time:13589ms step_avg:93.72ms +step:146/1695 train_time:13682ms step_avg:93.71ms +step:147/1695 train_time:13775ms step_avg:93.71ms +step:148/1695 train_time:13870ms step_avg:93.72ms +step:149/1695 train_time:13966ms 
step_avg:93.73ms +step:150/1695 train_time:14060ms step_avg:93.73ms +step:151/1695 train_time:14155ms step_avg:93.74ms +step:152/1695 train_time:14250ms step_avg:93.75ms +step:153/1695 train_time:14344ms step_avg:93.75ms +step:154/1695 train_time:14437ms step_avg:93.75ms +step:155/1695 train_time:14531ms step_avg:93.75ms +step:156/1695 train_time:14626ms step_avg:93.75ms +step:157/1695 train_time:14719ms step_avg:93.75ms +step:158/1695 train_time:14813ms step_avg:93.75ms +step:159/1695 train_time:14907ms step_avg:93.75ms +step:160/1695 train_time:15001ms step_avg:93.76ms +step:161/1695 train_time:15096ms step_avg:93.76ms +step:162/1695 train_time:15191ms step_avg:93.77ms +step:163/1695 train_time:15285ms step_avg:93.77ms +step:164/1695 train_time:15379ms step_avg:93.77ms +step:165/1695 train_time:15472ms step_avg:93.77ms +step:166/1695 train_time:15567ms step_avg:93.78ms +step:167/1695 train_time:15661ms step_avg:93.78ms +step:168/1695 train_time:15754ms step_avg:93.78ms +step:169/1695 train_time:15850ms step_avg:93.78ms +step:170/1695 train_time:15943ms step_avg:93.78ms +step:171/1695 train_time:16037ms step_avg:93.78ms +step:172/1695 train_time:16133ms step_avg:93.80ms +step:173/1695 train_time:16228ms step_avg:93.80ms +step:174/1695 train_time:16322ms step_avg:93.80ms +step:175/1695 train_time:16415ms step_avg:93.80ms +step:176/1695 train_time:16510ms step_avg:93.80ms +step:177/1695 train_time:16604ms step_avg:93.81ms +step:178/1695 train_time:16697ms step_avg:93.81ms +step:179/1695 train_time:16792ms step_avg:93.81ms +step:180/1695 train_time:16886ms step_avg:93.81ms +step:181/1695 train_time:16979ms step_avg:93.81ms +step:182/1695 train_time:17074ms step_avg:93.81ms +step:183/1695 train_time:17168ms step_avg:93.81ms +step:184/1695 train_time:17262ms step_avg:93.81ms +step:185/1695 train_time:17357ms step_avg:93.82ms +step:186/1695 train_time:17451ms step_avg:93.83ms +step:187/1695 train_time:17545ms step_avg:93.83ms +step:188/1695 train_time:17639ms step_avg:93.83ms +step:189/1695 train_time:17733ms step_avg:93.83ms +step:190/1695 train_time:17828ms step_avg:93.83ms +step:191/1695 train_time:17922ms step_avg:93.83ms +step:192/1695 train_time:18015ms step_avg:93.83ms +step:193/1695 train_time:18109ms step_avg:93.83ms +step:194/1695 train_time:18203ms step_avg:93.83ms +step:195/1695 train_time:18297ms step_avg:93.83ms +step:196/1695 train_time:18392ms step_avg:93.84ms +step:197/1695 train_time:18486ms step_avg:93.84ms +step:198/1695 train_time:18580ms step_avg:93.84ms +step:199/1695 train_time:18674ms step_avg:93.84ms +step:200/1695 train_time:18768ms step_avg:93.84ms +step:201/1695 train_time:18862ms step_avg:93.84ms +step:202/1695 train_time:18957ms step_avg:93.85ms +step:203/1695 train_time:19051ms step_avg:93.85ms +step:204/1695 train_time:19146ms step_avg:93.85ms +step:205/1695 train_time:19239ms step_avg:93.85ms +step:206/1695 train_time:19334ms step_avg:93.85ms +step:207/1695 train_time:19427ms step_avg:93.85ms +step:208/1695 train_time:19521ms step_avg:93.85ms +step:209/1695 train_time:19615ms step_avg:93.85ms +step:210/1695 train_time:19710ms step_avg:93.86ms +step:211/1695 train_time:19805ms step_avg:93.86ms +step:212/1695 train_time:19899ms step_avg:93.86ms +step:213/1695 train_time:19993ms step_avg:93.86ms +step:214/1695 train_time:20087ms step_avg:93.86ms +step:215/1695 train_time:20181ms step_avg:93.86ms +step:216/1695 train_time:20274ms step_avg:93.86ms +step:217/1695 train_time:20368ms step_avg:93.86ms +step:218/1695 train_time:20462ms step_avg:93.86ms +step:219/1695 
train_time:20555ms step_avg:93.86ms +step:220/1695 train_time:20650ms step_avg:93.86ms +step:221/1695 train_time:20744ms step_avg:93.86ms +step:222/1695 train_time:20838ms step_avg:93.87ms +step:223/1695 train_time:20933ms step_avg:93.87ms +step:224/1695 train_time:21027ms step_avg:93.87ms +step:225/1695 train_time:21122ms step_avg:93.87ms +step:226/1695 train_time:21216ms step_avg:93.88ms +step:227/1695 train_time:21310ms step_avg:93.88ms +step:228/1695 train_time:21405ms step_avg:93.88ms +step:229/1695 train_time:21498ms step_avg:93.88ms +step:230/1695 train_time:21592ms step_avg:93.88ms +step:231/1695 train_time:21686ms step_avg:93.88ms +step:232/1695 train_time:21780ms step_avg:93.88ms +step:233/1695 train_time:21874ms step_avg:93.88ms +step:234/1695 train_time:21968ms step_avg:93.88ms +step:235/1695 train_time:22062ms step_avg:93.88ms +step:236/1695 train_time:22156ms step_avg:93.88ms +step:237/1695 train_time:22250ms step_avg:93.88ms +step:238/1695 train_time:22345ms step_avg:93.89ms +step:239/1695 train_time:22438ms step_avg:93.88ms +step:240/1695 train_time:22533ms step_avg:93.89ms +step:241/1695 train_time:22627ms step_avg:93.89ms +step:242/1695 train_time:22720ms step_avg:93.89ms +step:243/1695 train_time:22814ms step_avg:93.88ms +step:244/1695 train_time:22908ms step_avg:93.88ms +step:245/1695 train_time:23002ms step_avg:93.88ms +step:246/1695 train_time:23096ms step_avg:93.89ms +step:247/1695 train_time:23191ms step_avg:93.89ms +step:248/1695 train_time:23286ms step_avg:93.89ms +step:249/1695 train_time:23380ms step_avg:93.90ms +step:250/1695 train_time:23473ms step_avg:93.89ms +step:250/1695 val_loss:4.0715 train_time:23566ms step_avg:94.26ms +step:251/1695 train_time:23592ms step_avg:93.99ms +step:252/1695 train_time:23670ms step_avg:93.93ms +step:253/1695 train_time:23772ms step_avg:93.96ms +step:254/1695 train_time:23867ms step_avg:93.97ms +step:255/1695 train_time:23961ms step_avg:93.97ms +step:256/1695 train_time:24055ms step_avg:93.96ms +step:257/1695 train_time:24149ms step_avg:93.96ms +step:258/1695 train_time:24242ms step_avg:93.96ms +step:259/1695 train_time:24335ms step_avg:93.96ms +step:260/1695 train_time:24429ms step_avg:93.96ms +step:261/1695 train_time:24522ms step_avg:93.96ms +step:262/1695 train_time:24617ms step_avg:93.96ms +step:263/1695 train_time:24714ms step_avg:93.97ms +step:264/1695 train_time:24811ms step_avg:93.98ms +step:265/1695 train_time:24907ms step_avg:93.99ms +step:266/1695 train_time:25001ms step_avg:93.99ms +step:267/1695 train_time:25096ms step_avg:93.99ms +step:268/1695 train_time:25190ms step_avg:93.99ms +step:269/1695 train_time:25283ms step_avg:93.99ms +step:270/1695 train_time:25377ms step_avg:93.99ms +step:271/1695 train_time:25471ms step_avg:93.99ms +step:272/1695 train_time:25565ms step_avg:93.99ms +step:273/1695 train_time:25659ms step_avg:93.99ms +step:274/1695 train_time:25754ms step_avg:93.99ms +step:275/1695 train_time:25851ms step_avg:94.00ms +step:276/1695 train_time:25947ms step_avg:94.01ms +step:277/1695 train_time:26041ms step_avg:94.01ms +step:278/1695 train_time:26135ms step_avg:94.01ms +step:279/1695 train_time:26230ms step_avg:94.01ms +step:280/1695 train_time:26324ms step_avg:94.01ms +step:281/1695 train_time:26417ms step_avg:94.01ms +step:282/1695 train_time:26511ms step_avg:94.01ms +step:283/1695 train_time:26606ms step_avg:94.01ms +step:284/1695 train_time:26701ms step_avg:94.02ms +step:285/1695 train_time:26796ms step_avg:94.02ms +step:286/1695 train_time:26891ms step_avg:94.02ms +step:287/1695 train_time:26987ms 
step_avg:94.03ms +step:288/1695 train_time:27081ms step_avg:94.03ms +step:289/1695 train_time:27175ms step_avg:94.03ms +step:290/1695 train_time:27270ms step_avg:94.04ms +step:291/1695 train_time:27364ms step_avg:94.03ms +step:292/1695 train_time:27458ms step_avg:94.03ms +step:293/1695 train_time:27552ms step_avg:94.03ms +step:294/1695 train_time:27648ms step_avg:94.04ms +step:295/1695 train_time:27741ms step_avg:94.04ms +step:296/1695 train_time:27836ms step_avg:94.04ms +step:297/1695 train_time:27932ms step_avg:94.05ms +step:298/1695 train_time:28026ms step_avg:94.05ms +step:299/1695 train_time:28121ms step_avg:94.05ms +step:300/1695 train_time:28215ms step_avg:94.05ms +step:301/1695 train_time:28310ms step_avg:94.05ms +step:302/1695 train_time:28405ms step_avg:94.05ms +step:303/1695 train_time:28499ms step_avg:94.05ms +step:304/1695 train_time:28593ms step_avg:94.06ms +step:305/1695 train_time:28688ms step_avg:94.06ms +step:306/1695 train_time:28782ms step_avg:94.06ms +step:307/1695 train_time:28876ms step_avg:94.06ms +step:308/1695 train_time:28972ms step_avg:94.06ms +step:309/1695 train_time:29067ms step_avg:94.07ms +step:310/1695 train_time:29162ms step_avg:94.07ms +step:311/1695 train_time:29255ms step_avg:94.07ms +step:312/1695 train_time:29350ms step_avg:94.07ms +step:313/1695 train_time:29445ms step_avg:94.07ms +step:314/1695 train_time:29539ms step_avg:94.07ms +step:315/1695 train_time:29634ms step_avg:94.08ms +step:316/1695 train_time:29729ms step_avg:94.08ms +step:317/1695 train_time:29823ms step_avg:94.08ms +step:318/1695 train_time:29917ms step_avg:94.08ms +step:319/1695 train_time:30013ms step_avg:94.08ms +step:320/1695 train_time:30109ms step_avg:94.09ms +step:321/1695 train_time:30203ms step_avg:94.09ms +step:322/1695 train_time:30297ms step_avg:94.09ms +step:323/1695 train_time:30391ms step_avg:94.09ms +step:324/1695 train_time:30486ms step_avg:94.09ms +step:325/1695 train_time:30581ms step_avg:94.10ms +step:326/1695 train_time:30675ms step_avg:94.10ms +step:327/1695 train_time:30770ms step_avg:94.10ms +step:328/1695 train_time:30865ms step_avg:94.10ms +step:329/1695 train_time:30960ms step_avg:94.10ms +step:330/1695 train_time:31055ms step_avg:94.11ms +step:331/1695 train_time:31150ms step_avg:94.11ms +step:332/1695 train_time:31244ms step_avg:94.11ms +step:333/1695 train_time:31338ms step_avg:94.11ms +step:334/1695 train_time:31433ms step_avg:94.11ms +step:335/1695 train_time:31528ms step_avg:94.11ms +step:336/1695 train_time:31622ms step_avg:94.11ms +step:337/1695 train_time:31716ms step_avg:94.11ms +step:338/1695 train_time:31811ms step_avg:94.12ms +step:339/1695 train_time:31906ms step_avg:94.12ms +step:340/1695 train_time:31999ms step_avg:94.12ms +step:341/1695 train_time:32094ms step_avg:94.12ms +step:342/1695 train_time:32189ms step_avg:94.12ms +step:343/1695 train_time:32283ms step_avg:94.12ms +step:344/1695 train_time:32378ms step_avg:94.12ms +step:345/1695 train_time:32472ms step_avg:94.12ms +step:346/1695 train_time:32567ms step_avg:94.13ms +step:347/1695 train_time:32661ms step_avg:94.13ms +step:348/1695 train_time:32755ms step_avg:94.12ms +step:349/1695 train_time:32850ms step_avg:94.13ms +step:350/1695 train_time:32945ms step_avg:94.13ms +step:351/1695 train_time:33039ms step_avg:94.13ms +step:352/1695 train_time:33134ms step_avg:94.13ms +step:353/1695 train_time:33229ms step_avg:94.13ms +step:354/1695 train_time:33323ms step_avg:94.13ms +step:355/1695 train_time:33417ms step_avg:94.13ms +step:356/1695 train_time:33513ms step_avg:94.14ms +step:357/1695 
train_time:33608ms step_avg:94.14ms +step:358/1695 train_time:33701ms step_avg:94.14ms +step:359/1695 train_time:33795ms step_avg:94.14ms +step:360/1695 train_time:33890ms step_avg:94.14ms +step:361/1695 train_time:33985ms step_avg:94.14ms +step:362/1695 train_time:34079ms step_avg:94.14ms +step:363/1695 train_time:34173ms step_avg:94.14ms +step:364/1695 train_time:34268ms step_avg:94.14ms +step:365/1695 train_time:34362ms step_avg:94.14ms +step:366/1695 train_time:34457ms step_avg:94.14ms +step:367/1695 train_time:34552ms step_avg:94.15ms +step:368/1695 train_time:34648ms step_avg:94.15ms +step:369/1695 train_time:34741ms step_avg:94.15ms +step:370/1695 train_time:34835ms step_avg:94.15ms +step:371/1695 train_time:34931ms step_avg:94.15ms +step:372/1695 train_time:35025ms step_avg:94.15ms +step:373/1695 train_time:35119ms step_avg:94.15ms +step:374/1695 train_time:35213ms step_avg:94.15ms +step:375/1695 train_time:35308ms step_avg:94.15ms +step:375/1695 val_loss:3.8701 train_time:35400ms step_avg:94.40ms +step:376/1695 train_time:35426ms step_avg:94.22ms +step:377/1695 train_time:35504ms step_avg:94.18ms +step:378/1695 train_time:35605ms step_avg:94.19ms +step:379/1695 train_time:35702ms step_avg:94.20ms +step:380/1695 train_time:35798ms step_avg:94.20ms +step:381/1695 train_time:35893ms step_avg:94.21ms +step:382/1695 train_time:35988ms step_avg:94.21ms +step:383/1695 train_time:36084ms step_avg:94.21ms +step:384/1695 train_time:36179ms step_avg:94.22ms +step:385/1695 train_time:36274ms step_avg:94.22ms +step:386/1695 train_time:36370ms step_avg:94.22ms +step:387/1695 train_time:36467ms step_avg:94.23ms +step:388/1695 train_time:36566ms step_avg:94.24ms +step:389/1695 train_time:36665ms step_avg:94.25ms +step:390/1695 train_time:36762ms step_avg:94.26ms +step:391/1695 train_time:36859ms step_avg:94.27ms +step:392/1695 train_time:36955ms step_avg:94.27ms +step:393/1695 train_time:37051ms step_avg:94.28ms +step:394/1695 train_time:37146ms step_avg:94.28ms +step:395/1695 train_time:37242ms step_avg:94.28ms +step:396/1695 train_time:37338ms step_avg:94.29ms +step:397/1695 train_time:37434ms step_avg:94.29ms +step:398/1695 train_time:37530ms step_avg:94.30ms +step:399/1695 train_time:37626ms step_avg:94.30ms +step:400/1695 train_time:37724ms step_avg:94.31ms +step:401/1695 train_time:37821ms step_avg:94.32ms +step:402/1695 train_time:37918ms step_avg:94.32ms +step:403/1695 train_time:38013ms step_avg:94.33ms +step:404/1695 train_time:38109ms step_avg:94.33ms +step:405/1695 train_time:38205ms step_avg:94.33ms +step:406/1695 train_time:38301ms step_avg:94.34ms +step:407/1695 train_time:38397ms step_avg:94.34ms +step:408/1695 train_time:38493ms step_avg:94.35ms +step:409/1695 train_time:38589ms step_avg:94.35ms +step:410/1695 train_time:38685ms step_avg:94.35ms +step:411/1695 train_time:38782ms step_avg:94.36ms +step:412/1695 train_time:38880ms step_avg:94.37ms +step:413/1695 train_time:38976ms step_avg:94.37ms +step:414/1695 train_time:39072ms step_avg:94.38ms +step:415/1695 train_time:39168ms step_avg:94.38ms +step:416/1695 train_time:39264ms step_avg:94.38ms +step:417/1695 train_time:39361ms step_avg:94.39ms +step:418/1695 train_time:39457ms step_avg:94.39ms +step:419/1695 train_time:39553ms step_avg:94.40ms +step:420/1695 train_time:39649ms step_avg:94.40ms +step:421/1695 train_time:39744ms step_avg:94.40ms +step:422/1695 train_time:39842ms step_avg:94.41ms +step:423/1695 train_time:39939ms step_avg:94.42ms +step:424/1695 train_time:40034ms step_avg:94.42ms +step:425/1695 train_time:40130ms 
step_avg:94.42ms +step:426/1695 train_time:40226ms step_avg:94.43ms +step:427/1695 train_time:40322ms step_avg:94.43ms +step:428/1695 train_time:40419ms step_avg:94.44ms +step:429/1695 train_time:40516ms step_avg:94.44ms +step:430/1695 train_time:40611ms step_avg:94.44ms +step:431/1695 train_time:40707ms step_avg:94.45ms +step:432/1695 train_time:40803ms step_avg:94.45ms +step:433/1695 train_time:40900ms step_avg:94.46ms +step:434/1695 train_time:40997ms step_avg:94.46ms +step:435/1695 train_time:41093ms step_avg:94.47ms +step:436/1695 train_time:41188ms step_avg:94.47ms +step:437/1695 train_time:41285ms step_avg:94.47ms +step:438/1695 train_time:41382ms step_avg:94.48ms +step:439/1695 train_time:41478ms step_avg:94.48ms +step:440/1695 train_time:41574ms step_avg:94.49ms +step:441/1695 train_time:41669ms step_avg:94.49ms +step:442/1695 train_time:41766ms step_avg:94.49ms +step:443/1695 train_time:41863ms step_avg:94.50ms +step:444/1695 train_time:41960ms step_avg:94.50ms +step:445/1695 train_time:42057ms step_avg:94.51ms +step:446/1695 train_time:42153ms step_avg:94.51ms +step:447/1695 train_time:42249ms step_avg:94.52ms +step:448/1695 train_time:42345ms step_avg:94.52ms +step:449/1695 train_time:42441ms step_avg:94.52ms +step:450/1695 train_time:42538ms step_avg:94.53ms +step:451/1695 train_time:42634ms step_avg:94.53ms +step:452/1695 train_time:42729ms step_avg:94.53ms +step:453/1695 train_time:42826ms step_avg:94.54ms +step:454/1695 train_time:42922ms step_avg:94.54ms +step:455/1695 train_time:43019ms step_avg:94.55ms +step:456/1695 train_time:43115ms step_avg:94.55ms +step:457/1695 train_time:43211ms step_avg:94.55ms +step:458/1695 train_time:43307ms step_avg:94.56ms +step:459/1695 train_time:43404ms step_avg:94.56ms +step:460/1695 train_time:43500ms step_avg:94.57ms +step:461/1695 train_time:43596ms step_avg:94.57ms +step:462/1695 train_time:43692ms step_avg:94.57ms +step:463/1695 train_time:43788ms step_avg:94.57ms +step:464/1695 train_time:43885ms step_avg:94.58ms +step:465/1695 train_time:43981ms step_avg:94.58ms +step:466/1695 train_time:44078ms step_avg:94.59ms +step:467/1695 train_time:44174ms step_avg:94.59ms +step:468/1695 train_time:44270ms step_avg:94.59ms +step:469/1695 train_time:44366ms step_avg:94.60ms +step:470/1695 train_time:44463ms step_avg:94.60ms +step:471/1695 train_time:44560ms step_avg:94.61ms +step:472/1695 train_time:44656ms step_avg:94.61ms +step:473/1695 train_time:44752ms step_avg:94.61ms +step:474/1695 train_time:44847ms step_avg:94.61ms +step:475/1695 train_time:44943ms step_avg:94.62ms +step:476/1695 train_time:45039ms step_avg:94.62ms +step:477/1695 train_time:45135ms step_avg:94.62ms +step:478/1695 train_time:45230ms step_avg:94.62ms +step:479/1695 train_time:45326ms step_avg:94.63ms +step:480/1695 train_time:45423ms step_avg:94.63ms +step:481/1695 train_time:45520ms step_avg:94.64ms +step:482/1695 train_time:45617ms step_avg:94.64ms +step:483/1695 train_time:45713ms step_avg:94.64ms +step:484/1695 train_time:45809ms step_avg:94.65ms +step:485/1695 train_time:45906ms step_avg:94.65ms +step:486/1695 train_time:46002ms step_avg:94.65ms +step:487/1695 train_time:46099ms step_avg:94.66ms +step:488/1695 train_time:46195ms step_avg:94.66ms +step:489/1695 train_time:46291ms step_avg:94.66ms +step:490/1695 train_time:46387ms step_avg:94.67ms +step:491/1695 train_time:46484ms step_avg:94.67ms +step:492/1695 train_time:46581ms step_avg:94.68ms +step:493/1695 train_time:46678ms step_avg:94.68ms +step:494/1695 train_time:46774ms step_avg:94.68ms +step:495/1695 
train_time:46869ms step_avg:94.69ms +step:496/1695 train_time:46965ms step_avg:94.69ms +step:497/1695 train_time:47063ms step_avg:94.69ms +step:498/1695 train_time:47160ms step_avg:94.70ms +step:499/1695 train_time:47257ms step_avg:94.70ms +step:500/1695 train_time:47353ms step_avg:94.71ms +step:500/1695 val_loss:3.7285 train_time:47446ms step_avg:94.89ms +step:501/1695 train_time:47472ms step_avg:94.76ms +step:502/1695 train_time:47553ms step_avg:94.73ms +step:503/1695 train_time:47653ms step_avg:94.74ms +step:504/1695 train_time:47748ms step_avg:94.74ms +step:505/1695 train_time:47844ms step_avg:94.74ms +step:506/1695 train_time:47941ms step_avg:94.74ms +step:507/1695 train_time:48037ms step_avg:94.75ms +step:508/1695 train_time:48132ms step_avg:94.75ms +step:509/1695 train_time:48227ms step_avg:94.75ms +step:510/1695 train_time:48324ms step_avg:94.75ms +step:511/1695 train_time:48420ms step_avg:94.76ms +step:512/1695 train_time:48518ms step_avg:94.76ms +step:513/1695 train_time:48616ms step_avg:94.77ms +step:514/1695 train_time:48714ms step_avg:94.78ms +step:515/1695 train_time:48810ms step_avg:94.78ms +step:516/1695 train_time:48906ms step_avg:94.78ms +step:517/1695 train_time:49003ms step_avg:94.78ms +step:518/1695 train_time:49099ms step_avg:94.79ms +step:519/1695 train_time:49195ms step_avg:94.79ms +step:520/1695 train_time:49290ms step_avg:94.79ms +step:521/1695 train_time:49386ms step_avg:94.79ms +step:522/1695 train_time:49484ms step_avg:94.80ms +step:523/1695 train_time:49581ms step_avg:94.80ms +step:524/1695 train_time:49678ms step_avg:94.81ms +step:525/1695 train_time:49776ms step_avg:94.81ms +step:526/1695 train_time:49871ms step_avg:94.81ms +step:527/1695 train_time:49968ms step_avg:94.82ms +step:528/1695 train_time:50064ms step_avg:94.82ms +step:529/1695 train_time:50160ms step_avg:94.82ms +step:530/1695 train_time:50256ms step_avg:94.82ms +step:531/1695 train_time:50352ms step_avg:94.82ms +step:532/1695 train_time:50447ms step_avg:94.83ms +step:533/1695 train_time:50544ms step_avg:94.83ms +step:534/1695 train_time:50641ms step_avg:94.83ms +step:535/1695 train_time:50737ms step_avg:94.84ms +step:536/1695 train_time:50834ms step_avg:94.84ms +step:537/1695 train_time:50930ms step_avg:94.84ms +step:538/1695 train_time:51026ms step_avg:94.84ms +step:539/1695 train_time:51123ms step_avg:94.85ms +step:540/1695 train_time:51220ms step_avg:94.85ms +step:541/1695 train_time:51316ms step_avg:94.85ms +step:542/1695 train_time:51412ms step_avg:94.86ms +step:543/1695 train_time:51507ms step_avg:94.86ms +step:544/1695 train_time:51604ms step_avg:94.86ms +step:545/1695 train_time:51701ms step_avg:94.86ms +step:546/1695 train_time:51798ms step_avg:94.87ms +step:547/1695 train_time:51894ms step_avg:94.87ms +step:548/1695 train_time:51990ms step_avg:94.87ms +step:549/1695 train_time:52086ms step_avg:94.87ms +step:550/1695 train_time:52183ms step_avg:94.88ms +step:551/1695 train_time:52281ms step_avg:94.88ms +step:552/1695 train_time:52378ms step_avg:94.89ms +step:553/1695 train_time:52473ms step_avg:94.89ms +step:554/1695 train_time:52569ms step_avg:94.89ms +step:555/1695 train_time:52665ms step_avg:94.89ms +step:556/1695 train_time:52763ms step_avg:94.90ms +step:557/1695 train_time:52860ms step_avg:94.90ms +step:558/1695 train_time:52956ms step_avg:94.90ms +step:559/1695 train_time:53052ms step_avg:94.91ms +step:560/1695 train_time:53148ms step_avg:94.91ms +step:561/1695 train_time:53245ms step_avg:94.91ms +step:562/1695 train_time:53342ms step_avg:94.91ms +step:563/1695 train_time:53439ms 
step_avg:94.92ms +step:564/1695 train_time:53535ms step_avg:94.92ms +step:565/1695 train_time:53632ms step_avg:94.92ms +step:566/1695 train_time:53728ms step_avg:94.93ms +step:567/1695 train_time:53824ms step_avg:94.93ms +step:568/1695 train_time:53921ms step_avg:94.93ms +step:569/1695 train_time:54018ms step_avg:94.93ms +step:570/1695 train_time:54114ms step_avg:94.94ms +step:571/1695 train_time:54210ms step_avg:94.94ms +step:572/1695 train_time:54306ms step_avg:94.94ms +step:573/1695 train_time:54403ms step_avg:94.94ms +step:574/1695 train_time:54500ms step_avg:94.95ms +step:575/1695 train_time:54598ms step_avg:94.95ms +step:576/1695 train_time:54696ms step_avg:94.96ms +step:577/1695 train_time:54792ms step_avg:94.96ms +step:578/1695 train_time:54888ms step_avg:94.96ms +step:579/1695 train_time:54985ms step_avg:94.97ms +step:580/1695 train_time:55082ms step_avg:94.97ms +step:581/1695 train_time:55179ms step_avg:94.97ms +step:582/1695 train_time:55276ms step_avg:94.98ms +step:583/1695 train_time:55372ms step_avg:94.98ms +step:584/1695 train_time:55468ms step_avg:94.98ms +step:585/1695 train_time:55566ms step_avg:94.98ms +step:586/1695 train_time:55663ms step_avg:94.99ms +step:587/1695 train_time:55760ms step_avg:94.99ms +step:588/1695 train_time:55858ms step_avg:95.00ms +step:589/1695 train_time:55953ms step_avg:95.00ms +step:590/1695 train_time:56048ms step_avg:95.00ms +step:591/1695 train_time:56145ms step_avg:95.00ms +step:592/1695 train_time:56242ms step_avg:95.00ms +step:593/1695 train_time:56338ms step_avg:95.01ms +step:594/1695 train_time:56435ms step_avg:95.01ms +step:595/1695 train_time:56530ms step_avg:95.01ms +step:596/1695 train_time:56627ms step_avg:95.01ms +step:597/1695 train_time:56723ms step_avg:95.01ms +step:598/1695 train_time:56821ms step_avg:95.02ms +step:599/1695 train_time:56918ms step_avg:95.02ms +step:600/1695 train_time:57014ms step_avg:95.02ms +step:601/1695 train_time:57110ms step_avg:95.02ms +step:602/1695 train_time:57205ms step_avg:95.03ms +step:603/1695 train_time:57302ms step_avg:95.03ms +step:604/1695 train_time:57399ms step_avg:95.03ms +step:605/1695 train_time:57496ms step_avg:95.03ms +step:606/1695 train_time:57592ms step_avg:95.04ms +step:607/1695 train_time:57688ms step_avg:95.04ms +step:608/1695 train_time:57785ms step_avg:95.04ms +step:609/1695 train_time:57882ms step_avg:95.04ms +step:610/1695 train_time:57978ms step_avg:95.05ms +step:611/1695 train_time:58074ms step_avg:95.05ms +step:612/1695 train_time:58169ms step_avg:95.05ms +step:613/1695 train_time:58266ms step_avg:95.05ms +step:614/1695 train_time:58364ms step_avg:95.06ms +step:615/1695 train_time:58461ms step_avg:95.06ms +step:616/1695 train_time:58558ms step_avg:95.06ms +step:617/1695 train_time:58655ms step_avg:95.06ms +step:618/1695 train_time:58751ms step_avg:95.07ms +step:619/1695 train_time:58847ms step_avg:95.07ms +step:620/1695 train_time:58945ms step_avg:95.07ms +step:621/1695 train_time:59042ms step_avg:95.08ms +step:622/1695 train_time:59139ms step_avg:95.08ms +step:623/1695 train_time:59236ms step_avg:95.08ms +step:624/1695 train_time:59332ms step_avg:95.08ms +step:625/1695 train_time:59429ms step_avg:95.09ms +step:625/1695 val_loss:3.6465 train_time:59524ms step_avg:95.24ms +step:626/1695 train_time:59550ms step_avg:95.13ms +step:627/1695 train_time:59630ms step_avg:95.10ms +step:628/1695 train_time:59729ms step_avg:95.11ms +step:629/1695 train_time:59825ms step_avg:95.11ms +step:630/1695 train_time:59922ms step_avg:95.11ms +step:631/1695 train_time:60019ms step_avg:95.12ms 
+step:632/1695 train_time:60117ms step_avg:95.12ms +step:633/1695 train_time:60214ms step_avg:95.13ms +step:634/1695 train_time:60311ms step_avg:95.13ms +step:635/1695 train_time:60408ms step_avg:95.13ms +step:636/1695 train_time:60505ms step_avg:95.13ms +step:637/1695 train_time:60603ms step_avg:95.14ms +step:638/1695 train_time:60702ms step_avg:95.14ms +step:639/1695 train_time:60800ms step_avg:95.15ms +step:640/1695 train_time:60899ms step_avg:95.15ms +step:641/1695 train_time:60996ms step_avg:95.16ms +step:642/1695 train_time:61094ms step_avg:95.16ms +step:643/1695 train_time:61191ms step_avg:95.16ms +step:644/1695 train_time:61289ms step_avg:95.17ms +step:645/1695 train_time:61386ms step_avg:95.17ms +step:646/1695 train_time:61484ms step_avg:95.18ms +step:647/1695 train_time:61888ms step_avg:95.65ms +step:648/1695 train_time:61984ms step_avg:95.65ms +step:649/1695 train_time:62080ms step_avg:95.65ms +step:650/1695 train_time:62177ms step_avg:95.66ms +step:651/1695 train_time:62274ms step_avg:95.66ms +step:652/1695 train_time:62371ms step_avg:95.66ms +step:653/1695 train_time:62468ms step_avg:95.66ms +step:654/1695 train_time:62565ms step_avg:95.66ms +step:655/1695 train_time:62661ms step_avg:95.67ms +step:656/1695 train_time:62759ms step_avg:95.67ms +step:657/1695 train_time:62862ms step_avg:95.68ms +step:658/1695 train_time:62961ms step_avg:95.69ms +step:659/1695 train_time:63059ms step_avg:95.69ms +step:660/1695 train_time:63157ms step_avg:95.69ms +step:661/1695 train_time:63254ms step_avg:95.70ms +step:662/1695 train_time:63352ms step_avg:95.70ms +step:663/1695 train_time:63449ms step_avg:95.70ms +step:664/1695 train_time:63547ms step_avg:95.70ms +step:665/1695 train_time:63643ms step_avg:95.70ms +step:666/1695 train_time:63740ms step_avg:95.71ms +step:667/1695 train_time:63839ms step_avg:95.71ms +step:668/1695 train_time:63938ms step_avg:95.72ms +step:669/1695 train_time:64037ms step_avg:95.72ms +step:670/1695 train_time:64135ms step_avg:95.72ms +step:671/1695 train_time:64233ms step_avg:95.73ms +step:672/1695 train_time:64330ms step_avg:95.73ms +step:673/1695 train_time:64429ms step_avg:95.73ms +step:674/1695 train_time:64527ms step_avg:95.74ms +step:675/1695 train_time:64624ms step_avg:95.74ms +step:676/1695 train_time:64722ms step_avg:95.74ms +step:677/1695 train_time:64819ms step_avg:95.74ms +step:678/1695 train_time:64918ms step_avg:95.75ms +step:679/1695 train_time:65016ms step_avg:95.75ms +step:680/1695 train_time:65115ms step_avg:95.76ms +step:681/1695 train_time:65213ms step_avg:95.76ms +step:682/1695 train_time:65310ms step_avg:95.76ms +step:683/1695 train_time:65408ms step_avg:95.77ms +step:684/1695 train_time:65505ms step_avg:95.77ms +step:685/1695 train_time:65602ms step_avg:95.77ms +step:686/1695 train_time:65699ms step_avg:95.77ms +step:687/1695 train_time:65797ms step_avg:95.77ms +step:688/1695 train_time:65896ms step_avg:95.78ms +step:689/1695 train_time:65994ms step_avg:95.78ms +step:690/1695 train_time:66092ms step_avg:95.79ms +step:691/1695 train_time:66190ms step_avg:95.79ms +step:692/1695 train_time:66288ms step_avg:95.79ms +step:693/1695 train_time:66385ms step_avg:95.79ms +step:694/1695 train_time:66483ms step_avg:95.80ms +step:695/1695 train_time:66815ms step_avg:96.14ms +step:696/1695 train_time:66911ms step_avg:96.14ms +step:697/1695 train_time:67008ms step_avg:96.14ms +step:698/1695 train_time:67105ms step_avg:96.14ms +step:699/1695 train_time:67202ms step_avg:96.14ms +step:700/1695 train_time:67299ms step_avg:96.14ms +step:701/1695 train_time:67626ms 
step_avg:96.47ms +step:702/1695 train_time:67721ms step_avg:96.47ms +step:703/1695 train_time:67818ms step_avg:96.47ms +step:704/1695 train_time:67916ms step_avg:96.47ms +step:705/1695 train_time:68013ms step_avg:96.47ms +step:706/1695 train_time:68110ms step_avg:96.47ms +step:707/1695 train_time:68207ms step_avg:96.47ms +step:708/1695 train_time:68304ms step_avg:96.47ms +step:709/1695 train_time:68401ms step_avg:96.48ms +step:710/1695 train_time:68500ms step_avg:96.48ms +step:711/1695 train_time:68601ms step_avg:96.49ms +step:712/1695 train_time:68700ms step_avg:96.49ms +step:713/1695 train_time:68797ms step_avg:96.49ms +step:714/1695 train_time:68895ms step_avg:96.49ms +step:715/1695 train_time:68993ms step_avg:96.49ms +step:716/1695 train_time:69090ms step_avg:96.50ms +step:717/1695 train_time:69188ms step_avg:96.50ms +step:718/1695 train_time:69284ms step_avg:96.50ms +step:719/1695 train_time:69381ms step_avg:96.50ms +step:720/1695 train_time:69478ms step_avg:96.50ms +step:721/1695 train_time:69578ms step_avg:96.50ms +step:722/1695 train_time:69677ms step_avg:96.51ms +step:723/1695 train_time:69776ms step_avg:96.51ms +step:724/1695 train_time:69873ms step_avg:96.51ms +step:725/1695 train_time:69971ms step_avg:96.51ms +step:726/1695 train_time:70069ms step_avg:96.51ms +step:727/1695 train_time:70167ms step_avg:96.52ms +step:728/1695 train_time:70265ms step_avg:96.52ms +step:729/1695 train_time:70362ms step_avg:96.52ms +step:730/1695 train_time:70459ms step_avg:96.52ms +step:731/1695 train_time:70557ms step_avg:96.52ms +step:732/1695 train_time:70656ms step_avg:96.52ms +step:733/1695 train_time:70754ms step_avg:96.53ms +step:734/1695 train_time:70852ms step_avg:96.53ms +step:735/1695 train_time:70950ms step_avg:96.53ms +step:736/1695 train_time:71049ms step_avg:96.53ms +step:737/1695 train_time:71145ms step_avg:96.53ms +step:738/1695 train_time:71243ms step_avg:96.53ms +step:739/1695 train_time:71340ms step_avg:96.54ms +step:740/1695 train_time:71438ms step_avg:96.54ms +step:741/1695 train_time:71537ms step_avg:96.54ms +step:742/1695 train_time:71635ms step_avg:96.54ms +step:743/1695 train_time:71733ms step_avg:96.54ms +step:744/1695 train_time:71831ms step_avg:96.55ms +step:745/1695 train_time:71929ms step_avg:96.55ms +step:746/1695 train_time:72027ms step_avg:96.55ms +step:747/1695 train_time:72125ms step_avg:96.55ms +step:748/1695 train_time:72222ms step_avg:96.55ms +step:749/1695 train_time:72319ms step_avg:96.55ms +step:750/1695 train_time:72417ms step_avg:96.56ms +step:750/1695 val_loss:3.5809 train_time:72513ms step_avg:96.68ms +step:751/1695 train_time:72539ms step_avg:96.59ms +step:752/1695 train_time:72622ms step_avg:96.57ms +step:753/1695 train_time:72723ms step_avg:96.58ms +step:754/1695 train_time:72822ms step_avg:96.58ms +step:755/1695 train_time:72920ms step_avg:96.58ms +step:756/1695 train_time:73018ms step_avg:96.58ms +step:757/1695 train_time:73116ms step_avg:96.59ms +step:758/1695 train_time:73213ms step_avg:96.59ms +step:759/1695 train_time:73311ms step_avg:96.59ms +step:760/1695 train_time:73408ms step_avg:96.59ms +step:761/1695 train_time:73506ms step_avg:96.59ms +step:762/1695 train_time:73605ms step_avg:96.59ms +step:763/1695 train_time:73705ms step_avg:96.60ms +step:764/1695 train_time:73803ms step_avg:96.60ms +step:765/1695 train_time:73902ms step_avg:96.60ms +step:766/1695 train_time:74000ms step_avg:96.61ms +step:767/1695 train_time:74099ms step_avg:96.61ms +step:768/1695 train_time:74196ms step_avg:96.61ms +step:769/1695 train_time:74294ms step_avg:96.61ms 
+step:770/1695 train_time:74392ms step_avg:96.61ms +step:771/1695 train_time:74490ms step_avg:96.62ms +step:772/1695 train_time:74589ms step_avg:96.62ms +step:773/1695 train_time:74687ms step_avg:96.62ms +step:774/1695 train_time:74785ms step_avg:96.62ms +step:775/1695 train_time:74883ms step_avg:96.62ms +step:776/1695 train_time:74982ms step_avg:96.63ms +step:777/1695 train_time:75080ms step_avg:96.63ms +step:778/1695 train_time:75477ms step_avg:97.01ms +step:779/1695 train_time:75573ms step_avg:97.01ms +step:780/1695 train_time:75670ms step_avg:97.01ms +step:781/1695 train_time:75767ms step_avg:97.01ms +step:782/1695 train_time:75865ms step_avg:97.01ms +step:783/1695 train_time:75962ms step_avg:97.01ms +step:784/1695 train_time:76287ms step_avg:97.31ms +step:785/1695 train_time:76384ms step_avg:97.30ms +step:786/1695 train_time:76481ms step_avg:97.30ms +step:787/1695 train_time:76578ms step_avg:97.30ms +step:788/1695 train_time:76675ms step_avg:97.30ms +step:789/1695 train_time:76772ms step_avg:97.30ms +step:790/1695 train_time:77113ms step_avg:97.61ms +step:791/1695 train_time:77209ms step_avg:97.61ms +step:792/1695 train_time:77306ms step_avg:97.61ms +step:793/1695 train_time:77404ms step_avg:97.61ms +step:794/1695 train_time:77501ms step_avg:97.61ms +step:795/1695 train_time:77599ms step_avg:97.61ms +step:796/1695 train_time:77697ms step_avg:97.61ms +step:797/1695 train_time:77794ms step_avg:97.61ms +step:798/1695 train_time:77891ms step_avg:97.61ms +step:799/1695 train_time:77988ms step_avg:97.61ms +step:800/1695 train_time:78090ms step_avg:97.61ms +step:801/1695 train_time:78471ms step_avg:97.97ms +step:802/1695 train_time:78522ms step_avg:97.91ms +step:803/1695 train_time:78618ms step_avg:97.91ms +step:804/1695 train_time:78715ms step_avg:97.90ms +step:805/1695 train_time:78812ms step_avg:97.90ms +step:806/1695 train_time:78909ms step_avg:97.90ms +step:807/1695 train_time:79006ms step_avg:97.90ms +step:808/1695 train_time:79104ms step_avg:97.90ms +step:809/1695 train_time:79201ms step_avg:97.90ms +step:810/1695 train_time:79299ms step_avg:97.90ms +step:811/1695 train_time:79398ms step_avg:97.90ms +step:812/1695 train_time:79498ms step_avg:97.90ms +step:813/1695 train_time:79598ms step_avg:97.91ms +step:814/1695 train_time:79696ms step_avg:97.91ms +step:815/1695 train_time:80037ms step_avg:98.20ms +step:816/1695 train_time:80133ms step_avg:98.20ms +step:817/1695 train_time:80229ms step_avg:98.20ms +step:818/1695 train_time:80327ms step_avg:98.20ms +step:819/1695 train_time:80424ms step_avg:98.20ms +step:820/1695 train_time:80522ms step_avg:98.20ms +step:821/1695 train_time:80620ms step_avg:98.20ms +step:822/1695 train_time:80717ms step_avg:98.20ms +step:823/1695 train_time:80815ms step_avg:98.20ms +step:824/1695 train_time:80915ms step_avg:98.20ms +step:825/1695 train_time:81016ms step_avg:98.20ms +step:826/1695 train_time:81116ms step_avg:98.20ms +step:827/1695 train_time:81215ms step_avg:98.20ms +step:828/1695 train_time:81313ms step_avg:98.20ms +step:829/1695 train_time:81410ms step_avg:98.20ms +step:830/1695 train_time:81508ms step_avg:98.20ms +step:831/1695 train_time:81606ms step_avg:98.20ms +step:832/1695 train_time:81704ms step_avg:98.20ms +step:833/1695 train_time:81802ms step_avg:98.20ms +step:834/1695 train_time:81900ms step_avg:98.20ms +step:835/1695 train_time:81999ms step_avg:98.20ms +step:836/1695 train_time:82098ms step_avg:98.20ms +step:837/1695 train_time:82196ms step_avg:98.20ms +step:838/1695 train_time:82295ms step_avg:98.20ms +step:839/1695 train_time:82394ms 
step_avg:98.21ms +step:840/1695 train_time:82492ms step_avg:98.20ms +step:841/1695 train_time:82590ms step_avg:98.20ms +step:842/1695 train_time:82687ms step_avg:98.20ms +step:843/1695 train_time:82785ms step_avg:98.20ms +step:844/1695 train_time:82882ms step_avg:98.20ms +step:845/1695 train_time:82980ms step_avg:98.20ms +step:846/1695 train_time:83079ms step_avg:98.20ms +step:847/1695 train_time:83178ms step_avg:98.20ms +step:848/1695 train_time:83277ms step_avg:98.20ms +step:849/1695 train_time:83376ms step_avg:98.20ms +step:850/1695 train_time:83474ms step_avg:98.20ms +step:851/1695 train_time:83572ms step_avg:98.21ms +step:852/1695 train_time:83671ms step_avg:98.21ms +step:853/1695 train_time:83769ms step_avg:98.21ms +step:854/1695 train_time:83868ms step_avg:98.21ms +step:855/1695 train_time:83966ms step_avg:98.21ms +step:856/1695 train_time:84063ms step_avg:98.20ms +step:857/1695 train_time:84161ms step_avg:98.20ms +step:858/1695 train_time:84260ms step_avg:98.21ms +step:859/1695 train_time:84360ms step_avg:98.21ms +step:860/1695 train_time:84459ms step_avg:98.21ms +step:861/1695 train_time:84558ms step_avg:98.21ms +step:862/1695 train_time:84657ms step_avg:98.21ms +step:863/1695 train_time:84756ms step_avg:98.21ms +step:864/1695 train_time:84854ms step_avg:98.21ms +step:865/1695 train_time:84953ms step_avg:98.21ms +step:866/1695 train_time:85052ms step_avg:98.21ms +step:867/1695 train_time:85148ms step_avg:98.21ms +step:868/1695 train_time:85246ms step_avg:98.21ms +step:869/1695 train_time:85345ms step_avg:98.21ms +step:870/1695 train_time:85442ms step_avg:98.21ms +step:871/1695 train_time:85542ms step_avg:98.21ms +step:872/1695 train_time:85641ms step_avg:98.21ms +step:873/1695 train_time:85740ms step_avg:98.21ms +step:874/1695 train_time:85840ms step_avg:98.22ms +step:875/1695 train_time:85939ms step_avg:98.22ms +step:875/1695 val_loss:3.5360 train_time:86035ms step_avg:98.33ms +step:876/1695 train_time:86061ms step_avg:98.24ms +step:877/1695 train_time:86146ms step_avg:98.23ms +step:878/1695 train_time:86249ms step_avg:98.23ms +step:879/1695 train_time:86349ms step_avg:98.24ms +step:880/1695 train_time:86447ms step_avg:98.24ms +step:881/1695 train_time:86547ms step_avg:98.24ms +step:882/1695 train_time:86647ms step_avg:98.24ms +step:883/1695 train_time:86746ms step_avg:98.24ms +step:884/1695 train_time:86845ms step_avg:98.24ms +step:885/1695 train_time:86944ms step_avg:98.24ms +step:886/1695 train_time:87045ms step_avg:98.24ms +step:887/1695 train_time:87147ms step_avg:98.25ms +step:888/1695 train_time:87249ms step_avg:98.25ms +step:889/1695 train_time:87350ms step_avg:98.26ms +step:890/1695 train_time:87450ms step_avg:98.26ms +step:891/1695 train_time:87549ms step_avg:98.26ms +step:892/1695 train_time:87648ms step_avg:98.26ms +step:893/1695 train_time:87748ms step_avg:98.26ms +step:894/1695 train_time:87847ms step_avg:98.26ms +step:895/1695 train_time:87946ms step_avg:98.26ms +step:896/1695 train_time:88046ms step_avg:98.27ms +step:897/1695 train_time:88147ms step_avg:98.27ms +step:898/1695 train_time:88249ms step_avg:98.27ms +step:899/1695 train_time:88349ms step_avg:98.28ms +step:900/1695 train_time:88450ms step_avg:98.28ms +step:901/1695 train_time:88549ms step_avg:98.28ms +step:902/1695 train_time:88648ms step_avg:98.28ms +step:903/1695 train_time:88748ms step_avg:98.28ms +step:904/1695 train_time:88847ms step_avg:98.28ms +step:905/1695 train_time:88946ms step_avg:98.28ms +step:906/1695 train_time:89046ms step_avg:98.28ms +step:907/1695 train_time:89147ms step_avg:98.29ms 
+step:908/1695 train_time:89248ms step_avg:98.29ms +step:909/1695 train_time:89349ms step_avg:98.29ms +step:910/1695 train_time:89450ms step_avg:98.30ms +step:911/1695 train_time:89549ms step_avg:98.30ms +step:912/1695 train_time:89649ms step_avg:98.30ms +step:913/1695 train_time:89749ms step_avg:98.30ms +step:914/1695 train_time:89848ms step_avg:98.30ms +step:915/1695 train_time:89948ms step_avg:98.30ms +step:916/1695 train_time:90047ms step_avg:98.30ms +step:917/1695 train_time:90148ms step_avg:98.31ms +step:918/1695 train_time:90249ms step_avg:98.31ms +step:919/1695 train_time:90349ms step_avg:98.31ms +step:920/1695 train_time:90450ms step_avg:98.32ms +step:921/1695 train_time:90550ms step_avg:98.32ms +step:922/1695 train_time:90649ms step_avg:98.32ms +step:923/1695 train_time:90749ms step_avg:98.32ms +step:924/1695 train_time:90849ms step_avg:98.32ms +step:925/1695 train_time:90949ms step_avg:98.32ms +step:926/1695 train_time:91050ms step_avg:98.33ms +step:927/1695 train_time:91149ms step_avg:98.33ms +step:928/1695 train_time:91249ms step_avg:98.33ms +step:929/1695 train_time:91350ms step_avg:98.33ms +step:930/1695 train_time:91449ms step_avg:98.33ms +step:931/1695 train_time:91549ms step_avg:98.33ms +step:932/1695 train_time:91649ms step_avg:98.34ms +step:933/1695 train_time:91748ms step_avg:98.34ms +step:934/1695 train_time:91848ms step_avg:98.34ms +step:935/1695 train_time:91947ms step_avg:98.34ms +step:936/1695 train_time:92047ms step_avg:98.34ms +step:937/1695 train_time:92148ms step_avg:98.34ms +step:938/1695 train_time:92248ms step_avg:98.35ms +step:939/1695 train_time:92348ms step_avg:98.35ms +step:940/1695 train_time:92449ms step_avg:98.35ms +step:941/1695 train_time:92550ms step_avg:98.35ms +step:942/1695 train_time:92650ms step_avg:98.35ms +step:943/1695 train_time:92750ms step_avg:98.36ms +step:944/1695 train_time:92849ms step_avg:98.36ms +step:945/1695 train_time:92950ms step_avg:98.36ms +step:946/1695 train_time:93049ms step_avg:98.36ms +step:947/1695 train_time:93149ms step_avg:98.36ms +step:948/1695 train_time:93249ms step_avg:98.36ms +step:949/1695 train_time:93348ms step_avg:98.36ms +step:950/1695 train_time:93448ms step_avg:98.37ms +step:951/1695 train_time:93548ms step_avg:98.37ms +step:952/1695 train_time:93649ms step_avg:98.37ms +step:953/1695 train_time:93749ms step_avg:98.37ms +step:954/1695 train_time:93848ms step_avg:98.37ms +step:955/1695 train_time:93948ms step_avg:98.37ms +step:956/1695 train_time:94048ms step_avg:98.38ms +step:957/1695 train_time:94148ms step_avg:98.38ms +step:958/1695 train_time:94248ms step_avg:98.38ms +step:959/1695 train_time:94348ms step_avg:98.38ms +step:960/1695 train_time:94448ms step_avg:98.38ms +step:961/1695 train_time:94549ms step_avg:98.39ms +step:962/1695 train_time:94649ms step_avg:98.39ms +step:963/1695 train_time:94748ms step_avg:98.39ms +step:964/1695 train_time:94848ms step_avg:98.39ms +step:965/1695 train_time:94948ms step_avg:98.39ms +step:966/1695 train_time:95048ms step_avg:98.39ms +step:967/1695 train_time:95148ms step_avg:98.39ms +step:968/1695 train_time:95248ms step_avg:98.40ms +step:969/1695 train_time:95349ms step_avg:98.40ms +step:970/1695 train_time:95448ms step_avg:98.40ms +step:971/1695 train_time:95548ms step_avg:98.40ms +step:972/1695 train_time:95648ms step_avg:98.40ms +step:973/1695 train_time:95748ms step_avg:98.40ms +step:974/1695 train_time:95847ms step_avg:98.41ms +step:975/1695 train_time:95947ms step_avg:98.41ms +step:976/1695 train_time:96047ms step_avg:98.41ms +step:977/1695 train_time:96147ms 
step_avg:98.41ms +step:978/1695 train_time:96248ms step_avg:98.41ms +step:979/1695 train_time:96349ms step_avg:98.42ms +step:980/1695 train_time:96449ms step_avg:98.42ms +step:981/1695 train_time:96548ms step_avg:98.42ms +step:982/1695 train_time:96649ms step_avg:98.42ms +step:983/1695 train_time:96749ms step_avg:98.42ms +step:984/1695 train_time:96848ms step_avg:98.42ms +step:985/1695 train_time:96948ms step_avg:98.42ms +step:986/1695 train_time:97049ms step_avg:98.43ms +step:987/1695 train_time:97150ms step_avg:98.43ms +step:988/1695 train_time:97250ms step_avg:98.43ms +step:989/1695 train_time:97350ms step_avg:98.43ms +step:990/1695 train_time:97449ms step_avg:98.43ms +step:991/1695 train_time:97550ms step_avg:98.44ms +step:992/1695 train_time:97650ms step_avg:98.44ms +step:993/1695 train_time:97750ms step_avg:98.44ms +step:994/1695 train_time:97850ms step_avg:98.44ms +step:995/1695 train_time:97949ms step_avg:98.44ms +step:996/1695 train_time:98049ms step_avg:98.44ms +step:997/1695 train_time:98149ms step_avg:98.44ms +step:998/1695 train_time:98248ms step_avg:98.44ms +step:999/1695 train_time:98349ms step_avg:98.45ms +step:1000/1695 train_time:98448ms step_avg:98.45ms +step:1000/1695 val_loss:3.4900 train_time:98546ms step_avg:98.55ms +step:1001/1695 train_time:98572ms step_avg:98.47ms +step:1002/1695 train_time:98657ms step_avg:98.46ms +step:1003/1695 train_time:98758ms step_avg:98.46ms +step:1004/1695 train_time:98858ms step_avg:98.46ms +step:1005/1695 train_time:98957ms step_avg:98.46ms +step:1006/1695 train_time:99056ms step_avg:98.47ms +step:1007/1695 train_time:99155ms step_avg:98.47ms +step:1008/1695 train_time:99255ms step_avg:98.47ms +step:1009/1695 train_time:99354ms step_avg:98.47ms +step:1010/1695 train_time:99452ms step_avg:98.47ms +step:1011/1695 train_time:99555ms step_avg:98.47ms +step:1012/1695 train_time:99658ms step_avg:98.48ms +step:1013/1695 train_time:99760ms step_avg:98.48ms +step:1014/1695 train_time:99860ms step_avg:98.48ms +step:1015/1695 train_time:99959ms step_avg:98.48ms +step:1016/1695 train_time:100058ms step_avg:98.48ms +step:1017/1695 train_time:100157ms step_avg:98.48ms +step:1018/1695 train_time:100256ms step_avg:98.48ms +step:1019/1695 train_time:100355ms step_avg:98.48ms +step:1020/1695 train_time:100455ms step_avg:98.49ms +step:1021/1695 train_time:100556ms step_avg:98.49ms +step:1022/1695 train_time:100658ms step_avg:98.49ms +step:1023/1695 train_time:100758ms step_avg:98.49ms +step:1024/1695 train_time:100860ms step_avg:98.50ms +step:1025/1695 train_time:100960ms step_avg:98.50ms +step:1026/1695 train_time:101060ms step_avg:98.50ms +step:1027/1695 train_time:101159ms step_avg:98.50ms +step:1028/1695 train_time:101258ms step_avg:98.50ms +step:1029/1695 train_time:101359ms step_avg:98.50ms +step:1030/1695 train_time:101459ms step_avg:98.50ms +step:1031/1695 train_time:101559ms step_avg:98.51ms +step:1032/1695 train_time:101658ms step_avg:98.51ms +step:1033/1695 train_time:101759ms step_avg:98.51ms +step:1034/1695 train_time:101859ms step_avg:98.51ms +step:1035/1695 train_time:101959ms step_avg:98.51ms +step:1036/1695 train_time:102059ms step_avg:98.51ms +step:1037/1695 train_time:102159ms step_avg:98.51ms +step:1038/1695 train_time:102259ms step_avg:98.52ms +step:1039/1695 train_time:102358ms step_avg:98.52ms +step:1040/1695 train_time:102457ms step_avg:98.52ms +step:1041/1695 train_time:102558ms step_avg:98.52ms +step:1042/1695 train_time:102657ms step_avg:98.52ms +step:1043/1695 train_time:102758ms step_avg:98.52ms +step:1044/1695 
train_time:102858ms step_avg:98.52ms +step:1045/1695 train_time:102959ms step_avg:98.52ms +step:1046/1695 train_time:103059ms step_avg:98.53ms +step:1047/1695 train_time:103158ms step_avg:98.53ms +step:1048/1695 train_time:103258ms step_avg:98.53ms +step:1049/1695 train_time:103357ms step_avg:98.53ms +step:1050/1695 train_time:103457ms step_avg:98.53ms +step:1051/1695 train_time:103557ms step_avg:98.53ms +step:1052/1695 train_time:103657ms step_avg:98.53ms +step:1053/1695 train_time:103757ms step_avg:98.53ms +step:1054/1695 train_time:103857ms step_avg:98.54ms +step:1055/1695 train_time:103958ms step_avg:98.54ms +step:1056/1695 train_time:104058ms step_avg:98.54ms +step:1057/1695 train_time:104157ms step_avg:98.54ms +step:1058/1695 train_time:104258ms step_avg:98.54ms +step:1059/1695 train_time:104358ms step_avg:98.54ms +step:1060/1695 train_time:104458ms step_avg:98.54ms +step:1061/1695 train_time:104557ms step_avg:98.55ms +step:1062/1695 train_time:104657ms step_avg:98.55ms +step:1063/1695 train_time:104759ms step_avg:98.55ms +step:1064/1695 train_time:104859ms step_avg:98.55ms +step:1065/1695 train_time:104959ms step_avg:98.55ms +step:1066/1695 train_time:105058ms step_avg:98.55ms +step:1067/1695 train_time:105158ms step_avg:98.55ms +step:1068/1695 train_time:105258ms step_avg:98.56ms +step:1069/1695 train_time:105358ms step_avg:98.56ms +step:1070/1695 train_time:105459ms step_avg:98.56ms +step:1071/1695 train_time:105558ms step_avg:98.56ms +step:1072/1695 train_time:105658ms step_avg:98.56ms +step:1073/1695 train_time:105758ms step_avg:98.56ms +step:1074/1695 train_time:105858ms step_avg:98.56ms +step:1075/1695 train_time:105958ms step_avg:98.57ms +step:1076/1695 train_time:106057ms step_avg:98.57ms +step:1077/1695 train_time:106159ms step_avg:98.57ms +step:1078/1695 train_time:106258ms step_avg:98.57ms +step:1079/1695 train_time:106358ms step_avg:98.57ms +step:1080/1695 train_time:106458ms step_avg:98.57ms +step:1081/1695 train_time:106558ms step_avg:98.57ms +step:1082/1695 train_time:106658ms step_avg:98.58ms +step:1083/1695 train_time:106758ms step_avg:98.58ms +step:1084/1695 train_time:106858ms step_avg:98.58ms +step:1085/1695 train_time:106959ms step_avg:98.58ms +step:1086/1695 train_time:107059ms step_avg:98.58ms +step:1087/1695 train_time:107158ms step_avg:98.58ms +step:1088/1695 train_time:107259ms step_avg:98.58ms +step:1089/1695 train_time:107358ms step_avg:98.58ms +step:1090/1695 train_time:107459ms step_avg:98.59ms +step:1091/1695 train_time:107559ms step_avg:98.59ms +step:1092/1695 train_time:107659ms step_avg:98.59ms +step:1093/1695 train_time:107759ms step_avg:98.59ms +step:1094/1695 train_time:107860ms step_avg:98.59ms +step:1095/1695 train_time:107959ms step_avg:98.59ms +step:1096/1695 train_time:108059ms step_avg:98.59ms +step:1097/1695 train_time:108158ms step_avg:98.59ms +step:1098/1695 train_time:108258ms step_avg:98.60ms +step:1099/1695 train_time:108358ms step_avg:98.60ms +step:1100/1695 train_time:108457ms step_avg:98.60ms +step:1101/1695 train_time:108557ms step_avg:98.60ms +step:1102/1695 train_time:108658ms step_avg:98.60ms +step:1103/1695 train_time:108757ms step_avg:98.60ms +step:1104/1695 train_time:108857ms step_avg:98.60ms +step:1105/1695 train_time:108958ms step_avg:98.60ms +step:1106/1695 train_time:109058ms step_avg:98.61ms +step:1107/1695 train_time:109158ms step_avg:98.61ms +step:1108/1695 train_time:109258ms step_avg:98.61ms +step:1109/1695 train_time:109357ms step_avg:98.61ms +step:1110/1695 train_time:109458ms step_avg:98.61ms +step:1111/1695 
train_time:109558ms step_avg:98.61ms +step:1112/1695 train_time:109658ms step_avg:98.61ms +step:1113/1695 train_time:109757ms step_avg:98.61ms +step:1114/1695 train_time:109858ms step_avg:98.62ms +step:1115/1695 train_time:109958ms step_avg:98.62ms +step:1116/1695 train_time:110057ms step_avg:98.62ms +step:1117/1695 train_time:110158ms step_avg:98.62ms +step:1118/1695 train_time:110258ms step_avg:98.62ms +step:1119/1695 train_time:110358ms step_avg:98.62ms +step:1120/1695 train_time:110457ms step_avg:98.62ms +step:1121/1695 train_time:110557ms step_avg:98.62ms +step:1122/1695 train_time:110657ms step_avg:98.63ms +step:1123/1695 train_time:110758ms step_avg:98.63ms +step:1124/1695 train_time:110858ms step_avg:98.63ms +step:1125/1695 train_time:110959ms step_avg:98.63ms +step:1125/1695 val_loss:3.4391 train_time:111057ms step_avg:98.72ms +step:1126/1695 train_time:111083ms step_avg:98.65ms +step:1127/1695 train_time:111170ms step_avg:98.64ms +step:1128/1695 train_time:111271ms step_avg:98.64ms +step:1129/1695 train_time:111371ms step_avg:98.65ms +step:1130/1695 train_time:111471ms step_avg:98.65ms +step:1131/1695 train_time:111570ms step_avg:98.65ms +step:1132/1695 train_time:111669ms step_avg:98.65ms +step:1133/1695 train_time:111769ms step_avg:98.65ms +step:1134/1695 train_time:111868ms step_avg:98.65ms +step:1135/1695 train_time:111969ms step_avg:98.65ms +step:1136/1695 train_time:112072ms step_avg:98.65ms +step:1137/1695 train_time:112174ms step_avg:98.66ms +step:1138/1695 train_time:112275ms step_avg:98.66ms +step:1139/1695 train_time:112375ms step_avg:98.66ms +step:1140/1695 train_time:112475ms step_avg:98.66ms +step:1141/1695 train_time:112575ms step_avg:98.66ms +step:1142/1695 train_time:112675ms step_avg:98.66ms +step:1143/1695 train_time:112774ms step_avg:98.67ms +step:1144/1695 train_time:112875ms step_avg:98.67ms +step:1145/1695 train_time:112976ms step_avg:98.67ms +step:1146/1695 train_time:113075ms step_avg:98.67ms +step:1147/1695 train_time:113176ms step_avg:98.67ms +step:1148/1695 train_time:113276ms step_avg:98.67ms +step:1149/1695 train_time:113376ms step_avg:98.67ms +step:1150/1695 train_time:113476ms step_avg:98.68ms +step:1151/1695 train_time:113576ms step_avg:98.68ms +step:1152/1695 train_time:113676ms step_avg:98.68ms +step:1153/1695 train_time:113777ms step_avg:98.68ms +step:1154/1695 train_time:113877ms step_avg:98.68ms +step:1155/1695 train_time:113977ms step_avg:98.68ms +step:1156/1695 train_time:114077ms step_avg:98.68ms +step:1157/1695 train_time:114178ms step_avg:98.68ms +step:1158/1695 train_time:114279ms step_avg:98.69ms +step:1159/1695 train_time:114379ms step_avg:98.69ms +step:1160/1695 train_time:114480ms step_avg:98.69ms +step:1161/1695 train_time:114580ms step_avg:98.69ms +step:1162/1695 train_time:114680ms step_avg:98.69ms +step:1163/1695 train_time:114782ms step_avg:98.69ms +step:1164/1695 train_time:114884ms step_avg:98.70ms +step:1165/1695 train_time:114986ms step_avg:98.70ms +step:1166/1695 train_time:115089ms step_avg:98.70ms +step:1167/1695 train_time:115190ms step_avg:98.71ms +step:1168/1695 train_time:115291ms step_avg:98.71ms +step:1169/1695 train_time:115392ms step_avg:98.71ms +step:1170/1695 train_time:115492ms step_avg:98.71ms +step:1171/1695 train_time:115593ms step_avg:98.71ms +step:1172/1695 train_time:115695ms step_avg:98.72ms +step:1173/1695 train_time:115795ms step_avg:98.72ms +step:1174/1695 train_time:115896ms step_avg:98.72ms +step:1175/1695 train_time:115997ms step_avg:98.72ms +step:1176/1695 train_time:116097ms step_avg:98.72ms 
+step:1177/1695 train_time:116197ms step_avg:98.72ms +step:1178/1695 train_time:116300ms step_avg:98.73ms +step:1179/1695 train_time:116402ms step_avg:98.73ms +step:1180/1695 train_time:116504ms step_avg:98.73ms +step:1181/1695 train_time:116605ms step_avg:98.73ms +step:1182/1695 train_time:116706ms step_avg:98.74ms +step:1183/1695 train_time:116806ms step_avg:98.74ms +step:1184/1695 train_time:116909ms step_avg:98.74ms +step:1185/1695 train_time:117011ms step_avg:98.74ms +step:1186/1695 train_time:117112ms step_avg:98.74ms +step:1187/1695 train_time:117212ms step_avg:98.75ms +step:1188/1695 train_time:117313ms step_avg:98.75ms +step:1189/1695 train_time:117414ms step_avg:98.75ms +step:1190/1695 train_time:117514ms step_avg:98.75ms +step:1191/1695 train_time:117616ms step_avg:98.75ms +step:1192/1695 train_time:117715ms step_avg:98.75ms +step:1193/1695 train_time:117816ms step_avg:98.76ms +step:1194/1695 train_time:117917ms step_avg:98.76ms +step:1195/1695 train_time:118016ms step_avg:98.76ms +step:1196/1695 train_time:118117ms step_avg:98.76ms +step:1197/1695 train_time:118217ms step_avg:98.76ms +step:1198/1695 train_time:118318ms step_avg:98.76ms +step:1199/1695 train_time:118419ms step_avg:98.76ms +step:1200/1695 train_time:118519ms step_avg:98.77ms +step:1201/1695 train_time:118620ms step_avg:98.77ms +step:1202/1695 train_time:118721ms step_avg:98.77ms +step:1203/1695 train_time:118823ms step_avg:98.77ms +step:1204/1695 train_time:118923ms step_avg:98.77ms +step:1205/1695 train_time:119024ms step_avg:98.78ms +step:1206/1695 train_time:119125ms step_avg:98.78ms +step:1207/1695 train_time:119228ms step_avg:98.78ms +step:1208/1695 train_time:119329ms step_avg:98.78ms +step:1209/1695 train_time:119430ms step_avg:98.78ms +step:1210/1695 train_time:119531ms step_avg:98.79ms +step:1211/1695 train_time:119632ms step_avg:98.79ms +step:1212/1695 train_time:119732ms step_avg:98.79ms +step:1213/1695 train_time:119833ms step_avg:98.79ms +step:1214/1695 train_time:119933ms step_avg:98.79ms +step:1215/1695 train_time:120034ms step_avg:98.79ms +step:1216/1695 train_time:120135ms step_avg:98.80ms +step:1217/1695 train_time:120235ms step_avg:98.80ms +step:1218/1695 train_time:120335ms step_avg:98.80ms +step:1219/1695 train_time:120435ms step_avg:98.80ms +step:1220/1695 train_time:120536ms step_avg:98.80ms +step:1221/1695 train_time:120636ms step_avg:98.80ms +step:1222/1695 train_time:120737ms step_avg:98.80ms +step:1223/1695 train_time:120837ms step_avg:98.80ms +step:1224/1695 train_time:120937ms step_avg:98.80ms +step:1225/1695 train_time:121038ms step_avg:98.81ms +step:1226/1695 train_time:121139ms step_avg:98.81ms +step:1227/1695 train_time:121241ms step_avg:98.81ms +step:1228/1695 train_time:121342ms step_avg:98.81ms +step:1229/1695 train_time:121443ms step_avg:98.81ms +step:1230/1695 train_time:121544ms step_avg:98.82ms +step:1231/1695 train_time:121644ms step_avg:98.82ms +step:1232/1695 train_time:121745ms step_avg:98.82ms +step:1233/1695 train_time:121845ms step_avg:98.82ms +step:1234/1695 train_time:121949ms step_avg:98.82ms +step:1235/1695 train_time:122050ms step_avg:98.83ms +step:1236/1695 train_time:122152ms step_avg:98.83ms +step:1237/1695 train_time:122253ms step_avg:98.83ms +step:1238/1695 train_time:122353ms step_avg:98.83ms +step:1239/1695 train_time:122454ms step_avg:98.83ms +step:1240/1695 train_time:122554ms step_avg:98.83ms +step:1241/1695 train_time:122655ms step_avg:98.84ms +step:1242/1695 train_time:122756ms step_avg:98.84ms +step:1243/1695 train_time:122857ms step_avg:98.84ms 
+step:1244/1695 train_time:122957ms step_avg:98.84ms +step:1245/1695 train_time:123057ms step_avg:98.84ms +step:1246/1695 train_time:123158ms step_avg:98.84ms +step:1247/1695 train_time:123259ms step_avg:98.84ms +step:1248/1695 train_time:123359ms step_avg:98.85ms +step:1249/1695 train_time:123459ms step_avg:98.85ms +step:1250/1695 train_time:123559ms step_avg:98.85ms +step:1250/1695 val_loss:3.3934 train_time:123659ms step_avg:98.93ms +step:1251/1695 train_time:123684ms step_avg:98.87ms +step:1252/1695 train_time:123775ms step_avg:98.86ms +step:1253/1695 train_time:123879ms step_avg:98.87ms +step:1254/1695 train_time:123981ms step_avg:98.87ms +step:1255/1695 train_time:124081ms step_avg:98.87ms +step:1256/1695 train_time:124181ms step_avg:98.87ms +step:1257/1695 train_time:124280ms step_avg:98.87ms +step:1258/1695 train_time:124381ms step_avg:98.87ms +step:1259/1695 train_time:124480ms step_avg:98.87ms +step:1260/1695 train_time:124581ms step_avg:98.87ms +step:1261/1695 train_time:124683ms step_avg:98.88ms +step:1262/1695 train_time:124786ms step_avg:98.88ms +step:1263/1695 train_time:124887ms step_avg:98.88ms +step:1264/1695 train_time:124987ms step_avg:98.88ms +step:1265/1695 train_time:125087ms step_avg:98.88ms +step:1266/1695 train_time:125186ms step_avg:98.88ms +step:1267/1695 train_time:125287ms step_avg:98.88ms +step:1268/1695 train_time:125387ms step_avg:98.89ms +step:1269/1695 train_time:125487ms step_avg:98.89ms +step:1270/1695 train_time:125588ms step_avg:98.89ms +step:1271/1695 train_time:125689ms step_avg:98.89ms +step:1272/1695 train_time:125788ms step_avg:98.89ms +step:1273/1695 train_time:125889ms step_avg:98.89ms +step:1274/1695 train_time:125990ms step_avg:98.89ms +step:1275/1695 train_time:126091ms step_avg:98.90ms +step:1276/1695 train_time:126194ms step_avg:98.90ms +step:1277/1695 train_time:126295ms step_avg:98.90ms +step:1278/1695 train_time:126396ms step_avg:98.90ms +step:1279/1695 train_time:126498ms step_avg:98.90ms +step:1280/1695 train_time:126599ms step_avg:98.91ms +step:1281/1695 train_time:126701ms step_avg:98.91ms +step:1282/1695 train_time:126801ms step_avg:98.91ms +step:1283/1695 train_time:126903ms step_avg:98.91ms +step:1284/1695 train_time:127004ms step_avg:98.91ms +step:1285/1695 train_time:127104ms step_avg:98.91ms +step:1286/1695 train_time:127205ms step_avg:98.92ms +step:1287/1695 train_time:127306ms step_avg:98.92ms +step:1288/1695 train_time:127406ms step_avg:98.92ms +step:1289/1695 train_time:127507ms step_avg:98.92ms +step:1290/1695 train_time:127607ms step_avg:98.92ms +step:1291/1695 train_time:127708ms step_avg:98.92ms +step:1292/1695 train_time:127808ms step_avg:98.92ms +step:1293/1695 train_time:127909ms step_avg:98.92ms +step:1294/1695 train_time:128010ms step_avg:98.93ms +step:1295/1695 train_time:128112ms step_avg:98.93ms +step:1296/1695 train_time:128213ms step_avg:98.93ms +step:1297/1695 train_time:128313ms step_avg:98.93ms +step:1298/1695 train_time:128416ms step_avg:98.93ms +step:1299/1695 train_time:128518ms step_avg:98.94ms +step:1300/1695 train_time:128619ms step_avg:98.94ms +step:1301/1695 train_time:128721ms step_avg:98.94ms +step:1302/1695 train_time:128822ms step_avg:98.94ms +step:1303/1695 train_time:128924ms step_avg:98.94ms +step:1304/1695 train_time:129025ms step_avg:98.95ms +step:1305/1695 train_time:129125ms step_avg:98.95ms +step:1306/1695 train_time:129225ms step_avg:98.95ms +step:1307/1695 train_time:129325ms step_avg:98.95ms +step:1308/1695 train_time:129426ms step_avg:98.95ms +step:1309/1695 train_time:129527ms 
step_avg:98.95ms +step:1310/1695 train_time:129628ms step_avg:98.95ms +step:1311/1695 train_time:129729ms step_avg:98.95ms +step:1312/1695 train_time:129829ms step_avg:98.95ms +step:1313/1695 train_time:129930ms step_avg:98.96ms +step:1314/1695 train_time:130031ms step_avg:98.96ms +step:1315/1695 train_time:130133ms step_avg:98.96ms +step:1316/1695 train_time:130234ms step_avg:98.96ms +step:1317/1695 train_time:130336ms step_avg:98.96ms +step:1318/1695 train_time:130436ms step_avg:98.97ms +step:1319/1695 train_time:130538ms step_avg:98.97ms +step:1320/1695 train_time:130641ms step_avg:98.97ms +step:1321/1695 train_time:130740ms step_avg:98.97ms +step:1322/1695 train_time:130842ms step_avg:98.97ms +step:1323/1695 train_time:130942ms step_avg:98.97ms +step:1324/1695 train_time:131043ms step_avg:98.98ms +step:1325/1695 train_time:131144ms step_avg:98.98ms +step:1326/1695 train_time:131246ms step_avg:98.98ms +step:1327/1695 train_time:131346ms step_avg:98.98ms +step:1328/1695 train_time:131447ms step_avg:98.98ms +step:1329/1695 train_time:131546ms step_avg:98.98ms +step:1330/1695 train_time:131646ms step_avg:98.98ms +step:1331/1695 train_time:131747ms step_avg:98.98ms +step:1332/1695 train_time:131848ms step_avg:98.98ms +step:1333/1695 train_time:131948ms step_avg:98.99ms +step:1334/1695 train_time:132050ms step_avg:98.99ms +step:1335/1695 train_time:132151ms step_avg:98.99ms +step:1336/1695 train_time:132254ms step_avg:98.99ms +step:1337/1695 train_time:132355ms step_avg:98.99ms +step:1338/1695 train_time:132456ms step_avg:99.00ms +step:1339/1695 train_time:132558ms step_avg:99.00ms +step:1340/1695 train_time:132659ms step_avg:99.00ms +step:1341/1695 train_time:132760ms step_avg:99.00ms +step:1342/1695 train_time:132861ms step_avg:99.00ms +step:1343/1695 train_time:132962ms step_avg:99.00ms +step:1344/1695 train_time:133062ms step_avg:99.00ms +step:1345/1695 train_time:133166ms step_avg:99.01ms +step:1346/1695 train_time:133266ms step_avg:99.01ms +step:1347/1695 train_time:133368ms step_avg:99.01ms +step:1348/1695 train_time:133467ms step_avg:99.01ms +step:1349/1695 train_time:133567ms step_avg:99.01ms +step:1350/1695 train_time:133668ms step_avg:99.01ms +step:1351/1695 train_time:133767ms step_avg:99.01ms +step:1352/1695 train_time:133867ms step_avg:99.01ms +step:1353/1695 train_time:133968ms step_avg:99.02ms +step:1354/1695 train_time:134069ms step_avg:99.02ms +step:1355/1695 train_time:134170ms step_avg:99.02ms +step:1356/1695 train_time:134271ms step_avg:99.02ms +step:1357/1695 train_time:134371ms step_avg:99.02ms +step:1358/1695 train_time:134472ms step_avg:99.02ms +step:1359/1695 train_time:134572ms step_avg:99.02ms +step:1360/1695 train_time:134674ms step_avg:99.03ms +step:1361/1695 train_time:134775ms step_avg:99.03ms +step:1362/1695 train_time:134876ms step_avg:99.03ms +step:1363/1695 train_time:134978ms step_avg:99.03ms +step:1364/1695 train_time:135080ms step_avg:99.03ms +step:1365/1695 train_time:135182ms step_avg:99.03ms +step:1366/1695 train_time:135282ms step_avg:99.04ms +step:1367/1695 train_time:135383ms step_avg:99.04ms +step:1368/1695 train_time:135485ms step_avg:99.04ms +step:1369/1695 train_time:135585ms step_avg:99.04ms +step:1370/1695 train_time:135686ms step_avg:99.04ms +step:1371/1695 train_time:135786ms step_avg:99.04ms +step:1372/1695 train_time:135887ms step_avg:99.04ms +step:1373/1695 train_time:135988ms step_avg:99.04ms +step:1374/1695 train_time:136089ms step_avg:99.05ms +step:1375/1695 train_time:136190ms step_avg:99.05ms +step:1375/1695 val_loss:3.3541 
train_time:136290ms step_avg:99.12ms +step:1376/1695 train_time:136316ms step_avg:99.07ms +step:1377/1695 train_time:136402ms step_avg:99.06ms +step:1378/1695 train_time:136505ms step_avg:99.06ms +step:1379/1695 train_time:136606ms step_avg:99.06ms +step:1380/1695 train_time:136709ms step_avg:99.06ms +step:1381/1695 train_time:136809ms step_avg:99.07ms +step:1382/1695 train_time:136909ms step_avg:99.07ms +step:1383/1695 train_time:137008ms step_avg:99.07ms +step:1384/1695 train_time:137109ms step_avg:99.07ms +step:1385/1695 train_time:137210ms step_avg:99.07ms +step:1386/1695 train_time:137313ms step_avg:99.07ms +step:1387/1695 train_time:137416ms step_avg:99.07ms +step:1388/1695 train_time:137516ms step_avg:99.07ms +step:1389/1695 train_time:137618ms step_avg:99.08ms +step:1390/1695 train_time:137720ms step_avg:99.08ms +step:1391/1695 train_time:137823ms step_avg:99.08ms +step:1392/1695 train_time:137924ms step_avg:99.08ms +step:1393/1695 train_time:138026ms step_avg:99.09ms +step:1394/1695 train_time:138127ms step_avg:99.09ms +step:1395/1695 train_time:138229ms step_avg:99.09ms +step:1396/1695 train_time:138330ms step_avg:99.09ms +step:1397/1695 train_time:138433ms step_avg:99.09ms +step:1398/1695 train_time:138534ms step_avg:99.09ms +step:1399/1695 train_time:138636ms step_avg:99.10ms +step:1400/1695 train_time:138739ms step_avg:99.10ms +step:1401/1695 train_time:138842ms step_avg:99.10ms +step:1402/1695 train_time:138944ms step_avg:99.10ms +step:1403/1695 train_time:139047ms step_avg:99.11ms +step:1404/1695 train_time:139150ms step_avg:99.11ms +step:1405/1695 train_time:139251ms step_avg:99.11ms +step:1406/1695 train_time:139352ms step_avg:99.11ms +step:1407/1695 train_time:139454ms step_avg:99.11ms +step:1408/1695 train_time:139556ms step_avg:99.12ms +step:1409/1695 train_time:139660ms step_avg:99.12ms +step:1410/1695 train_time:139761ms step_avg:99.12ms +step:1411/1695 train_time:139862ms step_avg:99.12ms +step:1412/1695 train_time:139967ms step_avg:99.13ms +step:1413/1695 train_time:140068ms step_avg:99.13ms +step:1414/1695 train_time:140170ms step_avg:99.13ms +step:1415/1695 train_time:140272ms step_avg:99.13ms +step:1416/1695 train_time:140372ms step_avg:99.13ms +step:1417/1695 train_time:140473ms step_avg:99.13ms +step:1418/1695 train_time:140574ms step_avg:99.14ms +step:1419/1695 train_time:140676ms step_avg:99.14ms +step:1420/1695 train_time:140778ms step_avg:99.14ms +step:1421/1695 train_time:140880ms step_avg:99.14ms +step:1422/1695 train_time:140981ms step_avg:99.14ms +step:1423/1695 train_time:141083ms step_avg:99.14ms +step:1424/1695 train_time:141187ms step_avg:99.15ms +step:1425/1695 train_time:141288ms step_avg:99.15ms +step:1426/1695 train_time:141391ms step_avg:99.15ms +step:1427/1695 train_time:141492ms step_avg:99.15ms +step:1428/1695 train_time:141594ms step_avg:99.16ms +step:1429/1695 train_time:141696ms step_avg:99.16ms +step:1430/1695 train_time:141797ms step_avg:99.16ms +step:1431/1695 train_time:141900ms step_avg:99.16ms +step:1432/1695 train_time:142001ms step_avg:99.16ms +step:1433/1695 train_time:142104ms step_avg:99.17ms +step:1434/1695 train_time:142206ms step_avg:99.17ms +step:1435/1695 train_time:142309ms step_avg:99.17ms +step:1436/1695 train_time:142411ms step_avg:99.17ms +step:1437/1695 train_time:142513ms step_avg:99.17ms +step:1438/1695 train_time:142614ms step_avg:99.17ms +step:1439/1695 train_time:142716ms step_avg:99.18ms +step:1440/1695 train_time:142819ms step_avg:99.18ms +step:1441/1695 train_time:142922ms step_avg:99.18ms +step:1442/1695 
train_time:143022ms step_avg:99.18ms +step:1443/1695 train_time:143124ms step_avg:99.18ms +step:1444/1695 train_time:143227ms step_avg:99.19ms +step:1445/1695 train_time:143328ms step_avg:99.19ms +step:1446/1695 train_time:143430ms step_avg:99.19ms +step:1447/1695 train_time:143530ms step_avg:99.19ms +step:1448/1695 train_time:143633ms step_avg:99.19ms +step:1449/1695 train_time:143734ms step_avg:99.20ms +step:1450/1695 train_time:143835ms step_avg:99.20ms +step:1451/1695 train_time:143937ms step_avg:99.20ms +step:1452/1695 train_time:144040ms step_avg:99.20ms +step:1453/1695 train_time:144142ms step_avg:99.20ms +step:1454/1695 train_time:144245ms step_avg:99.21ms +step:1455/1695 train_time:144348ms step_avg:99.21ms +step:1456/1695 train_time:144449ms step_avg:99.21ms +step:1457/1695 train_time:144551ms step_avg:99.21ms +step:1458/1695 train_time:144653ms step_avg:99.21ms +step:1459/1695 train_time:144755ms step_avg:99.22ms +step:1460/1695 train_time:144856ms step_avg:99.22ms +step:1461/1695 train_time:144958ms step_avg:99.22ms +step:1462/1695 train_time:145060ms step_avg:99.22ms +step:1463/1695 train_time:145162ms step_avg:99.22ms +step:1464/1695 train_time:145264ms step_avg:99.22ms +step:1465/1695 train_time:145366ms step_avg:99.23ms +step:1466/1695 train_time:145468ms step_avg:99.23ms +step:1467/1695 train_time:145570ms step_avg:99.23ms +step:1468/1695 train_time:145671ms step_avg:99.23ms +step:1469/1695 train_time:145774ms step_avg:99.23ms +step:1470/1695 train_time:145875ms step_avg:99.23ms +step:1471/1695 train_time:145976ms step_avg:99.24ms +step:1472/1695 train_time:146078ms step_avg:99.24ms +step:1473/1695 train_time:146179ms step_avg:99.24ms +step:1474/1695 train_time:146282ms step_avg:99.24ms +step:1475/1695 train_time:146384ms step_avg:99.24ms +step:1476/1695 train_time:146487ms step_avg:99.25ms +step:1477/1695 train_time:146590ms step_avg:99.25ms +step:1478/1695 train_time:146691ms step_avg:99.25ms +step:1479/1695 train_time:146792ms step_avg:99.25ms +step:1480/1695 train_time:146894ms step_avg:99.25ms +step:1481/1695 train_time:146996ms step_avg:99.25ms +step:1482/1695 train_time:147097ms step_avg:99.26ms +step:1483/1695 train_time:147201ms step_avg:99.26ms +step:1484/1695 train_time:147303ms step_avg:99.26ms +step:1485/1695 train_time:147404ms step_avg:99.26ms +step:1486/1695 train_time:147505ms step_avg:99.26ms +step:1487/1695 train_time:147607ms step_avg:99.27ms +step:1488/1695 train_time:147711ms step_avg:99.27ms +step:1489/1695 train_time:147812ms step_avg:99.27ms +step:1490/1695 train_time:147915ms step_avg:99.27ms +step:1491/1695 train_time:148017ms step_avg:99.27ms +step:1492/1695 train_time:148117ms step_avg:99.27ms +step:1493/1695 train_time:148220ms step_avg:99.28ms +step:1494/1695 train_time:148322ms step_avg:99.28ms +step:1495/1695 train_time:148424ms step_avg:99.28ms +step:1496/1695 train_time:148527ms step_avg:99.28ms +step:1497/1695 train_time:148629ms step_avg:99.28ms +step:1498/1695 train_time:148731ms step_avg:99.29ms +step:1499/1695 train_time:148831ms step_avg:99.29ms +step:1500/1695 train_time:148932ms step_avg:99.29ms +step:1500/1695 val_loss:3.3188 train_time:149031ms step_avg:99.35ms +step:1501/1695 train_time:149057ms step_avg:99.31ms +step:1502/1695 train_time:149145ms step_avg:99.30ms +step:1503/1695 train_time:149246ms step_avg:99.30ms +step:1504/1695 train_time:149347ms step_avg:99.30ms +step:1505/1695 train_time:149447ms step_avg:99.30ms +step:1506/1695 train_time:149549ms step_avg:99.30ms +step:1507/1695 train_time:149650ms step_avg:99.30ms 
+step:1508/1695 train_time:149750ms step_avg:99.30ms +step:1509/1695 train_time:149853ms step_avg:99.31ms +step:1510/1695 train_time:149955ms step_avg:99.31ms +step:1511/1695 train_time:150060ms step_avg:99.31ms +step:1512/1695 train_time:150163ms step_avg:99.31ms +step:1513/1695 train_time:150264ms step_avg:99.32ms +step:1514/1695 train_time:150366ms step_avg:99.32ms +step:1515/1695 train_time:150471ms step_avg:99.32ms +step:1516/1695 train_time:150573ms step_avg:99.32ms +step:1517/1695 train_time:150674ms step_avg:99.32ms +step:1518/1695 train_time:150775ms step_avg:99.32ms +step:1519/1695 train_time:150878ms step_avg:99.33ms +step:1520/1695 train_time:150979ms step_avg:99.33ms +step:1521/1695 train_time:151082ms step_avg:99.33ms +step:1522/1695 train_time:151184ms step_avg:99.33ms +step:1523/1695 train_time:151285ms step_avg:99.33ms +step:1524/1695 train_time:151389ms step_avg:99.34ms +step:1525/1695 train_time:151492ms step_avg:99.34ms +step:1526/1695 train_time:151594ms step_avg:99.34ms +step:1527/1695 train_time:151696ms step_avg:99.34ms +step:1528/1695 train_time:151802ms step_avg:99.35ms +step:1529/1695 train_time:151903ms step_avg:99.35ms +step:1530/1695 train_time:152007ms step_avg:99.35ms +step:1531/1695 train_time:152109ms step_avg:99.35ms +step:1532/1695 train_time:152212ms step_avg:99.36ms +step:1533/1695 train_time:152315ms step_avg:99.36ms +step:1534/1695 train_time:152418ms step_avg:99.36ms +step:1535/1695 train_time:152520ms step_avg:99.36ms +step:1536/1695 train_time:152621ms step_avg:99.36ms +step:1537/1695 train_time:152722ms step_avg:99.36ms +step:1538/1695 train_time:152824ms step_avg:99.37ms +step:1539/1695 train_time:152925ms step_avg:99.37ms +step:1540/1695 train_time:153026ms step_avg:99.37ms +step:1541/1695 train_time:153131ms step_avg:99.37ms +step:1542/1695 train_time:153236ms step_avg:99.37ms +step:1543/1695 train_time:153338ms step_avg:99.38ms +step:1544/1695 train_time:153439ms step_avg:99.38ms +step:1545/1695 train_time:153541ms step_avg:99.38ms +step:1546/1695 train_time:153643ms step_avg:99.38ms +step:1547/1695 train_time:153746ms step_avg:99.38ms +step:1548/1695 train_time:153847ms step_avg:99.38ms +step:1549/1695 train_time:153948ms step_avg:99.39ms +step:1550/1695 train_time:154050ms step_avg:99.39ms +step:1551/1695 train_time:154152ms step_avg:99.39ms +step:1552/1695 train_time:154254ms step_avg:99.39ms +step:1553/1695 train_time:154358ms step_avg:99.39ms +step:1554/1695 train_time:154459ms step_avg:99.39ms +step:1555/1695 train_time:154560ms step_avg:99.40ms +step:1556/1695 train_time:154662ms step_avg:99.40ms +step:1557/1695 train_time:154765ms step_avg:99.40ms +step:1558/1695 train_time:154867ms step_avg:99.40ms +step:1559/1695 train_time:154970ms step_avg:99.40ms +step:1560/1695 train_time:155071ms step_avg:99.40ms +step:1561/1695 train_time:155173ms step_avg:99.41ms +step:1562/1695 train_time:155277ms step_avg:99.41ms +step:1563/1695 train_time:155381ms step_avg:99.41ms +step:1564/1695 train_time:155482ms step_avg:99.41ms +step:1565/1695 train_time:155584ms step_avg:99.41ms +step:1566/1695 train_time:155684ms step_avg:99.42ms +step:1567/1695 train_time:155786ms step_avg:99.42ms +step:1568/1695 train_time:155886ms step_avg:99.42ms +step:1569/1695 train_time:155986ms step_avg:99.42ms +step:1570/1695 train_time:156089ms step_avg:99.42ms +step:1571/1695 train_time:156190ms step_avg:99.42ms +step:1572/1695 train_time:156292ms step_avg:99.42ms +step:1573/1695 train_time:156395ms step_avg:99.42ms +step:1574/1695 train_time:156496ms step_avg:99.43ms 
+step:1575/1695 train_time:156599ms step_avg:99.43ms +step:1576/1695 train_time:156703ms step_avg:99.43ms +step:1577/1695 train_time:156807ms step_avg:99.43ms +step:1578/1695 train_time:156907ms step_avg:99.43ms +step:1579/1695 train_time:157009ms step_avg:99.44ms +step:1580/1695 train_time:157110ms step_avg:99.44ms +step:1581/1695 train_time:157212ms step_avg:99.44ms +step:1582/1695 train_time:157314ms step_avg:99.44ms +step:1583/1695 train_time:157417ms step_avg:99.44ms +step:1584/1695 train_time:157521ms step_avg:99.44ms +step:1585/1695 train_time:157622ms step_avg:99.45ms +step:1586/1695 train_time:157725ms step_avg:99.45ms +step:1587/1695 train_time:157827ms step_avg:99.45ms +step:1588/1695 train_time:157928ms step_avg:99.45ms +step:1589/1695 train_time:158029ms step_avg:99.45ms +step:1590/1695 train_time:158130ms step_avg:99.45ms +step:1591/1695 train_time:158231ms step_avg:99.45ms +step:1592/1695 train_time:158334ms step_avg:99.46ms +step:1593/1695 train_time:158434ms step_avg:99.46ms +step:1594/1695 train_time:158538ms step_avg:99.46ms +step:1595/1695 train_time:158641ms step_avg:99.46ms +step:1596/1695 train_time:158743ms step_avg:99.46ms +step:1597/1695 train_time:158845ms step_avg:99.46ms +step:1598/1695 train_time:158947ms step_avg:99.47ms +step:1599/1695 train_time:159048ms step_avg:99.47ms +step:1600/1695 train_time:159150ms step_avg:99.47ms +step:1601/1695 train_time:159252ms step_avg:99.47ms +step:1602/1695 train_time:159355ms step_avg:99.47ms +step:1603/1695 train_time:159456ms step_avg:99.47ms +step:1604/1695 train_time:159558ms step_avg:99.48ms +step:1605/1695 train_time:159661ms step_avg:99.48ms +step:1606/1695 train_time:159764ms step_avg:99.48ms +step:1607/1695 train_time:159864ms step_avg:99.48ms +step:1608/1695 train_time:159965ms step_avg:99.48ms +step:1609/1695 train_time:160067ms step_avg:99.48ms +step:1610/1695 train_time:160169ms step_avg:99.48ms +step:1611/1695 train_time:160272ms step_avg:99.49ms +step:1612/1695 train_time:160374ms step_avg:99.49ms +step:1613/1695 train_time:160475ms step_avg:99.49ms +step:1614/1695 train_time:160576ms step_avg:99.49ms +step:1615/1695 train_time:160678ms step_avg:99.49ms +step:1616/1695 train_time:160780ms step_avg:99.49ms +step:1617/1695 train_time:160883ms step_avg:99.49ms +step:1618/1695 train_time:160985ms step_avg:99.50ms +step:1619/1695 train_time:161086ms step_avg:99.50ms +step:1620/1695 train_time:161188ms step_avg:99.50ms +step:1621/1695 train_time:161289ms step_avg:99.50ms +step:1622/1695 train_time:161391ms step_avg:99.50ms +step:1623/1695 train_time:161493ms step_avg:99.50ms +step:1624/1695 train_time:161595ms step_avg:99.50ms +step:1625/1695 train_time:161700ms step_avg:99.51ms +step:1625/1695 val_loss:3.2904 train_time:161801ms step_avg:99.57ms +step:1626/1695 train_time:161827ms step_avg:99.52ms +step:1627/1695 train_time:161915ms step_avg:99.52ms +step:1628/1695 train_time:162017ms step_avg:99.52ms +step:1629/1695 train_time:162119ms step_avg:99.52ms +step:1630/1695 train_time:162220ms step_avg:99.52ms +step:1631/1695 train_time:162321ms step_avg:99.52ms +step:1632/1695 train_time:162422ms step_avg:99.52ms +step:1633/1695 train_time:162522ms step_avg:99.52ms +step:1634/1695 train_time:162625ms step_avg:99.53ms +step:1635/1695 train_time:162728ms step_avg:99.53ms +step:1636/1695 train_time:162831ms step_avg:99.53ms +step:1637/1695 train_time:162935ms step_avg:99.53ms +step:1638/1695 train_time:163038ms step_avg:99.53ms +step:1639/1695 train_time:163140ms step_avg:99.54ms +step:1640/1695 train_time:163243ms 
step_avg:99.54ms +step:1641/1695 train_time:163347ms step_avg:99.54ms +step:1642/1695 train_time:163449ms step_avg:99.54ms +step:1643/1695 train_time:163552ms step_avg:99.54ms +step:1644/1695 train_time:163655ms step_avg:99.55ms +step:1645/1695 train_time:163758ms step_avg:99.55ms +step:1646/1695 train_time:163861ms step_avg:99.55ms +step:1647/1695 train_time:163965ms step_avg:99.55ms +step:1648/1695 train_time:164070ms step_avg:99.56ms +step:1649/1695 train_time:164173ms step_avg:99.56ms +step:1650/1695 train_time:164275ms step_avg:99.56ms +step:1651/1695 train_time:164379ms step_avg:99.56ms +step:1652/1695 train_time:164481ms step_avg:99.56ms +step:1653/1695 train_time:164584ms step_avg:99.57ms +step:1654/1695 train_time:164686ms step_avg:99.57ms +step:1655/1695 train_time:164789ms step_avg:99.57ms +step:1656/1695 train_time:164894ms step_avg:99.57ms +step:1657/1695 train_time:164996ms step_avg:99.57ms +step:1658/1695 train_time:165098ms step_avg:99.58ms +step:1659/1695 train_time:165204ms step_avg:99.58ms +step:1660/1695 train_time:165306ms step_avg:99.58ms +step:1661/1695 train_time:165412ms step_avg:99.59ms +step:1662/1695 train_time:165517ms step_avg:99.59ms +step:1663/1695 train_time:165620ms step_avg:99.59ms +step:1664/1695 train_time:165722ms step_avg:99.59ms +step:1665/1695 train_time:165828ms step_avg:99.60ms +step:1666/1695 train_time:165931ms step_avg:99.60ms +step:1667/1695 train_time:166033ms step_avg:99.60ms +step:1668/1695 train_time:166139ms step_avg:99.60ms +step:1669/1695 train_time:166242ms step_avg:99.61ms +step:1670/1695 train_time:166344ms step_avg:99.61ms +step:1671/1695 train_time:166447ms step_avg:99.61ms +step:1672/1695 train_time:166551ms step_avg:99.61ms +step:1673/1695 train_time:166654ms step_avg:99.61ms +step:1674/1695 train_time:166757ms step_avg:99.62ms +step:1675/1695 train_time:166859ms step_avg:99.62ms +step:1676/1695 train_time:166963ms step_avg:99.62ms +step:1677/1695 train_time:167065ms step_avg:99.62ms +step:1678/1695 train_time:167169ms step_avg:99.62ms +step:1679/1695 train_time:167273ms step_avg:99.63ms +step:1680/1695 train_time:167376ms step_avg:99.63ms +step:1681/1695 train_time:167479ms step_avg:99.63ms +step:1682/1695 train_time:167585ms step_avg:99.63ms +step:1683/1695 train_time:167688ms step_avg:99.64ms +step:1684/1695 train_time:167792ms step_avg:99.64ms +step:1685/1695 train_time:167895ms step_avg:99.64ms +step:1686/1695 train_time:167998ms step_avg:99.64ms +step:1687/1695 train_time:168100ms step_avg:99.64ms +step:1688/1695 train_time:168202ms step_avg:99.65ms +step:1689/1695 train_time:168304ms step_avg:99.65ms +step:1690/1695 train_time:168407ms step_avg:99.65ms +step:1691/1695 train_time:168510ms step_avg:99.65ms +step:1692/1695 train_time:168613ms step_avg:99.65ms +step:1693/1695 train_time:168716ms step_avg:99.66ms +step:1694/1695 train_time:168820ms step_avg:99.66ms +step:1695/1695 train_time:168923ms step_avg:99.66ms +step:1695/1695 val_loss:3.2777 train_time:169024ms step_avg:99.72ms +peak memory allocated: 34761 MiB reserved: 49580 MiB diff --git a/records/082725_FA3/17e712ee-7cf8-44c9-a784-3762e61b174c.txt b/records/082725_FA3/17e712ee-7cf8-44c9-a784-3762e61b174c.txt new file mode 100644 index 000000000..b5371a4da --- /dev/null +++ b/records/082725_FA3/17e712ee-7cf8-44c9-a784-3762e61b174c.txt @@ -0,0 +1,2808 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from 
functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + 
triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output 
matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) 
& (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
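+        # The sharding pattern implemented below, in brief: parameters within a group all
+        # share one shape, and ownership is round-robin, i.e. rank r updates params[base_i + r].
+        # Gradients for each owned parameter are averaged across ranks with an async
+        # reduce_scatter; the owner then applies momentum + Newton-Schulz and the update,
+        # and an async all_gather re-replicates the updated parameters on every rank.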
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr 
https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int): + assert input_seq.ndim == 2 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), + reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + 
nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +class EOSBatchFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256): + # Precompute EOS positions once per shard + self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 # pointer into eos_idx (start EOS for next step) + self.pos = 0 # logical stream position within this shard + self.world_size = world_size + def seek(self, pos: int): + # Set pointer to the first EOS >= pos + self.i = np.searchsorted(self.eos_idx, pos) + if self.i >= len(self.eos_idx): + raise StopIteration("Seek past last EOS.") + self.pos = pos + def next_batch(self, batch_size_local: int, seq_len: int): + n = len(self.eos_idx) + if self.i >= n: + raise StopIteration("No more EOS in this shard.") + starts = [[] for _ in range(self.world_size)] + idx = self.i + cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1 + for r in range(self.world_size): + for _ in range(batch_size_local): + start = cur + 1 + target = start + seq_len # need seq_len tokens before next EOS + j = np.searchsorted(self.eos_idx, target) + if j >= n: + raise StopIteration("Insufficient EOS ahead; hit tail of shard.") + starts[r].append(start) + idx = j + cur = self.eos_idx[idx] # next seq must also start at a new doc + advance = self.eos_idx[idx] - self.pos # move stream to the last end + self.pos += advance + self.i = idx + return starts, advance + + +def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token and sequences don't overlap + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert batch_size % world_size == 0, "Batch size must be divisible by world size" + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + + finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None + if align_to_bos: finder.seek(pos) + + while True: + batch_size_local = batch_size // world_size + num_tokens_global = batch_size * seq_len + + if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + + if align_to_bos: + try: + batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len) + start_idxs = batch_starts[rank] + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. 
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+                finder = EOSBatchFinder(tokens, world_size=world_size)
+                continue
+
+            bufs = [tokens[s: s + seq_len + 1] for s in start_idxs]
+            buf = torch.stack(bufs, dim=0)
+            _inputs = buf[:, :-1]
+            _targets = buf[:, 1:]
+        else:
+            batch_span = num_tokens_global
+            start_pos_local = pos + rank * (batch_size_local * seq_len)
+            end_pos_local = start_pos_local + (batch_size_local * seq_len)
+
+            buf = tokens[start_pos_local: end_pos_local + 1]
+
+            _inputs = buf[:-1].view(batch_size_local, seq_len)
+            _targets = buf[1:].view(batch_size_local, seq_len)
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True)
+        )
+
+        pos += batch_span
+
+        if new_params is not None:
+            # makes it possible for generator to receive new (batch_size, seq_len) via .send()
+            new_batch_size, new_seq_len = new_params
+            assert new_batch_size % world_size == 0, "New batch size must be divisible by world size"
+            batch_size = new_batch_size
+            seq_len = new_seq_len
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_seq_len: int = 1024 * 2
+    train_batch_size: int = 24 * 8
+    val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size.
+    # optimization
+    num_iterations: int = 1695 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    bandwidth: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
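+# A minimal sketch of the inverse of _load_data_shard above, assuming the same
+# layout (256 int32 header: magic 20240520, version 1, token count; then raw
+# uint16 tokens). The name write_data_shard is illustrative and unused here.
+def write_data_shard(file: Path, tokens):
+    header = np.zeros(256, dtype=np.int32)
+    header[0], header[1], header[2] = 20240520, 1, len(tokens) # matches the asserts in _load_data_shard
+    with file.open("wb") as f:
+        f.write(header.tobytes()) # 256 * 4 bytes, exactly what the reader seeks past
+        f.write(np.asarray(tokens, dtype=np.uint16).tobytes()) # 2 bytes per token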
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_seq_len, args.val_seq_len)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr_and_ws(step: int):
+    x = step / (1 + args.num_iterations) # progress in training
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return lr, args.ws_schedule[ws_idx]
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 60
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len)
+for step in range(warmup_steps):
+    inputs, targets = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, ws, ws // 2).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+#        Training and validation       #
+########################################
+
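+# A worked example of the schedule above, under the defaults in Hyperparameters
+# (num_iterations=1695, cooldown_frac=0.45, ws_schedule=(3, 7, 11), bandwidth=128):
+# the lr multiplier stays at 1.0 until x >= 0.55 (step ~933), then decays linearly
+# toward 0.1 (e.g. ~0.92 at step 1000); ws steps 3 -> 7 -> 11 at x = 1/3 and 2/3,
+# so the long window (ws * bandwidth) grows 384 -> 896 -> 1408 tokens and the short
+# window ((ws // 2) * bandwidth) grows 128 -> 384 -> 640 tokens.
+# for step in (0, 566, 933, 1000, 1131, 1694):
+#     print(step, get_lr_and_ws(step))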
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + lr, ws = get_lr_and_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % (world_size * args.val_seq_len) == 0 + val_steps = args.val_tokens // (world_size * args.val_seq_len) + val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, ws, ws // 2) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, ws, ws // 2).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * lr + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Wed Aug 27 04:15:50 2025 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.6 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:00:0B.0 Off | Off | +| N/A 30C P0 115W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:00:0C.0 Off | Off | +| N/A 32C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:00:0D.0 Off | Off | +| N/A 33C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:00:0E.0 Off | Off | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:00:0F.0 Off | Off | +| N/A 30C P0 110W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:00:10.0 Off | Off | +| N/A 34C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:00:11.0 Off | Off | +| N/A 32C P0 111W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:00:12.0 Off | Off | +| N/A 31C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| ++---------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1695 train_time:524ms step_avg:524.12ms +step:2/1695 train_time:549ms step_avg:274.51ms +step:3/1695 train_time:617ms step_avg:205.57ms +step:4/1695 train_time:709ms step_avg:177.26ms +step:5/1695 train_time:803ms step_avg:160.52ms +step:6/1695 train_time:897ms step_avg:149.44ms +step:7/1695 train_time:989ms step_avg:141.33ms +step:8/1695 train_time:1082ms step_avg:135.31ms +step:9/1695 train_time:1177ms step_avg:130.74ms +step:10/1695 train_time:1270ms step_avg:127.00ms +step:11/1695 train_time:1364ms step_avg:123.98ms +step:12/1695 train_time:1460ms step_avg:121.70ms +step:13/1695 train_time:1558ms step_avg:119.84ms +step:14/1695 train_time:1653ms step_avg:118.08ms +step:15/1695 train_time:1748ms step_avg:116.53ms +step:16/1695 train_time:1843ms step_avg:115.16ms +step:17/1695 train_time:1937ms step_avg:113.93ms +step:18/1695 train_time:2030ms step_avg:112.80ms +step:19/1695 train_time:2124ms step_avg:111.79ms +step:20/1695 train_time:2218ms step_avg:110.90ms +step:21/1695 train_time:2312ms step_avg:110.11ms +step:22/1695 train_time:2407ms step_avg:109.43ms 
+step:23/1695 train_time:2504ms step_avg:108.87ms +step:24/1695 train_time:2600ms step_avg:108.35ms +step:25/1695 train_time:2696ms step_avg:107.83ms +step:26/1695 train_time:2790ms step_avg:107.30ms +step:27/1695 train_time:2884ms step_avg:106.81ms +step:28/1695 train_time:2979ms step_avg:106.39ms +step:29/1695 train_time:3073ms step_avg:105.96ms +step:30/1695 train_time:3167ms step_avg:105.56ms +step:31/1695 train_time:3261ms step_avg:105.19ms +step:32/1695 train_time:3356ms step_avg:104.89ms +step:33/1695 train_time:3451ms step_avg:104.57ms +step:34/1695 train_time:3546ms step_avg:104.31ms +step:35/1695 train_time:3644ms step_avg:104.12ms +step:36/1695 train_time:3740ms step_avg:103.89ms +step:37/1695 train_time:3835ms step_avg:103.64ms +step:38/1695 train_time:3929ms step_avg:103.38ms +step:39/1695 train_time:4023ms step_avg:103.16ms +step:40/1695 train_time:4117ms step_avg:102.94ms +step:41/1695 train_time:4211ms step_avg:102.71ms +step:42/1695 train_time:4305ms step_avg:102.51ms +step:43/1695 train_time:4401ms step_avg:102.34ms +step:44/1695 train_time:4497ms step_avg:102.20ms +step:45/1695 train_time:4592ms step_avg:102.04ms +step:46/1695 train_time:4686ms step_avg:101.87ms +step:47/1695 train_time:4782ms step_avg:101.74ms +step:48/1695 train_time:4877ms step_avg:101.60ms +step:49/1695 train_time:4971ms step_avg:101.45ms +step:50/1695 train_time:5065ms step_avg:101.30ms +step:51/1695 train_time:5160ms step_avg:101.17ms +step:52/1695 train_time:5254ms step_avg:101.03ms +step:53/1695 train_time:5347ms step_avg:100.89ms +step:54/1695 train_time:5442ms step_avg:100.79ms +step:55/1695 train_time:5538ms step_avg:100.69ms +step:56/1695 train_time:5632ms step_avg:100.57ms +step:57/1695 train_time:5726ms step_avg:100.46ms +step:58/1695 train_time:5822ms step_avg:100.38ms +step:59/1695 train_time:5918ms step_avg:100.30ms +step:60/1695 train_time:6012ms step_avg:100.20ms +step:61/1695 train_time:6106ms step_avg:100.10ms +step:62/1695 train_time:6201ms step_avg:100.02ms +step:63/1695 train_time:6296ms step_avg:99.94ms +step:64/1695 train_time:6390ms step_avg:99.84ms +step:65/1695 train_time:6485ms step_avg:99.77ms +step:66/1695 train_time:6579ms step_avg:99.68ms +step:67/1695 train_time:6673ms step_avg:99.59ms +step:68/1695 train_time:6767ms step_avg:99.52ms +step:69/1695 train_time:6863ms step_avg:99.47ms +step:70/1695 train_time:6958ms step_avg:99.40ms +step:71/1695 train_time:7052ms step_avg:99.32ms +step:72/1695 train_time:7146ms step_avg:99.25ms +step:73/1695 train_time:7241ms step_avg:99.19ms +step:74/1695 train_time:7337ms step_avg:99.15ms +step:75/1695 train_time:7431ms step_avg:99.08ms +step:76/1695 train_time:7526ms step_avg:99.02ms +step:77/1695 train_time:7621ms step_avg:98.97ms +step:78/1695 train_time:7716ms step_avg:98.92ms +step:79/1695 train_time:7809ms step_avg:98.85ms +step:80/1695 train_time:7905ms step_avg:98.81ms +step:81/1695 train_time:8000ms step_avg:98.76ms +step:82/1695 train_time:8094ms step_avg:98.70ms +step:83/1695 train_time:8189ms step_avg:98.66ms +step:84/1695 train_time:8283ms step_avg:98.60ms +step:85/1695 train_time:8378ms step_avg:98.56ms +step:86/1695 train_time:8471ms step_avg:98.50ms +step:87/1695 train_time:8566ms step_avg:98.45ms +step:88/1695 train_time:8661ms step_avg:98.42ms +step:89/1695 train_time:8755ms step_avg:98.37ms +step:90/1695 train_time:8849ms step_avg:98.32ms +step:91/1695 train_time:8944ms step_avg:98.29ms +step:92/1695 train_time:9039ms step_avg:98.25ms +step:93/1695 train_time:9133ms step_avg:98.21ms +step:94/1695 train_time:9227ms 
step_avg:98.16ms +step:95/1695 train_time:9322ms step_avg:98.13ms +step:96/1695 train_time:9417ms step_avg:98.10ms +step:97/1695 train_time:9511ms step_avg:98.05ms +step:98/1695 train_time:9606ms step_avg:98.02ms +step:99/1695 train_time:9702ms step_avg:98.00ms +step:100/1695 train_time:9797ms step_avg:97.97ms +step:101/1695 train_time:9891ms step_avg:97.93ms +step:102/1695 train_time:9985ms step_avg:97.89ms +step:103/1695 train_time:10079ms step_avg:97.85ms +step:104/1695 train_time:10174ms step_avg:97.82ms +step:105/1695 train_time:10268ms step_avg:97.79ms +step:106/1695 train_time:10362ms step_avg:97.76ms +step:107/1695 train_time:10456ms step_avg:97.72ms +step:108/1695 train_time:10550ms step_avg:97.69ms +step:109/1695 train_time:10645ms step_avg:97.66ms +step:110/1695 train_time:10740ms step_avg:97.63ms +step:111/1695 train_time:10835ms step_avg:97.61ms +step:112/1695 train_time:10929ms step_avg:97.58ms +step:113/1695 train_time:11023ms step_avg:97.55ms +step:114/1695 train_time:11118ms step_avg:97.53ms +step:115/1695 train_time:11213ms step_avg:97.50ms +step:116/1695 train_time:11307ms step_avg:97.47ms +step:117/1695 train_time:11402ms step_avg:97.45ms +step:118/1695 train_time:11496ms step_avg:97.42ms +step:119/1695 train_time:11589ms step_avg:97.39ms +step:120/1695 train_time:11684ms step_avg:97.37ms +step:121/1695 train_time:11779ms step_avg:97.34ms +step:122/1695 train_time:11873ms step_avg:97.32ms +step:123/1695 train_time:11967ms step_avg:97.29ms +step:124/1695 train_time:12062ms step_avg:97.28ms +step:125/1695 train_time:12157ms step_avg:97.25ms +step:125/1695 val_loss:4.3113 train_time:12248ms step_avg:97.99ms +step:126/1695 train_time:12274ms step_avg:97.41ms +step:127/1695 train_time:12351ms step_avg:97.25ms +step:128/1695 train_time:12451ms step_avg:97.28ms +step:129/1695 train_time:12547ms step_avg:97.26ms +step:130/1695 train_time:12640ms step_avg:97.23ms +step:131/1695 train_time:12734ms step_avg:97.21ms +step:132/1695 train_time:12828ms step_avg:97.18ms +step:133/1695 train_time:12921ms step_avg:97.15ms +step:134/1695 train_time:13014ms step_avg:97.12ms +step:135/1695 train_time:13108ms step_avg:97.09ms +step:136/1695 train_time:13201ms step_avg:97.07ms +step:137/1695 train_time:13297ms step_avg:97.05ms +step:138/1695 train_time:13394ms step_avg:97.06ms +step:139/1695 train_time:13490ms step_avg:97.05ms +step:140/1695 train_time:13584ms step_avg:97.03ms +step:141/1695 train_time:13678ms step_avg:97.00ms +step:142/1695 train_time:13772ms step_avg:96.98ms +step:143/1695 train_time:13865ms step_avg:96.96ms +step:144/1695 train_time:13958ms step_avg:96.93ms +step:145/1695 train_time:14052ms step_avg:96.91ms +step:146/1695 train_time:14144ms step_avg:96.88ms +step:147/1695 train_time:14238ms step_avg:96.86ms +step:148/1695 train_time:14333ms step_avg:96.84ms +step:149/1695 train_time:14429ms step_avg:96.84ms +step:150/1695 train_time:14523ms step_avg:96.82ms +step:151/1695 train_time:14618ms step_avg:96.81ms +step:152/1695 train_time:14713ms step_avg:96.79ms +step:153/1695 train_time:14806ms step_avg:96.77ms +step:154/1695 train_time:14900ms step_avg:96.75ms +step:155/1695 train_time:14995ms step_avg:96.74ms +step:156/1695 train_time:15088ms step_avg:96.72ms +step:157/1695 train_time:15181ms step_avg:96.69ms +step:158/1695 train_time:15274ms step_avg:96.67ms +step:159/1695 train_time:15369ms step_avg:96.66ms +step:160/1695 train_time:15464ms step_avg:96.65ms +step:161/1695 train_time:15558ms step_avg:96.63ms +step:162/1695 train_time:15653ms step_avg:96.62ms +step:163/1695 
train_time:15748ms step_avg:96.61ms
+step:164/1695 train_time:15841ms step_avg:96.59ms
[~1,400 per-step timing lines (steps 165-1587) condensed: train_time rises steadily from ~15.8s to ~155.5s with step_avg holding in the ~95.9-98.1ms band, apart from a one-off ~300-400ms stall roughly every 172 steps; the validation checkpoints from this span are kept below.]
+step:250/1695 val_loss:3.9807 train_time:24284ms step_avg:97.14ms
+step:375/1695 val_loss:3.8148 train_time:36291ms step_avg:96.78ms
+step:500/1695 val_loss:3.7151 train_time:48060ms step_avg:96.12ms
+step:625/1695 val_loss:3.6179 train_time:60276ms step_avg:96.44ms
+step:750/1695 val_loss:3.5645 train_time:72628ms step_avg:96.84ms
+step:875/1695 val_loss:3.5224 train_time:84986ms step_avg:97.13ms
+step:1000/1695 val_loss:3.4843 train_time:97020ms step_avg:97.02ms
+step:1125/1695 val_loss:3.4352 train_time:109353ms step_avg:97.20ms
+step:1250/1695 val_loss:3.3886 train_time:121930ms step_avg:97.54ms
+step:1375/1695 val_loss:3.3495 train_time:134174ms step_avg:97.58ms
+step:1500/1695 val_loss:3.3162 train_time:146761ms step_avg:97.84ms
+step:1588/1695 train_time:155575ms
step_avg:97.97ms +step:1589/1695 train_time:155673ms step_avg:97.97ms +step:1590/1695 train_time:155771ms step_avg:97.97ms +step:1591/1695 train_time:155868ms step_avg:97.97ms +step:1592/1695 train_time:155965ms step_avg:97.97ms +step:1593/1695 train_time:156063ms step_avg:97.97ms +step:1594/1695 train_time:156160ms step_avg:97.97ms +step:1595/1695 train_time:156258ms step_avg:97.97ms +step:1596/1695 train_time:156357ms step_avg:97.97ms +step:1597/1695 train_time:156456ms step_avg:97.97ms +step:1598/1695 train_time:156554ms step_avg:97.97ms +step:1599/1695 train_time:156651ms step_avg:97.97ms +step:1600/1695 train_time:156748ms step_avg:97.97ms +step:1601/1695 train_time:156845ms step_avg:97.97ms +step:1602/1695 train_time:156943ms step_avg:97.97ms +step:1603/1695 train_time:157041ms step_avg:97.97ms +step:1604/1695 train_time:157140ms step_avg:97.97ms +step:1605/1695 train_time:157239ms step_avg:97.97ms +step:1606/1695 train_time:157337ms step_avg:97.97ms +step:1607/1695 train_time:157436ms step_avg:97.97ms +step:1608/1695 train_time:157535ms step_avg:97.97ms +step:1609/1695 train_time:157632ms step_avg:97.97ms +step:1610/1695 train_time:157730ms step_avg:97.97ms +step:1611/1695 train_time:157827ms step_avg:97.97ms +step:1612/1695 train_time:157925ms step_avg:97.97ms +step:1613/1695 train_time:158022ms step_avg:97.97ms +step:1614/1695 train_time:158119ms step_avg:97.97ms +step:1615/1695 train_time:158218ms step_avg:97.97ms +step:1616/1695 train_time:158317ms step_avg:97.97ms +step:1617/1695 train_time:158415ms step_avg:97.97ms +step:1618/1695 train_time:158513ms step_avg:97.97ms +step:1619/1695 train_time:158611ms step_avg:97.97ms +step:1620/1695 train_time:158708ms step_avg:97.97ms +step:1621/1695 train_time:158805ms step_avg:97.97ms +step:1622/1695 train_time:158903ms step_avg:97.97ms +step:1623/1695 train_time:159001ms step_avg:97.97ms +step:1624/1695 train_time:159099ms step_avg:97.97ms +step:1625/1695 train_time:159197ms step_avg:97.97ms +step:1625/1695 val_loss:3.2895 train_time:159292ms step_avg:98.03ms +step:1626/1695 train_time:159319ms step_avg:97.98ms +step:1627/1695 train_time:159403ms step_avg:97.97ms +step:1628/1695 train_time:159501ms step_avg:97.97ms +step:1629/1695 train_time:159598ms step_avg:97.97ms +step:1630/1695 train_time:159696ms step_avg:97.97ms +step:1631/1695 train_time:159793ms step_avg:97.97ms +step:1632/1695 train_time:159890ms step_avg:97.97ms +step:1633/1695 train_time:159986ms step_avg:97.97ms +step:1634/1695 train_time:160083ms step_avg:97.97ms +step:1635/1695 train_time:160179ms step_avg:97.97ms +step:1636/1695 train_time:160280ms step_avg:97.97ms +step:1637/1695 train_time:160382ms step_avg:97.97ms +step:1638/1695 train_time:160482ms step_avg:97.97ms +step:1639/1695 train_time:160580ms step_avg:97.97ms +step:1640/1695 train_time:160678ms step_avg:97.97ms +step:1641/1695 train_time:160774ms step_avg:97.97ms +step:1642/1695 train_time:160871ms step_avg:97.97ms +step:1643/1695 train_time:160969ms step_avg:97.97ms +step:1644/1695 train_time:161066ms step_avg:97.97ms +step:1645/1695 train_time:161162ms step_avg:97.97ms +step:1646/1695 train_time:161261ms step_avg:97.97ms +step:1647/1695 train_time:161362ms step_avg:97.97ms +step:1648/1695 train_time:161461ms step_avg:97.97ms +step:1649/1695 train_time:161559ms step_avg:97.97ms +step:1650/1695 train_time:161657ms step_avg:97.97ms +step:1651/1695 train_time:161755ms step_avg:97.97ms +step:1652/1695 train_time:161852ms step_avg:97.97ms +step:1653/1695 train_time:161951ms step_avg:97.97ms +step:1654/1695 
train_time:162049ms step_avg:97.97ms +step:1655/1695 train_time:162146ms step_avg:97.97ms +step:1656/1695 train_time:162244ms step_avg:97.97ms +step:1657/1695 train_time:162342ms step_avg:97.97ms +step:1658/1695 train_time:162440ms step_avg:97.97ms +step:1659/1695 train_time:162538ms step_avg:97.97ms +step:1660/1695 train_time:162636ms step_avg:97.97ms +step:1661/1695 train_time:162734ms step_avg:97.97ms +step:1662/1695 train_time:162831ms step_avg:97.97ms +step:1663/1695 train_time:162928ms step_avg:97.97ms +step:1664/1695 train_time:163026ms step_avg:97.97ms +step:1665/1695 train_time:163122ms step_avg:97.97ms +step:1666/1695 train_time:163221ms step_avg:97.97ms +step:1667/1695 train_time:163320ms step_avg:97.97ms +step:1668/1695 train_time:163418ms step_avg:97.97ms +step:1669/1695 train_time:163518ms step_avg:97.97ms +step:1670/1695 train_time:163617ms step_avg:97.97ms +step:1671/1695 train_time:163715ms step_avg:97.97ms +step:1672/1695 train_time:163812ms step_avg:97.97ms +step:1673/1695 train_time:163911ms step_avg:97.97ms +step:1674/1695 train_time:164008ms step_avg:97.97ms +step:1675/1695 train_time:164105ms step_avg:97.97ms +step:1676/1695 train_time:164202ms step_avg:97.97ms +step:1677/1695 train_time:164300ms step_avg:97.97ms +step:1678/1695 train_time:164397ms step_avg:97.97ms +step:1679/1695 train_time:164495ms step_avg:97.97ms +step:1680/1695 train_time:164593ms step_avg:97.97ms +step:1681/1695 train_time:164691ms step_avg:97.97ms +step:1682/1695 train_time:164789ms step_avg:97.97ms +step:1683/1695 train_time:164886ms step_avg:97.97ms +step:1684/1695 train_time:164984ms step_avg:97.97ms +step:1685/1695 train_time:165081ms step_avg:97.97ms +step:1686/1695 train_time:165179ms step_avg:97.97ms +step:1687/1695 train_time:165278ms step_avg:97.97ms +step:1688/1695 train_time:165377ms step_avg:97.97ms +step:1689/1695 train_time:165475ms step_avg:97.97ms +step:1690/1695 train_time:165573ms step_avg:97.97ms +step:1691/1695 train_time:165672ms step_avg:97.97ms +step:1692/1695 train_time:165770ms step_avg:97.97ms +step:1693/1695 train_time:165868ms step_avg:97.97ms +step:1694/1695 train_time:165966ms step_avg:97.97ms +step:1695/1695 train_time:166062ms step_avg:97.97ms +step:1695/1695 val_loss:3.2782 train_time:166157ms step_avg:98.03ms +peak memory allocated: 34361 MiB reserved: 49576 MiB diff --git a/records/082725_FA3/1d46fee6-b32c-48de-bd61-0a326442ec4e.txt b/records/082725_FA3/1d46fee6-b32c-48de-bd61-0a326442ec4e.txt new file mode 100644 index 000000000..32ec95b7e --- /dev/null +++ b/records/082725_FA3/1d46fee6-b32c-48de-bd61-0a326442ec4e.txt @@ -0,0 +1,2808 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# 
----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + 
batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + 
configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, 
meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
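+        # A plain-language sketch of the communication pattern implemented below
+        # (descriptive comments only; the code itself is authoritative):
+        #   1. within each size-group, this rank owns params[base_i + rank], so
+        #      ownership rotates round-robin across ranks;
+        #   2. each owned gradient is averaged over ranks via async reduce_scatter,
+        #      after which the owner applies weight decay, the momentum lerp,
+        #      Newton-Schulz orthogonalization, and a learning rate scaled by
+        #      max(1, rows / cols) ** 0.5;
+        #   3. updated parameters are redistributed via async all_gather, and all
+        #      gathers are awaited before step() returns.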
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr 
https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int): + assert input_seq.ndim == 2 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), + reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + 
nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +class EOSBatchFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256): + # Precompute EOS positions once per shard + self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 # pointer into eos_idx (start EOS for next step) + self.pos = 0 # logical stream position within this shard + self.world_size = world_size + def seek(self, pos: int): + # Set pointer to the first EOS >= pos + self.i = np.searchsorted(self.eos_idx, pos) + if self.i >= len(self.eos_idx): + raise StopIteration("Seek past last EOS.") + self.pos = pos + def next_batch(self, batch_size_local: int, seq_len: int): + n = len(self.eos_idx) + if self.i >= n: + raise StopIteration("No more EOS in this shard.") + starts = [[] for _ in range(self.world_size)] + idx = self.i + cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1 + for r in range(self.world_size): + for _ in range(batch_size_local): + start = cur + 1 + target = start + seq_len # need seq_len tokens before next EOS + j = np.searchsorted(self.eos_idx, target) + if j >= n: + raise StopIteration("Insufficient EOS ahead; hit tail of shard.") + starts[r].append(start) + idx = j + cur = self.eos_idx[idx] # next seq must also start at a new doc + advance = self.eos_idx[idx] - self.pos # move stream to the last end + self.pos += advance + self.i = idx + return starts, advance + + +def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token and sequences don't overlap + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert batch_size % world_size == 0, "Batch size must be divisible by world size" + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + + finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None + if align_to_bos: finder.seek(pos) + + while True: + batch_size_local = batch_size // world_size + num_tokens_global = batch_size * seq_len + + if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + + if align_to_bos: + try: + batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len) + start_idxs = batch_starts[rank] + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. 
+ tokens, pos = _load_data_shard(next(file_iter)), 0 + finder = EOSBatchFinder(tokens, world_size=world_size) + continue + + bufs = [tokens[s: s + seq_len + 1] for s in start_idxs] + buf = torch.stack(bufs, dim=0) + _inputs = buf[:, :-1] + _targets = buf[:, 1:] + else: + batch_span = num_tokens_global + start_pos_local = pos + rank * (batch_size_local * seq_len) + end_pos_local = start_pos_local + (batch_size_local * seq_len) + + buf = tokens[start_pos_local: end_pos_local + 1] + + _inputs = buf[:-1].view(batch_size_local, seq_len) + _targets = buf[1:].view(batch_size_local, seq_len) + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True) + ) + + pos += batch_span + + if new_params is not None: + # makes it possible for generator to receive new (batch_size, seq_len) via .send() + new_batch_size, new_seq_len = new_params + assert new_batch_size % world_size == 0, "New batch size must be divisible by world size" + batch_size = new_batch_size + seq_len = new_seq_len + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len: int = 1024 * 2 + train_batch_size: int = 24 * 8 + val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size. + # optimization + num_iterations: int = 1695 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + bandwidth: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc.
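+# Example launcher invocation (a sketch only; the script filename below is
+# hypothetical and not taken from this record). torchrun sets the RANK,
+# WORLD_SIZE, and LOCAL_RANK environment variables consumed above:
+#
+#   torchrun --standalone --nproc_per_node=8 train_gpt.py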
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr_and_ws(step: int): + x = step / (1 + args.num_iterations) # progress in training + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + ws_idx = int(len(args.ws_schedule) * x) + return lr, args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 60 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +for step in range(warmup_steps): + inputs, targets = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, ws, ws // 2).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + 
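+# Timing convention for the loop below (descriptive comment): the wall clock
+# (t0, training_time_ms) is bracketed by torch.cuda.synchronize() and paused for
+# the whole validation block, so the logged train_time excludes eval time and
+# step_avg is simply train_time / max(step, 1).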
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + lr, ws = get_lr_and_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % (world_size * args.val_seq_len) == 0 + val_steps = args.val_tokens // (world_size * args.val_seq_len) + val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, ws, ws // 2) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, ws, ws // 2).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * lr + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Wed Aug 27 04:04:43 2025 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.6 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:00:0B.0 Off | Off | +| N/A 29C P0 114W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:00:0C.0 Off | Off | +| N/A 31C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:00:0D.0 Off | Off | +| N/A 32C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:00:0E.0 Off | Off | +| N/A 29C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:00:0F.0 Off | Off | +| N/A 29C P0 109W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:00:10.0 Off | Off | +| N/A 33C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:00:11.0 Off | Off | +| N/A 31C P0 111W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:00:12.0 Off | Off | +| N/A 31C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| ++---------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:517ms step_avg:517.24ms +step:2/1695 train_time:541ms step_avg:270.47ms +step:3/1695 train_time:610ms step_avg:203.20ms +step:4/1695 train_time:702ms step_avg:175.48ms +step:5/1695 train_time:795ms step_avg:158.96ms +step:6/1695 train_time:888ms step_avg:148.00ms +step:7/1695 train_time:982ms step_avg:140.22ms +step:8/1695 train_time:1075ms step_avg:134.38ms +step:9/1695 train_time:1169ms step_avg:129.83ms +step:10/1695 train_time:1262ms step_avg:126.22ms +step:11/1695 train_time:1356ms step_avg:123.26ms +step:12/1695 train_time:1454ms step_avg:121.13ms +step:13/1695 train_time:1553ms step_avg:119.44ms +step:14/1695 train_time:1650ms step_avg:117.83ms +step:15/1695 train_time:1744ms step_avg:116.30ms +step:16/1695 train_time:1838ms step_avg:114.86ms +step:17/1695 train_time:1931ms step_avg:113.60ms +step:18/1695 train_time:2025ms step_avg:112.49ms +step:19/1695 train_time:2118ms step_avg:111.45ms +step:20/1695 train_time:2211ms step_avg:110.56ms +step:21/1695 train_time:2306ms step_avg:109.79ms +step:22/1695 train_time:2401ms step_avg:109.13ms 
+step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms
+step:1/1695 train_time:517ms step_avg:517.24ms
+step:2/1695 train_time:541ms step_avg:270.47ms
+step:3/1695 train_time:610ms step_avg:203.20ms
+step:4/1695 train_time:702ms step_avg:175.48ms
+step:5/1695 train_time:795ms step_avg:158.96ms
+step:6/1695 train_time:888ms step_avg:148.00ms
+step:7/1695 train_time:982ms step_avg:140.22ms
+step:8/1695 train_time:1075ms step_avg:134.38ms
+step:9/1695 train_time:1169ms step_avg:129.83ms
+step:10/1695 train_time:1262ms step_avg:126.22ms
+step:11/1695 train_time:1356ms step_avg:123.26ms
+step:12/1695 train_time:1454ms step_avg:121.13ms
+step:13/1695 train_time:1553ms step_avg:119.44ms
+step:14/1695 train_time:1650ms step_avg:117.83ms
+step:15/1695 train_time:1744ms step_avg:116.30ms
+step:16/1695 train_time:1838ms step_avg:114.86ms
+step:17/1695 train_time:1931ms step_avg:113.60ms
+step:18/1695 train_time:2025ms step_avg:112.49ms
+step:19/1695 train_time:2118ms step_avg:111.45ms
+step:20/1695 train_time:2211ms step_avg:110.56ms
+step:21/1695 train_time:2306ms step_avg:109.79ms
+step:22/1695 train_time:2401ms step_avg:109.13ms
[per-step timing lines between validation points are near-uniform and are elided below; each bracket gives that run's train_time and step_avg endpoints. Occasional slow steps (~330-460ms against the ~96ms typical) occur near steps 173, 345, 519, 691, 863, 1036, and 1208.]
[steps 23-125 elided: train_time 2497ms -> 12135ms, step_avg 108.55ms -> 97.08ms]
+step:125/1695 val_loss:4.3142 train_time:12227ms step_avg:97.82ms
[steps 126-250 elided: train_time 12252ms -> 24190ms, step_avg 97.24ms -> 96.76ms]
+step:250/1695 val_loss:3.9738 train_time:24282ms step_avg:97.13ms
[steps 251-375 elided: train_time 24306ms -> 36191ms, step_avg 96.84ms -> 96.51ms]
+step:375/1695 val_loss:3.8151 train_time:36283ms step_avg:96.76ms
[steps 376-500 elided: train_time 36310ms -> 47957ms, step_avg 96.57ms -> 95.91ms]
+step:500/1695 val_loss:3.7158 train_time:48050ms step_avg:96.10ms
[steps 501-625 elided: train_time 48074ms -> 60173ms, step_avg 95.96ms -> 96.28ms]
+step:625/1695 val_loss:3.6195 train_time:60266ms step_avg:96.43ms
[steps 626-750 elided: train_time 60290ms -> 72510ms, step_avg 96.31ms -> 96.68ms]
+step:750/1695 val_loss:3.5686 train_time:72604ms step_avg:96.81ms
[steps 751-875 elided: train_time 72630ms -> 84811ms, step_avg 96.71ms -> 96.93ms]
+step:875/1695 val_loss:3.5270 train_time:84905ms step_avg:97.03ms
[steps 876-1000 elided: train_time 84930ms -> 96830ms, step_avg 96.95ms -> 96.83ms]
+step:1000/1695 val_loss:3.4844 train_time:96924ms step_avg:96.92ms
[steps 1001-1125 elided: train_time 96949ms -> 109170ms, step_avg 96.85ms -> 97.04ms]
+step:1125/1695 val_loss:3.4368 train_time:109264ms step_avg:97.12ms
[steps 1126-1250 elided: train_time 109288ms -> 121731ms, step_avg 97.06ms -> 97.38ms]
+step:1250/1695 val_loss:3.3897 train_time:121827ms step_avg:97.46ms
[steps 1251-1322 elided: train_time 121854ms -> 128778ms, step_avg 97.40ms -> 97.41ms]
+step:1323/1695
train_time:128876ms step_avg:97.41ms +step:1324/1695 train_time:128974ms step_avg:97.41ms +step:1325/1695 train_time:129072ms step_avg:97.41ms +step:1326/1695 train_time:129170ms step_avg:97.41ms +step:1327/1695 train_time:129268ms step_avg:97.41ms +step:1328/1695 train_time:129366ms step_avg:97.41ms +step:1329/1695 train_time:129463ms step_avg:97.41ms +step:1330/1695 train_time:129561ms step_avg:97.41ms +step:1331/1695 train_time:129659ms step_avg:97.41ms +step:1332/1695 train_time:129758ms step_avg:97.42ms +step:1333/1695 train_time:129857ms step_avg:97.42ms +step:1334/1695 train_time:129955ms step_avg:97.42ms +step:1335/1695 train_time:130053ms step_avg:97.42ms +step:1336/1695 train_time:130151ms step_avg:97.42ms +step:1337/1695 train_time:130248ms step_avg:97.42ms +step:1338/1695 train_time:130347ms step_avg:97.42ms +step:1339/1695 train_time:130444ms step_avg:97.42ms +step:1340/1695 train_time:130541ms step_avg:97.42ms +step:1341/1695 train_time:130639ms step_avg:97.42ms +step:1342/1695 train_time:130736ms step_avg:97.42ms +step:1343/1695 train_time:130835ms step_avg:97.42ms +step:1344/1695 train_time:130933ms step_avg:97.42ms +step:1345/1695 train_time:131030ms step_avg:97.42ms +step:1346/1695 train_time:131127ms step_avg:97.42ms +step:1347/1695 train_time:131224ms step_avg:97.42ms +step:1348/1695 train_time:131321ms step_avg:97.42ms +step:1349/1695 train_time:131419ms step_avg:97.42ms +step:1350/1695 train_time:131518ms step_avg:97.42ms +step:1351/1695 train_time:131615ms step_avg:97.42ms +step:1352/1695 train_time:131714ms step_avg:97.42ms +step:1353/1695 train_time:131813ms step_avg:97.42ms +step:1354/1695 train_time:131911ms step_avg:97.42ms +step:1355/1695 train_time:132009ms step_avg:97.42ms +step:1356/1695 train_time:132106ms step_avg:97.42ms +step:1357/1695 train_time:132203ms step_avg:97.42ms +step:1358/1695 train_time:132300ms step_avg:97.42ms +step:1359/1695 train_time:132398ms step_avg:97.42ms +step:1360/1695 train_time:132497ms step_avg:97.42ms +step:1361/1695 train_time:132595ms step_avg:97.42ms +step:1362/1695 train_time:132693ms step_avg:97.43ms +step:1363/1695 train_time:132792ms step_avg:97.43ms +step:1364/1695 train_time:132890ms step_avg:97.43ms +step:1365/1695 train_time:132988ms step_avg:97.43ms +step:1366/1695 train_time:133085ms step_avg:97.43ms +step:1367/1695 train_time:133182ms step_avg:97.43ms +step:1368/1695 train_time:133279ms step_avg:97.43ms +step:1369/1695 train_time:133377ms step_avg:97.43ms +step:1370/1695 train_time:133476ms step_avg:97.43ms +step:1371/1695 train_time:133574ms step_avg:97.43ms +step:1372/1695 train_time:133671ms step_avg:97.43ms +step:1373/1695 train_time:133769ms step_avg:97.43ms +step:1374/1695 train_time:133867ms step_avg:97.43ms +step:1375/1695 train_time:133964ms step_avg:97.43ms +step:1375/1695 val_loss:3.3507 train_time:134060ms step_avg:97.50ms +step:1376/1695 train_time:134085ms step_avg:97.45ms +step:1377/1695 train_time:134167ms step_avg:97.43ms +step:1378/1695 train_time:134266ms step_avg:97.44ms +step:1379/1695 train_time:134364ms step_avg:97.44ms +step:1380/1695 train_time:134461ms step_avg:97.44ms +step:1381/1695 train_time:134815ms step_avg:97.62ms +step:1382/1695 train_time:134984ms step_avg:97.67ms +step:1383/1695 train_time:135080ms step_avg:97.67ms +step:1384/1695 train_time:135176ms step_avg:97.67ms +step:1385/1695 train_time:135272ms step_avg:97.67ms +step:1386/1695 train_time:135369ms step_avg:97.67ms +step:1387/1695 train_time:135465ms step_avg:97.67ms +step:1388/1695 train_time:135562ms step_avg:97.67ms 
+step:1389/1695 train_time:135658ms step_avg:97.67ms +step:1390/1695 train_time:135756ms step_avg:97.67ms +step:1391/1695 train_time:135859ms step_avg:97.67ms +step:1392/1695 train_time:135961ms step_avg:97.67ms +step:1393/1695 train_time:136060ms step_avg:97.67ms +step:1394/1695 train_time:136156ms step_avg:97.67ms +step:1395/1695 train_time:136253ms step_avg:97.67ms +step:1396/1695 train_time:136350ms step_avg:97.67ms +step:1397/1695 train_time:136446ms step_avg:97.67ms +step:1398/1695 train_time:136542ms step_avg:97.67ms +step:1399/1695 train_time:136639ms step_avg:97.67ms +step:1400/1695 train_time:136736ms step_avg:97.67ms +step:1401/1695 train_time:136834ms step_avg:97.67ms +step:1402/1695 train_time:136933ms step_avg:97.67ms +step:1403/1695 train_time:137032ms step_avg:97.67ms +step:1404/1695 train_time:137131ms step_avg:97.67ms +step:1405/1695 train_time:137230ms step_avg:97.67ms +step:1406/1695 train_time:137328ms step_avg:97.67ms +step:1407/1695 train_time:137425ms step_avg:97.67ms +step:1408/1695 train_time:137522ms step_avg:97.67ms +step:1409/1695 train_time:137619ms step_avg:97.67ms +step:1410/1695 train_time:137716ms step_avg:97.67ms +step:1411/1695 train_time:137813ms step_avg:97.67ms +step:1412/1695 train_time:137912ms step_avg:97.67ms +step:1413/1695 train_time:138011ms step_avg:97.67ms +step:1414/1695 train_time:138111ms step_avg:97.67ms +step:1415/1695 train_time:138210ms step_avg:97.67ms +step:1416/1695 train_time:138308ms step_avg:97.67ms +step:1417/1695 train_time:138405ms step_avg:97.67ms +step:1418/1695 train_time:138502ms step_avg:97.67ms +step:1419/1695 train_time:138600ms step_avg:97.67ms +step:1420/1695 train_time:138698ms step_avg:97.67ms +step:1421/1695 train_time:138795ms step_avg:97.67ms +step:1422/1695 train_time:138892ms step_avg:97.67ms +step:1423/1695 train_time:138991ms step_avg:97.67ms +step:1424/1695 train_time:139089ms step_avg:97.67ms +step:1425/1695 train_time:139188ms step_avg:97.68ms +step:1426/1695 train_time:139285ms step_avg:97.68ms +step:1427/1695 train_time:139382ms step_avg:97.68ms +step:1428/1695 train_time:139480ms step_avg:97.68ms +step:1429/1695 train_time:139577ms step_avg:97.67ms +step:1430/1695 train_time:139673ms step_avg:97.67ms +step:1431/1695 train_time:139771ms step_avg:97.67ms +step:1432/1695 train_time:139869ms step_avg:97.67ms +step:1433/1695 train_time:139968ms step_avg:97.67ms +step:1434/1695 train_time:140067ms step_avg:97.68ms +step:1435/1695 train_time:140167ms step_avg:97.68ms +step:1436/1695 train_time:140266ms step_avg:97.68ms +step:1437/1695 train_time:140363ms step_avg:97.68ms +step:1438/1695 train_time:140461ms step_avg:97.68ms +step:1439/1695 train_time:140558ms step_avg:97.68ms +step:1440/1695 train_time:140655ms step_avg:97.68ms +step:1441/1695 train_time:140752ms step_avg:97.68ms +step:1442/1695 train_time:140849ms step_avg:97.68ms +step:1443/1695 train_time:140948ms step_avg:97.68ms +step:1444/1695 train_time:141045ms step_avg:97.68ms +step:1445/1695 train_time:141144ms step_avg:97.68ms +step:1446/1695 train_time:141242ms step_avg:97.68ms +step:1447/1695 train_time:141340ms step_avg:97.68ms +step:1448/1695 train_time:141438ms step_avg:97.68ms +step:1449/1695 train_time:141534ms step_avg:97.68ms +step:1450/1695 train_time:141631ms step_avg:97.68ms +step:1451/1695 train_time:141728ms step_avg:97.68ms +step:1452/1695 train_time:141826ms step_avg:97.68ms +step:1453/1695 train_time:141924ms step_avg:97.68ms +step:1454/1695 train_time:142021ms step_avg:97.68ms +step:1455/1695 train_time:142117ms step_avg:97.68ms 
+step:1456/1695 train_time:142216ms step_avg:97.68ms +step:1457/1695 train_time:142314ms step_avg:97.68ms +step:1458/1695 train_time:142413ms step_avg:97.68ms +step:1459/1695 train_time:142510ms step_avg:97.68ms +step:1460/1695 train_time:142608ms step_avg:97.68ms +step:1461/1695 train_time:142706ms step_avg:97.68ms +step:1462/1695 train_time:142803ms step_avg:97.68ms +step:1463/1695 train_time:142901ms step_avg:97.68ms +step:1464/1695 train_time:142999ms step_avg:97.68ms +step:1465/1695 train_time:143096ms step_avg:97.68ms +step:1466/1695 train_time:143194ms step_avg:97.68ms +step:1467/1695 train_time:143291ms step_avg:97.68ms +step:1468/1695 train_time:143389ms step_avg:97.68ms +step:1469/1695 train_time:143487ms step_avg:97.68ms +step:1470/1695 train_time:143585ms step_avg:97.68ms +step:1471/1695 train_time:143682ms step_avg:97.68ms +step:1472/1695 train_time:143779ms step_avg:97.68ms +step:1473/1695 train_time:143877ms step_avg:97.68ms +step:1474/1695 train_time:143974ms step_avg:97.68ms +step:1475/1695 train_time:144072ms step_avg:97.68ms +step:1476/1695 train_time:144169ms step_avg:97.68ms +step:1477/1695 train_time:144267ms step_avg:97.68ms +step:1478/1695 train_time:144365ms step_avg:97.68ms +step:1479/1695 train_time:144462ms step_avg:97.68ms +step:1480/1695 train_time:144559ms step_avg:97.68ms +step:1481/1695 train_time:144657ms step_avg:97.67ms +step:1482/1695 train_time:144754ms step_avg:97.67ms +step:1483/1695 train_time:144852ms step_avg:97.67ms +step:1484/1695 train_time:144949ms step_avg:97.67ms +step:1485/1695 train_time:145048ms step_avg:97.68ms +step:1486/1695 train_time:145146ms step_avg:97.68ms +step:1487/1695 train_time:145244ms step_avg:97.68ms +step:1488/1695 train_time:145341ms step_avg:97.68ms +step:1489/1695 train_time:145438ms step_avg:97.67ms +step:1490/1695 train_time:145535ms step_avg:97.67ms +step:1491/1695 train_time:145632ms step_avg:97.67ms +step:1492/1695 train_time:145730ms step_avg:97.67ms +step:1493/1695 train_time:145829ms step_avg:97.68ms +step:1494/1695 train_time:145927ms step_avg:97.68ms +step:1495/1695 train_time:146024ms step_avg:97.68ms +step:1496/1695 train_time:146123ms step_avg:97.68ms +step:1497/1695 train_time:146220ms step_avg:97.68ms +step:1498/1695 train_time:146317ms step_avg:97.67ms +step:1499/1695 train_time:146414ms step_avg:97.67ms +step:1500/1695 train_time:146512ms step_avg:97.67ms +step:1500/1695 val_loss:3.3178 train_time:146608ms step_avg:97.74ms +step:1501/1695 train_time:146633ms step_avg:97.69ms +step:1502/1695 train_time:146718ms step_avg:97.68ms +step:1503/1695 train_time:146818ms step_avg:97.68ms +step:1504/1695 train_time:146916ms step_avg:97.68ms +step:1505/1695 train_time:147013ms step_avg:97.68ms +step:1506/1695 train_time:147110ms step_avg:97.68ms +step:1507/1695 train_time:147206ms step_avg:97.68ms +step:1508/1695 train_time:147302ms step_avg:97.68ms +step:1509/1695 train_time:147399ms step_avg:97.68ms +step:1510/1695 train_time:147495ms step_avg:97.68ms +step:1511/1695 train_time:147595ms step_avg:97.68ms +step:1512/1695 train_time:147697ms step_avg:97.68ms +step:1513/1695 train_time:147797ms step_avg:97.68ms +step:1514/1695 train_time:147896ms step_avg:97.69ms +step:1515/1695 train_time:147994ms step_avg:97.69ms +step:1516/1695 train_time:148092ms step_avg:97.69ms +step:1517/1695 train_time:148190ms step_avg:97.69ms +step:1518/1695 train_time:148287ms step_avg:97.69ms +step:1519/1695 train_time:148384ms step_avg:97.69ms +step:1520/1695 train_time:148481ms step_avg:97.68ms +step:1521/1695 train_time:148578ms 
step_avg:97.68ms +step:1522/1695 train_time:148676ms step_avg:97.68ms +step:1523/1695 train_time:148776ms step_avg:97.69ms +step:1524/1695 train_time:148874ms step_avg:97.69ms +step:1525/1695 train_time:148973ms step_avg:97.69ms +step:1526/1695 train_time:149072ms step_avg:97.69ms +step:1527/1695 train_time:149169ms step_avg:97.69ms +step:1528/1695 train_time:149266ms step_avg:97.69ms +step:1529/1695 train_time:149362ms step_avg:97.69ms +step:1530/1695 train_time:149460ms step_avg:97.69ms +step:1531/1695 train_time:149557ms step_avg:97.69ms +step:1532/1695 train_time:149655ms step_avg:97.69ms +step:1533/1695 train_time:149753ms step_avg:97.69ms +step:1534/1695 train_time:149852ms step_avg:97.69ms +step:1535/1695 train_time:149950ms step_avg:97.69ms +step:1536/1695 train_time:150048ms step_avg:97.69ms +step:1537/1695 train_time:150146ms step_avg:97.69ms +step:1538/1695 train_time:150244ms step_avg:97.69ms +step:1539/1695 train_time:150341ms step_avg:97.69ms +step:1540/1695 train_time:150438ms step_avg:97.69ms +step:1541/1695 train_time:150535ms step_avg:97.69ms +step:1542/1695 train_time:150633ms step_avg:97.69ms +step:1543/1695 train_time:150731ms step_avg:97.69ms +step:1544/1695 train_time:150830ms step_avg:97.69ms +step:1545/1695 train_time:150928ms step_avg:97.69ms +step:1546/1695 train_time:151027ms step_avg:97.69ms +step:1547/1695 train_time:151123ms step_avg:97.69ms +step:1548/1695 train_time:151220ms step_avg:97.69ms +step:1549/1695 train_time:151317ms step_avg:97.69ms +step:1550/1695 train_time:151415ms step_avg:97.69ms +step:1551/1695 train_time:151513ms step_avg:97.69ms +step:1552/1695 train_time:151866ms step_avg:97.85ms +step:1553/1695 train_time:152044ms step_avg:97.90ms +step:1554/1695 train_time:152139ms step_avg:97.90ms +step:1555/1695 train_time:152235ms step_avg:97.90ms +step:1556/1695 train_time:152332ms step_avg:97.90ms +step:1557/1695 train_time:152428ms step_avg:97.90ms +step:1558/1695 train_time:152525ms step_avg:97.90ms +step:1559/1695 train_time:152621ms step_avg:97.90ms +step:1560/1695 train_time:152717ms step_avg:97.90ms +step:1561/1695 train_time:152815ms step_avg:97.90ms +step:1562/1695 train_time:152920ms step_avg:97.90ms +step:1563/1695 train_time:153021ms step_avg:97.90ms +step:1564/1695 train_time:153120ms step_avg:97.90ms +step:1565/1695 train_time:153218ms step_avg:97.90ms +step:1566/1695 train_time:153315ms step_avg:97.90ms +step:1567/1695 train_time:153412ms step_avg:97.90ms +step:1568/1695 train_time:153509ms step_avg:97.90ms +step:1569/1695 train_time:153606ms step_avg:97.90ms +step:1570/1695 train_time:153702ms step_avg:97.90ms +step:1571/1695 train_time:153798ms step_avg:97.90ms +step:1572/1695 train_time:153898ms step_avg:97.90ms +step:1573/1695 train_time:153999ms step_avg:97.90ms +step:1574/1695 train_time:154099ms step_avg:97.90ms +step:1575/1695 train_time:154197ms step_avg:97.90ms +step:1576/1695 train_time:154294ms step_avg:97.90ms +step:1577/1695 train_time:154393ms step_avg:97.90ms +step:1578/1695 train_time:154490ms step_avg:97.90ms +step:1579/1695 train_time:154587ms step_avg:97.90ms +step:1580/1695 train_time:154684ms step_avg:97.90ms +step:1581/1695 train_time:154781ms step_avg:97.90ms +step:1582/1695 train_time:154879ms step_avg:97.90ms +step:1583/1695 train_time:154977ms step_avg:97.90ms +step:1584/1695 train_time:155075ms step_avg:97.90ms +step:1585/1695 train_time:155174ms step_avg:97.90ms +step:1586/1695 train_time:155273ms step_avg:97.90ms +step:1587/1695 train_time:155371ms step_avg:97.90ms +step:1588/1695 train_time:155469ms 
step_avg:97.90ms +step:1589/1695 train_time:155566ms step_avg:97.90ms +step:1590/1695 train_time:155663ms step_avg:97.90ms +step:1591/1695 train_time:155760ms step_avg:97.90ms +step:1592/1695 train_time:155858ms step_avg:97.90ms +step:1593/1695 train_time:155956ms step_avg:97.90ms +step:1594/1695 train_time:156054ms step_avg:97.90ms +step:1595/1695 train_time:156152ms step_avg:97.90ms +step:1596/1695 train_time:156250ms step_avg:97.90ms +step:1597/1695 train_time:156350ms step_avg:97.90ms +step:1598/1695 train_time:156448ms step_avg:97.90ms +step:1599/1695 train_time:156546ms step_avg:97.90ms +step:1600/1695 train_time:156644ms step_avg:97.90ms +step:1601/1695 train_time:156741ms step_avg:97.90ms +step:1602/1695 train_time:156838ms step_avg:97.90ms +step:1603/1695 train_time:156936ms step_avg:97.90ms +step:1604/1695 train_time:157034ms step_avg:97.90ms +step:1605/1695 train_time:157133ms step_avg:97.90ms +step:1606/1695 train_time:157234ms step_avg:97.90ms +step:1607/1695 train_time:157333ms step_avg:97.90ms +step:1608/1695 train_time:157431ms step_avg:97.91ms +step:1609/1695 train_time:157529ms step_avg:97.91ms +step:1610/1695 train_time:157627ms step_avg:97.91ms +step:1611/1695 train_time:157726ms step_avg:97.91ms +step:1612/1695 train_time:157824ms step_avg:97.91ms +step:1613/1695 train_time:157921ms step_avg:97.91ms +step:1614/1695 train_time:158017ms step_avg:97.90ms +step:1615/1695 train_time:158114ms step_avg:97.90ms +step:1616/1695 train_time:158212ms step_avg:97.90ms +step:1617/1695 train_time:158312ms step_avg:97.90ms +step:1618/1695 train_time:158412ms step_avg:97.91ms +step:1619/1695 train_time:158510ms step_avg:97.91ms +step:1620/1695 train_time:158609ms step_avg:97.91ms +step:1621/1695 train_time:158708ms step_avg:97.91ms +step:1622/1695 train_time:158806ms step_avg:97.91ms +step:1623/1695 train_time:158905ms step_avg:97.91ms +step:1624/1695 train_time:159001ms step_avg:97.91ms +step:1625/1695 train_time:159097ms step_avg:97.91ms +step:1625/1695 val_loss:3.2907 train_time:159193ms step_avg:97.96ms +step:1626/1695 train_time:159217ms step_avg:97.92ms +step:1627/1695 train_time:159299ms step_avg:97.91ms +step:1628/1695 train_time:159398ms step_avg:97.91ms +step:1629/1695 train_time:159495ms step_avg:97.91ms +step:1630/1695 train_time:159593ms step_avg:97.91ms +step:1631/1695 train_time:159690ms step_avg:97.91ms +step:1632/1695 train_time:159787ms step_avg:97.91ms +step:1633/1695 train_time:159884ms step_avg:97.91ms +step:1634/1695 train_time:159981ms step_avg:97.91ms +step:1635/1695 train_time:160077ms step_avg:97.91ms +step:1636/1695 train_time:160176ms step_avg:97.91ms +step:1637/1695 train_time:160276ms step_avg:97.91ms +step:1638/1695 train_time:160375ms step_avg:97.91ms +step:1639/1695 train_time:160474ms step_avg:97.91ms +step:1640/1695 train_time:160571ms step_avg:97.91ms +step:1641/1695 train_time:160669ms step_avg:97.91ms +step:1642/1695 train_time:160766ms step_avg:97.91ms +step:1643/1695 train_time:160864ms step_avg:97.91ms +step:1644/1695 train_time:160961ms step_avg:97.91ms +step:1645/1695 train_time:161058ms step_avg:97.91ms +step:1646/1695 train_time:161157ms step_avg:97.91ms +step:1647/1695 train_time:161255ms step_avg:97.91ms +step:1648/1695 train_time:161353ms step_avg:97.91ms +step:1649/1695 train_time:161452ms step_avg:97.91ms +step:1650/1695 train_time:161551ms step_avg:97.91ms +step:1651/1695 train_time:161649ms step_avg:97.91ms +step:1652/1695 train_time:161746ms step_avg:97.91ms +step:1653/1695 train_time:161843ms step_avg:97.91ms +step:1654/1695 
train_time:161940ms step_avg:97.91ms +step:1655/1695 train_time:162037ms step_avg:97.91ms +step:1656/1695 train_time:162135ms step_avg:97.91ms +step:1657/1695 train_time:162233ms step_avg:97.91ms +step:1658/1695 train_time:162332ms step_avg:97.91ms +step:1659/1695 train_time:162430ms step_avg:97.91ms +step:1660/1695 train_time:162528ms step_avg:97.91ms +step:1661/1695 train_time:162627ms step_avg:97.91ms +step:1662/1695 train_time:162724ms step_avg:97.91ms +step:1663/1695 train_time:162820ms step_avg:97.91ms +step:1664/1695 train_time:162918ms step_avg:97.91ms +step:1665/1695 train_time:163015ms step_avg:97.91ms +step:1666/1695 train_time:163114ms step_avg:97.91ms +step:1667/1695 train_time:163211ms step_avg:97.91ms +step:1668/1695 train_time:163309ms step_avg:97.91ms +step:1669/1695 train_time:163407ms step_avg:97.91ms +step:1670/1695 train_time:163505ms step_avg:97.91ms +step:1671/1695 train_time:163603ms step_avg:97.91ms +step:1672/1695 train_time:163700ms step_avg:97.91ms +step:1673/1695 train_time:163797ms step_avg:97.91ms +step:1674/1695 train_time:163894ms step_avg:97.91ms +step:1675/1695 train_time:163992ms step_avg:97.91ms +step:1676/1695 train_time:164091ms step_avg:97.91ms +step:1677/1695 train_time:164189ms step_avg:97.91ms +step:1678/1695 train_time:164287ms step_avg:97.91ms +step:1679/1695 train_time:164385ms step_avg:97.91ms +step:1680/1695 train_time:164482ms step_avg:97.91ms +step:1681/1695 train_time:164580ms step_avg:97.91ms +step:1682/1695 train_time:164677ms step_avg:97.91ms +step:1683/1695 train_time:164775ms step_avg:97.91ms +step:1684/1695 train_time:164873ms step_avg:97.91ms +step:1685/1695 train_time:164971ms step_avg:97.91ms +step:1686/1695 train_time:165069ms step_avg:97.91ms +step:1687/1695 train_time:165167ms step_avg:97.91ms +step:1688/1695 train_time:165265ms step_avg:97.91ms +step:1689/1695 train_time:165363ms step_avg:97.91ms +step:1690/1695 train_time:165461ms step_avg:97.91ms +step:1691/1695 train_time:165559ms step_avg:97.91ms +step:1692/1695 train_time:165656ms step_avg:97.91ms +step:1693/1695 train_time:165754ms step_avg:97.91ms +step:1694/1695 train_time:165851ms step_avg:97.91ms +step:1695/1695 train_time:165950ms step_avg:97.91ms +step:1695/1695 val_loss:3.2791 train_time:166045ms step_avg:97.96ms +peak memory allocated: 34073 MiB reserved: 49476 MiB diff --git a/records/082725_FA3/27d1e0d2-df15-41a9-9496-492a21943fb1.txt b/records/082725_FA3/27d1e0d2-df15-41a9-9496-492a21943fb1.txt new file mode 100644 index 000000000..9652d6c2d --- /dev/null +++ b/records/082725_FA3/27d1e0d2-df15-41a9-9496-492a21943fb1.txt @@ -0,0 +1,2808 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# 
----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + 
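+ # Worked example (illustrative numbers, not from the record): with M = 768 and + # BLOCK_SIZE_M = BLOCK_SIZE_N = 128, num_pid_m = num_pid_n = 6, so each matrix in the + # batch owns 36 program ids: pids 0-35 map to batch 0, pids 36-71 to batch 1, and so on.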
batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + 
configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, 
meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
a given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context-based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr 
https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to the nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int): + assert input_seq.ndim == 2 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), + reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + 
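+ # (the seek above skips the 256 * 4 = 1024-byte header; the raw uint16 token payload follows)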
nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +class EOSBatchFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256): + # Precompute EOS positions once per shard + self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 # pointer into eos_idx (start EOS for next step) + self.pos = 0 # logical stream position within this shard + self.world_size = world_size + def seek(self, pos: int): + # Set pointer to the first EOS >= pos + self.i = np.searchsorted(self.eos_idx, pos) + if self.i >= len(self.eos_idx): + raise StopIteration("Seek past last EOS.") + self.pos = pos + def next_batch(self, batch_size_local: int, seq_len: int): + n = len(self.eos_idx) + if self.i >= n: + raise StopIteration("No more EOS in this shard.") + starts = [[] for _ in range(self.world_size)] + idx = self.i + cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1 + for r in range(self.world_size): + for _ in range(batch_size_local): + start = cur + 1 + target = start + seq_len # need seq_len tokens before next EOS + j = np.searchsorted(self.eos_idx, target) + if j >= n: + raise StopIteration("Insufficient EOS ahead; hit tail of shard.") + starts[r].append(start) + idx = j + cur = self.eos_idx[idx] # next seq must also start at a new doc + advance = self.eos_idx[idx] - self.pos # advance the stream position to the EOS that ends the last sequence + self.pos += advance + self.i = idx + return starts, advance + + +def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True): + # align_to_bos: each sequence begins with a Beginning-of-Sequence token and sequences don't overlap + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert batch_size % world_size == 0, "Batch size must be divisible by world size" + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + + finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None + if align_to_bos: finder.seek(pos) + + while True: + batch_size_local = batch_size // world_size + num_tokens_global = batch_size * seq_len + + if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + + if align_to_bos: + try: + batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len) + start_idxs = batch_starts[rank] + except StopIteration: + # This shard is exhausted; load the next one and retry on the next loop iteration. 
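+ # (any tokens after the last complete document in the current shard are skipped)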
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+                finder = EOSBatchFinder(tokens, world_size=world_size)
+                continue
+
+            bufs = [tokens[s: s + seq_len + 1] for s in start_idxs]
+            buf = torch.stack(bufs, dim=0)
+            _inputs = buf[:, :-1]
+            _targets = buf[:, 1:]
+        else:
+            batch_span = num_tokens_global
+            start_pos_local = pos + rank * (batch_size_local * seq_len)
+            end_pos_local = start_pos_local + (batch_size_local * seq_len)
+
+            buf = tokens[start_pos_local: end_pos_local + 1]
+
+            _inputs = buf[:-1].view(batch_size_local, seq_len)
+            _targets = buf[1:].view(batch_size_local, seq_len)
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True)
+        )
+
+        pos += batch_span
+
+        if new_params is not None:
+            # makes it possible for the generator to receive a new (batch_size, seq_len) via .send()
+            new_batch_size, new_seq_len = new_params
+            assert new_batch_size % world_size == 0, "New batch size must be divisible by world size"
+            batch_size = new_batch_size
+            seq_len = new_seq_len
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_seq_len: int = 1024 * 2
+    train_batch_size: int = 24 * 8
+    val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size.
+    # optimization
+    num_iterations: int = 1695 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    bandwidth: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
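+
+# Minimal sketch of how EOSBatchFinder carves document-aligned sequences out of
+# a token stream. Everything here is illustrative: the toy tensor, eos_id=5, and
+# the helper name are invented for the example, and the function is never called.
+def _demo_eos_batch_finder():
+    # toy stream: documents delimited by EOS id 5 (real shards use eos_id=50256)
+    toy = torch.tensor([5, 1, 2, 3, 5, 4, 4, 4, 4, 5, 6, 6, 5, 7, 7, 7, 7, 7, 5])
+    finder = EOSBatchFinder(toy, world_size=1, eos_id=5)
+    finder.seek(0)
+    starts, advance = finder.next_batch(batch_size_local=1, seq_len=3)
+    # starts == [[1]]: the sequence starts right after the EOS at index 0, and
+    # toy[1:1+3+1] == [1, 2, 3, 5] yields seq_len inputs plus the shifted targets
+    return starts, advance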
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_seq_len, args.val_seq_len)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr_and_ws(step: int):
+    x = step / (1 + args.num_iterations) # progress in training
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return lr, args.ws_schedule[ws_idx]
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 60
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len)
+for step in range(warmup_steps):
+    inputs, targets = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each one
+    model(inputs, targets, ws, ws // 2).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+#       Training and validation        #
+########################################
+
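+# Quick spot-check of the get_lr_and_ws schedule defined above (the probe steps
+# are arbitrary choices for illustration): the lr multiplier holds at 1.0 for the
+# first 1 - cooldown_frac = 55% of training, then decays linearly toward 0.1,
+# while the window size walks ws_schedule = (3, 7, 11) across thirds of the run.
+assert get_lr_and_ws(0) == (1.0, 3)
+_lr_probe, _ws_probe = get_lr_and_ws(int(0.775 * (1 + args.num_iterations))) # ~midway through cooldown
+assert _ws_probe == 11 and 0.1 < _lr_probe < 1.0
+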
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + lr, ws = get_lr_and_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % (world_size * args.val_seq_len) == 0 + val_steps = args.val_tokens // (world_size * args.val_seq_len) + val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, ws, ws // 2) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, ws, ws // 2).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * lr + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Wed Aug 27 03:43:24 2025 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.6 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:00:0B.0 Off | Off | +| N/A 30C P0 115W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:00:0C.0 Off | Off | +| N/A 32C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:00:0D.0 Off | Off | +| N/A 33C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:00:0E.0 Off | Off | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:00:0F.0 Off | Off | +| N/A 30C P0 111W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:00:10.0 Off | Off | +| N/A 34C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:00:11.0 Off | Off | +| N/A 32C P0 111W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:00:12.0 Off | Off | +| N/A 31C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| ++---------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:511ms step_avg:510.59ms +step:2/1695 train_time:534ms step_avg:266.84ms +step:3/1695 train_time:604ms step_avg:201.32ms +step:4/1695 train_time:696ms step_avg:174.01ms +step:5/1695 train_time:789ms step_avg:157.81ms +step:6/1695 train_time:882ms step_avg:147.03ms +step:7/1695 train_time:975ms step_avg:139.35ms +step:8/1695 train_time:1069ms step_avg:133.63ms +step:9/1695 train_time:1163ms step_avg:129.21ms +step:10/1695 train_time:1256ms step_avg:125.59ms +step:11/1695 train_time:1349ms step_avg:122.66ms +step:12/1695 train_time:1447ms step_avg:120.59ms +step:13/1695 train_time:1547ms step_avg:119.02ms +step:14/1695 train_time:1644ms step_avg:117.41ms +step:15/1695 train_time:1738ms step_avg:115.86ms +step:16/1695 train_time:1831ms step_avg:114.46ms +step:17/1695 train_time:1925ms step_avg:113.25ms +step:18/1695 train_time:2019ms step_avg:112.18ms +step:19/1695 train_time:2113ms step_avg:111.19ms +step:20/1695 train_time:2207ms step_avg:110.34ms +step:21/1695 train_time:2300ms step_avg:109.54ms +step:22/1695 train_time:2395ms step_avg:108.87ms 
+step:23/1695 train_time:2491ms step_avg:108.31ms +step:24/1695 train_time:2587ms step_avg:107.80ms +step:25/1695 train_time:2683ms step_avg:107.33ms +step:26/1695 train_time:2778ms step_avg:106.85ms +step:27/1695 train_time:2872ms step_avg:106.37ms +step:28/1695 train_time:2967ms step_avg:105.96ms +step:29/1695 train_time:3061ms step_avg:105.56ms +step:30/1695 train_time:3155ms step_avg:105.16ms +step:31/1695 train_time:3249ms step_avg:104.81ms +step:32/1695 train_time:3344ms step_avg:104.50ms +step:33/1695 train_time:3438ms step_avg:104.19ms +step:34/1695 train_time:3533ms step_avg:103.92ms +step:35/1695 train_time:3629ms step_avg:103.69ms +step:36/1695 train_time:3726ms step_avg:103.50ms +step:37/1695 train_time:3822ms step_avg:103.29ms +step:38/1695 train_time:3916ms step_avg:103.06ms +step:39/1695 train_time:4010ms step_avg:102.83ms +step:40/1695 train_time:4105ms step_avg:102.62ms +step:41/1695 train_time:4199ms step_avg:102.42ms +step:42/1695 train_time:4293ms step_avg:102.21ms +step:43/1695 train_time:4388ms step_avg:102.06ms +step:44/1695 train_time:4484ms step_avg:101.90ms +step:45/1695 train_time:4579ms step_avg:101.75ms +step:46/1695 train_time:4674ms step_avg:101.61ms +step:47/1695 train_time:4770ms step_avg:101.48ms +step:48/1695 train_time:4865ms step_avg:101.36ms +step:49/1695 train_time:4960ms step_avg:101.22ms +step:50/1695 train_time:5054ms step_avg:101.07ms +step:51/1695 train_time:5149ms step_avg:100.96ms +step:52/1695 train_time:5243ms step_avg:100.83ms +step:53/1695 train_time:5338ms step_avg:100.71ms +step:54/1695 train_time:5432ms step_avg:100.59ms +step:55/1695 train_time:5528ms step_avg:100.51ms +step:56/1695 train_time:5623ms step_avg:100.41ms +step:57/1695 train_time:5718ms step_avg:100.31ms +step:58/1695 train_time:5813ms step_avg:100.22ms +step:59/1695 train_time:5908ms step_avg:100.14ms +step:60/1695 train_time:6003ms step_avg:100.05ms +step:61/1695 train_time:6097ms step_avg:99.95ms +step:62/1695 train_time:6191ms step_avg:99.86ms +step:63/1695 train_time:6286ms step_avg:99.78ms +step:64/1695 train_time:6381ms step_avg:99.70ms +step:65/1695 train_time:6475ms step_avg:99.62ms +step:66/1695 train_time:6570ms step_avg:99.55ms +step:67/1695 train_time:6665ms step_avg:99.48ms +step:68/1695 train_time:6760ms step_avg:99.40ms +step:69/1695 train_time:6854ms step_avg:99.33ms +step:70/1695 train_time:6949ms step_avg:99.26ms +step:71/1695 train_time:7043ms step_avg:99.20ms +step:72/1695 train_time:7138ms step_avg:99.14ms +step:73/1695 train_time:7232ms step_avg:99.07ms +step:74/1695 train_time:7327ms step_avg:99.01ms +step:75/1695 train_time:7422ms step_avg:98.96ms +step:76/1695 train_time:7516ms step_avg:98.89ms +step:77/1695 train_time:7610ms step_avg:98.84ms +step:78/1695 train_time:7706ms step_avg:98.79ms +step:79/1695 train_time:7801ms step_avg:98.75ms +step:80/1695 train_time:7895ms step_avg:98.69ms +step:81/1695 train_time:7990ms step_avg:98.64ms +step:82/1695 train_time:8085ms step_avg:98.59ms +step:83/1695 train_time:8179ms step_avg:98.54ms +step:84/1695 train_time:8273ms step_avg:98.49ms +step:85/1695 train_time:8369ms step_avg:98.46ms +step:86/1695 train_time:8463ms step_avg:98.41ms +step:87/1695 train_time:8557ms step_avg:98.36ms +step:88/1695 train_time:8652ms step_avg:98.31ms +step:89/1695 train_time:8747ms step_avg:98.28ms +step:90/1695 train_time:8842ms step_avg:98.24ms +step:91/1695 train_time:8936ms step_avg:98.20ms +step:92/1695 train_time:9030ms step_avg:98.16ms +step:93/1695 train_time:9126ms step_avg:98.13ms +step:94/1695 train_time:9221ms 
step_avg:98.10ms +step:95/1695 train_time:9315ms step_avg:98.05ms +step:96/1695 train_time:9409ms step_avg:98.01ms +step:97/1695 train_time:9504ms step_avg:97.98ms +step:98/1695 train_time:9597ms step_avg:97.93ms +step:99/1695 train_time:9691ms step_avg:97.89ms +step:100/1695 train_time:9786ms step_avg:97.86ms +step:101/1695 train_time:9881ms step_avg:97.84ms +step:102/1695 train_time:9975ms step_avg:97.79ms +step:103/1695 train_time:10069ms step_avg:97.76ms +step:104/1695 train_time:10164ms step_avg:97.73ms +step:105/1695 train_time:10259ms step_avg:97.70ms +step:106/1695 train_time:10352ms step_avg:97.66ms +step:107/1695 train_time:10448ms step_avg:97.64ms +step:108/1695 train_time:10543ms step_avg:97.62ms +step:109/1695 train_time:10636ms step_avg:97.58ms +step:110/1695 train_time:10730ms step_avg:97.54ms +step:111/1695 train_time:10826ms step_avg:97.53ms +step:112/1695 train_time:10922ms step_avg:97.51ms +step:113/1695 train_time:11016ms step_avg:97.48ms +step:114/1695 train_time:11110ms step_avg:97.45ms +step:115/1695 train_time:11205ms step_avg:97.44ms +step:116/1695 train_time:11300ms step_avg:97.41ms +step:117/1695 train_time:11393ms step_avg:97.37ms +step:118/1695 train_time:11487ms step_avg:97.35ms +step:119/1695 train_time:11582ms step_avg:97.33ms +step:120/1695 train_time:11676ms step_avg:97.30ms +step:121/1695 train_time:11770ms step_avg:97.27ms +step:122/1695 train_time:11864ms step_avg:97.25ms +step:123/1695 train_time:11959ms step_avg:97.22ms +step:124/1695 train_time:12052ms step_avg:97.20ms +step:125/1695 train_time:12148ms step_avg:97.18ms +step:125/1695 val_loss:4.3128 train_time:12241ms step_avg:97.92ms +step:126/1695 train_time:12267ms step_avg:97.36ms +step:127/1695 train_time:12345ms step_avg:97.20ms +step:128/1695 train_time:12446ms step_avg:97.23ms +step:129/1695 train_time:12540ms step_avg:97.21ms +step:130/1695 train_time:12635ms step_avg:97.19ms +step:131/1695 train_time:12728ms step_avg:97.16ms +step:132/1695 train_time:12821ms step_avg:97.13ms +step:133/1695 train_time:12915ms step_avg:97.11ms +step:134/1695 train_time:13008ms step_avg:97.08ms +step:135/1695 train_time:13102ms step_avg:97.05ms +step:136/1695 train_time:13196ms step_avg:97.03ms +step:137/1695 train_time:13291ms step_avg:97.01ms +step:138/1695 train_time:13388ms step_avg:97.01ms +step:139/1695 train_time:13482ms step_avg:96.99ms +step:140/1695 train_time:13577ms step_avg:96.98ms +step:141/1695 train_time:13672ms step_avg:96.96ms +step:142/1695 train_time:13766ms step_avg:96.94ms +step:143/1695 train_time:13859ms step_avg:96.92ms +step:144/1695 train_time:13954ms step_avg:96.90ms +step:145/1695 train_time:14047ms step_avg:96.88ms +step:146/1695 train_time:14140ms step_avg:96.85ms +step:147/1695 train_time:14235ms step_avg:96.84ms +step:148/1695 train_time:14331ms step_avg:96.83ms +step:149/1695 train_time:14426ms step_avg:96.82ms +step:150/1695 train_time:14520ms step_avg:96.80ms +step:151/1695 train_time:14614ms step_avg:96.78ms +step:152/1695 train_time:14709ms step_avg:96.77ms +step:153/1695 train_time:14802ms step_avg:96.75ms +step:154/1695 train_time:14896ms step_avg:96.73ms +step:155/1695 train_time:14991ms step_avg:96.72ms +step:156/1695 train_time:15084ms step_avg:96.69ms +step:157/1695 train_time:15178ms step_avg:96.68ms +step:158/1695 train_time:15272ms step_avg:96.66ms +step:159/1695 train_time:15366ms step_avg:96.64ms +step:160/1695 train_time:15461ms step_avg:96.63ms +step:161/1695 train_time:15556ms step_avg:96.62ms +step:162/1695 train_time:15651ms step_avg:96.61ms +step:163/1695 
train_time:15745ms step_avg:96.59ms +step:164/1695 train_time:15839ms step_avg:96.58ms +step:165/1695 train_time:15934ms step_avg:96.57ms +step:166/1695 train_time:16029ms step_avg:96.56ms +step:167/1695 train_time:16123ms step_avg:96.55ms +step:168/1695 train_time:16217ms step_avg:96.53ms +step:169/1695 train_time:16312ms step_avg:96.52ms +step:170/1695 train_time:16406ms step_avg:96.51ms +step:171/1695 train_time:16501ms step_avg:96.50ms +step:172/1695 train_time:16596ms step_avg:96.49ms +step:173/1695 train_time:16939ms step_avg:97.91ms +step:174/1695 train_time:17041ms step_avg:97.94ms +step:175/1695 train_time:17135ms step_avg:97.91ms +step:176/1695 train_time:17228ms step_avg:97.88ms +step:177/1695 train_time:17321ms step_avg:97.86ms +step:178/1695 train_time:17414ms step_avg:97.83ms +step:179/1695 train_time:17509ms step_avg:97.82ms +step:180/1695 train_time:17602ms step_avg:97.79ms +step:181/1695 train_time:17695ms step_avg:97.77ms +step:182/1695 train_time:17788ms step_avg:97.74ms +step:183/1695 train_time:17884ms step_avg:97.73ms +step:184/1695 train_time:17983ms step_avg:97.73ms +step:185/1695 train_time:18078ms step_avg:97.72ms +step:186/1695 train_time:18174ms step_avg:97.71ms +step:187/1695 train_time:18268ms step_avg:97.69ms +step:188/1695 train_time:18361ms step_avg:97.67ms +step:189/1695 train_time:18455ms step_avg:97.65ms +step:190/1695 train_time:18548ms step_avg:97.62ms +step:191/1695 train_time:18641ms step_avg:97.60ms +step:192/1695 train_time:18735ms step_avg:97.58ms +step:193/1695 train_time:18830ms step_avg:97.56ms +step:194/1695 train_time:18924ms step_avg:97.55ms +step:195/1695 train_time:19018ms step_avg:97.53ms +step:196/1695 train_time:19114ms step_avg:97.52ms +step:197/1695 train_time:19209ms step_avg:97.51ms +step:198/1695 train_time:19302ms step_avg:97.49ms +step:199/1695 train_time:19397ms step_avg:97.47ms +step:200/1695 train_time:19490ms step_avg:97.45ms +step:201/1695 train_time:19583ms step_avg:97.43ms +step:202/1695 train_time:19677ms step_avg:97.41ms +step:203/1695 train_time:19771ms step_avg:97.40ms +step:204/1695 train_time:19866ms step_avg:97.38ms +step:205/1695 train_time:19960ms step_avg:97.37ms +step:206/1695 train_time:20056ms step_avg:97.36ms +step:207/1695 train_time:20151ms step_avg:97.35ms +step:208/1695 train_time:20244ms step_avg:97.33ms +step:209/1695 train_time:20338ms step_avg:97.31ms +step:210/1695 train_time:20433ms step_avg:97.30ms +step:211/1695 train_time:20527ms step_avg:97.29ms +step:212/1695 train_time:20621ms step_avg:97.27ms +step:213/1695 train_time:20715ms step_avg:97.26ms +step:214/1695 train_time:20810ms step_avg:97.24ms +step:215/1695 train_time:20904ms step_avg:97.23ms +step:216/1695 train_time:20998ms step_avg:97.21ms +step:217/1695 train_time:21094ms step_avg:97.21ms +step:218/1695 train_time:21188ms step_avg:97.19ms +step:219/1695 train_time:21281ms step_avg:97.17ms +step:220/1695 train_time:21376ms step_avg:97.16ms +step:221/1695 train_time:21470ms step_avg:97.15ms +step:222/1695 train_time:21563ms step_avg:97.13ms +step:223/1695 train_time:21657ms step_avg:97.12ms +step:224/1695 train_time:21751ms step_avg:97.10ms +step:225/1695 train_time:21845ms step_avg:97.09ms +step:226/1695 train_time:21939ms step_avg:97.07ms +step:227/1695 train_time:22034ms step_avg:97.06ms +step:228/1695 train_time:22128ms step_avg:97.05ms +step:229/1695 train_time:22221ms step_avg:97.03ms +step:230/1695 train_time:22315ms step_avg:97.02ms +step:231/1695 train_time:22410ms step_avg:97.01ms +step:232/1695 train_time:22505ms step_avg:97.00ms 
+step:233/1695 train_time:22599ms step_avg:96.99ms +step:234/1695 train_time:22693ms step_avg:96.98ms +step:235/1695 train_time:22787ms step_avg:96.97ms +step:236/1695 train_time:22881ms step_avg:96.95ms +step:237/1695 train_time:22975ms step_avg:96.94ms +step:238/1695 train_time:23071ms step_avg:96.94ms +step:239/1695 train_time:23166ms step_avg:96.93ms +step:240/1695 train_time:23259ms step_avg:96.91ms +step:241/1695 train_time:23354ms step_avg:96.90ms +step:242/1695 train_time:23448ms step_avg:96.89ms +step:243/1695 train_time:23541ms step_avg:96.88ms +step:244/1695 train_time:23637ms step_avg:96.87ms +step:245/1695 train_time:23731ms step_avg:96.86ms +step:246/1695 train_time:23825ms step_avg:96.85ms +step:247/1695 train_time:23919ms step_avg:96.84ms +step:248/1695 train_time:24014ms step_avg:96.83ms +step:249/1695 train_time:24109ms step_avg:96.83ms +step:250/1695 train_time:24204ms step_avg:96.81ms +step:250/1695 val_loss:3.9758 train_time:24295ms step_avg:97.18ms +step:251/1695 train_time:24320ms step_avg:96.89ms +step:252/1695 train_time:24399ms step_avg:96.82ms +step:253/1695 train_time:24500ms step_avg:96.84ms +step:254/1695 train_time:24595ms step_avg:96.83ms +step:255/1695 train_time:24689ms step_avg:96.82ms +step:256/1695 train_time:24782ms step_avg:96.81ms +step:257/1695 train_time:24876ms step_avg:96.79ms +step:258/1695 train_time:24969ms step_avg:96.78ms +step:259/1695 train_time:25062ms step_avg:96.76ms +step:260/1695 train_time:25155ms step_avg:96.75ms +step:261/1695 train_time:25248ms step_avg:96.74ms +step:262/1695 train_time:25344ms step_avg:96.73ms +step:263/1695 train_time:25441ms step_avg:96.73ms +step:264/1695 train_time:25538ms step_avg:96.73ms +step:265/1695 train_time:25632ms step_avg:96.73ms +step:266/1695 train_time:25726ms step_avg:96.71ms +step:267/1695 train_time:25820ms step_avg:96.70ms +step:268/1695 train_time:25914ms step_avg:96.69ms +step:269/1695 train_time:26007ms step_avg:96.68ms +step:270/1695 train_time:26100ms step_avg:96.67ms +step:271/1695 train_time:26193ms step_avg:96.65ms +step:272/1695 train_time:26287ms step_avg:96.64ms +step:273/1695 train_time:26382ms step_avg:96.64ms +step:274/1695 train_time:26478ms step_avg:96.63ms +step:275/1695 train_time:26574ms step_avg:96.63ms +step:276/1695 train_time:26668ms step_avg:96.62ms +step:277/1695 train_time:26762ms step_avg:96.61ms +step:278/1695 train_time:26856ms step_avg:96.61ms +step:279/1695 train_time:26950ms step_avg:96.60ms +step:280/1695 train_time:27044ms step_avg:96.59ms +step:281/1695 train_time:27137ms step_avg:96.57ms +step:282/1695 train_time:27231ms step_avg:96.56ms +step:283/1695 train_time:27324ms step_avg:96.55ms +step:284/1695 train_time:27419ms step_avg:96.54ms +step:285/1695 train_time:27513ms step_avg:96.54ms +step:286/1695 train_time:27608ms step_avg:96.53ms +step:287/1695 train_time:27702ms step_avg:96.52ms +step:288/1695 train_time:27796ms step_avg:96.51ms +step:289/1695 train_time:27891ms step_avg:96.51ms +step:290/1695 train_time:27984ms step_avg:96.50ms +step:291/1695 train_time:28078ms step_avg:96.49ms +step:292/1695 train_time:28171ms step_avg:96.48ms +step:293/1695 train_time:28265ms step_avg:96.47ms +step:294/1695 train_time:28359ms step_avg:96.46ms +step:295/1695 train_time:28453ms step_avg:96.45ms +step:296/1695 train_time:28547ms step_avg:96.44ms +step:297/1695 train_time:28641ms step_avg:96.43ms +step:298/1695 train_time:28735ms step_avg:96.43ms +step:299/1695 train_time:28831ms step_avg:96.42ms +step:300/1695 train_time:28925ms step_avg:96.42ms +step:301/1695 
train_time:29019ms step_avg:96.41ms +step:302/1695 train_time:29114ms step_avg:96.40ms +step:303/1695 train_time:29208ms step_avg:96.39ms +step:304/1695 train_time:29301ms step_avg:96.39ms +step:305/1695 train_time:29396ms step_avg:96.38ms +step:306/1695 train_time:29490ms step_avg:96.37ms +step:307/1695 train_time:29584ms step_avg:96.37ms +step:308/1695 train_time:29678ms step_avg:96.36ms +step:309/1695 train_time:29772ms step_avg:96.35ms +step:310/1695 train_time:29866ms step_avg:96.34ms +step:311/1695 train_time:29960ms step_avg:96.33ms +step:312/1695 train_time:30055ms step_avg:96.33ms +step:313/1695 train_time:30149ms step_avg:96.32ms +step:314/1695 train_time:30242ms step_avg:96.31ms +step:315/1695 train_time:30337ms step_avg:96.31ms +step:316/1695 train_time:30431ms step_avg:96.30ms +step:317/1695 train_time:30525ms step_avg:96.29ms +step:318/1695 train_time:30619ms step_avg:96.29ms +step:319/1695 train_time:30713ms step_avg:96.28ms +step:320/1695 train_time:30807ms step_avg:96.27ms +step:321/1695 train_time:30900ms step_avg:96.26ms +step:322/1695 train_time:30995ms step_avg:96.26ms +step:323/1695 train_time:31089ms step_avg:96.25ms +step:324/1695 train_time:31182ms step_avg:96.24ms +step:325/1695 train_time:31277ms step_avg:96.24ms +step:326/1695 train_time:31371ms step_avg:96.23ms +step:327/1695 train_time:31465ms step_avg:96.22ms +step:328/1695 train_time:31559ms step_avg:96.22ms +step:329/1695 train_time:31654ms step_avg:96.21ms +step:330/1695 train_time:31750ms step_avg:96.21ms +step:331/1695 train_time:31843ms step_avg:96.20ms +step:332/1695 train_time:31938ms step_avg:96.20ms +step:333/1695 train_time:32032ms step_avg:96.19ms +step:334/1695 train_time:32126ms step_avg:96.19ms +step:335/1695 train_time:32220ms step_avg:96.18ms +step:336/1695 train_time:32315ms step_avg:96.18ms +step:337/1695 train_time:32410ms step_avg:96.17ms +step:338/1695 train_time:32503ms step_avg:96.16ms +step:339/1695 train_time:32597ms step_avg:96.16ms +step:340/1695 train_time:32692ms step_avg:96.15ms +step:341/1695 train_time:32785ms step_avg:96.14ms +step:342/1695 train_time:32879ms step_avg:96.14ms +step:343/1695 train_time:32974ms step_avg:96.13ms +step:344/1695 train_time:33069ms step_avg:96.13ms +step:345/1695 train_time:33399ms step_avg:96.81ms +step:346/1695 train_time:33523ms step_avg:96.89ms +step:347/1695 train_time:33615ms step_avg:96.87ms +step:348/1695 train_time:33709ms step_avg:96.87ms +step:349/1695 train_time:33802ms step_avg:96.85ms +step:350/1695 train_time:33895ms step_avg:96.84ms +step:351/1695 train_time:33988ms step_avg:96.83ms +step:352/1695 train_time:34081ms step_avg:96.82ms +step:353/1695 train_time:34174ms step_avg:96.81ms +step:354/1695 train_time:34267ms step_avg:96.80ms +step:355/1695 train_time:34364ms step_avg:96.80ms +step:356/1695 train_time:34462ms step_avg:96.80ms +step:357/1695 train_time:34558ms step_avg:96.80ms +step:358/1695 train_time:34653ms step_avg:96.80ms +step:359/1695 train_time:34747ms step_avg:96.79ms +step:360/1695 train_time:34840ms step_avg:96.78ms +step:361/1695 train_time:34934ms step_avg:96.77ms +step:362/1695 train_time:35026ms step_avg:96.76ms +step:363/1695 train_time:35119ms step_avg:96.75ms +step:364/1695 train_time:35213ms step_avg:96.74ms +step:365/1695 train_time:35306ms step_avg:96.73ms +step:366/1695 train_time:35402ms step_avg:96.73ms +step:367/1695 train_time:35497ms step_avg:96.72ms +step:368/1695 train_time:35591ms step_avg:96.72ms +step:369/1695 train_time:35686ms step_avg:96.71ms +step:370/1695 train_time:35780ms step_avg:96.70ms 
+step:371/1695 train_time:35874ms step_avg:96.69ms +step:372/1695 train_time:35967ms step_avg:96.68ms +step:373/1695 train_time:36060ms step_avg:96.68ms +step:374/1695 train_time:36154ms step_avg:96.67ms +step:375/1695 train_time:36248ms step_avg:96.66ms +step:375/1695 val_loss:3.8203 train_time:36339ms step_avg:96.90ms +step:376/1695 train_time:36364ms step_avg:96.71ms +step:377/1695 train_time:36442ms step_avg:96.66ms +step:378/1695 train_time:36539ms step_avg:96.66ms +step:379/1695 train_time:36633ms step_avg:96.66ms +step:380/1695 train_time:36726ms step_avg:96.65ms +step:381/1695 train_time:36820ms step_avg:96.64ms +step:382/1695 train_time:36912ms step_avg:96.63ms +step:383/1695 train_time:37005ms step_avg:96.62ms +step:384/1695 train_time:37098ms step_avg:96.61ms +step:385/1695 train_time:37190ms step_avg:96.60ms +step:386/1695 train_time:37284ms step_avg:96.59ms +step:387/1695 train_time:37379ms step_avg:96.59ms +step:388/1695 train_time:37475ms step_avg:96.59ms +step:389/1695 train_time:37570ms step_avg:96.58ms +step:390/1695 train_time:37665ms step_avg:96.58ms +step:391/1695 train_time:37759ms step_avg:96.57ms +step:392/1695 train_time:37852ms step_avg:96.56ms +step:393/1695 train_time:37946ms step_avg:96.55ms +step:394/1695 train_time:38039ms step_avg:96.55ms +step:395/1695 train_time:38131ms step_avg:96.53ms +step:396/1695 train_time:38225ms step_avg:96.53ms +step:397/1695 train_time:38319ms step_avg:96.52ms +step:398/1695 train_time:38413ms step_avg:96.52ms +step:399/1695 train_time:38508ms step_avg:96.51ms +step:400/1695 train_time:38604ms step_avg:96.51ms +step:401/1695 train_time:38699ms step_avg:96.51ms +step:402/1695 train_time:38792ms step_avg:96.50ms +step:403/1695 train_time:38886ms step_avg:96.49ms +step:404/1695 train_time:38980ms step_avg:96.49ms +step:405/1695 train_time:39073ms step_avg:96.48ms +step:406/1695 train_time:39166ms step_avg:96.47ms +step:407/1695 train_time:39260ms step_avg:96.46ms +step:408/1695 train_time:39354ms step_avg:96.46ms +step:409/1695 train_time:39448ms step_avg:96.45ms +step:410/1695 train_time:39543ms step_avg:96.45ms +step:411/1695 train_time:39638ms step_avg:96.44ms +step:412/1695 train_time:39732ms step_avg:96.44ms +step:413/1695 train_time:39825ms step_avg:96.43ms +step:414/1695 train_time:39919ms step_avg:96.42ms +step:415/1695 train_time:40012ms step_avg:96.41ms +step:416/1695 train_time:40105ms step_avg:96.41ms +step:417/1695 train_time:40199ms step_avg:96.40ms +step:418/1695 train_time:40292ms step_avg:96.39ms +step:419/1695 train_time:40386ms step_avg:96.39ms +step:420/1695 train_time:40481ms step_avg:96.38ms +step:421/1695 train_time:40575ms step_avg:96.38ms +step:422/1695 train_time:40669ms step_avg:96.37ms +step:423/1695 train_time:40764ms step_avg:96.37ms +step:424/1695 train_time:40858ms step_avg:96.36ms +step:425/1695 train_time:40952ms step_avg:96.36ms +step:426/1695 train_time:41046ms step_avg:96.35ms +step:427/1695 train_time:41140ms step_avg:96.35ms +step:428/1695 train_time:41233ms step_avg:96.34ms +step:429/1695 train_time:41327ms step_avg:96.33ms +step:430/1695 train_time:41420ms step_avg:96.33ms +step:431/1695 train_time:41514ms step_avg:96.32ms +step:432/1695 train_time:41608ms step_avg:96.32ms +step:433/1695 train_time:41702ms step_avg:96.31ms +step:434/1695 train_time:41797ms step_avg:96.31ms +step:435/1695 train_time:41890ms step_avg:96.30ms +step:436/1695 train_time:41985ms step_avg:96.30ms +step:437/1695 train_time:42079ms step_avg:96.29ms +step:438/1695 train_time:42173ms step_avg:96.29ms +step:439/1695 
train_time:42267ms step_avg:96.28ms +step:440/1695 train_time:42361ms step_avg:96.28ms +step:441/1695 train_time:42455ms step_avg:96.27ms +step:442/1695 train_time:42549ms step_avg:96.26ms +step:443/1695 train_time:42643ms step_avg:96.26ms +step:444/1695 train_time:42737ms step_avg:96.25ms +step:445/1695 train_time:42830ms step_avg:96.25ms +step:446/1695 train_time:42924ms step_avg:96.24ms +step:447/1695 train_time:43019ms step_avg:96.24ms +step:448/1695 train_time:43114ms step_avg:96.24ms +step:449/1695 train_time:43208ms step_avg:96.23ms +step:450/1695 train_time:43302ms step_avg:96.23ms +step:451/1695 train_time:43397ms step_avg:96.22ms +step:452/1695 train_time:43490ms step_avg:96.22ms +step:453/1695 train_time:43584ms step_avg:96.21ms +step:454/1695 train_time:43679ms step_avg:96.21ms +step:455/1695 train_time:43773ms step_avg:96.20ms +step:456/1695 train_time:43866ms step_avg:96.20ms +step:457/1695 train_time:43960ms step_avg:96.19ms +step:458/1695 train_time:44055ms step_avg:96.19ms +step:459/1695 train_time:44149ms step_avg:96.18ms +step:460/1695 train_time:44243ms step_avg:96.18ms +step:461/1695 train_time:44338ms step_avg:96.18ms +step:462/1695 train_time:44432ms step_avg:96.17ms +step:463/1695 train_time:44526ms step_avg:96.17ms +step:464/1695 train_time:44621ms step_avg:96.16ms +step:465/1695 train_time:44714ms step_avg:96.16ms +step:466/1695 train_time:44808ms step_avg:96.15ms +step:467/1695 train_time:44902ms step_avg:96.15ms +step:468/1695 train_time:44996ms step_avg:96.14ms +step:469/1695 train_time:45089ms step_avg:96.14ms +step:470/1695 train_time:45184ms step_avg:96.14ms +step:471/1695 train_time:45278ms step_avg:96.13ms +step:472/1695 train_time:45372ms step_avg:96.13ms +step:473/1695 train_time:45466ms step_avg:96.12ms +step:474/1695 train_time:45560ms step_avg:96.12ms +step:475/1695 train_time:45653ms step_avg:96.11ms +step:476/1695 train_time:45748ms step_avg:96.11ms +step:477/1695 train_time:45842ms step_avg:96.10ms +step:478/1695 train_time:45935ms step_avg:96.10ms +step:479/1695 train_time:46029ms step_avg:96.09ms +step:480/1695 train_time:46123ms step_avg:96.09ms +step:481/1695 train_time:46217ms step_avg:96.08ms +step:482/1695 train_time:46311ms step_avg:96.08ms +step:483/1695 train_time:46405ms step_avg:96.08ms +step:484/1695 train_time:46499ms step_avg:96.07ms +step:485/1695 train_time:46593ms step_avg:96.07ms +step:486/1695 train_time:46688ms step_avg:96.07ms +step:487/1695 train_time:46782ms step_avg:96.06ms +step:488/1695 train_time:46876ms step_avg:96.06ms +step:489/1695 train_time:46969ms step_avg:96.05ms +step:490/1695 train_time:47064ms step_avg:96.05ms +step:491/1695 train_time:47158ms step_avg:96.05ms +step:492/1695 train_time:47252ms step_avg:96.04ms +step:493/1695 train_time:47346ms step_avg:96.04ms +step:494/1695 train_time:47442ms step_avg:96.04ms +step:495/1695 train_time:47536ms step_avg:96.03ms +step:496/1695 train_time:47629ms step_avg:96.03ms +step:497/1695 train_time:47724ms step_avg:96.02ms +step:498/1695 train_time:47820ms step_avg:96.02ms +step:499/1695 train_time:47914ms step_avg:96.02ms +step:500/1695 train_time:48008ms step_avg:96.02ms +step:500/1695 val_loss:3.7161 train_time:48100ms step_avg:96.20ms +step:501/1695 train_time:48124ms step_avg:96.06ms +step:502/1695 train_time:48204ms step_avg:96.02ms +step:503/1695 train_time:48302ms step_avg:96.03ms +step:504/1695 train_time:48397ms step_avg:96.03ms +step:505/1695 train_time:48491ms step_avg:96.02ms +step:506/1695 train_time:48584ms step_avg:96.02ms +step:507/1695 train_time:48678ms 
step_avg:96.01ms +step:508/1695 train_time:48771ms step_avg:96.01ms +step:509/1695 train_time:48864ms step_avg:96.00ms +step:510/1695 train_time:48957ms step_avg:95.99ms +step:511/1695 train_time:49050ms step_avg:95.99ms +step:512/1695 train_time:49146ms step_avg:95.99ms +step:513/1695 train_time:49242ms step_avg:95.99ms +step:514/1695 train_time:49337ms step_avg:95.99ms +step:515/1695 train_time:49432ms step_avg:95.98ms +step:516/1695 train_time:49525ms step_avg:95.98ms +step:517/1695 train_time:49619ms step_avg:95.97ms +step:518/1695 train_time:49713ms step_avg:95.97ms +step:519/1695 train_time:50082ms step_avg:96.50ms +step:520/1695 train_time:50228ms step_avg:96.59ms +step:521/1695 train_time:50320ms step_avg:96.58ms +step:522/1695 train_time:50412ms step_avg:96.58ms +step:523/1695 train_time:50505ms step_avg:96.57ms +step:524/1695 train_time:50598ms step_avg:96.56ms +step:525/1695 train_time:50691ms step_avg:96.55ms +step:526/1695 train_time:50784ms step_avg:96.55ms +step:527/1695 train_time:50878ms step_avg:96.54ms +step:528/1695 train_time:50971ms step_avg:96.54ms +step:529/1695 train_time:51069ms step_avg:96.54ms +step:530/1695 train_time:51167ms step_avg:96.54ms +step:531/1695 train_time:51264ms step_avg:96.54ms +step:532/1695 train_time:51358ms step_avg:96.54ms +step:533/1695 train_time:51452ms step_avg:96.53ms +step:534/1695 train_time:51545ms step_avg:96.53ms +step:535/1695 train_time:51638ms step_avg:96.52ms +step:536/1695 train_time:51732ms step_avg:96.51ms +step:537/1695 train_time:51824ms step_avg:96.51ms +step:538/1695 train_time:51918ms step_avg:96.50ms +step:539/1695 train_time:52014ms step_avg:96.50ms +step:540/1695 train_time:52110ms step_avg:96.50ms +step:541/1695 train_time:52204ms step_avg:96.50ms +step:542/1695 train_time:52299ms step_avg:96.49ms +step:543/1695 train_time:52393ms step_avg:96.49ms +step:544/1695 train_time:52486ms step_avg:96.48ms +step:545/1695 train_time:52580ms step_avg:96.48ms +step:546/1695 train_time:52674ms step_avg:96.47ms +step:547/1695 train_time:52767ms step_avg:96.47ms +step:548/1695 train_time:52860ms step_avg:96.46ms +step:549/1695 train_time:52954ms step_avg:96.46ms +step:550/1695 train_time:53049ms step_avg:96.45ms +step:551/1695 train_time:53143ms step_avg:96.45ms +step:552/1695 train_time:53238ms step_avg:96.45ms +step:553/1695 train_time:53331ms step_avg:96.44ms +step:554/1695 train_time:53425ms step_avg:96.43ms +step:555/1695 train_time:53519ms step_avg:96.43ms +step:556/1695 train_time:53614ms step_avg:96.43ms +step:557/1695 train_time:53708ms step_avg:96.42ms +step:558/1695 train_time:53801ms step_avg:96.42ms +step:559/1695 train_time:53895ms step_avg:96.41ms +step:560/1695 train_time:53989ms step_avg:96.41ms +step:561/1695 train_time:54083ms step_avg:96.40ms +step:562/1695 train_time:54178ms step_avg:96.40ms +step:563/1695 train_time:54273ms step_avg:96.40ms +step:564/1695 train_time:54367ms step_avg:96.40ms +step:565/1695 train_time:54461ms step_avg:96.39ms +step:566/1695 train_time:54555ms step_avg:96.39ms +step:567/1695 train_time:54650ms step_avg:96.38ms +step:568/1695 train_time:54746ms step_avg:96.38ms +step:569/1695 train_time:54841ms step_avg:96.38ms +step:570/1695 train_time:54938ms step_avg:96.38ms +step:571/1695 train_time:55035ms step_avg:96.38ms +step:572/1695 train_time:55131ms step_avg:96.38ms +step:573/1695 train_time:55228ms step_avg:96.38ms +step:574/1695 train_time:55323ms step_avg:96.38ms +step:575/1695 train_time:55420ms step_avg:96.38ms +step:576/1695 train_time:55517ms step_avg:96.38ms +step:577/1695 
train_time:55614ms step_avg:96.38ms +step:578/1695 train_time:55711ms step_avg:96.39ms +step:579/1695 train_time:55806ms step_avg:96.38ms +step:580/1695 train_time:55903ms step_avg:96.38ms +step:581/1695 train_time:55999ms step_avg:96.38ms +step:582/1695 train_time:56097ms step_avg:96.39ms +step:583/1695 train_time:56194ms step_avg:96.39ms +step:584/1695 train_time:56290ms step_avg:96.39ms +step:585/1695 train_time:56386ms step_avg:96.39ms +step:586/1695 train_time:56482ms step_avg:96.39ms +step:587/1695 train_time:56578ms step_avg:96.39ms +step:588/1695 train_time:56675ms step_avg:96.39ms +step:589/1695 train_time:56772ms step_avg:96.39ms +step:590/1695 train_time:56868ms step_avg:96.39ms +step:591/1695 train_time:56964ms step_avg:96.39ms +step:592/1695 train_time:57061ms step_avg:96.39ms +step:593/1695 train_time:57157ms step_avg:96.39ms +step:594/1695 train_time:57254ms step_avg:96.39ms +step:595/1695 train_time:57350ms step_avg:96.39ms +step:596/1695 train_time:57446ms step_avg:96.39ms +step:597/1695 train_time:57542ms step_avg:96.39ms +step:598/1695 train_time:57638ms step_avg:96.38ms +step:599/1695 train_time:57735ms step_avg:96.39ms +step:600/1695 train_time:57831ms step_avg:96.39ms +step:601/1695 train_time:57926ms step_avg:96.38ms +step:602/1695 train_time:58022ms step_avg:96.38ms +step:603/1695 train_time:58118ms step_avg:96.38ms +step:604/1695 train_time:58215ms step_avg:96.38ms +step:605/1695 train_time:58312ms step_avg:96.38ms +step:606/1695 train_time:58408ms step_avg:96.38ms +step:607/1695 train_time:58504ms step_avg:96.38ms +step:608/1695 train_time:58600ms step_avg:96.38ms +step:609/1695 train_time:58696ms step_avg:96.38ms +step:610/1695 train_time:58794ms step_avg:96.38ms +step:611/1695 train_time:58891ms step_avg:96.38ms +step:612/1695 train_time:58987ms step_avg:96.38ms +step:613/1695 train_time:59083ms step_avg:96.38ms +step:614/1695 train_time:59180ms step_avg:96.38ms +step:615/1695 train_time:59276ms step_avg:96.38ms +step:616/1695 train_time:59373ms step_avg:96.39ms +step:617/1695 train_time:59470ms step_avg:96.39ms +step:618/1695 train_time:59565ms step_avg:96.38ms +step:619/1695 train_time:59661ms step_avg:96.38ms +step:620/1695 train_time:59758ms step_avg:96.38ms +step:621/1695 train_time:59855ms step_avg:96.39ms +step:622/1695 train_time:59952ms step_avg:96.39ms +step:623/1695 train_time:60048ms step_avg:96.39ms +step:624/1695 train_time:60143ms step_avg:96.38ms +step:625/1695 train_time:60239ms step_avg:96.38ms +step:625/1695 val_loss:3.6203 train_time:60334ms step_avg:96.53ms +step:626/1695 train_time:60358ms step_avg:96.42ms +step:627/1695 train_time:60442ms step_avg:96.40ms +step:628/1695 train_time:60540ms step_avg:96.40ms +step:629/1695 train_time:60637ms step_avg:96.40ms +step:630/1695 train_time:60732ms step_avg:96.40ms +step:631/1695 train_time:60827ms step_avg:96.40ms +step:632/1695 train_time:60921ms step_avg:96.39ms +step:633/1695 train_time:61017ms step_avg:96.39ms +step:634/1695 train_time:61112ms step_avg:96.39ms +step:635/1695 train_time:61208ms step_avg:96.39ms +step:636/1695 train_time:61305ms step_avg:96.39ms +step:637/1695 train_time:61405ms step_avg:96.40ms +step:638/1695 train_time:61502ms step_avg:96.40ms +step:639/1695 train_time:61599ms step_avg:96.40ms +step:640/1695 train_time:61696ms step_avg:96.40ms +step:641/1695 train_time:61793ms step_avg:96.40ms +step:642/1695 train_time:61887ms step_avg:96.40ms +step:643/1695 train_time:61983ms step_avg:96.40ms +step:644/1695 train_time:62078ms step_avg:96.39ms +step:645/1695 train_time:62175ms 
step_avg:96.40ms +step:646/1695 train_time:62272ms step_avg:96.40ms +step:647/1695 train_time:62368ms step_avg:96.40ms +step:648/1695 train_time:62465ms step_avg:96.40ms +step:649/1695 train_time:62562ms step_avg:96.40ms +step:650/1695 train_time:62660ms step_avg:96.40ms +step:651/1695 train_time:62756ms step_avg:96.40ms +step:652/1695 train_time:62852ms step_avg:96.40ms +step:653/1695 train_time:62947ms step_avg:96.40ms +step:654/1695 train_time:63041ms step_avg:96.39ms +step:655/1695 train_time:63139ms step_avg:96.39ms +step:656/1695 train_time:63237ms step_avg:96.40ms +step:657/1695 train_time:63335ms step_avg:96.40ms +step:658/1695 train_time:63431ms step_avg:96.40ms +step:659/1695 train_time:63527ms step_avg:96.40ms +step:660/1695 train_time:63623ms step_avg:96.40ms +step:661/1695 train_time:63719ms step_avg:96.40ms +step:662/1695 train_time:63815ms step_avg:96.40ms +step:663/1695 train_time:63911ms step_avg:96.40ms +step:664/1695 train_time:64007ms step_avg:96.40ms +step:665/1695 train_time:64103ms step_avg:96.40ms +step:666/1695 train_time:64199ms step_avg:96.39ms +step:667/1695 train_time:64295ms step_avg:96.39ms +step:668/1695 train_time:64391ms step_avg:96.39ms +step:669/1695 train_time:64487ms step_avg:96.39ms +step:670/1695 train_time:64583ms step_avg:96.39ms +step:671/1695 train_time:64679ms step_avg:96.39ms +step:672/1695 train_time:64775ms step_avg:96.39ms +step:673/1695 train_time:64871ms step_avg:96.39ms +step:674/1695 train_time:64967ms step_avg:96.39ms +step:675/1695 train_time:65062ms step_avg:96.39ms +step:676/1695 train_time:65158ms step_avg:96.39ms +step:677/1695 train_time:65255ms step_avg:96.39ms +step:678/1695 train_time:65351ms step_avg:96.39ms +step:679/1695 train_time:65447ms step_avg:96.39ms +step:680/1695 train_time:65542ms step_avg:96.39ms +step:681/1695 train_time:65639ms step_avg:96.39ms +step:682/1695 train_time:65735ms step_avg:96.39ms +step:683/1695 train_time:65832ms step_avg:96.39ms +step:684/1695 train_time:65929ms step_avg:96.39ms +step:685/1695 train_time:66025ms step_avg:96.39ms +step:686/1695 train_time:66120ms step_avg:96.38ms +step:687/1695 train_time:66216ms step_avg:96.38ms +step:688/1695 train_time:66313ms step_avg:96.39ms +step:689/1695 train_time:66410ms step_avg:96.39ms +step:690/1695 train_time:66505ms step_avg:96.38ms +step:691/1695 train_time:66945ms step_avg:96.88ms +step:692/1695 train_time:67026ms step_avg:96.86ms +step:693/1695 train_time:67121ms step_avg:96.86ms +step:694/1695 train_time:67216ms step_avg:96.85ms +step:695/1695 train_time:67311ms step_avg:96.85ms +step:696/1695 train_time:67406ms step_avg:96.85ms +step:697/1695 train_time:67501ms step_avg:96.84ms +step:698/1695 train_time:67597ms step_avg:96.84ms +step:699/1695 train_time:67692ms step_avg:96.84ms +step:700/1695 train_time:67787ms step_avg:96.84ms +step:701/1695 train_time:67886ms step_avg:96.84ms +step:702/1695 train_time:67985ms step_avg:96.84ms +step:703/1695 train_time:68082ms step_avg:96.84ms +step:704/1695 train_time:68178ms step_avg:96.84ms +step:705/1695 train_time:68274ms step_avg:96.84ms +step:706/1695 train_time:68370ms step_avg:96.84ms +step:707/1695 train_time:68465ms step_avg:96.84ms +step:708/1695 train_time:68561ms step_avg:96.84ms +step:709/1695 train_time:68656ms step_avg:96.84ms +step:710/1695 train_time:68752ms step_avg:96.83ms +step:711/1695 train_time:68850ms step_avg:96.84ms +step:712/1695 train_time:68946ms step_avg:96.83ms +step:713/1695 train_time:69043ms step_avg:96.83ms +step:714/1695 train_time:69139ms step_avg:96.83ms +step:715/1695 
train_time:69235ms step_avg:96.83ms +step:716/1695 train_time:69331ms step_avg:96.83ms +step:717/1695 train_time:69426ms step_avg:96.83ms +step:718/1695 train_time:69522ms step_avg:96.83ms +step:719/1695 train_time:69618ms step_avg:96.83ms +step:720/1695 train_time:69714ms step_avg:96.82ms +step:721/1695 train_time:69811ms step_avg:96.83ms +step:722/1695 train_time:69908ms step_avg:96.83ms +step:723/1695 train_time:70004ms step_avg:96.82ms +step:724/1695 train_time:70101ms step_avg:96.82ms +step:725/1695 train_time:70198ms step_avg:96.83ms +step:726/1695 train_time:70296ms step_avg:96.83ms +step:727/1695 train_time:70392ms step_avg:96.83ms +step:728/1695 train_time:70487ms step_avg:96.82ms +step:729/1695 train_time:70583ms step_avg:96.82ms +step:730/1695 train_time:70679ms step_avg:96.82ms +step:731/1695 train_time:70776ms step_avg:96.82ms +step:732/1695 train_time:70874ms step_avg:96.82ms +step:733/1695 train_time:70970ms step_avg:96.82ms +step:734/1695 train_time:71066ms step_avg:96.82ms +step:735/1695 train_time:71162ms step_avg:96.82ms +step:736/1695 train_time:71258ms step_avg:96.82ms +step:737/1695 train_time:71355ms step_avg:96.82ms +step:738/1695 train_time:71451ms step_avg:96.82ms +step:739/1695 train_time:71546ms step_avg:96.81ms +step:740/1695 train_time:71641ms step_avg:96.81ms +step:741/1695 train_time:71739ms step_avg:96.81ms +step:742/1695 train_time:71837ms step_avg:96.81ms +step:743/1695 train_time:71934ms step_avg:96.82ms +step:744/1695 train_time:72030ms step_avg:96.81ms +step:745/1695 train_time:72125ms step_avg:96.81ms +step:746/1695 train_time:72221ms step_avg:96.81ms +step:747/1695 train_time:72318ms step_avg:96.81ms +step:748/1695 train_time:72414ms step_avg:96.81ms +step:749/1695 train_time:72510ms step_avg:96.81ms +step:750/1695 train_time:72605ms step_avg:96.81ms +step:750/1695 val_loss:3.5663 train_time:72700ms step_avg:96.93ms +step:751/1695 train_time:72724ms step_avg:96.84ms +step:752/1695 train_time:72807ms step_avg:96.82ms +step:753/1695 train_time:72904ms step_avg:96.82ms +step:754/1695 train_time:73002ms step_avg:96.82ms +step:755/1695 train_time:73098ms step_avg:96.82ms +step:756/1695 train_time:73192ms step_avg:96.82ms +step:757/1695 train_time:73287ms step_avg:96.81ms +step:758/1695 train_time:73381ms step_avg:96.81ms +step:759/1695 train_time:73476ms step_avg:96.81ms +step:760/1695 train_time:73571ms step_avg:96.80ms +step:761/1695 train_time:73668ms step_avg:96.80ms +step:762/1695 train_time:73766ms step_avg:96.81ms +step:763/1695 train_time:73864ms step_avg:96.81ms +step:764/1695 train_time:73962ms step_avg:96.81ms +step:765/1695 train_time:74059ms step_avg:96.81ms +step:766/1695 train_time:74154ms step_avg:96.81ms +step:767/1695 train_time:74249ms step_avg:96.80ms +step:768/1695 train_time:74344ms step_avg:96.80ms +step:769/1695 train_time:74439ms step_avg:96.80ms +step:770/1695 train_time:74535ms step_avg:96.80ms +step:771/1695 train_time:74630ms step_avg:96.80ms +step:772/1695 train_time:74726ms step_avg:96.80ms +step:773/1695 train_time:74824ms step_avg:96.80ms +step:774/1695 train_time:74921ms step_avg:96.80ms +step:775/1695 train_time:75018ms step_avg:96.80ms +step:776/1695 train_time:75114ms step_avg:96.80ms +step:777/1695 train_time:75209ms step_avg:96.79ms +step:778/1695 train_time:75304ms step_avg:96.79ms +step:779/1695 train_time:75400ms step_avg:96.79ms +step:780/1695 train_time:75496ms step_avg:96.79ms +step:781/1695 train_time:75592ms step_avg:96.79ms +step:782/1695 train_time:75687ms step_avg:96.79ms +step:783/1695 train_time:75784ms 
step_avg:96.79ms +step:784/1695 train_time:75880ms step_avg:96.79ms +step:785/1695 train_time:75977ms step_avg:96.79ms +step:786/1695 train_time:76073ms step_avg:96.79ms +step:787/1695 train_time:76168ms step_avg:96.78ms +step:788/1695 train_time:76264ms step_avg:96.78ms +step:789/1695 train_time:76359ms step_avg:96.78ms +step:790/1695 train_time:76455ms step_avg:96.78ms +step:791/1695 train_time:76549ms step_avg:96.78ms +step:792/1695 train_time:76645ms step_avg:96.77ms +step:793/1695 train_time:76742ms step_avg:96.77ms +step:794/1695 train_time:76839ms step_avg:96.77ms +step:795/1695 train_time:76935ms step_avg:96.77ms +step:796/1695 train_time:77031ms step_avg:96.77ms +step:797/1695 train_time:77126ms step_avg:96.77ms +step:798/1695 train_time:77224ms step_avg:96.77ms +step:799/1695 train_time:77320ms step_avg:96.77ms +step:800/1695 train_time:77416ms step_avg:96.77ms +step:801/1695 train_time:77511ms step_avg:96.77ms +step:802/1695 train_time:77607ms step_avg:96.77ms +step:803/1695 train_time:77702ms step_avg:96.76ms +step:804/1695 train_time:77797ms step_avg:96.76ms +step:805/1695 train_time:77893ms step_avg:96.76ms +step:806/1695 train_time:77989ms step_avg:96.76ms +step:807/1695 train_time:78085ms step_avg:96.76ms +step:808/1695 train_time:78181ms step_avg:96.76ms +step:809/1695 train_time:78276ms step_avg:96.76ms +step:810/1695 train_time:78371ms step_avg:96.75ms +step:811/1695 train_time:78467ms step_avg:96.75ms +step:812/1695 train_time:78562ms step_avg:96.75ms +step:813/1695 train_time:78657ms step_avg:96.75ms +step:814/1695 train_time:78752ms step_avg:96.75ms +step:815/1695 train_time:78848ms step_avg:96.75ms +step:816/1695 train_time:78945ms step_avg:96.75ms +step:817/1695 train_time:79042ms step_avg:96.75ms +step:818/1695 train_time:79138ms step_avg:96.75ms +step:819/1695 train_time:79234ms step_avg:96.74ms +step:820/1695 train_time:79329ms step_avg:96.74ms +step:821/1695 train_time:79425ms step_avg:96.74ms +step:822/1695 train_time:79520ms step_avg:96.74ms +step:823/1695 train_time:79616ms step_avg:96.74ms +step:824/1695 train_time:79711ms step_avg:96.74ms +step:825/1695 train_time:79807ms step_avg:96.74ms +step:826/1695 train_time:79903ms step_avg:96.73ms +step:827/1695 train_time:79998ms step_avg:96.73ms +step:828/1695 train_time:80094ms step_avg:96.73ms +step:829/1695 train_time:80190ms step_avg:96.73ms +step:830/1695 train_time:80286ms step_avg:96.73ms +step:831/1695 train_time:80382ms step_avg:96.73ms +step:832/1695 train_time:80477ms step_avg:96.73ms +step:833/1695 train_time:80572ms step_avg:96.73ms +step:834/1695 train_time:80668ms step_avg:96.72ms +step:835/1695 train_time:80763ms step_avg:96.72ms +step:836/1695 train_time:80859ms step_avg:96.72ms +step:837/1695 train_time:80955ms step_avg:96.72ms +step:838/1695 train_time:81050ms step_avg:96.72ms +step:839/1695 train_time:81146ms step_avg:96.72ms +step:840/1695 train_time:81243ms step_avg:96.72ms +step:841/1695 train_time:81339ms step_avg:96.72ms +step:842/1695 train_time:81435ms step_avg:96.72ms +step:843/1695 train_time:81530ms step_avg:96.71ms +step:844/1695 train_time:81625ms step_avg:96.71ms +step:845/1695 train_time:81722ms step_avg:96.71ms +step:846/1695 train_time:81818ms step_avg:96.71ms +step:847/1695 train_time:81915ms step_avg:96.71ms +step:848/1695 train_time:82011ms step_avg:96.71ms +step:849/1695 train_time:82106ms step_avg:96.71ms +step:850/1695 train_time:82202ms step_avg:96.71ms +step:851/1695 train_time:82298ms step_avg:96.71ms +step:852/1695 train_time:82393ms step_avg:96.70ms +step:853/1695 
train_time:82488ms step_avg:96.70ms +step:854/1695 train_time:82583ms step_avg:96.70ms +step:855/1695 train_time:82679ms step_avg:96.70ms +step:856/1695 train_time:82775ms step_avg:96.70ms +step:857/1695 train_time:82870ms step_avg:96.70ms +step:858/1695 train_time:82966ms step_avg:96.70ms +step:859/1695 train_time:83063ms step_avg:96.70ms +step:860/1695 train_time:83159ms step_avg:96.70ms +step:861/1695 train_time:83255ms step_avg:96.70ms +step:862/1695 train_time:83350ms step_avg:96.69ms +step:863/1695 train_time:83679ms step_avg:96.96ms +step:864/1695 train_time:83862ms step_avg:97.06ms +step:865/1695 train_time:83955ms step_avg:97.06ms +step:866/1695 train_time:84050ms step_avg:97.06ms +step:867/1695 train_time:84145ms step_avg:97.05ms +step:868/1695 train_time:84240ms step_avg:97.05ms +step:869/1695 train_time:84336ms step_avg:97.05ms +step:870/1695 train_time:84431ms step_avg:97.05ms +step:871/1695 train_time:84525ms step_avg:97.04ms +step:872/1695 train_time:84620ms step_avg:97.04ms +step:873/1695 train_time:84718ms step_avg:97.04ms +step:874/1695 train_time:84818ms step_avg:97.05ms +step:875/1695 train_time:84917ms step_avg:97.05ms +step:875/1695 val_loss:3.5235 train_time:85011ms step_avg:97.16ms +step:876/1695 train_time:85037ms step_avg:97.07ms +step:877/1695 train_time:85116ms step_avg:97.05ms +step:878/1695 train_time:85213ms step_avg:97.05ms +step:879/1695 train_time:85309ms step_avg:97.05ms +step:880/1695 train_time:85404ms step_avg:97.05ms +step:881/1695 train_time:85499ms step_avg:97.05ms +step:882/1695 train_time:85594ms step_avg:97.05ms +step:883/1695 train_time:85690ms step_avg:97.04ms +step:884/1695 train_time:85785ms step_avg:97.04ms +step:885/1695 train_time:85879ms step_avg:97.04ms +step:886/1695 train_time:85976ms step_avg:97.04ms +step:887/1695 train_time:86075ms step_avg:97.04ms +step:888/1695 train_time:86174ms step_avg:97.04ms +step:889/1695 train_time:86271ms step_avg:97.04ms +step:890/1695 train_time:86367ms step_avg:97.04ms +step:891/1695 train_time:86462ms step_avg:97.04ms +step:892/1695 train_time:86557ms step_avg:97.04ms +step:893/1695 train_time:86653ms step_avg:97.04ms +step:894/1695 train_time:86749ms step_avg:97.04ms +step:895/1695 train_time:86845ms step_avg:97.03ms +step:896/1695 train_time:86940ms step_avg:97.03ms +step:897/1695 train_time:87038ms step_avg:97.03ms +step:898/1695 train_time:87136ms step_avg:97.03ms +step:899/1695 train_time:87234ms step_avg:97.03ms +step:900/1695 train_time:87331ms step_avg:97.03ms +step:901/1695 train_time:87427ms step_avg:97.03ms +step:902/1695 train_time:87522ms step_avg:97.03ms +step:903/1695 train_time:87617ms step_avg:97.03ms +step:904/1695 train_time:87714ms step_avg:97.03ms +step:905/1695 train_time:87810ms step_avg:97.03ms +step:906/1695 train_time:87905ms step_avg:97.03ms +step:907/1695 train_time:88002ms step_avg:97.03ms +step:908/1695 train_time:88099ms step_avg:97.03ms +step:909/1695 train_time:88197ms step_avg:97.03ms +step:910/1695 train_time:88295ms step_avg:97.03ms +step:911/1695 train_time:88392ms step_avg:97.03ms +step:912/1695 train_time:88487ms step_avg:97.03ms +step:913/1695 train_time:88582ms step_avg:97.02ms +step:914/1695 train_time:88678ms step_avg:97.02ms +step:915/1695 train_time:88774ms step_avg:97.02ms +step:916/1695 train_time:88870ms step_avg:97.02ms +step:917/1695 train_time:88965ms step_avg:97.02ms +step:918/1695 train_time:89060ms step_avg:97.02ms +step:919/1695 train_time:89157ms step_avg:97.02ms +step:920/1695 train_time:89255ms step_avg:97.02ms +step:921/1695 train_time:89353ms 
step_avg:97.02ms +step:922/1695 train_time:89451ms step_avg:97.02ms +step:923/1695 train_time:89547ms step_avg:97.02ms +step:924/1695 train_time:89642ms step_avg:97.01ms +step:925/1695 train_time:89738ms step_avg:97.01ms +step:926/1695 train_time:89833ms step_avg:97.01ms +step:927/1695 train_time:89929ms step_avg:97.01ms +step:928/1695 train_time:90025ms step_avg:97.01ms +step:929/1695 train_time:90120ms step_avg:97.01ms +step:930/1695 train_time:90216ms step_avg:97.01ms +step:931/1695 train_time:90313ms step_avg:97.01ms +step:932/1695 train_time:90410ms step_avg:97.01ms +step:933/1695 train_time:90506ms step_avg:97.01ms +step:934/1695 train_time:90602ms step_avg:97.00ms +step:935/1695 train_time:90697ms step_avg:97.00ms +step:936/1695 train_time:90794ms step_avg:97.00ms +step:937/1695 train_time:90890ms step_avg:97.00ms +step:938/1695 train_time:90985ms step_avg:97.00ms +step:939/1695 train_time:91081ms step_avg:97.00ms +step:940/1695 train_time:91177ms step_avg:97.00ms +step:941/1695 train_time:91274ms step_avg:97.00ms +step:942/1695 train_time:91371ms step_avg:97.00ms +step:943/1695 train_time:91468ms step_avg:97.00ms +step:944/1695 train_time:91563ms step_avg:97.00ms +step:945/1695 train_time:91660ms step_avg:96.99ms +step:946/1695 train_time:91756ms step_avg:96.99ms +step:947/1695 train_time:91855ms step_avg:97.00ms +step:948/1695 train_time:91952ms step_avg:97.00ms +step:949/1695 train_time:92048ms step_avg:96.99ms +step:950/1695 train_time:92144ms step_avg:96.99ms +step:951/1695 train_time:92239ms step_avg:96.99ms +step:952/1695 train_time:92336ms step_avg:96.99ms +step:953/1695 train_time:92433ms step_avg:96.99ms +step:954/1695 train_time:92529ms step_avg:96.99ms +step:955/1695 train_time:92625ms step_avg:96.99ms +step:956/1695 train_time:92721ms step_avg:96.99ms +step:957/1695 train_time:92817ms step_avg:96.99ms +step:958/1695 train_time:92914ms step_avg:96.99ms +step:959/1695 train_time:93011ms step_avg:96.99ms +step:960/1695 train_time:93107ms step_avg:96.99ms +step:961/1695 train_time:93203ms step_avg:96.99ms +step:962/1695 train_time:93299ms step_avg:96.98ms +step:963/1695 train_time:93395ms step_avg:96.98ms +step:964/1695 train_time:93491ms step_avg:96.98ms +step:965/1695 train_time:93587ms step_avg:96.98ms +step:966/1695 train_time:93682ms step_avg:96.98ms +step:967/1695 train_time:93779ms step_avg:96.98ms +step:968/1695 train_time:93874ms step_avg:96.98ms +step:969/1695 train_time:93970ms step_avg:96.98ms +step:970/1695 train_time:94066ms step_avg:96.98ms +step:971/1695 train_time:94162ms step_avg:96.97ms +step:972/1695 train_time:94259ms step_avg:96.97ms +step:973/1695 train_time:94356ms step_avg:96.97ms +step:974/1695 train_time:94452ms step_avg:96.97ms +step:975/1695 train_time:94549ms step_avg:96.97ms +step:976/1695 train_time:94643ms step_avg:96.97ms +step:977/1695 train_time:94739ms step_avg:96.97ms +step:978/1695 train_time:94835ms step_avg:96.97ms +step:979/1695 train_time:94932ms step_avg:96.97ms +step:980/1695 train_time:95028ms step_avg:96.97ms +step:981/1695 train_time:95124ms step_avg:96.97ms +step:982/1695 train_time:95219ms step_avg:96.96ms +step:983/1695 train_time:95316ms step_avg:96.96ms +step:984/1695 train_time:95412ms step_avg:96.96ms +step:985/1695 train_time:95508ms step_avg:96.96ms +step:986/1695 train_time:95603ms step_avg:96.96ms +step:987/1695 train_time:95699ms step_avg:96.96ms +step:988/1695 train_time:95795ms step_avg:96.96ms +step:989/1695 train_time:95891ms step_avg:96.96ms +step:990/1695 train_time:95987ms step_avg:96.96ms +step:991/1695 
train_time:96083ms step_avg:96.96ms +step:992/1695 train_time:96178ms step_avg:96.95ms +step:993/1695 train_time:96274ms step_avg:96.95ms +step:994/1695 train_time:96370ms step_avg:96.95ms +step:995/1695 train_time:96465ms step_avg:96.95ms +step:996/1695 train_time:96560ms step_avg:96.95ms +step:997/1695 train_time:96657ms step_avg:96.95ms +step:998/1695 train_time:96753ms step_avg:96.95ms +step:999/1695 train_time:96850ms step_avg:96.95ms +step:1000/1695 train_time:96947ms step_avg:96.95ms +step:1000/1695 val_loss:3.4841 train_time:97040ms step_avg:97.04ms +step:1001/1695 train_time:97064ms step_avg:96.97ms +step:1002/1695 train_time:97146ms step_avg:96.95ms +step:1003/1695 train_time:97243ms step_avg:96.95ms +step:1004/1695 train_time:97339ms step_avg:96.95ms +step:1005/1695 train_time:97434ms step_avg:96.95ms +step:1006/1695 train_time:97530ms step_avg:96.95ms +step:1007/1695 train_time:97624ms step_avg:96.95ms +step:1008/1695 train_time:97719ms step_avg:96.94ms +step:1009/1695 train_time:97815ms step_avg:96.94ms +step:1010/1695 train_time:97910ms step_avg:96.94ms +step:1011/1695 train_time:98007ms step_avg:96.94ms +step:1012/1695 train_time:98104ms step_avg:96.94ms +step:1013/1695 train_time:98201ms step_avg:96.94ms +step:1014/1695 train_time:98298ms step_avg:96.94ms +step:1015/1695 train_time:98395ms step_avg:96.94ms +step:1016/1695 train_time:98490ms step_avg:96.94ms +step:1017/1695 train_time:98586ms step_avg:96.94ms +step:1018/1695 train_time:98682ms step_avg:96.94ms +step:1019/1695 train_time:98777ms step_avg:96.93ms +step:1020/1695 train_time:98872ms step_avg:96.93ms +step:1021/1695 train_time:98967ms step_avg:96.93ms +step:1022/1695 train_time:99063ms step_avg:96.93ms +step:1023/1695 train_time:99160ms step_avg:96.93ms +step:1024/1695 train_time:99258ms step_avg:96.93ms +step:1025/1695 train_time:99355ms step_avg:96.93ms +step:1026/1695 train_time:99451ms step_avg:96.93ms +step:1027/1695 train_time:99547ms step_avg:96.93ms +step:1028/1695 train_time:99643ms step_avg:96.93ms +step:1029/1695 train_time:99738ms step_avg:96.93ms +step:1030/1695 train_time:99833ms step_avg:96.93ms +step:1031/1695 train_time:99930ms step_avg:96.93ms +step:1032/1695 train_time:100026ms step_avg:96.92ms +step:1033/1695 train_time:100122ms step_avg:96.92ms +step:1034/1695 train_time:100218ms step_avg:96.92ms +step:1035/1695 train_time:100315ms step_avg:96.92ms +step:1036/1695 train_time:100647ms step_avg:97.15ms +step:1037/1695 train_time:100826ms step_avg:97.23ms +step:1038/1695 train_time:100920ms step_avg:97.22ms +step:1039/1695 train_time:101015ms step_avg:97.22ms +step:1040/1695 train_time:101110ms step_avg:97.22ms +step:1041/1695 train_time:101204ms step_avg:97.22ms +step:1042/1695 train_time:101299ms step_avg:97.22ms +step:1043/1695 train_time:101394ms step_avg:97.21ms +step:1044/1695 train_time:101489ms step_avg:97.21ms +step:1045/1695 train_time:101584ms step_avg:97.21ms +step:1046/1695 train_time:101681ms step_avg:97.21ms +step:1047/1695 train_time:101783ms step_avg:97.21ms +step:1048/1695 train_time:101883ms step_avg:97.22ms +step:1049/1695 train_time:101980ms step_avg:97.22ms +step:1050/1695 train_time:102077ms step_avg:97.22ms +step:1051/1695 train_time:102173ms step_avg:97.21ms +step:1052/1695 train_time:102268ms step_avg:97.21ms +step:1053/1695 train_time:102362ms step_avg:97.21ms +step:1054/1695 train_time:102457ms step_avg:97.21ms +step:1055/1695 train_time:102553ms step_avg:97.21ms +step:1056/1695 train_time:102650ms step_avg:97.21ms +step:1057/1695 train_time:102747ms step_avg:97.21ms 
+step:1058/1695 train_time:102844ms step_avg:97.21ms +step:1059/1695 train_time:102941ms step_avg:97.21ms +step:1060/1695 train_time:103037ms step_avg:97.20ms +step:1061/1695 train_time:103134ms step_avg:97.20ms +step:1062/1695 train_time:103230ms step_avg:97.20ms +step:1063/1695 train_time:103325ms step_avg:97.20ms +step:1064/1695 train_time:103421ms step_avg:97.20ms +step:1065/1695 train_time:103516ms step_avg:97.20ms +step:1066/1695 train_time:103612ms step_avg:97.20ms +step:1067/1695 train_time:103709ms step_avg:97.20ms +step:1068/1695 train_time:103806ms step_avg:97.20ms +step:1069/1695 train_time:103901ms step_avg:97.19ms +step:1070/1695 train_time:103997ms step_avg:97.19ms +step:1071/1695 train_time:104094ms step_avg:97.19ms +step:1072/1695 train_time:104190ms step_avg:97.19ms +step:1073/1695 train_time:104285ms step_avg:97.19ms +step:1074/1695 train_time:104381ms step_avg:97.19ms +step:1075/1695 train_time:104476ms step_avg:97.19ms +step:1076/1695 train_time:104572ms step_avg:97.19ms +step:1077/1695 train_time:104668ms step_avg:97.18ms +step:1078/1695 train_time:104764ms step_avg:97.18ms +step:1079/1695 train_time:104860ms step_avg:97.18ms +step:1080/1695 train_time:104956ms step_avg:97.18ms +step:1081/1695 train_time:105052ms step_avg:97.18ms +step:1082/1695 train_time:105149ms step_avg:97.18ms +step:1083/1695 train_time:105246ms step_avg:97.18ms +step:1084/1695 train_time:105341ms step_avg:97.18ms +step:1085/1695 train_time:105437ms step_avg:97.18ms +step:1086/1695 train_time:105533ms step_avg:97.18ms +step:1087/1695 train_time:105630ms step_avg:97.18ms +step:1088/1695 train_time:105726ms step_avg:97.18ms +step:1089/1695 train_time:105822ms step_avg:97.17ms +step:1090/1695 train_time:105918ms step_avg:97.17ms +step:1091/1695 train_time:106014ms step_avg:97.17ms +step:1092/1695 train_time:106110ms step_avg:97.17ms +step:1093/1695 train_time:106206ms step_avg:97.17ms +step:1094/1695 train_time:106301ms step_avg:97.17ms +step:1095/1695 train_time:106397ms step_avg:97.17ms +step:1096/1695 train_time:106493ms step_avg:97.16ms +step:1097/1695 train_time:106589ms step_avg:97.16ms +step:1098/1695 train_time:106685ms step_avg:97.16ms +step:1099/1695 train_time:106781ms step_avg:97.16ms +step:1100/1695 train_time:106878ms step_avg:97.16ms +step:1101/1695 train_time:106974ms step_avg:97.16ms +step:1102/1695 train_time:107070ms step_avg:97.16ms +step:1103/1695 train_time:107166ms step_avg:97.16ms +step:1104/1695 train_time:107261ms step_avg:97.16ms +step:1105/1695 train_time:107358ms step_avg:97.16ms +step:1106/1695 train_time:107455ms step_avg:97.16ms +step:1107/1695 train_time:107551ms step_avg:97.16ms +step:1108/1695 train_time:107648ms step_avg:97.16ms +step:1109/1695 train_time:107743ms step_avg:97.15ms +step:1110/1695 train_time:107839ms step_avg:97.15ms +step:1111/1695 train_time:107935ms step_avg:97.15ms +step:1112/1695 train_time:108031ms step_avg:97.15ms +step:1113/1695 train_time:108127ms step_avg:97.15ms +step:1114/1695 train_time:108223ms step_avg:97.15ms +step:1115/1695 train_time:108319ms step_avg:97.15ms +step:1116/1695 train_time:108416ms step_avg:97.15ms +step:1117/1695 train_time:108512ms step_avg:97.15ms +step:1118/1695 train_time:108608ms step_avg:97.15ms +step:1119/1695 train_time:108704ms step_avg:97.14ms +step:1120/1695 train_time:108799ms step_avg:97.14ms +step:1121/1695 train_time:108896ms step_avg:97.14ms +step:1122/1695 train_time:108993ms step_avg:97.14ms +step:1123/1695 train_time:109090ms step_avg:97.14ms +step:1124/1695 train_time:109186ms step_avg:97.14ms 
+step:1125/1695 train_time:109281ms step_avg:97.14ms +step:1125/1695 val_loss:3.4352 train_time:109375ms step_avg:97.22ms +step:1126/1695 train_time:109400ms step_avg:97.16ms +step:1127/1695 train_time:109483ms step_avg:97.15ms +step:1128/1695 train_time:109580ms step_avg:97.15ms +step:1129/1695 train_time:109676ms step_avg:97.14ms +step:1130/1695 train_time:109771ms step_avg:97.14ms +step:1131/1695 train_time:109866ms step_avg:97.14ms +step:1132/1695 train_time:109960ms step_avg:97.14ms +step:1133/1695 train_time:110056ms step_avg:97.14ms +step:1134/1695 train_time:110153ms step_avg:97.14ms +step:1135/1695 train_time:110251ms step_avg:97.14ms +step:1136/1695 train_time:110349ms step_avg:97.14ms +step:1137/1695 train_time:110450ms step_avg:97.14ms +step:1138/1695 train_time:110549ms step_avg:97.14ms +step:1139/1695 train_time:110648ms step_avg:97.14ms +step:1140/1695 train_time:110745ms step_avg:97.14ms +step:1141/1695 train_time:110841ms step_avg:97.14ms +step:1142/1695 train_time:110938ms step_avg:97.14ms +step:1143/1695 train_time:111036ms step_avg:97.14ms +step:1144/1695 train_time:111133ms step_avg:97.14ms +step:1145/1695 train_time:111230ms step_avg:97.14ms +step:1146/1695 train_time:111328ms step_avg:97.15ms +step:1147/1695 train_time:111427ms step_avg:97.15ms +step:1148/1695 train_time:111527ms step_avg:97.15ms +step:1149/1695 train_time:111626ms step_avg:97.15ms +step:1150/1695 train_time:111724ms step_avg:97.15ms +step:1151/1695 train_time:111820ms step_avg:97.15ms +step:1152/1695 train_time:111918ms step_avg:97.15ms +step:1153/1695 train_time:112015ms step_avg:97.15ms +step:1154/1695 train_time:112112ms step_avg:97.15ms +step:1155/1695 train_time:112210ms step_avg:97.15ms +step:1156/1695 train_time:112307ms step_avg:97.15ms +step:1157/1695 train_time:112405ms step_avg:97.15ms +step:1158/1695 train_time:112503ms step_avg:97.15ms +step:1159/1695 train_time:112602ms step_avg:97.15ms +step:1160/1695 train_time:112701ms step_avg:97.16ms +step:1161/1695 train_time:112798ms step_avg:97.16ms +step:1162/1695 train_time:112895ms step_avg:97.16ms +step:1163/1695 train_time:112993ms step_avg:97.16ms +step:1164/1695 train_time:113089ms step_avg:97.16ms +step:1165/1695 train_time:113186ms step_avg:97.16ms +step:1166/1695 train_time:113283ms step_avg:97.16ms +step:1167/1695 train_time:113381ms step_avg:97.16ms +step:1168/1695 train_time:113479ms step_avg:97.16ms +step:1169/1695 train_time:113578ms step_avg:97.16ms +step:1170/1695 train_time:113678ms step_avg:97.16ms +step:1171/1695 train_time:113777ms step_avg:97.16ms +step:1172/1695 train_time:113875ms step_avg:97.16ms +step:1173/1695 train_time:113973ms step_avg:97.16ms +step:1174/1695 train_time:114071ms step_avg:97.16ms +step:1175/1695 train_time:114168ms step_avg:97.16ms +step:1176/1695 train_time:114265ms step_avg:97.16ms +step:1177/1695 train_time:114362ms step_avg:97.16ms +step:1178/1695 train_time:114460ms step_avg:97.16ms +step:1179/1695 train_time:114558ms step_avg:97.17ms +step:1180/1695 train_time:114659ms step_avg:97.17ms +step:1181/1695 train_time:114757ms step_avg:97.17ms +step:1182/1695 train_time:114856ms step_avg:97.17ms +step:1183/1695 train_time:114955ms step_avg:97.17ms +step:1184/1695 train_time:115054ms step_avg:97.17ms +step:1185/1695 train_time:115152ms step_avg:97.17ms +step:1186/1695 train_time:115250ms step_avg:97.18ms +step:1187/1695 train_time:115348ms step_avg:97.18ms +step:1188/1695 train_time:115445ms step_avg:97.18ms +step:1189/1695 train_time:115543ms step_avg:97.18ms +step:1190/1695 train_time:115640ms 
step_avg:97.18ms +step:1191/1695 train_time:115739ms step_avg:97.18ms +step:1192/1695 train_time:115838ms step_avg:97.18ms +step:1193/1695 train_time:115938ms step_avg:97.18ms +step:1194/1695 train_time:116038ms step_avg:97.18ms +step:1195/1695 train_time:116138ms step_avg:97.19ms +step:1196/1695 train_time:116236ms step_avg:97.19ms +step:1197/1695 train_time:116336ms step_avg:97.19ms +step:1198/1695 train_time:116435ms step_avg:97.19ms +step:1199/1695 train_time:116533ms step_avg:97.19ms +step:1200/1695 train_time:116632ms step_avg:97.19ms +step:1201/1695 train_time:116730ms step_avg:97.19ms +step:1202/1695 train_time:116828ms step_avg:97.19ms +step:1203/1695 train_time:116927ms step_avg:97.20ms +step:1204/1695 train_time:117024ms step_avg:97.20ms +step:1205/1695 train_time:117122ms step_avg:97.20ms +step:1206/1695 train_time:117220ms step_avg:97.20ms +step:1207/1695 train_time:117318ms step_avg:97.20ms +step:1208/1695 train_time:117661ms step_avg:97.40ms +step:1209/1695 train_time:117847ms step_avg:97.47ms +step:1210/1695 train_time:117942ms step_avg:97.47ms +step:1211/1695 train_time:118039ms step_avg:97.47ms +step:1212/1695 train_time:118136ms step_avg:97.47ms +step:1213/1695 train_time:118233ms step_avg:97.47ms +step:1214/1695 train_time:118330ms step_avg:97.47ms +step:1215/1695 train_time:118426ms step_avg:97.47ms +step:1216/1695 train_time:118522ms step_avg:97.47ms +step:1217/1695 train_time:118618ms step_avg:97.47ms +step:1218/1695 train_time:118722ms step_avg:97.47ms +step:1219/1695 train_time:118826ms step_avg:97.48ms +step:1220/1695 train_time:118924ms step_avg:97.48ms +step:1221/1695 train_time:119021ms step_avg:97.48ms +step:1222/1695 train_time:119118ms step_avg:97.48ms +step:1223/1695 train_time:119215ms step_avg:97.48ms +step:1224/1695 train_time:119312ms step_avg:97.48ms +step:1225/1695 train_time:119409ms step_avg:97.48ms +step:1226/1695 train_time:119505ms step_avg:97.48ms +step:1227/1695 train_time:119602ms step_avg:97.48ms +step:1228/1695 train_time:119701ms step_avg:97.48ms +step:1229/1695 train_time:119802ms step_avg:97.48ms +step:1230/1695 train_time:119902ms step_avg:97.48ms +step:1231/1695 train_time:120000ms step_avg:97.48ms +step:1232/1695 train_time:120098ms step_avg:97.48ms +step:1233/1695 train_time:120196ms step_avg:97.48ms +step:1234/1695 train_time:120294ms step_avg:97.48ms +step:1235/1695 train_time:120391ms step_avg:97.48ms +step:1236/1695 train_time:120488ms step_avg:97.48ms +step:1237/1695 train_time:120585ms step_avg:97.48ms +step:1238/1695 train_time:120683ms step_avg:97.48ms +step:1239/1695 train_time:120781ms step_avg:97.48ms +step:1240/1695 train_time:120880ms step_avg:97.48ms +step:1241/1695 train_time:120978ms step_avg:97.48ms +step:1242/1695 train_time:121077ms step_avg:97.49ms +step:1243/1695 train_time:121174ms step_avg:97.48ms +step:1244/1695 train_time:121272ms step_avg:97.49ms +step:1245/1695 train_time:121369ms step_avg:97.48ms +step:1246/1695 train_time:121466ms step_avg:97.48ms +step:1247/1695 train_time:121562ms step_avg:97.48ms +step:1248/1695 train_time:121661ms step_avg:97.48ms +step:1249/1695 train_time:121759ms step_avg:97.49ms +step:1250/1695 train_time:121859ms step_avg:97.49ms +step:1250/1695 val_loss:3.3872 train_time:121956ms step_avg:97.56ms +step:1251/1695 train_time:121980ms step_avg:97.51ms +step:1252/1695 train_time:122061ms step_avg:97.49ms +step:1253/1695 train_time:122158ms step_avg:97.49ms +step:1254/1695 train_time:122255ms step_avg:97.49ms +step:1255/1695 train_time:122351ms step_avg:97.49ms +step:1256/1695 
train_time:122448ms step_avg:97.49ms +step:1257/1695 train_time:122545ms step_avg:97.49ms +step:1258/1695 train_time:122641ms step_avg:97.49ms +step:1259/1695 train_time:122737ms step_avg:97.49ms +step:1260/1695 train_time:122835ms step_avg:97.49ms +step:1261/1695 train_time:122937ms step_avg:97.49ms +step:1262/1695 train_time:123037ms step_avg:97.49ms +step:1263/1695 train_time:123135ms step_avg:97.49ms +step:1264/1695 train_time:123233ms step_avg:97.49ms +step:1265/1695 train_time:123330ms step_avg:97.49ms +step:1266/1695 train_time:123427ms step_avg:97.49ms +step:1267/1695 train_time:123524ms step_avg:97.49ms +step:1268/1695 train_time:123621ms step_avg:97.49ms +step:1269/1695 train_time:123718ms step_avg:97.49ms +step:1270/1695 train_time:123816ms step_avg:97.49ms +step:1271/1695 train_time:123915ms step_avg:97.49ms +step:1272/1695 train_time:124014ms step_avg:97.50ms +step:1273/1695 train_time:124113ms step_avg:97.50ms +step:1274/1695 train_time:124210ms step_avg:97.50ms +step:1275/1695 train_time:124308ms step_avg:97.50ms +step:1276/1695 train_time:124406ms step_avg:97.50ms +step:1277/1695 train_time:124503ms step_avg:97.50ms +step:1278/1695 train_time:124600ms step_avg:97.50ms +step:1279/1695 train_time:124697ms step_avg:97.50ms +step:1280/1695 train_time:124795ms step_avg:97.50ms +step:1281/1695 train_time:124893ms step_avg:97.50ms +step:1282/1695 train_time:124991ms step_avg:97.50ms +step:1283/1695 train_time:125089ms step_avg:97.50ms +step:1284/1695 train_time:125187ms step_avg:97.50ms +step:1285/1695 train_time:125285ms step_avg:97.50ms +step:1286/1695 train_time:125383ms step_avg:97.50ms +step:1287/1695 train_time:125480ms step_avg:97.50ms +step:1288/1695 train_time:125578ms step_avg:97.50ms +step:1289/1695 train_time:125675ms step_avg:97.50ms +step:1290/1695 train_time:125772ms step_avg:97.50ms +step:1291/1695 train_time:125870ms step_avg:97.50ms +step:1292/1695 train_time:125968ms step_avg:97.50ms +step:1293/1695 train_time:126066ms step_avg:97.50ms +step:1294/1695 train_time:126165ms step_avg:97.50ms +step:1295/1695 train_time:126264ms step_avg:97.50ms +step:1296/1695 train_time:126364ms step_avg:97.50ms +step:1297/1695 train_time:126462ms step_avg:97.50ms +step:1298/1695 train_time:126560ms step_avg:97.50ms +step:1299/1695 train_time:126658ms step_avg:97.50ms +step:1300/1695 train_time:126756ms step_avg:97.50ms +step:1301/1695 train_time:126855ms step_avg:97.51ms +step:1302/1695 train_time:126952ms step_avg:97.51ms +step:1303/1695 train_time:127049ms step_avg:97.50ms +step:1304/1695 train_time:127147ms step_avg:97.51ms +step:1305/1695 train_time:127244ms step_avg:97.51ms +step:1306/1695 train_time:127343ms step_avg:97.51ms +step:1307/1695 train_time:127440ms step_avg:97.51ms +step:1308/1695 train_time:127539ms step_avg:97.51ms +step:1309/1695 train_time:127637ms step_avg:97.51ms +step:1310/1695 train_time:127735ms step_avg:97.51ms +step:1311/1695 train_time:127834ms step_avg:97.51ms +step:1312/1695 train_time:127931ms step_avg:97.51ms +step:1313/1695 train_time:128028ms step_avg:97.51ms +step:1314/1695 train_time:128126ms step_avg:97.51ms +step:1315/1695 train_time:128224ms step_avg:97.51ms +step:1316/1695 train_time:128322ms step_avg:97.51ms +step:1317/1695 train_time:128420ms step_avg:97.51ms +step:1318/1695 train_time:128518ms step_avg:97.51ms +step:1319/1695 train_time:128616ms step_avg:97.51ms +step:1320/1695 train_time:128714ms step_avg:97.51ms +step:1321/1695 train_time:128813ms step_avg:97.51ms +step:1322/1695 train_time:128910ms step_avg:97.51ms +step:1323/1695 
train_time:129008ms step_avg:97.51ms +step:1324/1695 train_time:129106ms step_avg:97.51ms +step:1325/1695 train_time:129204ms step_avg:97.51ms +step:1326/1695 train_time:129303ms step_avg:97.51ms +step:1327/1695 train_time:129401ms step_avg:97.51ms +step:1328/1695 train_time:129499ms step_avg:97.51ms +step:1329/1695 train_time:129597ms step_avg:97.51ms +step:1330/1695 train_time:129695ms step_avg:97.51ms +step:1331/1695 train_time:129792ms step_avg:97.51ms +step:1332/1695 train_time:129890ms step_avg:97.52ms +step:1333/1695 train_time:129988ms step_avg:97.52ms +step:1334/1695 train_time:130085ms step_avg:97.51ms +step:1335/1695 train_time:130182ms step_avg:97.51ms +step:1336/1695 train_time:130281ms step_avg:97.52ms +step:1337/1695 train_time:130381ms step_avg:97.52ms +step:1338/1695 train_time:130478ms step_avg:97.52ms +step:1339/1695 train_time:130577ms step_avg:97.52ms +step:1340/1695 train_time:130674ms step_avg:97.52ms +step:1341/1695 train_time:130773ms step_avg:97.52ms +step:1342/1695 train_time:130870ms step_avg:97.52ms +step:1343/1695 train_time:130967ms step_avg:97.52ms +step:1344/1695 train_time:131063ms step_avg:97.52ms +step:1345/1695 train_time:131161ms step_avg:97.52ms +step:1346/1695 train_time:131259ms step_avg:97.52ms +step:1347/1695 train_time:131357ms step_avg:97.52ms +step:1348/1695 train_time:131455ms step_avg:97.52ms +step:1349/1695 train_time:131553ms step_avg:97.52ms +step:1350/1695 train_time:131651ms step_avg:97.52ms +step:1351/1695 train_time:131749ms step_avg:97.52ms +step:1352/1695 train_time:131847ms step_avg:97.52ms +step:1353/1695 train_time:131946ms step_avg:97.52ms +step:1354/1695 train_time:132044ms step_avg:97.52ms +step:1355/1695 train_time:132142ms step_avg:97.52ms +step:1356/1695 train_time:132239ms step_avg:97.52ms +step:1357/1695 train_time:132336ms step_avg:97.52ms +step:1358/1695 train_time:132434ms step_avg:97.52ms +step:1359/1695 train_time:132532ms step_avg:97.52ms +step:1360/1695 train_time:132629ms step_avg:97.52ms +step:1361/1695 train_time:132727ms step_avg:97.52ms +step:1362/1695 train_time:132825ms step_avg:97.52ms +step:1363/1695 train_time:132925ms step_avg:97.52ms +step:1364/1695 train_time:133023ms step_avg:97.52ms +step:1365/1695 train_time:133120ms step_avg:97.52ms +step:1366/1695 train_time:133218ms step_avg:97.52ms +step:1367/1695 train_time:133316ms step_avg:97.52ms +step:1368/1695 train_time:133413ms step_avg:97.52ms +step:1369/1695 train_time:133511ms step_avg:97.52ms +step:1370/1695 train_time:133609ms step_avg:97.52ms +step:1371/1695 train_time:133707ms step_avg:97.53ms +step:1372/1695 train_time:133805ms step_avg:97.53ms +step:1373/1695 train_time:133904ms step_avg:97.53ms +step:1374/1695 train_time:134003ms step_avg:97.53ms +step:1375/1695 train_time:134101ms step_avg:97.53ms +step:1375/1695 val_loss:3.3494 train_time:134197ms step_avg:97.60ms +step:1376/1695 train_time:134222ms step_avg:97.55ms +step:1377/1695 train_time:134308ms step_avg:97.54ms +step:1378/1695 train_time:134406ms step_avg:97.54ms +step:1379/1695 train_time:134504ms step_avg:97.54ms +step:1380/1695 train_time:134602ms step_avg:97.54ms +step:1381/1695 train_time:135056ms step_avg:97.80ms +step:1382/1695 train_time:135131ms step_avg:97.78ms +step:1383/1695 train_time:135227ms step_avg:97.78ms +step:1384/1695 train_time:135324ms step_avg:97.78ms +step:1385/1695 train_time:135420ms step_avg:97.78ms +step:1386/1695 train_time:135517ms step_avg:97.78ms +step:1387/1695 train_time:135613ms step_avg:97.77ms +step:1388/1695 train_time:135709ms step_avg:97.77ms 
+step:1389/1695 train_time:135806ms step_avg:97.77ms +step:1390/1695 train_time:135905ms step_avg:97.77ms +step:1391/1695 train_time:136009ms step_avg:97.78ms +step:1392/1695 train_time:136109ms step_avg:97.78ms +step:1393/1695 train_time:136207ms step_avg:97.78ms +step:1394/1695 train_time:136304ms step_avg:97.78ms +step:1395/1695 train_time:136402ms step_avg:97.78ms +step:1396/1695 train_time:136499ms step_avg:97.78ms +step:1397/1695 train_time:136596ms step_avg:97.78ms +step:1398/1695 train_time:136692ms step_avg:97.78ms +step:1399/1695 train_time:136788ms step_avg:97.78ms +step:1400/1695 train_time:136887ms step_avg:97.78ms +step:1401/1695 train_time:136985ms step_avg:97.78ms +step:1402/1695 train_time:137085ms step_avg:97.78ms +step:1403/1695 train_time:137185ms step_avg:97.78ms +step:1404/1695 train_time:137283ms step_avg:97.78ms +step:1405/1695 train_time:137381ms step_avg:97.78ms +step:1406/1695 train_time:137479ms step_avg:97.78ms +step:1407/1695 train_time:137577ms step_avg:97.78ms +step:1408/1695 train_time:137674ms step_avg:97.78ms +step:1409/1695 train_time:137771ms step_avg:97.78ms +step:1410/1695 train_time:137869ms step_avg:97.78ms +step:1411/1695 train_time:137967ms step_avg:97.78ms +step:1412/1695 train_time:138066ms step_avg:97.78ms +step:1413/1695 train_time:138164ms step_avg:97.78ms +step:1414/1695 train_time:138263ms step_avg:97.78ms +step:1415/1695 train_time:138361ms step_avg:97.78ms +step:1416/1695 train_time:138459ms step_avg:97.78ms +step:1417/1695 train_time:138557ms step_avg:97.78ms +step:1418/1695 train_time:138655ms step_avg:97.78ms +step:1419/1695 train_time:138753ms step_avg:97.78ms +step:1420/1695 train_time:138850ms step_avg:97.78ms +step:1421/1695 train_time:138947ms step_avg:97.78ms +step:1422/1695 train_time:139045ms step_avg:97.78ms +step:1423/1695 train_time:139143ms step_avg:97.78ms +step:1424/1695 train_time:139242ms step_avg:97.78ms +step:1425/1695 train_time:139340ms step_avg:97.78ms +step:1426/1695 train_time:139438ms step_avg:97.78ms +step:1427/1695 train_time:139535ms step_avg:97.78ms +step:1428/1695 train_time:139633ms step_avg:97.78ms +step:1429/1695 train_time:139730ms step_avg:97.78ms +step:1430/1695 train_time:139828ms step_avg:97.78ms +step:1431/1695 train_time:139926ms step_avg:97.78ms +step:1432/1695 train_time:140024ms step_avg:97.78ms +step:1433/1695 train_time:140122ms step_avg:97.78ms +step:1434/1695 train_time:140222ms step_avg:97.78ms +step:1435/1695 train_time:140319ms step_avg:97.78ms +step:1436/1695 train_time:140418ms step_avg:97.78ms +step:1437/1695 train_time:140516ms step_avg:97.78ms +step:1438/1695 train_time:140613ms step_avg:97.78ms +step:1439/1695 train_time:140711ms step_avg:97.78ms +step:1440/1695 train_time:140808ms step_avg:97.78ms +step:1441/1695 train_time:140905ms step_avg:97.78ms +step:1442/1695 train_time:141003ms step_avg:97.78ms +step:1443/1695 train_time:141101ms step_avg:97.78ms +step:1444/1695 train_time:141199ms step_avg:97.78ms +step:1445/1695 train_time:141297ms step_avg:97.78ms +step:1446/1695 train_time:141395ms step_avg:97.78ms +step:1447/1695 train_time:141493ms step_avg:97.78ms +step:1448/1695 train_time:141590ms step_avg:97.78ms +step:1449/1695 train_time:141687ms step_avg:97.78ms +step:1450/1695 train_time:141785ms step_avg:97.78ms +step:1451/1695 train_time:141882ms step_avg:97.78ms +step:1452/1695 train_time:141980ms step_avg:97.78ms +step:1453/1695 train_time:142078ms step_avg:97.78ms +step:1454/1695 train_time:142176ms step_avg:97.78ms +step:1455/1695 train_time:142274ms step_avg:97.78ms 
+step:1456/1695 train_time:142372ms step_avg:97.78ms +step:1457/1695 train_time:142470ms step_avg:97.78ms +step:1458/1695 train_time:142568ms step_avg:97.78ms +step:1459/1695 train_time:142665ms step_avg:97.78ms +step:1460/1695 train_time:142764ms step_avg:97.78ms +step:1461/1695 train_time:142863ms step_avg:97.78ms +step:1462/1695 train_time:142961ms step_avg:97.78ms +step:1463/1695 train_time:143059ms step_avg:97.78ms +step:1464/1695 train_time:143156ms step_avg:97.78ms +step:1465/1695 train_time:143253ms step_avg:97.78ms +step:1466/1695 train_time:143351ms step_avg:97.78ms +step:1467/1695 train_time:143448ms step_avg:97.78ms +step:1468/1695 train_time:143546ms step_avg:97.78ms +step:1469/1695 train_time:143644ms step_avg:97.78ms +step:1470/1695 train_time:143742ms step_avg:97.78ms +step:1471/1695 train_time:143841ms step_avg:97.78ms +step:1472/1695 train_time:143938ms step_avg:97.78ms +step:1473/1695 train_time:144036ms step_avg:97.78ms +step:1474/1695 train_time:144132ms step_avg:97.78ms +step:1475/1695 train_time:144231ms step_avg:97.78ms +step:1476/1695 train_time:144328ms step_avg:97.78ms +step:1477/1695 train_time:144426ms step_avg:97.78ms +step:1478/1695 train_time:144524ms step_avg:97.78ms +step:1479/1695 train_time:144623ms step_avg:97.78ms +step:1480/1695 train_time:144722ms step_avg:97.79ms +step:1481/1695 train_time:144821ms step_avg:97.79ms +step:1482/1695 train_time:144919ms step_avg:97.79ms +step:1483/1695 train_time:145017ms step_avg:97.79ms +step:1484/1695 train_time:145115ms step_avg:97.79ms +step:1485/1695 train_time:145212ms step_avg:97.79ms +step:1486/1695 train_time:145310ms step_avg:97.79ms +step:1487/1695 train_time:145407ms step_avg:97.79ms +step:1488/1695 train_time:145505ms step_avg:97.79ms +step:1489/1695 train_time:145603ms step_avg:97.79ms +step:1490/1695 train_time:145702ms step_avg:97.79ms +step:1491/1695 train_time:145799ms step_avg:97.79ms +step:1492/1695 train_time:145896ms step_avg:97.79ms +step:1493/1695 train_time:145994ms step_avg:97.79ms +step:1494/1695 train_time:146092ms step_avg:97.79ms +step:1495/1695 train_time:146190ms step_avg:97.79ms +step:1496/1695 train_time:146288ms step_avg:97.79ms +step:1497/1695 train_time:146385ms step_avg:97.79ms +step:1498/1695 train_time:146482ms step_avg:97.79ms +step:1499/1695 train_time:146580ms step_avg:97.79ms +step:1500/1695 train_time:146679ms step_avg:97.79ms +step:1500/1695 val_loss:3.3158 train_time:146775ms step_avg:97.85ms +step:1501/1695 train_time:146802ms step_avg:97.80ms +step:1502/1695 train_time:146885ms step_avg:97.79ms +step:1503/1695 train_time:146985ms step_avg:97.79ms +step:1504/1695 train_time:147082ms step_avg:97.79ms +step:1505/1695 train_time:147180ms step_avg:97.79ms +step:1506/1695 train_time:147276ms step_avg:97.79ms +step:1507/1695 train_time:147372ms step_avg:97.79ms +step:1508/1695 train_time:147469ms step_avg:97.79ms +step:1509/1695 train_time:147566ms step_avg:97.79ms +step:1510/1695 train_time:147663ms step_avg:97.79ms +step:1511/1695 train_time:147762ms step_avg:97.79ms +step:1512/1695 train_time:147865ms step_avg:97.79ms +step:1513/1695 train_time:147965ms step_avg:97.80ms +step:1514/1695 train_time:148064ms step_avg:97.80ms +step:1515/1695 train_time:148162ms step_avg:97.80ms +step:1516/1695 train_time:148260ms step_avg:97.80ms +step:1517/1695 train_time:148356ms step_avg:97.80ms +step:1518/1695 train_time:148454ms step_avg:97.80ms +step:1519/1695 train_time:148551ms step_avg:97.80ms +step:1520/1695 train_time:148647ms step_avg:97.79ms +step:1521/1695 train_time:148745ms 
step_avg:97.79ms +step:1522/1695 train_time:148844ms step_avg:97.79ms +step:1523/1695 train_time:148943ms step_avg:97.80ms +step:1524/1695 train_time:149041ms step_avg:97.80ms +step:1525/1695 train_time:149140ms step_avg:97.80ms +step:1526/1695 train_time:149238ms step_avg:97.80ms +step:1527/1695 train_time:149336ms step_avg:97.80ms +step:1528/1695 train_time:149434ms step_avg:97.80ms +step:1529/1695 train_time:149531ms step_avg:97.80ms +step:1530/1695 train_time:149628ms step_avg:97.80ms +step:1531/1695 train_time:149726ms step_avg:97.80ms +step:1532/1695 train_time:149824ms step_avg:97.80ms +step:1533/1695 train_time:149922ms step_avg:97.80ms +step:1534/1695 train_time:150020ms step_avg:97.80ms +step:1535/1695 train_time:150119ms step_avg:97.80ms +step:1536/1695 train_time:150217ms step_avg:97.80ms +step:1537/1695 train_time:150316ms step_avg:97.80ms +step:1538/1695 train_time:150413ms step_avg:97.80ms +step:1539/1695 train_time:150510ms step_avg:97.80ms +step:1540/1695 train_time:150607ms step_avg:97.80ms +step:1541/1695 train_time:150704ms step_avg:97.80ms +step:1542/1695 train_time:150803ms step_avg:97.80ms +step:1543/1695 train_time:150902ms step_avg:97.80ms +step:1544/1695 train_time:151000ms step_avg:97.80ms +step:1545/1695 train_time:151099ms step_avg:97.80ms +step:1546/1695 train_time:151198ms step_avg:97.80ms +step:1547/1695 train_time:151296ms step_avg:97.80ms +step:1548/1695 train_time:151394ms step_avg:97.80ms +step:1549/1695 train_time:151492ms step_avg:97.80ms +step:1550/1695 train_time:151590ms step_avg:97.80ms +step:1551/1695 train_time:151687ms step_avg:97.80ms +step:1552/1695 train_time:152039ms step_avg:97.96ms +step:1553/1695 train_time:152209ms step_avg:98.01ms +step:1554/1695 train_time:152305ms step_avg:98.01ms +step:1555/1695 train_time:152401ms step_avg:98.01ms +step:1556/1695 train_time:152498ms step_avg:98.01ms +step:1557/1695 train_time:152595ms step_avg:98.01ms +step:1558/1695 train_time:152692ms step_avg:98.00ms +step:1559/1695 train_time:152787ms step_avg:98.00ms +step:1560/1695 train_time:152884ms step_avg:98.00ms +step:1561/1695 train_time:152980ms step_avg:98.00ms +step:1562/1695 train_time:153085ms step_avg:98.01ms +step:1563/1695 train_time:153187ms step_avg:98.01ms +step:1564/1695 train_time:153287ms step_avg:98.01ms +step:1565/1695 train_time:153384ms step_avg:98.01ms +step:1566/1695 train_time:153482ms step_avg:98.01ms +step:1567/1695 train_time:153579ms step_avg:98.01ms +step:1568/1695 train_time:153677ms step_avg:98.01ms +step:1569/1695 train_time:153774ms step_avg:98.01ms +step:1570/1695 train_time:153871ms step_avg:98.01ms +step:1571/1695 train_time:153968ms step_avg:98.01ms +step:1572/1695 train_time:154066ms step_avg:98.01ms +step:1573/1695 train_time:154166ms step_avg:98.01ms +step:1574/1695 train_time:154265ms step_avg:98.01ms +step:1575/1695 train_time:154364ms step_avg:98.01ms +step:1576/1695 train_time:154461ms step_avg:98.01ms +step:1577/1695 train_time:154559ms step_avg:98.01ms +step:1578/1695 train_time:154656ms step_avg:98.01ms +step:1579/1695 train_time:154753ms step_avg:98.01ms +step:1580/1695 train_time:154850ms step_avg:98.01ms +step:1581/1695 train_time:154946ms step_avg:98.01ms +step:1582/1695 train_time:155044ms step_avg:98.01ms +step:1583/1695 train_time:155144ms step_avg:98.01ms +step:1584/1695 train_time:155243ms step_avg:98.01ms +step:1585/1695 train_time:155341ms step_avg:98.01ms +step:1586/1695 train_time:155439ms step_avg:98.01ms +step:1587/1695 train_time:155537ms step_avg:98.01ms +step:1588/1695 train_time:155634ms 
step_avg:98.01ms +step:1589/1695 train_time:155731ms step_avg:98.01ms +step:1590/1695 train_time:155829ms step_avg:98.01ms +step:1591/1695 train_time:155926ms step_avg:98.00ms +step:1592/1695 train_time:156023ms step_avg:98.00ms +step:1593/1695 train_time:156121ms step_avg:98.00ms +step:1594/1695 train_time:156220ms step_avg:98.01ms +step:1595/1695 train_time:156320ms step_avg:98.01ms +step:1596/1695 train_time:156419ms step_avg:98.01ms +step:1597/1695 train_time:156517ms step_avg:98.01ms +step:1598/1695 train_time:156615ms step_avg:98.01ms +step:1599/1695 train_time:156713ms step_avg:98.01ms +step:1600/1695 train_time:156810ms step_avg:98.01ms +step:1601/1695 train_time:156908ms step_avg:98.01ms +step:1602/1695 train_time:157005ms step_avg:98.01ms +step:1603/1695 train_time:157103ms step_avg:98.01ms +step:1604/1695 train_time:157201ms step_avg:98.01ms +step:1605/1695 train_time:157300ms step_avg:98.01ms +step:1606/1695 train_time:157399ms step_avg:98.01ms +step:1607/1695 train_time:157497ms step_avg:98.01ms +step:1608/1695 train_time:157595ms step_avg:98.01ms +step:1609/1695 train_time:157693ms step_avg:98.01ms +step:1610/1695 train_time:157791ms step_avg:98.01ms +step:1611/1695 train_time:157889ms step_avg:98.01ms +step:1612/1695 train_time:157986ms step_avg:98.01ms +step:1613/1695 train_time:158083ms step_avg:98.01ms +step:1614/1695 train_time:158181ms step_avg:98.01ms +step:1615/1695 train_time:158279ms step_avg:98.01ms +step:1616/1695 train_time:158378ms step_avg:98.01ms +step:1617/1695 train_time:158477ms step_avg:98.01ms +step:1618/1695 train_time:158575ms step_avg:98.01ms +step:1619/1695 train_time:158672ms step_avg:98.01ms +step:1620/1695 train_time:158771ms step_avg:98.01ms +step:1621/1695 train_time:158869ms step_avg:98.01ms +step:1622/1695 train_time:158967ms step_avg:98.01ms +step:1623/1695 train_time:159064ms step_avg:98.01ms +step:1624/1695 train_time:159161ms step_avg:98.01ms +step:1625/1695 train_time:159259ms step_avg:98.01ms +step:1625/1695 val_loss:3.2885 train_time:159356ms step_avg:98.07ms +step:1626/1695 train_time:159382ms step_avg:98.02ms +step:1627/1695 train_time:159464ms step_avg:98.01ms +step:1628/1695 train_time:159563ms step_avg:98.01ms +step:1629/1695 train_time:159661ms step_avg:98.01ms +step:1630/1695 train_time:159759ms step_avg:98.01ms +step:1631/1695 train_time:159856ms step_avg:98.01ms +step:1632/1695 train_time:159953ms step_avg:98.01ms +step:1633/1695 train_time:160051ms step_avg:98.01ms +step:1634/1695 train_time:160147ms step_avg:98.01ms +step:1635/1695 train_time:160244ms step_avg:98.01ms +step:1636/1695 train_time:160346ms step_avg:98.01ms +step:1637/1695 train_time:160445ms step_avg:98.01ms +step:1638/1695 train_time:160544ms step_avg:98.01ms +step:1639/1695 train_time:160643ms step_avg:98.01ms +step:1640/1695 train_time:160740ms step_avg:98.01ms +step:1641/1695 train_time:160839ms step_avg:98.01ms +step:1642/1695 train_time:160936ms step_avg:98.01ms +step:1643/1695 train_time:161034ms step_avg:98.01ms +step:1644/1695 train_time:161132ms step_avg:98.01ms +step:1645/1695 train_time:161230ms step_avg:98.01ms +step:1646/1695 train_time:161329ms step_avg:98.01ms +step:1647/1695 train_time:161427ms step_avg:98.01ms +step:1648/1695 train_time:161526ms step_avg:98.01ms +step:1649/1695 train_time:161624ms step_avg:98.01ms +step:1650/1695 train_time:161721ms step_avg:98.01ms +step:1651/1695 train_time:161819ms step_avg:98.01ms +step:1652/1695 train_time:161917ms step_avg:98.01ms +step:1653/1695 train_time:162016ms step_avg:98.01ms +step:1654/1695 
train_time:162114ms step_avg:98.01ms +step:1655/1695 train_time:162213ms step_avg:98.01ms +step:1656/1695 train_time:162313ms step_avg:98.01ms +step:1657/1695 train_time:162411ms step_avg:98.02ms +step:1658/1695 train_time:162510ms step_avg:98.02ms +step:1659/1695 train_time:162608ms step_avg:98.02ms +step:1660/1695 train_time:162706ms step_avg:98.02ms +step:1661/1695 train_time:162804ms step_avg:98.02ms +step:1662/1695 train_time:162901ms step_avg:98.02ms +step:1663/1695 train_time:163000ms step_avg:98.02ms +step:1664/1695 train_time:163099ms step_avg:98.02ms +step:1665/1695 train_time:163198ms step_avg:98.02ms +step:1666/1695 train_time:163297ms step_avg:98.02ms +step:1667/1695 train_time:163396ms step_avg:98.02ms +step:1668/1695 train_time:163496ms step_avg:98.02ms +step:1669/1695 train_time:163595ms step_avg:98.02ms +step:1670/1695 train_time:163695ms step_avg:98.02ms +step:1671/1695 train_time:163793ms step_avg:98.02ms +step:1672/1695 train_time:163891ms step_avg:98.02ms +step:1673/1695 train_time:163988ms step_avg:98.02ms +step:1674/1695 train_time:164085ms step_avg:98.02ms +step:1675/1695 train_time:164182ms step_avg:98.02ms +step:1676/1695 train_time:164280ms step_avg:98.02ms +step:1677/1695 train_time:164379ms step_avg:98.02ms +step:1678/1695 train_time:164478ms step_avg:98.02ms +step:1679/1695 train_time:164578ms step_avg:98.02ms +step:1680/1695 train_time:164677ms step_avg:98.02ms +step:1681/1695 train_time:164774ms step_avg:98.02ms +step:1682/1695 train_time:164872ms step_avg:98.02ms +step:1683/1695 train_time:164969ms step_avg:98.02ms +step:1684/1695 train_time:165067ms step_avg:98.02ms +step:1685/1695 train_time:165164ms step_avg:98.02ms +step:1686/1695 train_time:165262ms step_avg:98.02ms +step:1687/1695 train_time:165359ms step_avg:98.02ms +step:1688/1695 train_time:165458ms step_avg:98.02ms +step:1689/1695 train_time:165558ms step_avg:98.02ms +step:1690/1695 train_time:165659ms step_avg:98.02ms +step:1691/1695 train_time:165757ms step_avg:98.02ms +step:1692/1695 train_time:165855ms step_avg:98.02ms +step:1693/1695 train_time:165953ms step_avg:98.02ms +step:1694/1695 train_time:166052ms step_avg:98.02ms +step:1695/1695 train_time:166151ms step_avg:98.02ms +step:1695/1695 val_loss:3.2769 train_time:166247ms step_avg:98.08ms +peak memory allocated: 34505 MiB reserved: 49576 MiB diff --git a/records/082725_FA3/7a492532-c19b-40dd-958d-fec55aa4d3fd.txt b/records/082725_FA3/7a492532-c19b-40dd-958d-fec55aa4d3fd.txt new file mode 100644 index 000000000..7e21a501e --- /dev/null +++ b/records/082725_FA3/7a492532-c19b-40dd-958d-fec55aa4d3fd.txt @@ -0,0 +1,2808 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# 
----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + 
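# (illustration, not in the original: e.g. with M = 1024 and BLOCK_SIZE_M = BLOCK_SIZE_N = 128, num_pid_m = num_pid_n = 8, + # so pids 0..63 map to matrix 0 of the batch, pids 64..127 to matrix 1, and so on) +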
batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + 
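# note: unlike ns_line_1 above, the autotune key below omits K, since ns_line_2 only + # ever runs on square inputs (the wrapper asserts M == K) +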
configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, 
meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
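+ # Round-robin sharding: within each same-shape group, this rank owns params[base_i + rank] + # for each chunk of world_size params; it reduce-scatters that param's gradient, applies the + # momentum + Newton-Schulz update locally, then all-gathers the refreshed params to every rank. + # (worked example, assuming the 8-GPU world size noted in MLP below: of the 24 same-shape + # MLP matrices, rank r owns params r, r + 8, and r + 16 of that group)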
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
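# both moment buffers span only this rank's 1/world_size slice of p, so Adam state is itself sharded across ranks +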
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr 
https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int): + assert input_seq.ndim == 2 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), + reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + 
nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +class EOSBatchFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256): + # Precompute EOS positions once per shard + self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 # pointer into eos_idx (start EOS for next step) + self.pos = 0 # logical stream position within this shard + self.world_size = world_size + def seek(self, pos: int): + # Set pointer to the first EOS >= pos + self.i = np.searchsorted(self.eos_idx, pos) + if self.i >= len(self.eos_idx): + raise StopIteration("Seek past last EOS.") + self.pos = pos + def next_batch(self, batch_size_local: int, seq_len: int): + n = len(self.eos_idx) + if self.i >= n: + raise StopIteration("No more EOS in this shard.") + starts = [[] for _ in range(self.world_size)] + idx = self.i + cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1 + for r in range(self.world_size): + for _ in range(batch_size_local): + start = cur + 1 + target = start + seq_len # need seq_len tokens before next EOS + j = np.searchsorted(self.eos_idx, target) + if j >= n: + raise StopIteration("Insufficient EOS ahead; hit tail of shard.") + starts[r].append(start) + idx = j + cur = self.eos_idx[idx] # next seq must also start at a new doc + advance = self.eos_idx[idx] - self.pos # move stream to the last end + self.pos += advance + self.i = idx + return starts, advance + + +def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token and sequences don't overlap + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert batch_size % world_size == 0, "Batch size must be divisible by world size" + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + + finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None + if align_to_bos: finder.seek(pos) + + while True: + batch_size_local = batch_size // world_size + num_tokens_global = batch_size * seq_len + + if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + + if align_to_bos: + try: + batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len) + start_idxs = batch_starts[rank] + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. 
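+                # Any tokens after the last usable EOS in this shard are dropped, so +                # BOS-aligned sequences never straddle shard boundaries.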
+ tokens, pos = _load_data_shard(next(file_iter)), 0 + finder = EOSBatchFinder(tokens, world_size=world_size) + continue + + bufs = [tokens[s: s + seq_len + 1] for s in start_idxs] + buf = torch.stack(bufs, dim=0) + _inputs = buf[:, :-1] + _targets = buf[:, 1:] + else: + batch_span = num_tokens_global + start_pos_local = pos + rank * (batch_size_local * seq_len) + end_pos_local = start_pos_local + (batch_size_local * seq_len) + + buf = tokens[start_pos_local: end_pos_local + 1] + + _inputs = buf[:-1].view(batch_size_local, seq_len) + _targets = buf[1:].view(batch_size_local, seq_len) + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True) + ) + + pos += batch_span + + if new_params is not None: + # makes it possible for the generator to receive new (batch_size, seq_len) via .send() + new_batch_size, new_seq_len = new_params + assert new_batch_size % world_size == 0, "New batch size must be divisible by world size" + batch_size = new_batch_size + seq_len = new_seq_len + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len: int = 1024 * 2 + train_batch_size: int = 24 * 8 + val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size. + # optimization + num_iterations: int = 1695 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + bandwidth: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc.
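+ +# Batch bookkeeping for the recorded 8xH100 run (see nvidia-smi output below): +# grad_accum_steps = 8 // world_size = 1, so each optimizer step consumes +# grad_accum_steps * train_batch_size * train_seq_len = 1 * 192 * 2048 = 393,216 tokens.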
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr_and_ws(step: int): + x = step / (1 + args.num_iterations) # progress in training + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + ws_idx = int(len(args.ws_schedule) * x) + return lr, args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 60 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +for step in range(warmup_steps): + inputs, targets = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, ws, ws // 2).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## +
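+ +# Timing convention for the log below: torch.cuda.synchronize() and time.perf_counter() +# bracket each validation pass, so val_loss lines do not count toward train_time, and +# step_avg is train_time divided by the number of completed steps.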
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + lr, ws = get_lr_and_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % (world_size * args.val_seq_len) == 0 + val_steps = args.val_tokens // (world_size * args.val_seq_len) + val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, ws, ws // 2) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, ws, ws // 2).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * lr + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Wed Aug 27 03:53:12 2025 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.6 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:00:0B.0 Off | Off | +| N/A 30C P0 114W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:00:0C.0 Off | Off | +| N/A 32C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:00:0D.0 Off | Off | +| N/A 33C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:00:0E.0 Off | Off | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:00:0F.0 Off | Off | +| N/A 30C P0 111W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:00:10.0 Off | Off | +| N/A 34C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:00:11.0 Off | Off | +| N/A 32C P0 111W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:00:12.0 Off | Off | +| N/A 31C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| ++---------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1695 train_time:508ms step_avg:507.99ms +step:2/1695 train_time:531ms step_avg:265.69ms +step:3/1695 train_time:603ms step_avg:200.91ms +step:4/1695 train_time:695ms step_avg:173.68ms +step:5/1695 train_time:787ms step_avg:157.47ms +step:6/1695 train_time:881ms step_avg:146.76ms +step:7/1695 train_time:974ms step_avg:139.13ms +step:8/1695 train_time:1067ms step_avg:133.43ms +step:9/1695 train_time:1161ms step_avg:128.95ms +step:10/1695 train_time:1253ms step_avg:125.34ms +step:11/1695 train_time:1347ms step_avg:122.48ms +step:12/1695 train_time:1445ms step_avg:120.43ms +step:13/1695 train_time:1543ms step_avg:118.70ms +step:14/1695 train_time:1638ms step_avg:117.04ms +step:15/1695 train_time:1732ms step_avg:115.49ms +step:16/1695 train_time:1827ms step_avg:114.19ms +step:17/1695 train_time:1921ms step_avg:113.01ms +step:18/1695 train_time:2015ms step_avg:111.93ms +step:19/1695 train_time:2108ms step_avg:110.95ms +step:20/1695 train_time:2202ms step_avg:110.08ms +step:21/1695 train_time:2295ms step_avg:109.29ms +step:22/1695 train_time:2390ms step_avg:108.63ms 
+step:23/1695 train_time:2486ms step_avg:108.07ms +step:24/1695 train_time:2583ms step_avg:107.61ms +step:25/1695 train_time:2678ms step_avg:107.10ms +step:26/1695 train_time:2771ms step_avg:106.59ms +step:27/1695 train_time:2867ms step_avg:106.18ms +step:28/1695 train_time:2962ms step_avg:105.78ms +step:29/1695 train_time:3055ms step_avg:105.34ms +step:30/1695 train_time:3149ms step_avg:104.96ms +step:31/1695 train_time:3243ms step_avg:104.60ms +step:32/1695 train_time:3336ms step_avg:104.26ms +step:33/1695 train_time:3430ms step_avg:103.94ms +step:34/1695 train_time:3526ms step_avg:103.70ms +step:35/1695 train_time:3622ms step_avg:103.48ms +step:36/1695 train_time:3717ms step_avg:103.24ms +step:37/1695 train_time:3811ms step_avg:102.99ms +step:38/1695 train_time:3906ms step_avg:102.79ms +step:39/1695 train_time:4001ms step_avg:102.58ms +step:40/1695 train_time:4094ms step_avg:102.36ms +step:41/1695 train_time:4188ms step_avg:102.14ms +step:42/1695 train_time:4282ms step_avg:101.96ms +step:43/1695 train_time:4376ms step_avg:101.76ms +step:44/1695 train_time:4470ms step_avg:101.58ms +step:45/1695 train_time:4565ms step_avg:101.45ms +step:46/1695 train_time:4661ms step_avg:101.32ms +step:47/1695 train_time:4754ms step_avg:101.15ms +step:48/1695 train_time:4849ms step_avg:101.01ms +step:49/1695 train_time:4944ms step_avg:100.89ms +step:50/1695 train_time:5039ms step_avg:100.77ms +step:51/1695 train_time:5132ms step_avg:100.63ms +step:52/1695 train_time:5227ms step_avg:100.51ms +step:53/1695 train_time:5322ms step_avg:100.41ms +step:54/1695 train_time:5416ms step_avg:100.29ms +step:55/1695 train_time:5510ms step_avg:100.18ms +step:56/1695 train_time:5606ms step_avg:100.10ms +step:57/1695 train_time:5701ms step_avg:100.02ms +step:58/1695 train_time:5795ms step_avg:99.91ms +step:59/1695 train_time:5889ms step_avg:99.81ms +step:60/1695 train_time:5984ms step_avg:99.73ms +step:61/1695 train_time:6077ms step_avg:99.63ms +step:62/1695 train_time:6171ms step_avg:99.54ms +step:63/1695 train_time:6267ms step_avg:99.47ms +step:64/1695 train_time:6362ms step_avg:99.40ms +step:65/1695 train_time:6457ms step_avg:99.33ms +step:66/1695 train_time:6551ms step_avg:99.26ms +step:67/1695 train_time:6647ms step_avg:99.21ms +step:68/1695 train_time:6743ms step_avg:99.15ms +step:69/1695 train_time:6837ms step_avg:99.08ms +step:70/1695 train_time:6930ms step_avg:99.00ms +step:71/1695 train_time:7025ms step_avg:98.95ms +step:72/1695 train_time:7119ms step_avg:98.88ms +step:73/1695 train_time:7214ms step_avg:98.82ms +step:74/1695 train_time:7308ms step_avg:98.75ms +step:75/1695 train_time:7403ms step_avg:98.70ms +step:76/1695 train_time:7497ms step_avg:98.64ms +step:77/1695 train_time:7591ms step_avg:98.59ms +step:78/1695 train_time:7685ms step_avg:98.53ms +step:79/1695 train_time:7781ms step_avg:98.50ms +step:80/1695 train_time:7876ms step_avg:98.45ms +step:81/1695 train_time:7969ms step_avg:98.39ms +step:82/1695 train_time:8065ms step_avg:98.35ms +step:83/1695 train_time:8160ms step_avg:98.31ms +step:84/1695 train_time:8253ms step_avg:98.25ms +step:85/1695 train_time:8347ms step_avg:98.21ms +step:86/1695 train_time:8442ms step_avg:98.16ms +step:87/1695 train_time:8536ms step_avg:98.11ms +step:88/1695 train_time:8630ms step_avg:98.07ms +step:89/1695 train_time:8725ms step_avg:98.03ms +step:90/1695 train_time:8819ms step_avg:97.99ms +step:91/1695 train_time:8913ms step_avg:97.95ms +step:92/1695 train_time:9008ms step_avg:97.91ms +step:93/1695 train_time:9102ms step_avg:97.87ms +step:94/1695 train_time:9196ms 
step_avg:97.83ms +step:95/1695 train_time:9290ms step_avg:97.79ms +step:96/1695 train_time:9385ms step_avg:97.76ms +step:97/1695 train_time:9480ms step_avg:97.73ms +step:98/1695 train_time:9574ms step_avg:97.69ms +step:99/1695 train_time:9669ms step_avg:97.66ms +step:100/1695 train_time:9764ms step_avg:97.64ms +step:101/1695 train_time:9858ms step_avg:97.61ms +step:102/1695 train_time:9952ms step_avg:97.57ms +step:103/1695 train_time:10047ms step_avg:97.54ms +step:104/1695 train_time:10141ms step_avg:97.51ms +step:105/1695 train_time:10235ms step_avg:97.47ms +step:106/1695 train_time:10329ms step_avg:97.44ms +step:107/1695 train_time:10424ms step_avg:97.42ms +step:108/1695 train_time:10519ms step_avg:97.40ms +step:109/1695 train_time:10613ms step_avg:97.37ms +step:110/1695 train_time:10708ms step_avg:97.34ms +step:111/1695 train_time:10802ms step_avg:97.32ms +step:112/1695 train_time:10896ms step_avg:97.29ms +step:113/1695 train_time:10990ms step_avg:97.25ms +step:114/1695 train_time:11084ms step_avg:97.22ms +step:115/1695 train_time:11178ms step_avg:97.20ms +step:116/1695 train_time:11271ms step_avg:97.17ms +step:117/1695 train_time:11366ms step_avg:97.14ms +step:118/1695 train_time:11460ms step_avg:97.12ms +step:119/1695 train_time:11555ms step_avg:97.10ms +step:120/1695 train_time:11649ms step_avg:97.07ms +step:121/1695 train_time:11744ms step_avg:97.05ms +step:122/1695 train_time:11839ms step_avg:97.04ms +step:123/1695 train_time:11933ms step_avg:97.01ms +step:124/1695 train_time:12028ms step_avg:97.00ms +step:125/1695 train_time:12122ms step_avg:96.98ms +step:125/1695 val_loss:4.3129 train_time:12214ms step_avg:97.71ms +step:126/1695 train_time:12238ms step_avg:97.13ms +step:127/1695 train_time:12320ms step_avg:97.01ms +step:128/1695 train_time:12421ms step_avg:97.04ms +step:129/1695 train_time:12516ms step_avg:97.02ms +step:130/1695 train_time:12609ms step_avg:97.00ms +step:131/1695 train_time:12702ms step_avg:96.96ms +step:132/1695 train_time:12795ms step_avg:96.94ms +step:133/1695 train_time:12889ms step_avg:96.91ms +step:134/1695 train_time:12982ms step_avg:96.88ms +step:135/1695 train_time:13075ms step_avg:96.85ms +step:136/1695 train_time:13168ms step_avg:96.83ms +step:137/1695 train_time:13264ms step_avg:96.82ms +step:138/1695 train_time:13359ms step_avg:96.81ms +step:139/1695 train_time:13455ms step_avg:96.80ms +step:140/1695 train_time:13550ms step_avg:96.79ms +step:141/1695 train_time:13645ms step_avg:96.77ms +step:142/1695 train_time:13739ms step_avg:96.75ms +step:143/1695 train_time:13832ms step_avg:96.73ms +step:144/1695 train_time:13926ms step_avg:96.71ms +step:145/1695 train_time:14019ms step_avg:96.68ms +step:146/1695 train_time:14112ms step_avg:96.66ms +step:147/1695 train_time:14206ms step_avg:96.64ms +step:148/1695 train_time:14302ms step_avg:96.63ms +step:149/1695 train_time:14396ms step_avg:96.62ms +step:150/1695 train_time:14492ms step_avg:96.61ms +step:151/1695 train_time:14586ms step_avg:96.60ms +step:152/1695 train_time:14681ms step_avg:96.59ms +step:153/1695 train_time:14775ms step_avg:96.57ms +step:154/1695 train_time:14869ms step_avg:96.55ms +step:155/1695 train_time:14963ms step_avg:96.53ms +step:156/1695 train_time:15056ms step_avg:96.51ms +step:157/1695 train_time:15149ms step_avg:96.49ms +step:158/1695 train_time:15245ms step_avg:96.49ms +step:159/1695 train_time:15340ms step_avg:96.48ms +step:160/1695 train_time:15434ms step_avg:96.46ms +step:161/1695 train_time:15529ms step_avg:96.45ms +step:162/1695 train_time:15624ms step_avg:96.45ms +step:163/1695 
train_time:15719ms step_avg:96.44ms +step:164/1695 train_time:15813ms step_avg:96.42ms +step:165/1695 train_time:15907ms step_avg:96.40ms +step:166/1695 train_time:16001ms step_avg:96.39ms +step:167/1695 train_time:16095ms step_avg:96.38ms +step:168/1695 train_time:16189ms step_avg:96.36ms +step:169/1695 train_time:16283ms step_avg:96.35ms +step:170/1695 train_time:16378ms step_avg:96.34ms +step:171/1695 train_time:16471ms step_avg:96.32ms +step:172/1695 train_time:16567ms step_avg:96.32ms +step:173/1695 train_time:16951ms step_avg:97.98ms +step:174/1695 train_time:17020ms step_avg:97.82ms +step:175/1695 train_time:17112ms step_avg:97.78ms +step:176/1695 train_time:17206ms step_avg:97.76ms +step:177/1695 train_time:17299ms step_avg:97.73ms +step:178/1695 train_time:17391ms step_avg:97.70ms +step:179/1695 train_time:17484ms step_avg:97.68ms +step:180/1695 train_time:17578ms step_avg:97.65ms +step:181/1695 train_time:17671ms step_avg:97.63ms +step:182/1695 train_time:17764ms step_avg:97.61ms +step:183/1695 train_time:17859ms step_avg:97.59ms +step:184/1695 train_time:17956ms step_avg:97.59ms +step:185/1695 train_time:18052ms step_avg:97.58ms +step:186/1695 train_time:18148ms step_avg:97.57ms +step:187/1695 train_time:18243ms step_avg:97.56ms +step:188/1695 train_time:18336ms step_avg:97.53ms +step:189/1695 train_time:18429ms step_avg:97.51ms +step:190/1695 train_time:18522ms step_avg:97.49ms +step:191/1695 train_time:18615ms step_avg:97.46ms +step:192/1695 train_time:18709ms step_avg:97.44ms +step:193/1695 train_time:18803ms step_avg:97.42ms +step:194/1695 train_time:18898ms step_avg:97.41ms +step:195/1695 train_time:18993ms step_avg:97.40ms +step:196/1695 train_time:19088ms step_avg:97.39ms +step:197/1695 train_time:19184ms step_avg:97.38ms +step:198/1695 train_time:19278ms step_avg:97.37ms +step:199/1695 train_time:19372ms step_avg:97.35ms +step:200/1695 train_time:19466ms step_avg:97.33ms +step:201/1695 train_time:19560ms step_avg:97.31ms +step:202/1695 train_time:19652ms step_avg:97.29ms +step:203/1695 train_time:19747ms step_avg:97.28ms +step:204/1695 train_time:19841ms step_avg:97.26ms +step:205/1695 train_time:19935ms step_avg:97.24ms +step:206/1695 train_time:20030ms step_avg:97.23ms +step:207/1695 train_time:20124ms step_avg:97.22ms +step:208/1695 train_time:20219ms step_avg:97.21ms +step:209/1695 train_time:20312ms step_avg:97.19ms +step:210/1695 train_time:20406ms step_avg:97.17ms +step:211/1695 train_time:20500ms step_avg:97.16ms +step:212/1695 train_time:20593ms step_avg:97.14ms +step:213/1695 train_time:20686ms step_avg:97.12ms +step:214/1695 train_time:20781ms step_avg:97.11ms +step:215/1695 train_time:20874ms step_avg:97.09ms +step:216/1695 train_time:20969ms step_avg:97.08ms +step:217/1695 train_time:21063ms step_avg:97.07ms +step:218/1695 train_time:21158ms step_avg:97.05ms +step:219/1695 train_time:21252ms step_avg:97.04ms +step:220/1695 train_time:21348ms step_avg:97.04ms +step:221/1695 train_time:21442ms step_avg:97.02ms +step:222/1695 train_time:21535ms step_avg:97.01ms +step:223/1695 train_time:21629ms step_avg:96.99ms +step:224/1695 train_time:21723ms step_avg:96.98ms +step:225/1695 train_time:21817ms step_avg:96.97ms +step:226/1695 train_time:21911ms step_avg:96.95ms +step:227/1695 train_time:22005ms step_avg:96.94ms +step:228/1695 train_time:22099ms step_avg:96.93ms +step:229/1695 train_time:22193ms step_avg:96.91ms +step:230/1695 train_time:22287ms step_avg:96.90ms +step:231/1695 train_time:22382ms step_avg:96.89ms +step:232/1695 train_time:22476ms step_avg:96.88ms 
+step:233/1695 train_time:22570ms step_avg:96.87ms +step:234/1695 train_time:22663ms step_avg:96.85ms +step:235/1695 train_time:22756ms step_avg:96.83ms +step:236/1695 train_time:22850ms step_avg:96.82ms +step:237/1695 train_time:22945ms step_avg:96.81ms +step:238/1695 train_time:23040ms step_avg:96.81ms +step:239/1695 train_time:23134ms step_avg:96.79ms +step:240/1695 train_time:23228ms step_avg:96.78ms +step:241/1695 train_time:23323ms step_avg:96.78ms +step:242/1695 train_time:23418ms step_avg:96.77ms +step:243/1695 train_time:23512ms step_avg:96.76ms +step:244/1695 train_time:23606ms step_avg:96.75ms +step:245/1695 train_time:23699ms step_avg:96.73ms +step:246/1695 train_time:23792ms step_avg:96.72ms +step:247/1695 train_time:23886ms step_avg:96.71ms +step:248/1695 train_time:23981ms step_avg:96.70ms +step:249/1695 train_time:24075ms step_avg:96.69ms +step:250/1695 train_time:24169ms step_avg:96.68ms +step:250/1695 val_loss:3.9787 train_time:24262ms step_avg:97.05ms +step:251/1695 train_time:24286ms step_avg:96.76ms +step:252/1695 train_time:24364ms step_avg:96.68ms +step:253/1695 train_time:24461ms step_avg:96.68ms +step:254/1695 train_time:24555ms step_avg:96.67ms +step:255/1695 train_time:24648ms step_avg:96.66ms +step:256/1695 train_time:24742ms step_avg:96.65ms +step:257/1695 train_time:24835ms step_avg:96.63ms +step:258/1695 train_time:24928ms step_avg:96.62ms +step:259/1695 train_time:25021ms step_avg:96.61ms +step:260/1695 train_time:25114ms step_avg:96.59ms +step:261/1695 train_time:25208ms step_avg:96.58ms +step:262/1695 train_time:25304ms step_avg:96.58ms +step:263/1695 train_time:25401ms step_avg:96.58ms +step:264/1695 train_time:25495ms step_avg:96.57ms +step:265/1695 train_time:25590ms step_avg:96.57ms +step:266/1695 train_time:25684ms step_avg:96.56ms +step:267/1695 train_time:25777ms step_avg:96.54ms +step:268/1695 train_time:25870ms step_avg:96.53ms +step:269/1695 train_time:25964ms step_avg:96.52ms +step:270/1695 train_time:26058ms step_avg:96.51ms +step:271/1695 train_time:26151ms step_avg:96.50ms +step:272/1695 train_time:26245ms step_avg:96.49ms +step:273/1695 train_time:26341ms step_avg:96.49ms +step:274/1695 train_time:26436ms step_avg:96.48ms +step:275/1695 train_time:26531ms step_avg:96.48ms +step:276/1695 train_time:26626ms step_avg:96.47ms +step:277/1695 train_time:26720ms step_avg:96.46ms +step:278/1695 train_time:26813ms step_avg:96.45ms +step:279/1695 train_time:26906ms step_avg:96.44ms +step:280/1695 train_time:26999ms step_avg:96.43ms +step:281/1695 train_time:27092ms step_avg:96.41ms +step:282/1695 train_time:27187ms step_avg:96.41ms +step:283/1695 train_time:27282ms step_avg:96.40ms +step:284/1695 train_time:27376ms step_avg:96.40ms +step:285/1695 train_time:27471ms step_avg:96.39ms +step:286/1695 train_time:27566ms step_avg:96.39ms +step:287/1695 train_time:27661ms step_avg:96.38ms +step:288/1695 train_time:27754ms step_avg:96.37ms +step:289/1695 train_time:27849ms step_avg:96.36ms +step:290/1695 train_time:27943ms step_avg:96.35ms +step:291/1695 train_time:28037ms step_avg:96.35ms +step:292/1695 train_time:28130ms step_avg:96.33ms +step:293/1695 train_time:28224ms step_avg:96.33ms +step:294/1695 train_time:28318ms step_avg:96.32ms +step:295/1695 train_time:28412ms step_avg:96.31ms +step:296/1695 train_time:28507ms step_avg:96.31ms +step:297/1695 train_time:28603ms step_avg:96.30ms +step:298/1695 train_time:28697ms step_avg:96.30ms +step:299/1695 train_time:28791ms step_avg:96.29ms +step:300/1695 train_time:28885ms step_avg:96.28ms +step:301/1695 
train_time:28979ms step_avg:96.27ms +step:302/1695 train_time:29072ms step_avg:96.27ms +step:303/1695 train_time:29166ms step_avg:96.26ms +step:304/1695 train_time:29261ms step_avg:96.25ms +step:305/1695 train_time:29355ms step_avg:96.25ms +step:306/1695 train_time:29449ms step_avg:96.24ms +step:307/1695 train_time:29544ms step_avg:96.23ms +step:308/1695 train_time:29638ms step_avg:96.23ms +step:309/1695 train_time:29732ms step_avg:96.22ms +step:310/1695 train_time:29827ms step_avg:96.21ms +step:311/1695 train_time:29921ms step_avg:96.21ms +step:312/1695 train_time:30014ms step_avg:96.20ms +step:313/1695 train_time:30108ms step_avg:96.19ms +step:314/1695 train_time:30203ms step_avg:96.19ms +step:315/1695 train_time:30297ms step_avg:96.18ms +step:316/1695 train_time:30391ms step_avg:96.17ms +step:317/1695 train_time:30486ms step_avg:96.17ms +step:318/1695 train_time:30580ms step_avg:96.16ms +step:319/1695 train_time:30674ms step_avg:96.16ms +step:320/1695 train_time:30769ms step_avg:96.15ms +step:321/1695 train_time:30864ms step_avg:96.15ms +step:322/1695 train_time:30957ms step_avg:96.14ms +step:323/1695 train_time:31050ms step_avg:96.13ms +step:324/1695 train_time:31145ms step_avg:96.13ms +step:325/1695 train_time:31239ms step_avg:96.12ms +step:326/1695 train_time:31333ms step_avg:96.11ms +step:327/1695 train_time:31428ms step_avg:96.11ms +step:328/1695 train_time:31523ms step_avg:96.11ms +step:329/1695 train_time:31616ms step_avg:96.10ms +step:330/1695 train_time:31710ms step_avg:96.09ms +step:331/1695 train_time:31805ms step_avg:96.09ms +step:332/1695 train_time:31899ms step_avg:96.08ms +step:333/1695 train_time:31993ms step_avg:96.07ms +step:334/1695 train_time:32087ms step_avg:96.07ms +step:335/1695 train_time:32180ms step_avg:96.06ms +step:336/1695 train_time:32274ms step_avg:96.05ms +step:337/1695 train_time:32368ms step_avg:96.05ms +step:338/1695 train_time:32468ms step_avg:96.06ms +step:339/1695 train_time:32561ms step_avg:96.05ms +step:340/1695 train_time:32655ms step_avg:96.04ms +step:341/1695 train_time:32748ms step_avg:96.04ms +step:342/1695 train_time:32839ms step_avg:96.02ms +step:343/1695 train_time:32933ms step_avg:96.01ms +step:344/1695 train_time:33027ms step_avg:96.01ms +step:345/1695 train_time:33355ms step_avg:96.68ms +step:346/1695 train_time:33456ms step_avg:96.69ms +step:347/1695 train_time:33548ms step_avg:96.68ms +step:348/1695 train_time:33642ms step_avg:96.67ms +step:349/1695 train_time:33735ms step_avg:96.66ms +step:350/1695 train_time:33828ms step_avg:96.65ms +step:351/1695 train_time:33921ms step_avg:96.64ms +step:352/1695 train_time:34014ms step_avg:96.63ms +step:353/1695 train_time:34106ms step_avg:96.62ms +step:354/1695 train_time:34200ms step_avg:96.61ms +step:355/1695 train_time:34296ms step_avg:96.61ms +step:356/1695 train_time:34393ms step_avg:96.61ms +step:357/1695 train_time:34490ms step_avg:96.61ms +step:358/1695 train_time:34584ms step_avg:96.60ms +step:359/1695 train_time:34678ms step_avg:96.60ms +step:360/1695 train_time:34771ms step_avg:96.59ms +step:361/1695 train_time:34865ms step_avg:96.58ms +step:362/1695 train_time:34959ms step_avg:96.57ms +step:363/1695 train_time:35051ms step_avg:96.56ms +step:364/1695 train_time:35145ms step_avg:96.55ms +step:365/1695 train_time:35238ms step_avg:96.54ms +step:366/1695 train_time:35332ms step_avg:96.54ms +step:367/1695 train_time:35428ms step_avg:96.53ms +step:368/1695 train_time:35523ms step_avg:96.53ms +step:369/1695 train_time:35616ms step_avg:96.52ms +step:370/1695 train_time:35710ms step_avg:96.51ms 
+step:371/1695 train_time:35803ms step_avg:96.51ms +step:372/1695 train_time:35897ms step_avg:96.50ms +step:373/1695 train_time:35990ms step_avg:96.49ms +step:374/1695 train_time:36083ms step_avg:96.48ms +step:375/1695 train_time:36176ms step_avg:96.47ms +step:375/1695 val_loss:3.8232 train_time:36268ms step_avg:96.71ms +step:376/1695 train_time:36292ms step_avg:96.52ms +step:377/1695 train_time:36372ms step_avg:96.48ms +step:378/1695 train_time:36470ms step_avg:96.48ms +step:379/1695 train_time:36565ms step_avg:96.48ms +step:380/1695 train_time:36659ms step_avg:96.47ms +step:381/1695 train_time:36752ms step_avg:96.46ms +step:382/1695 train_time:36845ms step_avg:96.45ms +step:383/1695 train_time:36938ms step_avg:96.44ms +step:384/1695 train_time:37031ms step_avg:96.43ms +step:385/1695 train_time:37124ms step_avg:96.43ms +step:386/1695 train_time:37218ms step_avg:96.42ms +step:387/1695 train_time:37313ms step_avg:96.42ms +step:388/1695 train_time:37410ms step_avg:96.42ms +step:389/1695 train_time:37505ms step_avg:96.41ms +step:390/1695 train_time:37599ms step_avg:96.41ms +step:391/1695 train_time:37693ms step_avg:96.40ms +step:392/1695 train_time:37787ms step_avg:96.40ms +step:393/1695 train_time:37880ms step_avg:96.39ms +step:394/1695 train_time:37973ms step_avg:96.38ms +step:395/1695 train_time:38067ms step_avg:96.37ms +step:396/1695 train_time:38160ms step_avg:96.36ms +step:397/1695 train_time:38254ms step_avg:96.36ms +step:398/1695 train_time:38350ms step_avg:96.36ms +step:399/1695 train_time:38445ms step_avg:96.35ms +step:400/1695 train_time:38540ms step_avg:96.35ms +step:401/1695 train_time:38633ms step_avg:96.34ms +step:402/1695 train_time:38727ms step_avg:96.34ms +step:403/1695 train_time:38822ms step_avg:96.33ms +step:404/1695 train_time:38915ms step_avg:96.32ms +step:405/1695 train_time:39008ms step_avg:96.32ms +step:406/1695 train_time:39101ms step_avg:96.31ms +step:407/1695 train_time:39194ms step_avg:96.30ms +step:408/1695 train_time:39289ms step_avg:96.30ms +step:409/1695 train_time:39384ms step_avg:96.29ms +step:410/1695 train_time:39478ms step_avg:96.29ms +step:411/1695 train_time:39572ms step_avg:96.28ms +step:412/1695 train_time:39667ms step_avg:96.28ms +step:413/1695 train_time:39762ms step_avg:96.28ms +step:414/1695 train_time:39856ms step_avg:96.27ms +step:415/1695 train_time:39949ms step_avg:96.26ms +step:416/1695 train_time:40043ms step_avg:96.26ms +step:417/1695 train_time:40137ms step_avg:96.25ms +step:418/1695 train_time:40230ms step_avg:96.24ms +step:419/1695 train_time:40324ms step_avg:96.24ms +step:420/1695 train_time:40419ms step_avg:96.24ms +step:421/1695 train_time:40513ms step_avg:96.23ms +step:422/1695 train_time:40607ms step_avg:96.22ms +step:423/1695 train_time:40702ms step_avg:96.22ms +step:424/1695 train_time:40795ms step_avg:96.21ms +step:425/1695 train_time:40889ms step_avg:96.21ms +step:426/1695 train_time:40984ms step_avg:96.21ms +step:427/1695 train_time:41077ms step_avg:96.20ms +step:428/1695 train_time:41170ms step_avg:96.19ms +step:429/1695 train_time:41264ms step_avg:96.19ms +step:430/1695 train_time:41359ms step_avg:96.18ms +step:431/1695 train_time:41453ms step_avg:96.18ms +step:432/1695 train_time:41547ms step_avg:96.17ms +step:433/1695 train_time:41641ms step_avg:96.17ms +step:434/1695 train_time:41734ms step_avg:96.16ms +step:435/1695 train_time:41828ms step_avg:96.16ms +step:436/1695 train_time:41922ms step_avg:96.15ms +step:437/1695 train_time:42016ms step_avg:96.15ms +step:438/1695 train_time:42110ms step_avg:96.14ms +step:439/1695 
train_time:42203ms step_avg:96.13ms +step:440/1695 train_time:42297ms step_avg:96.13ms +step:441/1695 train_time:42392ms step_avg:96.13ms +step:442/1695 train_time:42487ms step_avg:96.12ms +step:443/1695 train_time:42583ms step_avg:96.12ms +step:444/1695 train_time:42676ms step_avg:96.12ms +step:445/1695 train_time:42770ms step_avg:96.11ms +step:446/1695 train_time:42864ms step_avg:96.11ms +step:447/1695 train_time:42957ms step_avg:96.10ms +step:448/1695 train_time:43050ms step_avg:96.09ms +step:449/1695 train_time:43144ms step_avg:96.09ms +step:450/1695 train_time:43237ms step_avg:96.08ms +step:451/1695 train_time:43331ms step_avg:96.08ms +step:452/1695 train_time:43426ms step_avg:96.07ms +step:453/1695 train_time:43519ms step_avg:96.07ms +step:454/1695 train_time:43613ms step_avg:96.06ms +step:455/1695 train_time:43707ms step_avg:96.06ms +step:456/1695 train_time:43801ms step_avg:96.06ms +step:457/1695 train_time:43895ms step_avg:96.05ms +step:458/1695 train_time:43990ms step_avg:96.05ms +step:459/1695 train_time:44084ms step_avg:96.04ms +step:460/1695 train_time:44178ms step_avg:96.04ms +step:461/1695 train_time:44271ms step_avg:96.03ms +step:462/1695 train_time:44365ms step_avg:96.03ms +step:463/1695 train_time:44459ms step_avg:96.02ms +step:464/1695 train_time:44553ms step_avg:96.02ms +step:465/1695 train_time:44647ms step_avg:96.02ms +step:466/1695 train_time:44742ms step_avg:96.01ms +step:467/1695 train_time:44836ms step_avg:96.01ms +step:468/1695 train_time:44930ms step_avg:96.00ms +step:469/1695 train_time:45025ms step_avg:96.00ms +step:470/1695 train_time:45119ms step_avg:96.00ms +step:471/1695 train_time:45212ms step_avg:95.99ms +step:472/1695 train_time:45306ms step_avg:95.99ms +step:473/1695 train_time:45401ms step_avg:95.98ms +step:474/1695 train_time:45494ms step_avg:95.98ms +step:475/1695 train_time:45589ms step_avg:95.98ms +step:476/1695 train_time:45683ms step_avg:95.97ms +step:477/1695 train_time:45777ms step_avg:95.97ms +step:478/1695 train_time:45870ms step_avg:95.96ms +step:479/1695 train_time:45964ms step_avg:95.96ms +step:480/1695 train_time:46059ms step_avg:95.96ms +step:481/1695 train_time:46153ms step_avg:95.95ms +step:482/1695 train_time:46247ms step_avg:95.95ms +step:483/1695 train_time:46341ms step_avg:95.94ms +step:484/1695 train_time:46435ms step_avg:95.94ms +step:485/1695 train_time:46529ms step_avg:95.94ms +step:486/1695 train_time:46623ms step_avg:95.93ms +step:487/1695 train_time:46718ms step_avg:95.93ms +step:488/1695 train_time:46811ms step_avg:95.92ms +step:489/1695 train_time:46905ms step_avg:95.92ms +step:490/1695 train_time:46998ms step_avg:95.91ms +step:491/1695 train_time:47092ms step_avg:95.91ms +step:492/1695 train_time:47186ms step_avg:95.91ms +step:493/1695 train_time:47280ms step_avg:95.90ms +step:494/1695 train_time:47374ms step_avg:95.90ms +step:495/1695 train_time:47468ms step_avg:95.89ms +step:496/1695 train_time:47562ms step_avg:95.89ms +step:497/1695 train_time:47657ms step_avg:95.89ms +step:498/1695 train_time:47751ms step_avg:95.88ms +step:499/1695 train_time:47845ms step_avg:95.88ms +step:500/1695 train_time:47938ms step_avg:95.88ms +step:500/1695 val_loss:3.7202 train_time:48030ms step_avg:96.06ms +step:501/1695 train_time:48054ms step_avg:95.92ms +step:502/1695 train_time:48133ms step_avg:95.88ms +step:503/1695 train_time:48232ms step_avg:95.89ms +step:504/1695 train_time:48327ms step_avg:95.89ms +step:505/1695 train_time:48419ms step_avg:95.88ms +step:506/1695 train_time:48513ms step_avg:95.88ms +step:507/1695 train_time:48607ms 
step_avg:95.87ms +step:508/1695 train_time:48699ms step_avg:95.86ms +step:509/1695 train_time:48792ms step_avg:95.86ms +step:510/1695 train_time:48885ms step_avg:95.85ms +step:511/1695 train_time:48979ms step_avg:95.85ms +step:512/1695 train_time:49076ms step_avg:95.85ms +step:513/1695 train_time:49173ms step_avg:95.85ms +step:514/1695 train_time:49268ms step_avg:95.85ms +step:515/1695 train_time:49363ms step_avg:95.85ms +step:516/1695 train_time:49456ms step_avg:95.84ms +step:517/1695 train_time:49549ms step_avg:95.84ms +step:518/1695 train_time:49643ms step_avg:95.84ms +step:519/1695 train_time:49968ms step_avg:96.28ms +step:520/1695 train_time:50168ms step_avg:96.48ms +step:521/1695 train_time:50261ms step_avg:96.47ms +step:522/1695 train_time:50353ms step_avg:96.46ms +step:523/1695 train_time:50446ms step_avg:96.46ms +step:524/1695 train_time:50539ms step_avg:96.45ms +step:525/1695 train_time:50632ms step_avg:96.44ms +step:526/1695 train_time:50725ms step_avg:96.43ms +step:527/1695 train_time:50817ms step_avg:96.43ms +step:528/1695 train_time:50910ms step_avg:96.42ms +step:529/1695 train_time:51008ms step_avg:96.42ms +step:530/1695 train_time:51106ms step_avg:96.43ms +step:531/1695 train_time:51202ms step_avg:96.43ms +step:532/1695 train_time:51296ms step_avg:96.42ms +step:533/1695 train_time:51389ms step_avg:96.41ms +step:534/1695 train_time:51482ms step_avg:96.41ms +step:535/1695 train_time:51575ms step_avg:96.40ms +step:536/1695 train_time:51668ms step_avg:96.40ms +step:537/1695 train_time:51761ms step_avg:96.39ms +step:538/1695 train_time:51854ms step_avg:96.38ms +step:539/1695 train_time:51949ms step_avg:96.38ms +step:540/1695 train_time:52044ms step_avg:96.38ms +step:541/1695 train_time:52139ms step_avg:96.38ms +step:542/1695 train_time:52234ms step_avg:96.37ms +step:543/1695 train_time:52328ms step_avg:96.37ms +step:544/1695 train_time:52421ms step_avg:96.36ms +step:545/1695 train_time:52515ms step_avg:96.36ms +step:546/1695 train_time:52609ms step_avg:96.35ms +step:547/1695 train_time:52702ms step_avg:96.35ms +step:548/1695 train_time:52795ms step_avg:96.34ms +step:549/1695 train_time:52889ms step_avg:96.34ms +step:550/1695 train_time:52983ms step_avg:96.33ms +step:551/1695 train_time:53077ms step_avg:96.33ms +step:552/1695 train_time:53172ms step_avg:96.33ms +step:553/1695 train_time:53267ms step_avg:96.32ms +step:554/1695 train_time:53361ms step_avg:96.32ms +step:555/1695 train_time:53454ms step_avg:96.31ms +step:556/1695 train_time:53548ms step_avg:96.31ms +step:557/1695 train_time:53640ms step_avg:96.30ms +step:558/1695 train_time:53734ms step_avg:96.30ms +step:559/1695 train_time:53827ms step_avg:96.29ms +step:560/1695 train_time:53920ms step_avg:96.29ms +step:561/1695 train_time:54015ms step_avg:96.28ms +step:562/1695 train_time:54111ms step_avg:96.28ms +step:563/1695 train_time:54207ms step_avg:96.28ms +step:564/1695 train_time:54301ms step_avg:96.28ms +step:565/1695 train_time:54395ms step_avg:96.27ms +step:566/1695 train_time:54488ms step_avg:96.27ms +step:567/1695 train_time:54583ms step_avg:96.27ms +step:568/1695 train_time:54678ms step_avg:96.26ms +step:569/1695 train_time:54774ms step_avg:96.26ms +step:570/1695 train_time:54869ms step_avg:96.26ms +step:571/1695 train_time:54965ms step_avg:96.26ms +step:572/1695 train_time:55061ms step_avg:96.26ms +step:573/1695 train_time:55157ms step_avg:96.26ms +step:574/1695 train_time:55253ms step_avg:96.26ms +step:575/1695 train_time:55349ms step_avg:96.26ms +step:576/1695 train_time:55445ms step_avg:96.26ms +step:577/1695 
train_time:55540ms step_avg:96.26ms +step:578/1695 train_time:55636ms step_avg:96.26ms +step:579/1695 train_time:55732ms step_avg:96.26ms +step:580/1695 train_time:55828ms step_avg:96.25ms +step:581/1695 train_time:55923ms step_avg:96.25ms +step:582/1695 train_time:56018ms step_avg:96.25ms +step:583/1695 train_time:56116ms step_avg:96.25ms +step:584/1695 train_time:56213ms step_avg:96.25ms +step:585/1695 train_time:56310ms step_avg:96.26ms +step:586/1695 train_time:56407ms step_avg:96.26ms +step:587/1695 train_time:56502ms step_avg:96.26ms +step:588/1695 train_time:56597ms step_avg:96.25ms +step:589/1695 train_time:56693ms step_avg:96.25ms +step:590/1695 train_time:56789ms step_avg:96.25ms +step:591/1695 train_time:56885ms step_avg:96.25ms +step:592/1695 train_time:56981ms step_avg:96.25ms +step:593/1695 train_time:57077ms step_avg:96.25ms +step:594/1695 train_time:57173ms step_avg:96.25ms +step:595/1695 train_time:57270ms step_avg:96.25ms +step:596/1695 train_time:57365ms step_avg:96.25ms +step:597/1695 train_time:57461ms step_avg:96.25ms +step:598/1695 train_time:57556ms step_avg:96.25ms +step:599/1695 train_time:57653ms step_avg:96.25ms +step:600/1695 train_time:57749ms step_avg:96.25ms +step:601/1695 train_time:57845ms step_avg:96.25ms +step:602/1695 train_time:57941ms step_avg:96.25ms +step:603/1695 train_time:58037ms step_avg:96.25ms +step:604/1695 train_time:58134ms step_avg:96.25ms +step:605/1695 train_time:58230ms step_avg:96.25ms +step:606/1695 train_time:58326ms step_avg:96.25ms +step:607/1695 train_time:58421ms step_avg:96.25ms +step:608/1695 train_time:58518ms step_avg:96.25ms +step:609/1695 train_time:58614ms step_avg:96.25ms +step:610/1695 train_time:58711ms step_avg:96.25ms +step:611/1695 train_time:58808ms step_avg:96.25ms +step:612/1695 train_time:58904ms step_avg:96.25ms +step:613/1695 train_time:59000ms step_avg:96.25ms +step:614/1695 train_time:59095ms step_avg:96.25ms +step:615/1695 train_time:59192ms step_avg:96.25ms +step:616/1695 train_time:59289ms step_avg:96.25ms +step:617/1695 train_time:59385ms step_avg:96.25ms +step:618/1695 train_time:59481ms step_avg:96.25ms +step:619/1695 train_time:59576ms step_avg:96.25ms +step:620/1695 train_time:59673ms step_avg:96.25ms +step:621/1695 train_time:59769ms step_avg:96.25ms +step:622/1695 train_time:59865ms step_avg:96.25ms +step:623/1695 train_time:59960ms step_avg:96.24ms +step:624/1695 train_time:60056ms step_avg:96.24ms +step:625/1695 train_time:60152ms step_avg:96.24ms +step:625/1695 val_loss:3.6216 train_time:60246ms step_avg:96.39ms +step:626/1695 train_time:60272ms step_avg:96.28ms +step:627/1695 train_time:60351ms step_avg:96.25ms +step:628/1695 train_time:60448ms step_avg:96.25ms +step:629/1695 train_time:60544ms step_avg:96.25ms +step:630/1695 train_time:60638ms step_avg:96.25ms +step:631/1695 train_time:60733ms step_avg:96.25ms +step:632/1695 train_time:60828ms step_avg:96.25ms +step:633/1695 train_time:60923ms step_avg:96.25ms +step:634/1695 train_time:61018ms step_avg:96.24ms +step:635/1695 train_time:61112ms step_avg:96.24ms +step:636/1695 train_time:61210ms step_avg:96.24ms +step:637/1695 train_time:61308ms step_avg:96.25ms +step:638/1695 train_time:61406ms step_avg:96.25ms +step:639/1695 train_time:61503ms step_avg:96.25ms +step:640/1695 train_time:61598ms step_avg:96.25ms +step:641/1695 train_time:61693ms step_avg:96.24ms +step:642/1695 train_time:61788ms step_avg:96.24ms +step:643/1695 train_time:61883ms step_avg:96.24ms +step:644/1695 train_time:61977ms step_avg:96.24ms +step:645/1695 train_time:62072ms 
step_avg:96.24ms +step:646/1695 train_time:62168ms step_avg:96.23ms +step:647/1695 train_time:62264ms step_avg:96.24ms +step:648/1695 train_time:62361ms step_avg:96.24ms +step:649/1695 train_time:62458ms step_avg:96.24ms +step:650/1695 train_time:62554ms step_avg:96.24ms +step:651/1695 train_time:62651ms step_avg:96.24ms +step:652/1695 train_time:62747ms step_avg:96.24ms +step:653/1695 train_time:62843ms step_avg:96.24ms +step:654/1695 train_time:62937ms step_avg:96.23ms +step:655/1695 train_time:63034ms step_avg:96.23ms +step:656/1695 train_time:63130ms step_avg:96.24ms +step:657/1695 train_time:63228ms step_avg:96.24ms +step:658/1695 train_time:63324ms step_avg:96.24ms +step:659/1695 train_time:63420ms step_avg:96.24ms +step:660/1695 train_time:63515ms step_avg:96.24ms +step:661/1695 train_time:63612ms step_avg:96.24ms +step:662/1695 train_time:63707ms step_avg:96.23ms +step:663/1695 train_time:63803ms step_avg:96.23ms +step:664/1695 train_time:63897ms step_avg:96.23ms +step:665/1695 train_time:63993ms step_avg:96.23ms +step:666/1695 train_time:64089ms step_avg:96.23ms +step:667/1695 train_time:64186ms step_avg:96.23ms +step:668/1695 train_time:64282ms step_avg:96.23ms +step:669/1695 train_time:64377ms step_avg:96.23ms +step:670/1695 train_time:64473ms step_avg:96.23ms +step:671/1695 train_time:64570ms step_avg:96.23ms +step:672/1695 train_time:64666ms step_avg:96.23ms +step:673/1695 train_time:64762ms step_avg:96.23ms +step:674/1695 train_time:64857ms step_avg:96.23ms +step:675/1695 train_time:64954ms step_avg:96.23ms +step:676/1695 train_time:65048ms step_avg:96.23ms +step:677/1695 train_time:65144ms step_avg:96.22ms +step:678/1695 train_time:65238ms step_avg:96.22ms +step:679/1695 train_time:65335ms step_avg:96.22ms +step:680/1695 train_time:65431ms step_avg:96.22ms +step:681/1695 train_time:65528ms step_avg:96.22ms +step:682/1695 train_time:65624ms step_avg:96.22ms +step:683/1695 train_time:65720ms step_avg:96.22ms +step:684/1695 train_time:65815ms step_avg:96.22ms +step:685/1695 train_time:65911ms step_avg:96.22ms +step:686/1695 train_time:66007ms step_avg:96.22ms +step:687/1695 train_time:66103ms step_avg:96.22ms +step:688/1695 train_time:66199ms step_avg:96.22ms +step:689/1695 train_time:66294ms step_avg:96.22ms +step:690/1695 train_time:66390ms step_avg:96.22ms +step:691/1695 train_time:66847ms step_avg:96.74ms +step:692/1695 train_time:66917ms step_avg:96.70ms +step:693/1695 train_time:67011ms step_avg:96.70ms +step:694/1695 train_time:67106ms step_avg:96.69ms +step:695/1695 train_time:67201ms step_avg:96.69ms +step:696/1695 train_time:67296ms step_avg:96.69ms +step:697/1695 train_time:67392ms step_avg:96.69ms +step:698/1695 train_time:67487ms step_avg:96.69ms +step:699/1695 train_time:67581ms step_avg:96.68ms +step:700/1695 train_time:67676ms step_avg:96.68ms +step:701/1695 train_time:67776ms step_avg:96.69ms +step:702/1695 train_time:67879ms step_avg:96.69ms +step:703/1695 train_time:67976ms step_avg:96.69ms +step:704/1695 train_time:68073ms step_avg:96.69ms +step:705/1695 train_time:68169ms step_avg:96.69ms +step:706/1695 train_time:68266ms step_avg:96.69ms +step:707/1695 train_time:68361ms step_avg:96.69ms +step:708/1695 train_time:68455ms step_avg:96.69ms +step:709/1695 train_time:68550ms step_avg:96.69ms +step:710/1695 train_time:68645ms step_avg:96.68ms +step:711/1695 train_time:68741ms step_avg:96.68ms +step:712/1695 train_time:68838ms step_avg:96.68ms +step:713/1695 train_time:68936ms step_avg:96.68ms +step:714/1695 train_time:69034ms step_avg:96.69ms +step:715/1695 
train_time:69130ms step_avg:96.69ms +step:716/1695 train_time:69226ms step_avg:96.68ms +step:717/1695 train_time:69321ms step_avg:96.68ms +step:718/1695 train_time:69416ms step_avg:96.68ms +step:719/1695 train_time:69512ms step_avg:96.68ms +step:720/1695 train_time:69607ms step_avg:96.68ms +step:721/1695 train_time:69702ms step_avg:96.67ms +step:722/1695 train_time:69799ms step_avg:96.67ms +step:723/1695 train_time:69895ms step_avg:96.67ms +step:724/1695 train_time:69992ms step_avg:96.67ms +step:725/1695 train_time:70088ms step_avg:96.67ms +step:726/1695 train_time:70185ms step_avg:96.67ms +step:727/1695 train_time:70280ms step_avg:96.67ms +step:728/1695 train_time:70375ms step_avg:96.67ms +step:729/1695 train_time:70471ms step_avg:96.67ms +step:730/1695 train_time:70566ms step_avg:96.67ms +step:731/1695 train_time:70661ms step_avg:96.66ms +step:732/1695 train_time:70757ms step_avg:96.66ms +step:733/1695 train_time:70853ms step_avg:96.66ms +step:734/1695 train_time:70951ms step_avg:96.66ms +step:735/1695 train_time:71049ms step_avg:96.66ms +step:736/1695 train_time:71145ms step_avg:96.66ms +step:737/1695 train_time:71240ms step_avg:96.66ms +step:738/1695 train_time:71336ms step_avg:96.66ms +step:739/1695 train_time:71432ms step_avg:96.66ms +step:740/1695 train_time:71528ms step_avg:96.66ms +step:741/1695 train_time:71623ms step_avg:96.66ms +step:742/1695 train_time:71719ms step_avg:96.66ms +step:743/1695 train_time:71814ms step_avg:96.65ms +step:744/1695 train_time:71910ms step_avg:96.65ms +step:745/1695 train_time:72007ms step_avg:96.65ms +step:746/1695 train_time:72103ms step_avg:96.65ms +step:747/1695 train_time:72199ms step_avg:96.65ms +step:748/1695 train_time:72295ms step_avg:96.65ms +step:749/1695 train_time:72391ms step_avg:96.65ms +step:750/1695 train_time:72488ms step_avg:96.65ms +step:750/1695 val_loss:3.5671 train_time:72581ms step_avg:96.77ms +step:751/1695 train_time:72608ms step_avg:96.68ms +step:752/1695 train_time:72687ms step_avg:96.66ms +step:753/1695 train_time:72784ms step_avg:96.66ms +step:754/1695 train_time:72880ms step_avg:96.66ms +step:755/1695 train_time:72976ms step_avg:96.66ms +step:756/1695 train_time:73072ms step_avg:96.66ms +step:757/1695 train_time:73167ms step_avg:96.65ms +step:758/1695 train_time:73262ms step_avg:96.65ms +step:759/1695 train_time:73357ms step_avg:96.65ms +step:760/1695 train_time:73452ms step_avg:96.65ms +step:761/1695 train_time:73549ms step_avg:96.65ms +step:762/1695 train_time:73646ms step_avg:96.65ms +step:763/1695 train_time:73744ms step_avg:96.65ms +step:764/1695 train_time:73841ms step_avg:96.65ms +step:765/1695 train_time:73938ms step_avg:96.65ms +step:766/1695 train_time:74034ms step_avg:96.65ms +step:767/1695 train_time:74129ms step_avg:96.65ms +step:768/1695 train_time:74224ms step_avg:96.65ms +step:769/1695 train_time:74319ms step_avg:96.64ms +step:770/1695 train_time:74414ms step_avg:96.64ms +step:771/1695 train_time:74509ms step_avg:96.64ms +step:772/1695 train_time:74606ms step_avg:96.64ms +step:773/1695 train_time:74703ms step_avg:96.64ms +step:774/1695 train_time:74800ms step_avg:96.64ms +step:775/1695 train_time:74897ms step_avg:96.64ms +step:776/1695 train_time:74994ms step_avg:96.64ms +step:777/1695 train_time:75090ms step_avg:96.64ms +step:778/1695 train_time:75185ms step_avg:96.64ms +step:779/1695 train_time:75280ms step_avg:96.64ms +step:780/1695 train_time:75375ms step_avg:96.63ms +step:781/1695 train_time:75472ms step_avg:96.63ms +step:782/1695 train_time:75567ms step_avg:96.63ms +step:783/1695 train_time:75663ms 
step_avg:96.63ms +step:784/1695 train_time:75760ms step_avg:96.63ms +step:785/1695 train_time:75857ms step_avg:96.63ms +step:786/1695 train_time:75953ms step_avg:96.63ms +step:787/1695 train_time:76049ms step_avg:96.63ms +step:788/1695 train_time:76144ms step_avg:96.63ms +step:789/1695 train_time:76239ms step_avg:96.63ms +step:790/1695 train_time:76335ms step_avg:96.63ms +step:791/1695 train_time:76433ms step_avg:96.63ms +step:792/1695 train_time:76529ms step_avg:96.63ms +step:793/1695 train_time:76624ms step_avg:96.63ms +step:794/1695 train_time:76721ms step_avg:96.63ms +step:795/1695 train_time:76818ms step_avg:96.63ms +step:796/1695 train_time:76916ms step_avg:96.63ms +step:797/1695 train_time:77012ms step_avg:96.63ms +step:798/1695 train_time:77108ms step_avg:96.63ms +step:799/1695 train_time:77202ms step_avg:96.62ms +step:800/1695 train_time:77298ms step_avg:96.62ms +step:801/1695 train_time:77394ms step_avg:96.62ms +step:802/1695 train_time:77490ms step_avg:96.62ms +step:803/1695 train_time:77585ms step_avg:96.62ms +step:804/1695 train_time:77681ms step_avg:96.62ms +step:805/1695 train_time:77778ms step_avg:96.62ms +step:806/1695 train_time:77874ms step_avg:96.62ms +step:807/1695 train_time:77971ms step_avg:96.62ms +step:808/1695 train_time:78067ms step_avg:96.62ms +step:809/1695 train_time:78163ms step_avg:96.62ms +step:810/1695 train_time:78258ms step_avg:96.62ms +step:811/1695 train_time:78355ms step_avg:96.61ms +step:812/1695 train_time:78450ms step_avg:96.61ms +step:813/1695 train_time:78545ms step_avg:96.61ms +step:814/1695 train_time:78641ms step_avg:96.61ms +step:815/1695 train_time:78738ms step_avg:96.61ms +step:816/1695 train_time:78835ms step_avg:96.61ms +step:817/1695 train_time:78932ms step_avg:96.61ms +step:818/1695 train_time:79028ms step_avg:96.61ms +step:819/1695 train_time:79123ms step_avg:96.61ms +step:820/1695 train_time:79219ms step_avg:96.61ms +step:821/1695 train_time:79316ms step_avg:96.61ms +step:822/1695 train_time:79412ms step_avg:96.61ms +step:823/1695 train_time:79508ms step_avg:96.61ms +step:824/1695 train_time:79604ms step_avg:96.61ms +step:825/1695 train_time:79700ms step_avg:96.61ms +step:826/1695 train_time:79796ms step_avg:96.60ms +step:827/1695 train_time:79892ms step_avg:96.61ms +step:828/1695 train_time:79989ms step_avg:96.61ms +step:829/1695 train_time:80084ms step_avg:96.60ms +step:830/1695 train_time:80180ms step_avg:96.60ms +step:831/1695 train_time:80276ms step_avg:96.60ms +step:832/1695 train_time:80373ms step_avg:96.60ms +step:833/1695 train_time:80469ms step_avg:96.60ms +step:834/1695 train_time:80565ms step_avg:96.60ms +step:835/1695 train_time:80660ms step_avg:96.60ms +step:836/1695 train_time:80756ms step_avg:96.60ms +step:837/1695 train_time:80853ms step_avg:96.60ms +step:838/1695 train_time:80949ms step_avg:96.60ms +step:839/1695 train_time:81045ms step_avg:96.60ms +step:840/1695 train_time:81141ms step_avg:96.60ms +step:841/1695 train_time:81237ms step_avg:96.60ms +step:842/1695 train_time:81333ms step_avg:96.59ms +step:843/1695 train_time:81429ms step_avg:96.59ms +step:844/1695 train_time:81524ms step_avg:96.59ms +step:845/1695 train_time:81619ms step_avg:96.59ms +step:846/1695 train_time:81716ms step_avg:96.59ms +step:847/1695 train_time:81812ms step_avg:96.59ms +step:848/1695 train_time:81908ms step_avg:96.59ms +step:849/1695 train_time:82003ms step_avg:96.59ms +step:850/1695 train_time:82099ms step_avg:96.59ms +step:851/1695 train_time:82196ms step_avg:96.59ms +step:852/1695 train_time:82292ms step_avg:96.59ms +step:853/1695 
train_time:82388ms step_avg:96.59ms +step:854/1695 train_time:82484ms step_avg:96.59ms +step:855/1695 train_time:82579ms step_avg:96.58ms +step:856/1695 train_time:82676ms step_avg:96.58ms +step:857/1695 train_time:82773ms step_avg:96.58ms +step:858/1695 train_time:82869ms step_avg:96.58ms +step:859/1695 train_time:82964ms step_avg:96.58ms +step:860/1695 train_time:83060ms step_avg:96.58ms +step:861/1695 train_time:83156ms step_avg:96.58ms +step:862/1695 train_time:83252ms step_avg:96.58ms +step:863/1695 train_time:83584ms step_avg:96.85ms +step:864/1695 train_time:83778ms step_avg:96.96ms +step:865/1695 train_time:83872ms step_avg:96.96ms +step:866/1695 train_time:83966ms step_avg:96.96ms +step:867/1695 train_time:84061ms step_avg:96.96ms +step:868/1695 train_time:84156ms step_avg:96.95ms +step:869/1695 train_time:84250ms step_avg:96.95ms +step:870/1695 train_time:84345ms step_avg:96.95ms +step:871/1695 train_time:84440ms step_avg:96.95ms +step:872/1695 train_time:84535ms step_avg:96.94ms +step:873/1695 train_time:84637ms step_avg:96.95ms +step:874/1695 train_time:84737ms step_avg:96.95ms +step:875/1695 train_time:84837ms step_avg:96.96ms +step:875/1695 val_loss:3.5244 train_time:84933ms step_avg:97.07ms +step:876/1695 train_time:84957ms step_avg:96.98ms +step:877/1695 train_time:85038ms step_avg:96.96ms +step:878/1695 train_time:85138ms step_avg:96.97ms +step:879/1695 train_time:85235ms step_avg:96.97ms +step:880/1695 train_time:85330ms step_avg:96.97ms +step:881/1695 train_time:85425ms step_avg:96.96ms +step:882/1695 train_time:85520ms step_avg:96.96ms +step:883/1695 train_time:85614ms step_avg:96.96ms +step:884/1695 train_time:85709ms step_avg:96.96ms +step:885/1695 train_time:85804ms step_avg:96.95ms +step:886/1695 train_time:85901ms step_avg:96.95ms +step:887/1695 train_time:86000ms step_avg:96.96ms +step:888/1695 train_time:86100ms step_avg:96.96ms +step:889/1695 train_time:86198ms step_avg:96.96ms +step:890/1695 train_time:86295ms step_avg:96.96ms +step:891/1695 train_time:86391ms step_avg:96.96ms +step:892/1695 train_time:86486ms step_avg:96.96ms +step:893/1695 train_time:86581ms step_avg:96.96ms +step:894/1695 train_time:86677ms step_avg:96.95ms +step:895/1695 train_time:86772ms step_avg:96.95ms +step:896/1695 train_time:86868ms step_avg:96.95ms +step:897/1695 train_time:86964ms step_avg:96.95ms +step:898/1695 train_time:87062ms step_avg:96.95ms +step:899/1695 train_time:87159ms step_avg:96.95ms +step:900/1695 train_time:87256ms step_avg:96.95ms +step:901/1695 train_time:87351ms step_avg:96.95ms +step:902/1695 train_time:87447ms step_avg:96.95ms +step:903/1695 train_time:87542ms step_avg:96.95ms +step:904/1695 train_time:87638ms step_avg:96.95ms +step:905/1695 train_time:87734ms step_avg:96.94ms +step:906/1695 train_time:87829ms step_avg:96.94ms +step:907/1695 train_time:87925ms step_avg:96.94ms +step:908/1695 train_time:88021ms step_avg:96.94ms +step:909/1695 train_time:88118ms step_avg:96.94ms +step:910/1695 train_time:88216ms step_avg:96.94ms +step:911/1695 train_time:88312ms step_avg:96.94ms +step:912/1695 train_time:88407ms step_avg:96.94ms +step:913/1695 train_time:88503ms step_avg:96.94ms +step:914/1695 train_time:88599ms step_avg:96.94ms +step:915/1695 train_time:88695ms step_avg:96.93ms +step:916/1695 train_time:88791ms step_avg:96.93ms +step:917/1695 train_time:88887ms step_avg:96.93ms +step:918/1695 train_time:88983ms step_avg:96.93ms +step:919/1695 train_time:89080ms step_avg:96.93ms +step:920/1695 train_time:89177ms step_avg:96.93ms +step:921/1695 train_time:89274ms 
step_avg:96.93ms +step:922/1695 train_time:89370ms step_avg:96.93ms +step:923/1695 train_time:89467ms step_avg:96.93ms +step:924/1695 train_time:89562ms step_avg:96.93ms +step:925/1695 train_time:89658ms step_avg:96.93ms +step:926/1695 train_time:89752ms step_avg:96.92ms +step:927/1695 train_time:89848ms step_avg:96.92ms +step:928/1695 train_time:89944ms step_avg:96.92ms +step:929/1695 train_time:90041ms step_avg:96.92ms +step:930/1695 train_time:90138ms step_avg:96.92ms +step:931/1695 train_time:90235ms step_avg:96.92ms +step:932/1695 train_time:90332ms step_avg:96.92ms +step:933/1695 train_time:90428ms step_avg:96.92ms +step:934/1695 train_time:90523ms step_avg:96.92ms +step:935/1695 train_time:90620ms step_avg:96.92ms +step:936/1695 train_time:90717ms step_avg:96.92ms +step:937/1695 train_time:90813ms step_avg:96.92ms +step:938/1695 train_time:90908ms step_avg:96.92ms +step:939/1695 train_time:91003ms step_avg:96.92ms +step:940/1695 train_time:91099ms step_avg:96.91ms +step:941/1695 train_time:91195ms step_avg:96.91ms +step:942/1695 train_time:91291ms step_avg:96.91ms +step:943/1695 train_time:91386ms step_avg:96.91ms +step:944/1695 train_time:91482ms step_avg:96.91ms +step:945/1695 train_time:91578ms step_avg:96.91ms +step:946/1695 train_time:91675ms step_avg:96.91ms +step:947/1695 train_time:91771ms step_avg:96.91ms +step:948/1695 train_time:91866ms step_avg:96.91ms +step:949/1695 train_time:91962ms step_avg:96.90ms +step:950/1695 train_time:92057ms step_avg:96.90ms +step:951/1695 train_time:92153ms step_avg:96.90ms +step:952/1695 train_time:92249ms step_avg:96.90ms +step:953/1695 train_time:92345ms step_avg:96.90ms +step:954/1695 train_time:92441ms step_avg:96.90ms +step:955/1695 train_time:92538ms step_avg:96.90ms +step:956/1695 train_time:92636ms step_avg:96.90ms +step:957/1695 train_time:92732ms step_avg:96.90ms +step:958/1695 train_time:92827ms step_avg:96.90ms +step:959/1695 train_time:92923ms step_avg:96.90ms +step:960/1695 train_time:93018ms step_avg:96.89ms +step:961/1695 train_time:93114ms step_avg:96.89ms +step:962/1695 train_time:93210ms step_avg:96.89ms +step:963/1695 train_time:93305ms step_avg:96.89ms +step:964/1695 train_time:93401ms step_avg:96.89ms +step:965/1695 train_time:93496ms step_avg:96.89ms +step:966/1695 train_time:93592ms step_avg:96.89ms +step:967/1695 train_time:93688ms step_avg:96.88ms +step:968/1695 train_time:93783ms step_avg:96.88ms +step:969/1695 train_time:93880ms step_avg:96.88ms +step:970/1695 train_time:93976ms step_avg:96.88ms +step:971/1695 train_time:94073ms step_avg:96.88ms +step:972/1695 train_time:94168ms step_avg:96.88ms +step:973/1695 train_time:94263ms step_avg:96.88ms +step:974/1695 train_time:94360ms step_avg:96.88ms +step:975/1695 train_time:94456ms step_avg:96.88ms +step:976/1695 train_time:94553ms step_avg:96.88ms +step:977/1695 train_time:94648ms step_avg:96.88ms +step:978/1695 train_time:94743ms step_avg:96.87ms +step:979/1695 train_time:94840ms step_avg:96.87ms +step:980/1695 train_time:94937ms step_avg:96.87ms +step:981/1695 train_time:95033ms step_avg:96.87ms +step:982/1695 train_time:95129ms step_avg:96.87ms +step:983/1695 train_time:95224ms step_avg:96.87ms +step:984/1695 train_time:95320ms step_avg:96.87ms +step:985/1695 train_time:95416ms step_avg:96.87ms +step:986/1695 train_time:95512ms step_avg:96.87ms +step:987/1695 train_time:95608ms step_avg:96.87ms +step:988/1695 train_time:95703ms step_avg:96.87ms +step:989/1695 train_time:95799ms step_avg:96.86ms +step:990/1695 train_time:95895ms step_avg:96.86ms +step:991/1695 
train_time:95991ms step_avg:96.86ms +step:992/1695 train_time:96087ms step_avg:96.86ms +step:993/1695 train_time:96183ms step_avg:96.86ms +step:994/1695 train_time:96280ms step_avg:96.86ms +step:995/1695 train_time:96375ms step_avg:96.86ms +step:996/1695 train_time:96471ms step_avg:96.86ms +step:997/1695 train_time:96566ms step_avg:96.86ms +step:998/1695 train_time:96663ms step_avg:96.86ms +step:999/1695 train_time:96758ms step_avg:96.85ms +step:1000/1695 train_time:96854ms step_avg:96.85ms +step:1000/1695 val_loss:3.4839 train_time:96948ms step_avg:96.95ms +step:1001/1695 train_time:96972ms step_avg:96.88ms +step:1002/1695 train_time:97055ms step_avg:96.86ms +step:1003/1695 train_time:97153ms step_avg:96.86ms +step:1004/1695 train_time:97250ms step_avg:96.86ms +step:1005/1695 train_time:97345ms step_avg:96.86ms +step:1006/1695 train_time:97440ms step_avg:96.86ms +step:1007/1695 train_time:97534ms step_avg:96.86ms +step:1008/1695 train_time:97629ms step_avg:96.85ms +step:1009/1695 train_time:97724ms step_avg:96.85ms +step:1010/1695 train_time:97819ms step_avg:96.85ms +step:1011/1695 train_time:97914ms step_avg:96.85ms +step:1012/1695 train_time:98013ms step_avg:96.85ms +step:1013/1695 train_time:98112ms step_avg:96.85ms +step:1014/1695 train_time:98211ms step_avg:96.86ms +step:1015/1695 train_time:98309ms step_avg:96.86ms +step:1016/1695 train_time:98406ms step_avg:96.86ms +step:1017/1695 train_time:98501ms step_avg:96.85ms +step:1018/1695 train_time:98596ms step_avg:96.85ms +step:1019/1695 train_time:98690ms step_avg:96.85ms +step:1020/1695 train_time:98786ms step_avg:96.85ms +step:1021/1695 train_time:98883ms step_avg:96.85ms +step:1022/1695 train_time:98979ms step_avg:96.85ms +step:1023/1695 train_time:99077ms step_avg:96.85ms +step:1024/1695 train_time:99173ms step_avg:96.85ms +step:1025/1695 train_time:99270ms step_avg:96.85ms +step:1026/1695 train_time:99366ms step_avg:96.85ms +step:1027/1695 train_time:99462ms step_avg:96.85ms +step:1028/1695 train_time:99556ms step_avg:96.84ms +step:1029/1695 train_time:99651ms step_avg:96.84ms +step:1030/1695 train_time:99746ms step_avg:96.84ms +step:1031/1695 train_time:99842ms step_avg:96.84ms +step:1032/1695 train_time:99938ms step_avg:96.84ms +step:1033/1695 train_time:100035ms step_avg:96.84ms +step:1034/1695 train_time:100131ms step_avg:96.84ms +step:1035/1695 train_time:100228ms step_avg:96.84ms +step:1036/1695 train_time:100552ms step_avg:97.06ms +step:1037/1695 train_time:100740ms step_avg:97.15ms +step:1038/1695 train_time:100833ms step_avg:97.14ms +step:1039/1695 train_time:100928ms step_avg:97.14ms +step:1040/1695 train_time:101024ms step_avg:97.14ms +step:1041/1695 train_time:101118ms step_avg:97.14ms +step:1042/1695 train_time:101212ms step_avg:97.13ms +step:1043/1695 train_time:101307ms step_avg:97.13ms +step:1044/1695 train_time:101402ms step_avg:97.13ms +step:1045/1695 train_time:101497ms step_avg:97.13ms +step:1046/1695 train_time:101598ms step_avg:97.13ms +step:1047/1695 train_time:101696ms step_avg:97.13ms +step:1048/1695 train_time:101794ms step_avg:97.13ms +step:1049/1695 train_time:101890ms step_avg:97.13ms +step:1050/1695 train_time:101986ms step_avg:97.13ms +step:1051/1695 train_time:102083ms step_avg:97.13ms +step:1052/1695 train_time:102178ms step_avg:97.13ms +step:1053/1695 train_time:102272ms step_avg:97.12ms +step:1054/1695 train_time:102367ms step_avg:97.12ms +step:1055/1695 train_time:102462ms step_avg:97.12ms +step:1056/1695 train_time:102560ms step_avg:97.12ms +step:1057/1695 train_time:102656ms step_avg:97.12ms 
+step:1058/1695 train_time:102753ms step_avg:97.12ms +step:1059/1695 train_time:102850ms step_avg:97.12ms +step:1060/1695 train_time:102947ms step_avg:97.12ms +step:1061/1695 train_time:103044ms step_avg:97.12ms +step:1062/1695 train_time:103140ms step_avg:97.12ms +step:1063/1695 train_time:103235ms step_avg:97.12ms +step:1064/1695 train_time:103330ms step_avg:97.11ms +step:1065/1695 train_time:103425ms step_avg:97.11ms +step:1066/1695 train_time:103521ms step_avg:97.11ms +step:1067/1695 train_time:103617ms step_avg:97.11ms +step:1068/1695 train_time:103714ms step_avg:97.11ms +step:1069/1695 train_time:103810ms step_avg:97.11ms +step:1070/1695 train_time:103907ms step_avg:97.11ms +step:1071/1695 train_time:104004ms step_avg:97.11ms +step:1072/1695 train_time:104100ms step_avg:97.11ms +step:1073/1695 train_time:104195ms step_avg:97.11ms +step:1074/1695 train_time:104289ms step_avg:97.10ms +step:1075/1695 train_time:104385ms step_avg:97.10ms +step:1076/1695 train_time:104481ms step_avg:97.10ms +step:1077/1695 train_time:104578ms step_avg:97.10ms +step:1078/1695 train_time:104673ms step_avg:97.10ms +step:1079/1695 train_time:104769ms step_avg:97.10ms +step:1080/1695 train_time:104865ms step_avg:97.10ms +step:1081/1695 train_time:104961ms step_avg:97.10ms +step:1082/1695 train_time:105056ms step_avg:97.09ms +step:1083/1695 train_time:105152ms step_avg:97.09ms +step:1084/1695 train_time:105248ms step_avg:97.09ms +step:1085/1695 train_time:105344ms step_avg:97.09ms +step:1086/1695 train_time:105439ms step_avg:97.09ms +step:1087/1695 train_time:105535ms step_avg:97.09ms +step:1088/1695 train_time:105631ms step_avg:97.09ms +step:1089/1695 train_time:105729ms step_avg:97.09ms +step:1090/1695 train_time:105825ms step_avg:97.09ms +step:1091/1695 train_time:105922ms step_avg:97.09ms +step:1092/1695 train_time:106018ms step_avg:97.09ms +step:1093/1695 train_time:106114ms step_avg:97.08ms +step:1094/1695 train_time:106209ms step_avg:97.08ms +step:1095/1695 train_time:106306ms step_avg:97.08ms +step:1096/1695 train_time:106402ms step_avg:97.08ms +step:1097/1695 train_time:106497ms step_avg:97.08ms +step:1098/1695 train_time:106592ms step_avg:97.08ms +step:1099/1695 train_time:106688ms step_avg:97.08ms +step:1100/1695 train_time:106785ms step_avg:97.08ms +step:1101/1695 train_time:106882ms step_avg:97.08ms +step:1102/1695 train_time:106978ms step_avg:97.08ms +step:1103/1695 train_time:107073ms step_avg:97.07ms +step:1104/1695 train_time:107169ms step_avg:97.07ms +step:1105/1695 train_time:107265ms step_avg:97.07ms +step:1106/1695 train_time:107362ms step_avg:97.07ms +step:1107/1695 train_time:107457ms step_avg:97.07ms +step:1108/1695 train_time:107552ms step_avg:97.07ms +step:1109/1695 train_time:107648ms step_avg:97.07ms +step:1110/1695 train_time:107745ms step_avg:97.07ms +step:1111/1695 train_time:107841ms step_avg:97.07ms +step:1112/1695 train_time:107938ms step_avg:97.07ms +step:1113/1695 train_time:108034ms step_avg:97.07ms +step:1114/1695 train_time:108130ms step_avg:97.06ms +step:1115/1695 train_time:108227ms step_avg:97.06ms +step:1116/1695 train_time:108323ms step_avg:97.06ms +step:1117/1695 train_time:108419ms step_avg:97.06ms +step:1118/1695 train_time:108515ms step_avg:97.06ms +step:1119/1695 train_time:108611ms step_avg:97.06ms +step:1120/1695 train_time:108708ms step_avg:97.06ms +step:1121/1695 train_time:108804ms step_avg:97.06ms +step:1122/1695 train_time:108901ms step_avg:97.06ms +step:1123/1695 train_time:108995ms step_avg:97.06ms +step:1124/1695 train_time:109090ms step_avg:97.06ms 
+step:1125/1695 train_time:109187ms step_avg:97.06ms +step:1125/1695 val_loss:3.4370 train_time:109281ms step_avg:97.14ms +step:1126/1695 train_time:109306ms step_avg:97.08ms +step:1127/1695 train_time:109389ms step_avg:97.06ms +step:1128/1695 train_time:109486ms step_avg:97.06ms +step:1129/1695 train_time:109581ms step_avg:97.06ms +step:1130/1695 train_time:109676ms step_avg:97.06ms +step:1131/1695 train_time:109771ms step_avg:97.06ms +step:1132/1695 train_time:109865ms step_avg:97.05ms +step:1133/1695 train_time:109961ms step_avg:97.05ms +step:1134/1695 train_time:110058ms step_avg:97.05ms +step:1135/1695 train_time:110154ms step_avg:97.05ms +step:1136/1695 train_time:110253ms step_avg:97.05ms +step:1137/1695 train_time:110354ms step_avg:97.06ms +step:1138/1695 train_time:110454ms step_avg:97.06ms +step:1139/1695 train_time:110552ms step_avg:97.06ms +step:1140/1695 train_time:110650ms step_avg:97.06ms +step:1141/1695 train_time:110746ms step_avg:97.06ms +step:1142/1695 train_time:110842ms step_avg:97.06ms +step:1143/1695 train_time:110939ms step_avg:97.06ms +step:1144/1695 train_time:111035ms step_avg:97.06ms +step:1145/1695 train_time:111133ms step_avg:97.06ms +step:1146/1695 train_time:111230ms step_avg:97.06ms +step:1147/1695 train_time:111329ms step_avg:97.06ms +step:1148/1695 train_time:111428ms step_avg:97.06ms +step:1149/1695 train_time:111526ms step_avg:97.06ms +step:1150/1695 train_time:111624ms step_avg:97.06ms +step:1151/1695 train_time:111721ms step_avg:97.06ms +step:1152/1695 train_time:111819ms step_avg:97.06ms +step:1153/1695 train_time:111916ms step_avg:97.06ms +step:1154/1695 train_time:112013ms step_avg:97.06ms +step:1155/1695 train_time:112110ms step_avg:97.06ms +step:1156/1695 train_time:112207ms step_avg:97.07ms +step:1157/1695 train_time:112305ms step_avg:97.07ms +step:1158/1695 train_time:112403ms step_avg:97.07ms +step:1159/1695 train_time:112502ms step_avg:97.07ms +step:1160/1695 train_time:112601ms step_avg:97.07ms +step:1161/1695 train_time:112699ms step_avg:97.07ms +step:1162/1695 train_time:112796ms step_avg:97.07ms +step:1163/1695 train_time:112894ms step_avg:97.07ms +step:1164/1695 train_time:112991ms step_avg:97.07ms +step:1165/1695 train_time:113088ms step_avg:97.07ms +step:1166/1695 train_time:113185ms step_avg:97.07ms +step:1167/1695 train_time:113282ms step_avg:97.07ms +step:1168/1695 train_time:113380ms step_avg:97.07ms +step:1169/1695 train_time:113479ms step_avg:97.07ms +step:1170/1695 train_time:113579ms step_avg:97.08ms +step:1171/1695 train_time:113676ms step_avg:97.08ms +step:1172/1695 train_time:113774ms step_avg:97.08ms +step:1173/1695 train_time:113870ms step_avg:97.08ms +step:1174/1695 train_time:113966ms step_avg:97.08ms +step:1175/1695 train_time:114063ms step_avg:97.07ms +step:1176/1695 train_time:114161ms step_avg:97.08ms +step:1177/1695 train_time:114259ms step_avg:97.08ms +step:1178/1695 train_time:114357ms step_avg:97.08ms +step:1179/1695 train_time:114455ms step_avg:97.08ms +step:1180/1695 train_time:114553ms step_avg:97.08ms +step:1181/1695 train_time:114651ms step_avg:97.08ms +step:1182/1695 train_time:114748ms step_avg:97.08ms +step:1183/1695 train_time:114845ms step_avg:97.08ms +step:1184/1695 train_time:114942ms step_avg:97.08ms +step:1185/1695 train_time:115041ms step_avg:97.08ms +step:1186/1695 train_time:115140ms step_avg:97.08ms +step:1187/1695 train_time:115238ms step_avg:97.08ms +step:1188/1695 train_time:115337ms step_avg:97.09ms +step:1189/1695 train_time:115435ms step_avg:97.09ms +step:1190/1695 train_time:115533ms 
step_avg:97.09ms +step:1191/1695 train_time:115632ms step_avg:97.09ms +step:1192/1695 train_time:115730ms step_avg:97.09ms +step:1193/1695 train_time:115826ms step_avg:97.09ms +step:1194/1695 train_time:115923ms step_avg:97.09ms +step:1195/1695 train_time:116021ms step_avg:97.09ms +step:1196/1695 train_time:116119ms step_avg:97.09ms +step:1197/1695 train_time:116216ms step_avg:97.09ms +step:1198/1695 train_time:116313ms step_avg:97.09ms +step:1199/1695 train_time:116411ms step_avg:97.09ms +step:1200/1695 train_time:116508ms step_avg:97.09ms +step:1201/1695 train_time:116605ms step_avg:97.09ms +step:1202/1695 train_time:116704ms step_avg:97.09ms +step:1203/1695 train_time:116801ms step_avg:97.09ms +step:1204/1695 train_time:116899ms step_avg:97.09ms +step:1205/1695 train_time:116997ms step_avg:97.09ms +step:1206/1695 train_time:117095ms step_avg:97.09ms +step:1207/1695 train_time:117193ms step_avg:97.09ms +step:1208/1695 train_time:117515ms step_avg:97.28ms +step:1209/1695 train_time:117719ms step_avg:97.37ms +step:1210/1695 train_time:117814ms step_avg:97.37ms +step:1211/1695 train_time:117911ms step_avg:97.37ms +step:1212/1695 train_time:118008ms step_avg:97.37ms +step:1213/1695 train_time:118103ms step_avg:97.36ms +step:1214/1695 train_time:118200ms step_avg:97.36ms +step:1215/1695 train_time:118297ms step_avg:97.36ms +step:1216/1695 train_time:118393ms step_avg:97.36ms +step:1217/1695 train_time:118491ms step_avg:97.36ms +step:1218/1695 train_time:118592ms step_avg:97.37ms +step:1219/1695 train_time:118695ms step_avg:97.37ms +step:1220/1695 train_time:118794ms step_avg:97.37ms +step:1221/1695 train_time:118893ms step_avg:97.37ms +step:1222/1695 train_time:118990ms step_avg:97.37ms +step:1223/1695 train_time:119087ms step_avg:97.37ms +step:1224/1695 train_time:119183ms step_avg:97.37ms +step:1225/1695 train_time:119281ms step_avg:97.37ms +step:1226/1695 train_time:119378ms step_avg:97.37ms +step:1227/1695 train_time:119475ms step_avg:97.37ms +step:1228/1695 train_time:119573ms step_avg:97.37ms +step:1229/1695 train_time:119673ms step_avg:97.37ms +step:1230/1695 train_time:119772ms step_avg:97.38ms +step:1231/1695 train_time:119870ms step_avg:97.38ms +step:1232/1695 train_time:119966ms step_avg:97.38ms +step:1233/1695 train_time:120064ms step_avg:97.38ms +step:1234/1695 train_time:120162ms step_avg:97.38ms +step:1235/1695 train_time:120259ms step_avg:97.38ms +step:1236/1695 train_time:120357ms step_avg:97.38ms +step:1237/1695 train_time:120454ms step_avg:97.38ms +step:1238/1695 train_time:120552ms step_avg:97.38ms +step:1239/1695 train_time:120650ms step_avg:97.38ms +step:1240/1695 train_time:120748ms step_avg:97.38ms +step:1241/1695 train_time:120845ms step_avg:97.38ms +step:1242/1695 train_time:120944ms step_avg:97.38ms +step:1243/1695 train_time:121041ms step_avg:97.38ms +step:1244/1695 train_time:121139ms step_avg:97.38ms +step:1245/1695 train_time:121235ms step_avg:97.38ms +step:1246/1695 train_time:121332ms step_avg:97.38ms +step:1247/1695 train_time:121429ms step_avg:97.38ms +step:1248/1695 train_time:121527ms step_avg:97.38ms +step:1249/1695 train_time:121625ms step_avg:97.38ms +step:1250/1695 train_time:121724ms step_avg:97.38ms +step:1250/1695 val_loss:3.3885 train_time:121819ms step_avg:97.46ms +step:1251/1695 train_time:121843ms step_avg:97.40ms +step:1252/1695 train_time:121929ms step_avg:97.39ms +step:1253/1695 train_time:122027ms step_avg:97.39ms +step:1254/1695 train_time:122123ms step_avg:97.39ms +step:1255/1695 train_time:122220ms step_avg:97.39ms +step:1256/1695 
train_time:122317ms step_avg:97.39ms +step:1257/1695 train_time:122414ms step_avg:97.39ms +step:1258/1695 train_time:122510ms step_avg:97.38ms +step:1259/1695 train_time:122606ms step_avg:97.38ms +step:1260/1695 train_time:122703ms step_avg:97.38ms +step:1261/1695 train_time:122803ms step_avg:97.39ms +step:1262/1695 train_time:122905ms step_avg:97.39ms +step:1263/1695 train_time:123004ms step_avg:97.39ms +step:1264/1695 train_time:123101ms step_avg:97.39ms +step:1265/1695 train_time:123199ms step_avg:97.39ms +step:1266/1695 train_time:123296ms step_avg:97.39ms +step:1267/1695 train_time:123393ms step_avg:97.39ms +step:1268/1695 train_time:123490ms step_avg:97.39ms +step:1269/1695 train_time:123587ms step_avg:97.39ms +step:1270/1695 train_time:123686ms step_avg:97.39ms +step:1271/1695 train_time:123781ms step_avg:97.39ms +step:1272/1695 train_time:123881ms step_avg:97.39ms +step:1273/1695 train_time:123982ms step_avg:97.39ms +step:1274/1695 train_time:124082ms step_avg:97.40ms +step:1275/1695 train_time:124180ms step_avg:97.40ms +step:1276/1695 train_time:124278ms step_avg:97.40ms +step:1277/1695 train_time:124376ms step_avg:97.40ms +step:1278/1695 train_time:124474ms step_avg:97.40ms +step:1279/1695 train_time:124570ms step_avg:97.40ms +step:1280/1695 train_time:124666ms step_avg:97.40ms +step:1281/1695 train_time:124763ms step_avg:97.40ms +step:1282/1695 train_time:124862ms step_avg:97.40ms +step:1283/1695 train_time:124961ms step_avg:97.40ms +step:1284/1695 train_time:125061ms step_avg:97.40ms +step:1285/1695 train_time:125160ms step_avg:97.40ms +step:1286/1695 train_time:125258ms step_avg:97.40ms +step:1287/1695 train_time:125356ms step_avg:97.40ms +step:1288/1695 train_time:125454ms step_avg:97.40ms +step:1289/1695 train_time:125552ms step_avg:97.40ms +step:1290/1695 train_time:125649ms step_avg:97.40ms +step:1291/1695 train_time:125746ms step_avg:97.40ms +step:1292/1695 train_time:125843ms step_avg:97.40ms +step:1293/1695 train_time:125941ms step_avg:97.40ms +step:1294/1695 train_time:126039ms step_avg:97.40ms +step:1295/1695 train_time:126138ms step_avg:97.40ms +step:1296/1695 train_time:126237ms step_avg:97.40ms +step:1297/1695 train_time:126335ms step_avg:97.41ms +step:1298/1695 train_time:126432ms step_avg:97.41ms +step:1299/1695 train_time:126529ms step_avg:97.41ms +step:1300/1695 train_time:126626ms step_avg:97.40ms +step:1301/1695 train_time:126723ms step_avg:97.40ms +step:1302/1695 train_time:126821ms step_avg:97.40ms +step:1303/1695 train_time:126919ms step_avg:97.41ms +step:1304/1695 train_time:127018ms step_avg:97.41ms +step:1305/1695 train_time:127116ms step_avg:97.41ms +step:1306/1695 train_time:127214ms step_avg:97.41ms +step:1307/1695 train_time:127312ms step_avg:97.41ms +step:1308/1695 train_time:127410ms step_avg:97.41ms +step:1309/1695 train_time:127508ms step_avg:97.41ms +step:1310/1695 train_time:127605ms step_avg:97.41ms +step:1311/1695 train_time:127703ms step_avg:97.41ms +step:1312/1695 train_time:127800ms step_avg:97.41ms +step:1313/1695 train_time:127898ms step_avg:97.41ms +step:1314/1695 train_time:127996ms step_avg:97.41ms +step:1315/1695 train_time:128093ms step_avg:97.41ms +step:1316/1695 train_time:128191ms step_avg:97.41ms +step:1317/1695 train_time:128289ms step_avg:97.41ms +step:1318/1695 train_time:128386ms step_avg:97.41ms +step:1319/1695 train_time:128483ms step_avg:97.41ms +step:1320/1695 train_time:128582ms step_avg:97.41ms +step:1321/1695 train_time:128679ms step_avg:97.41ms +step:1322/1695 train_time:128777ms step_avg:97.41ms +step:1323/1695 
train_time:128875ms step_avg:97.41ms +step:1324/1695 train_time:128973ms step_avg:97.41ms +step:1325/1695 train_time:129070ms step_avg:97.41ms +step:1326/1695 train_time:129168ms step_avg:97.41ms +step:1327/1695 train_time:129265ms step_avg:97.41ms +step:1328/1695 train_time:129363ms step_avg:97.41ms +step:1329/1695 train_time:129461ms step_avg:97.41ms +step:1330/1695 train_time:129559ms step_avg:97.41ms +step:1331/1695 train_time:129658ms step_avg:97.41ms +step:1332/1695 train_time:129756ms step_avg:97.41ms +step:1333/1695 train_time:129854ms step_avg:97.42ms +step:1334/1695 train_time:129952ms step_avg:97.42ms +step:1335/1695 train_time:130049ms step_avg:97.41ms +step:1336/1695 train_time:130146ms step_avg:97.41ms +step:1337/1695 train_time:130244ms step_avg:97.42ms +step:1338/1695 train_time:130342ms step_avg:97.42ms +step:1339/1695 train_time:130440ms step_avg:97.42ms +step:1340/1695 train_time:130539ms step_avg:97.42ms +step:1341/1695 train_time:130637ms step_avg:97.42ms +step:1342/1695 train_time:130735ms step_avg:97.42ms +step:1343/1695 train_time:130832ms step_avg:97.42ms +step:1344/1695 train_time:130929ms step_avg:97.42ms +step:1345/1695 train_time:131026ms step_avg:97.42ms +step:1346/1695 train_time:131124ms step_avg:97.42ms +step:1347/1695 train_time:131220ms step_avg:97.42ms +step:1348/1695 train_time:131318ms step_avg:97.42ms +step:1349/1695 train_time:131417ms step_avg:97.42ms +step:1350/1695 train_time:131515ms step_avg:97.42ms +step:1351/1695 train_time:131613ms step_avg:97.42ms +step:1352/1695 train_time:131710ms step_avg:97.42ms +step:1353/1695 train_time:131808ms step_avg:97.42ms +step:1354/1695 train_time:131905ms step_avg:97.42ms +step:1355/1695 train_time:132003ms step_avg:97.42ms +step:1356/1695 train_time:132102ms step_avg:97.42ms +step:1357/1695 train_time:132200ms step_avg:97.42ms +step:1358/1695 train_time:132297ms step_avg:97.42ms +step:1359/1695 train_time:132395ms step_avg:97.42ms +step:1360/1695 train_time:132492ms step_avg:97.42ms +step:1361/1695 train_time:132590ms step_avg:97.42ms +step:1362/1695 train_time:132688ms step_avg:97.42ms +step:1363/1695 train_time:132786ms step_avg:97.42ms +step:1364/1695 train_time:132883ms step_avg:97.42ms +step:1365/1695 train_time:132981ms step_avg:97.42ms +step:1366/1695 train_time:133080ms step_avg:97.42ms +step:1367/1695 train_time:133179ms step_avg:97.42ms +step:1368/1695 train_time:133277ms step_avg:97.42ms +step:1369/1695 train_time:133375ms step_avg:97.43ms +step:1370/1695 train_time:133473ms step_avg:97.43ms +step:1371/1695 train_time:133571ms step_avg:97.43ms +step:1372/1695 train_time:133669ms step_avg:97.43ms +step:1373/1695 train_time:133766ms step_avg:97.43ms +step:1374/1695 train_time:133863ms step_avg:97.43ms +step:1375/1695 train_time:133961ms step_avg:97.43ms +step:1375/1695 val_loss:3.3505 train_time:134057ms step_avg:97.50ms +step:1376/1695 train_time:134084ms step_avg:97.45ms +step:1377/1695 train_time:134163ms step_avg:97.43ms +step:1378/1695 train_time:134261ms step_avg:97.43ms +step:1379/1695 train_time:134359ms step_avg:97.43ms +step:1380/1695 train_time:134456ms step_avg:97.43ms +step:1381/1695 train_time:134781ms step_avg:97.60ms +step:1382/1695 train_time:134987ms step_avg:97.68ms +step:1383/1695 train_time:135083ms step_avg:97.67ms +step:1384/1695 train_time:135179ms step_avg:97.67ms +step:1385/1695 train_time:135276ms step_avg:97.67ms +step:1386/1695 train_time:135374ms step_avg:97.67ms +step:1387/1695 train_time:135471ms step_avg:97.67ms +step:1388/1695 train_time:135568ms step_avg:97.67ms 
+step:1389/1695 train_time:135665ms step_avg:97.67ms +step:1390/1695 train_time:135763ms step_avg:97.67ms +step:1391/1695 train_time:135867ms step_avg:97.68ms +step:1392/1695 train_time:135968ms step_avg:97.68ms +step:1393/1695 train_time:136065ms step_avg:97.68ms +step:1394/1695 train_time:136163ms step_avg:97.68ms +step:1395/1695 train_time:136260ms step_avg:97.68ms +step:1396/1695 train_time:136356ms step_avg:97.68ms +step:1397/1695 train_time:136453ms step_avg:97.68ms +step:1398/1695 train_time:136549ms step_avg:97.67ms +step:1399/1695 train_time:136646ms step_avg:97.67ms +step:1400/1695 train_time:136743ms step_avg:97.67ms +step:1401/1695 train_time:136841ms step_avg:97.67ms +step:1402/1695 train_time:136940ms step_avg:97.67ms +step:1403/1695 train_time:137039ms step_avg:97.68ms +step:1404/1695 train_time:137137ms step_avg:97.68ms +step:1405/1695 train_time:137234ms step_avg:97.68ms +step:1406/1695 train_time:137332ms step_avg:97.68ms +step:1407/1695 train_time:137429ms step_avg:97.67ms +step:1408/1695 train_time:137525ms step_avg:97.67ms +step:1409/1695 train_time:137622ms step_avg:97.67ms +step:1410/1695 train_time:137719ms step_avg:97.67ms +step:1411/1695 train_time:137817ms step_avg:97.67ms +step:1412/1695 train_time:137916ms step_avg:97.67ms +step:1413/1695 train_time:138014ms step_avg:97.67ms +step:1414/1695 train_time:138113ms step_avg:97.68ms +step:1415/1695 train_time:138212ms step_avg:97.68ms +step:1416/1695 train_time:138309ms step_avg:97.68ms +step:1417/1695 train_time:138405ms step_avg:97.67ms +step:1418/1695 train_time:138501ms step_avg:97.67ms +step:1419/1695 train_time:138598ms step_avg:97.67ms +step:1420/1695 train_time:138696ms step_avg:97.67ms +step:1421/1695 train_time:138794ms step_avg:97.67ms +step:1422/1695 train_time:138893ms step_avg:97.67ms +step:1423/1695 train_time:138990ms step_avg:97.67ms +step:1424/1695 train_time:139089ms step_avg:97.67ms +step:1425/1695 train_time:139188ms step_avg:97.68ms +step:1426/1695 train_time:139286ms step_avg:97.68ms +step:1427/1695 train_time:139382ms step_avg:97.68ms +step:1428/1695 train_time:139479ms step_avg:97.67ms +step:1429/1695 train_time:139577ms step_avg:97.67ms +step:1430/1695 train_time:139675ms step_avg:97.67ms +step:1431/1695 train_time:139772ms step_avg:97.67ms +step:1432/1695 train_time:139871ms step_avg:97.68ms +step:1433/1695 train_time:139969ms step_avg:97.68ms +step:1434/1695 train_time:140067ms step_avg:97.68ms +step:1435/1695 train_time:140164ms step_avg:97.68ms +step:1436/1695 train_time:140261ms step_avg:97.68ms +step:1437/1695 train_time:140358ms step_avg:97.67ms +step:1438/1695 train_time:140455ms step_avg:97.67ms +step:1439/1695 train_time:140553ms step_avg:97.67ms +step:1440/1695 train_time:140651ms step_avg:97.67ms +step:1441/1695 train_time:140749ms step_avg:97.67ms +step:1442/1695 train_time:140847ms step_avg:97.67ms +step:1443/1695 train_time:140945ms step_avg:97.68ms +step:1444/1695 train_time:141043ms step_avg:97.68ms +step:1445/1695 train_time:141140ms step_avg:97.67ms +step:1446/1695 train_time:141238ms step_avg:97.67ms +step:1447/1695 train_time:141335ms step_avg:97.67ms +step:1448/1695 train_time:141433ms step_avg:97.67ms +step:1449/1695 train_time:141530ms step_avg:97.67ms +step:1450/1695 train_time:141628ms step_avg:97.67ms +step:1451/1695 train_time:141725ms step_avg:97.67ms +step:1452/1695 train_time:141822ms step_avg:97.67ms +step:1453/1695 train_time:141920ms step_avg:97.67ms +step:1454/1695 train_time:142019ms step_avg:97.67ms +step:1455/1695 train_time:142118ms step_avg:97.68ms 
+step:1456/1695 train_time:142215ms step_avg:97.68ms +step:1457/1695 train_time:142313ms step_avg:97.68ms +step:1458/1695 train_time:142410ms step_avg:97.68ms +step:1459/1695 train_time:142507ms step_avg:97.67ms +step:1460/1695 train_time:142604ms step_avg:97.67ms +step:1461/1695 train_time:142701ms step_avg:97.67ms +step:1462/1695 train_time:142798ms step_avg:97.67ms +step:1463/1695 train_time:142896ms step_avg:97.67ms +step:1464/1695 train_time:142994ms step_avg:97.67ms +step:1465/1695 train_time:143092ms step_avg:97.67ms +step:1466/1695 train_time:143190ms step_avg:97.67ms +step:1467/1695 train_time:143287ms step_avg:97.67ms +step:1468/1695 train_time:143385ms step_avg:97.67ms +step:1469/1695 train_time:143482ms step_avg:97.67ms +step:1470/1695 train_time:143579ms step_avg:97.67ms +step:1471/1695 train_time:143678ms step_avg:97.67ms +step:1472/1695 train_time:143776ms step_avg:97.67ms +step:1473/1695 train_time:143873ms step_avg:97.67ms +step:1474/1695 train_time:143972ms step_avg:97.67ms +step:1475/1695 train_time:144070ms step_avg:97.67ms +step:1476/1695 train_time:144167ms step_avg:97.67ms +step:1477/1695 train_time:144265ms step_avg:97.67ms +step:1478/1695 train_time:144361ms step_avg:97.67ms +step:1479/1695 train_time:144459ms step_avg:97.67ms +step:1480/1695 train_time:144555ms step_avg:97.67ms +step:1481/1695 train_time:144653ms step_avg:97.67ms +step:1482/1695 train_time:144751ms step_avg:97.67ms +step:1483/1695 train_time:144849ms step_avg:97.67ms +step:1484/1695 train_time:144947ms step_avg:97.67ms +step:1485/1695 train_time:145044ms step_avg:97.67ms +step:1486/1695 train_time:145141ms step_avg:97.67ms +step:1487/1695 train_time:145239ms step_avg:97.67ms +step:1488/1695 train_time:145337ms step_avg:97.67ms +step:1489/1695 train_time:145435ms step_avg:97.67ms +step:1490/1695 train_time:145533ms step_avg:97.67ms +step:1491/1695 train_time:145630ms step_avg:97.67ms +step:1492/1695 train_time:145728ms step_avg:97.67ms +step:1493/1695 train_time:145825ms step_avg:97.67ms +step:1494/1695 train_time:145922ms step_avg:97.67ms +step:1495/1695 train_time:146020ms step_avg:97.67ms +step:1496/1695 train_time:146118ms step_avg:97.67ms +step:1497/1695 train_time:146216ms step_avg:97.67ms +step:1498/1695 train_time:146315ms step_avg:97.67ms +step:1499/1695 train_time:146413ms step_avg:97.67ms +step:1500/1695 train_time:146511ms step_avg:97.67ms +step:1500/1695 val_loss:3.3179 train_time:146606ms step_avg:97.74ms +step:1501/1695 train_time:146632ms step_avg:97.69ms +step:1502/1695 train_time:146715ms step_avg:97.68ms +step:1503/1695 train_time:146816ms step_avg:97.68ms +step:1504/1695 train_time:146914ms step_avg:97.68ms +step:1505/1695 train_time:147011ms step_avg:97.68ms +step:1506/1695 train_time:147108ms step_avg:97.68ms +step:1507/1695 train_time:147205ms step_avg:97.68ms +step:1508/1695 train_time:147301ms step_avg:97.68ms +step:1509/1695 train_time:147397ms step_avg:97.68ms +step:1510/1695 train_time:147494ms step_avg:97.68ms +step:1511/1695 train_time:147596ms step_avg:97.68ms +step:1512/1695 train_time:147698ms step_avg:97.68ms +step:1513/1695 train_time:147797ms step_avg:97.68ms +step:1514/1695 train_time:147896ms step_avg:97.69ms +step:1515/1695 train_time:147994ms step_avg:97.69ms +step:1516/1695 train_time:148092ms step_avg:97.69ms +step:1517/1695 train_time:148189ms step_avg:97.69ms +step:1518/1695 train_time:148286ms step_avg:97.68ms +step:1519/1695 train_time:148382ms step_avg:97.68ms +step:1520/1695 train_time:148478ms step_avg:97.68ms +step:1521/1695 train_time:148577ms 
step_avg:97.68ms +step:1522/1695 train_time:148678ms step_avg:97.69ms +step:1523/1695 train_time:148777ms step_avg:97.69ms +step:1524/1695 train_time:148876ms step_avg:97.69ms +step:1525/1695 train_time:148975ms step_avg:97.69ms +step:1526/1695 train_time:149074ms step_avg:97.69ms +step:1527/1695 train_time:149172ms step_avg:97.69ms +step:1528/1695 train_time:149269ms step_avg:97.69ms +step:1529/1695 train_time:149367ms step_avg:97.69ms +step:1530/1695 train_time:149465ms step_avg:97.69ms +step:1531/1695 train_time:149562ms step_avg:97.69ms +step:1532/1695 train_time:149660ms step_avg:97.69ms +step:1533/1695 train_time:149758ms step_avg:97.69ms +step:1534/1695 train_time:149856ms step_avg:97.69ms +step:1535/1695 train_time:149955ms step_avg:97.69ms +step:1536/1695 train_time:150054ms step_avg:97.69ms +step:1537/1695 train_time:150152ms step_avg:97.69ms +step:1538/1695 train_time:150249ms step_avg:97.69ms +step:1539/1695 train_time:150346ms step_avg:97.69ms +step:1540/1695 train_time:150443ms step_avg:97.69ms +step:1541/1695 train_time:150540ms step_avg:97.69ms +step:1542/1695 train_time:150638ms step_avg:97.69ms +step:1543/1695 train_time:150736ms step_avg:97.69ms +step:1544/1695 train_time:150835ms step_avg:97.69ms +step:1545/1695 train_time:150932ms step_avg:97.69ms +step:1546/1695 train_time:151030ms step_avg:97.69ms +step:1547/1695 train_time:151128ms step_avg:97.69ms +step:1548/1695 train_time:151225ms step_avg:97.69ms +step:1549/1695 train_time:151322ms step_avg:97.69ms +step:1550/1695 train_time:151419ms step_avg:97.69ms +step:1551/1695 train_time:151517ms step_avg:97.69ms +step:1552/1695 train_time:151888ms step_avg:97.87ms +step:1553/1695 train_time:151963ms step_avg:97.85ms +step:1554/1695 train_time:152058ms step_avg:97.85ms +step:1555/1695 train_time:152155ms step_avg:97.85ms +step:1556/1695 train_time:152252ms step_avg:97.85ms +step:1557/1695 train_time:152349ms step_avg:97.85ms +step:1558/1695 train_time:152445ms step_avg:97.85ms +step:1559/1695 train_time:152541ms step_avg:97.85ms +step:1560/1695 train_time:152638ms step_avg:97.84ms +step:1561/1695 train_time:152735ms step_avg:97.84ms +step:1562/1695 train_time:152839ms step_avg:97.85ms +step:1563/1695 train_time:152939ms step_avg:97.85ms +step:1564/1695 train_time:153039ms step_avg:97.85ms +step:1565/1695 train_time:153136ms step_avg:97.85ms +step:1566/1695 train_time:153235ms step_avg:97.85ms +step:1567/1695 train_time:153331ms step_avg:97.85ms +step:1568/1695 train_time:153428ms step_avg:97.85ms +step:1569/1695 train_time:153525ms step_avg:97.85ms +step:1570/1695 train_time:153622ms step_avg:97.85ms +step:1571/1695 train_time:153720ms step_avg:97.85ms +step:1572/1695 train_time:153819ms step_avg:97.85ms +step:1573/1695 train_time:153917ms step_avg:97.85ms +step:1574/1695 train_time:154017ms step_avg:97.85ms +step:1575/1695 train_time:154115ms step_avg:97.85ms +step:1576/1695 train_time:154215ms step_avg:97.85ms +step:1577/1695 train_time:154312ms step_avg:97.85ms +step:1578/1695 train_time:154410ms step_avg:97.85ms +step:1579/1695 train_time:154507ms step_avg:97.85ms +step:1580/1695 train_time:154604ms step_avg:97.85ms +step:1581/1695 train_time:154701ms step_avg:97.85ms +step:1582/1695 train_time:154798ms step_avg:97.85ms +step:1583/1695 train_time:154897ms step_avg:97.85ms +step:1584/1695 train_time:154996ms step_avg:97.85ms +step:1585/1695 train_time:155096ms step_avg:97.85ms +step:1586/1695 train_time:155195ms step_avg:97.85ms +step:1587/1695 train_time:155293ms step_avg:97.85ms +step:1588/1695 train_time:155391ms 
step_avg:97.85ms +step:1589/1695 train_time:155489ms step_avg:97.85ms +step:1590/1695 train_time:155586ms step_avg:97.85ms +step:1591/1695 train_time:155682ms step_avg:97.85ms +step:1592/1695 train_time:155779ms step_avg:97.85ms +step:1593/1695 train_time:155876ms step_avg:97.85ms +step:1594/1695 train_time:155976ms step_avg:97.85ms +step:1595/1695 train_time:156075ms step_avg:97.85ms +step:1596/1695 train_time:156175ms step_avg:97.85ms +step:1597/1695 train_time:156275ms step_avg:97.86ms +step:1598/1695 train_time:156375ms step_avg:97.86ms +step:1599/1695 train_time:156473ms step_avg:97.86ms +step:1600/1695 train_time:156571ms step_avg:97.86ms +step:1601/1695 train_time:156669ms step_avg:97.86ms +step:1602/1695 train_time:156767ms step_avg:97.86ms +step:1603/1695 train_time:156864ms step_avg:97.86ms +step:1604/1695 train_time:156962ms step_avg:97.86ms +step:1605/1695 train_time:157060ms step_avg:97.86ms +step:1606/1695 train_time:157159ms step_avg:97.86ms +step:1607/1695 train_time:157257ms step_avg:97.86ms +step:1608/1695 train_time:157354ms step_avg:97.86ms +step:1609/1695 train_time:157453ms step_avg:97.86ms +step:1610/1695 train_time:157551ms step_avg:97.86ms +step:1611/1695 train_time:157649ms step_avg:97.86ms +step:1612/1695 train_time:157747ms step_avg:97.86ms +step:1613/1695 train_time:157844ms step_avg:97.86ms +step:1614/1695 train_time:157942ms step_avg:97.86ms +step:1615/1695 train_time:158039ms step_avg:97.86ms +step:1616/1695 train_time:158137ms step_avg:97.86ms +step:1617/1695 train_time:158235ms step_avg:97.86ms +step:1618/1695 train_time:158333ms step_avg:97.86ms +step:1619/1695 train_time:158432ms step_avg:97.86ms +step:1620/1695 train_time:158530ms step_avg:97.86ms +step:1621/1695 train_time:158628ms step_avg:97.86ms +step:1622/1695 train_time:158726ms step_avg:97.86ms +step:1623/1695 train_time:158824ms step_avg:97.86ms +step:1624/1695 train_time:158922ms step_avg:97.86ms +step:1625/1695 train_time:159019ms step_avg:97.86ms +step:1625/1695 val_loss:3.2905 train_time:159114ms step_avg:97.92ms +step:1626/1695 train_time:159139ms step_avg:97.87ms +step:1627/1695 train_time:159222ms step_avg:97.86ms +step:1628/1695 train_time:159320ms step_avg:97.86ms +step:1629/1695 train_time:159418ms step_avg:97.86ms +step:1630/1695 train_time:159515ms step_avg:97.86ms +step:1631/1695 train_time:159612ms step_avg:97.86ms +step:1632/1695 train_time:159709ms step_avg:97.86ms +step:1633/1695 train_time:159806ms step_avg:97.86ms +step:1634/1695 train_time:159902ms step_avg:97.86ms +step:1635/1695 train_time:159999ms step_avg:97.86ms +step:1636/1695 train_time:160099ms step_avg:97.86ms +step:1637/1695 train_time:160200ms step_avg:97.86ms +step:1638/1695 train_time:160300ms step_avg:97.86ms +step:1639/1695 train_time:160398ms step_avg:97.86ms +step:1640/1695 train_time:160495ms step_avg:97.86ms +step:1641/1695 train_time:160592ms step_avg:97.86ms +step:1642/1695 train_time:160689ms step_avg:97.86ms +step:1643/1695 train_time:160786ms step_avg:97.86ms +step:1644/1695 train_time:160882ms step_avg:97.86ms +step:1645/1695 train_time:160980ms step_avg:97.86ms +step:1646/1695 train_time:161078ms step_avg:97.86ms +step:1647/1695 train_time:161177ms step_avg:97.86ms +step:1648/1695 train_time:161277ms step_avg:97.86ms +step:1649/1695 train_time:161378ms step_avg:97.86ms +step:1650/1695 train_time:161476ms step_avg:97.86ms +step:1651/1695 train_time:161573ms step_avg:97.86ms +step:1652/1695 train_time:161670ms step_avg:97.86ms +step:1653/1695 train_time:161767ms step_avg:97.86ms +step:1654/1695 
train_time:161864ms step_avg:97.86ms +step:1655/1695 train_time:161961ms step_avg:97.86ms +step:1656/1695 train_time:162059ms step_avg:97.86ms +step:1657/1695 train_time:162157ms step_avg:97.86ms +step:1658/1695 train_time:162256ms step_avg:97.86ms +step:1659/1695 train_time:162356ms step_avg:97.86ms +step:1660/1695 train_time:162456ms step_avg:97.86ms +step:1661/1695 train_time:162555ms step_avg:97.87ms +step:1662/1695 train_time:162654ms step_avg:97.87ms +step:1663/1695 train_time:162751ms step_avg:97.87ms +step:1664/1695 train_time:162849ms step_avg:97.87ms +step:1665/1695 train_time:162946ms step_avg:97.87ms +step:1666/1695 train_time:163044ms step_avg:97.87ms +step:1667/1695 train_time:163142ms step_avg:97.87ms +step:1668/1695 train_time:163239ms step_avg:97.87ms +step:1669/1695 train_time:163337ms step_avg:97.87ms +step:1670/1695 train_time:163435ms step_avg:97.87ms +step:1671/1695 train_time:163534ms step_avg:97.87ms +step:1672/1695 train_time:163633ms step_avg:97.87ms +step:1673/1695 train_time:163731ms step_avg:97.87ms +step:1674/1695 train_time:163829ms step_avg:97.87ms +step:1675/1695 train_time:163927ms step_avg:97.87ms +step:1676/1695 train_time:164024ms step_avg:97.87ms +step:1677/1695 train_time:164122ms step_avg:97.87ms +step:1678/1695 train_time:164219ms step_avg:97.87ms +step:1679/1695 train_time:164317ms step_avg:97.87ms +step:1680/1695 train_time:164415ms step_avg:97.87ms +step:1681/1695 train_time:164513ms step_avg:97.87ms +step:1682/1695 train_time:164612ms step_avg:97.87ms +step:1683/1695 train_time:164710ms step_avg:97.87ms +step:1684/1695 train_time:164809ms step_avg:97.87ms +step:1685/1695 train_time:164906ms step_avg:97.87ms +step:1686/1695 train_time:165004ms step_avg:97.87ms +step:1687/1695 train_time:165101ms step_avg:97.87ms +step:1688/1695 train_time:165199ms step_avg:97.87ms +step:1689/1695 train_time:165296ms step_avg:97.87ms +step:1690/1695 train_time:165393ms step_avg:97.87ms +step:1691/1695 train_time:165491ms step_avg:97.87ms +step:1692/1695 train_time:165589ms step_avg:97.87ms +step:1693/1695 train_time:165686ms step_avg:97.87ms +step:1694/1695 train_time:165783ms step_avg:97.86ms +step:1695/1695 train_time:165881ms step_avg:97.86ms +step:1695/1695 val_loss:3.2790 train_time:165977ms step_avg:97.92ms +peak memory allocated: 34000 MiB reserved: 49756 MiB
diff --git a/records/082725_FA3/README.md b/records/082725_FA3/README.md
new file mode 100644
index 000000000..a4079630d
--- /dev/null
+++ b/records/082725_FA3/README.md
@@ -0,0 +1,147 @@
+# New record 08/27/25
+
+This submission includes recent WR changes by
+@ClassicLarry [(08/23/25)](https://github.com/ClassicLarry/modded-nanogpt/tree/master/records/082325_SparseAttnGate)
+and @byronxu99 [(07/18/25)](https://github.com/KellerJordan/modded-nanogpt/pull/109).
+
+The main idea of this record is to use input tensors with `batch_size > 1` throughout our training run.
+Increasing `batch_size` increases GPU utilization and allows us to use shorter input sequences for training.
+However, since Flex Attention is inefficient for `batch_size > 1`, we use [Flash Attention v3](https://github.com/Dao-AILab/flash-attention).
+The official version of this module is incompatible with `torch.compile` and causes graph breaks.
+A [recent PR](https://github.com/Dao-AILab/flash-attention/pull/1769) by
+[@guilhermeleobas](https://github.com/guilhermeleobas) addresses this issue.
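+
+To make the shape change concrete, here is a minimal sketch of a batched, sliding-window FA3 call.
+This is not the record's exact code: the batch, sequence, and head counts are illustrative, and only
+the head dimension of 128 and the `flash_attn_interface` import are taken from the attached training script.
+
+```
+import torch
+from flash_attn_interface import flash_attn_func  # FA3 "hopper" interface
+
+# Tokens are spread along the batch dimension instead of one long
+# batch_size == 1 sequence; shapes are (batch, seqlen, heads, head_dim).
+B, T, H, D = 24, 2048, 6, 128  # illustrative values
+q, k, v = (torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16) for _ in range(3))
+
+# Causal attention with a sliding window: each token attends to at most
+# `left` previous tokens. (Some FA3 builds also return the LSE alongside the output.)
+left = 1408  # window measured in tokens; the run schedules this over training
+out = flash_attn_func(q, k, v, causal=True, window_size=(left, 0))
+```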
+
+
+## Timing and Validation
+
+Validated over 7 runs:
+- In 1695 training steps, this run achieves a loss <3.28 (`p=0.0008`)
+- It runs in 166.10 seconds on average, and in <166.25 seconds (`p=0.0024`)
+
+```
+import scipy.stats
+import numpy as np
+
+accs = [
+    3.2769, 3.2782, 3.2790, 3.2791, 3.2791, 3.2780, 3.2782
+]
+
+times = [
+    166.247, 166.117, 165.977, 166.135, 166.045, 166.044, 166.157
+]
+
+print('p=%.4f' % scipy.stats.ttest_1samp(accs, 3.28, alternative='less').pvalue)
+# p=0.0008
+
+print('p=%.4f' % scipy.stats.ttest_1samp(times, 166.25, alternative='less').pvalue)
+# p=0.0024
+
+print(f"{np.mean(times):.4f}")
+# 166.1031
+```
+
+In my timing, this is a 2.1 second mean improvement over [PR#117](https://github.com/KellerJordan/modded-nanogpt/pull/117).
+The number of steps can also probably be brought down by 5-15 while achieving loss <3.28.
+
+I used an 8 x H100 SXM5 node via Prime Intellect for validation compute.
+
+## Further Details
+
+### Motivation
+
+PyTorch's Flex Attention experiences a >10% wallclock slowdown for inputs with `batch_size > 1`.
+As such, previous records would train on very long sequences (`48 * 1024`) with no batch dimension.
+Attention is approximately `O(|seq_len|^2 x |batch_size|)`, so this is theoretically bad,
+but it was mitigated by aggressive block masking:
+attention used a `block_mask` that grew to at most `1664` tokens (and was often shorter due to document masking).
+However, GPU utilization for attention is higher when tokens are distributed along the batch dimension.
+
+Additionally, increasing the batch size allows us to decrease the sequence length while maintaining the total
+number of tokens processed per step.
+WR#26 by @ClassicLarry found that validation loss decreases when we train only
+on sequences beginning with the Beginning of Sequence token (``).
+Decreasing the sequence length makes it more likely that `` is present in the attention window.
+In order to generate batches where each sequence begins with ``, I have created the helper class
+`EOSBatchFinder`. This class pre-indexes shards with the location of `` for slight speedups.
+
+### Flash Attention 3
+
+Most of the Hopper-specific benefits in Flash Attention 3 are already incorporated into
+PyTorch's Flex Attention. However, the latter implementation is fastest with `batch_size == 1`.
+Flash Attention 3 is as fast as Flex Attention for one-dimensional input sequences, and it gains
+speed as we distribute tokens along the batch dimension.
+On a single Hopper H100, I measured a 9% wallclock decrease for FA3 when using an optimal ratio of
+batch dimension to sequence length (`24 : 2048`) over a single batch dimension (`1 : 49152`).
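+
+That comparison can be sanity-checked with a micro-benchmark along these lines. This is a rough
+sketch, not the harness I used: the head count is an assumption, and absolute numbers will vary
+with GPU, clocks, and FA3 version.
+
+```
+import time
+import torch
+from flash_attn_interface import flash_attn_func
+
+def bench(batch, seqlen, iters=50):
+    h, d = 6, 128  # head count illustrative; head dim 128 matches this record
+    q, k, v = (torch.randn(batch, seqlen, h, d, device="cuda", dtype=torch.bfloat16)
+               for _ in range(3))
+    for _ in range(10):  # warmup so we time steady-state kernels
+        flash_attn_func(q, k, v, causal=True)
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    for _ in range(iters):
+        flash_attn_func(q, k, v, causal=True)
+    torch.cuda.synchronize()
+    return (time.perf_counter() - t0) / iters
+
+print(bench(1, 49152))  # one long sequence, no batch dimension
+print(bench(24, 2048))  # the same 49152 tokens spread across the batch
+```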
+ +You can build the wheel like so: + +``` +pip install -U pip wheel setuptools ninja numpy packaging psutil + +git clone https://github.com/guilhermeleobas/flash-attention.git +cd flash-attention/hopper +git switch guilhermeleobas/fa3-compile + +export MAX_JOBS=32 # Can increase based on machine +export FLASH_ATTENTION_FORCE_BUILD=TRUE # skip prebuilt wheel fetch +export FLASH_ATTENTION_DISABLE_SM80=TRUE # Hopper-only +export FLASH_ATTENTION_DISABLE_FP16=TRUE # leave BF16, FP8 +export FLASH_ATTENTION_DISABLE_HDIM64=TRUE # NanoGPT only uses HDIM = 128 +export FLASH_ATTENTION_DISABLE_HDIM96=TRUE +export FLASH_ATTENTION_DISABLE_HDIM192=TRUE +export FLASH_ATTENTION_DISABLE_HDIM256=TRUE + +python setup.py bdist_wheel +``` + +Additionally, I have uploaded a prebuilt wheel +[here](https://github.com/varunneal/flash-attention/releases/tag/v3.0.0b1-alpha), +though it will likely be faster to build it yourself than to download this wheel. + +For exact reproduction, I recommend that you install Torch Nightly 2.9.0.dev20250718 and +install the FA3 wheel afterward: + +``` +pip install --pre "torch==2.9.0.dev20250718+cu126" --index-url https://download.pytorch.org/whl/nightly/cu126 + +# typical path to FA3 Wheel +pip install flash-attention/hopper/dist/flash_attn_3-3.0.0b1-cp39-abi3-linux_x86_64.whl +``` + +For me, Torch Nightly 2.9.0.dev20250713 was incompatible with PR#109. + +### Attention Masks + +Unfortunately, Flash Attention does not support complex Block Masks like Flex Attention does. +Therefore, `create_blockmasks` was removed. Instead, we are only given the parameter `window_size`, +which specifies how many tokens to the left each query attends to. + +I kept the existing long-short sliding window block mask pattern, as well as the idea +that the window sizes should linearly increase over the length of the training run. +To aid with this, I modified `get_lr(step)` to instead be `get_lr_and_ws(step)`. +Additionally, I added a hyperparameter `ws_schedule` which specifies what the +longer window size should be during each portion of the run. I also added the +size of blocks in a window as a hyperparameter `bandwidth=128`. + +I have picked a linear schedule with three steps: `ws_schedule=(3, 7, 11)`. +Currently, `torch.compile` creates a new compilation graph for each step in `ws_schedule`. +Therefore, each graph needs to be warmed up separately. I have increased the number +of warmup steps from `10` to `60`. The compile time is dominated by the first iteration, +so warmup will take approximately `len(ws_schedule)` times longer than before. + +Removing document masking had a noticeably negative impact on validation loss; +however, the benefits of a short sequence length counteract this. + +### Potential Improvements + +- Batch size scheduling: Previously, the block mask acted as a proxy for batch size. +Now batch size can be controlled explicitly and scheduled according to critical batch +size theory. I have added code in `distributed_data_generator` that allows changing the +batch size and sequence length yielded after the generator is created (see the sketch after this list). +- The current block mask window schedule `(3, 7, 11)` can almost certainly be improved upon. +- Hyperparameter tuning might change with a smaller sequence length. Rotary base, validation sequence length, learning rates, +etc. should be re-tuned. I haven't done that for this run. +- FA3 has additional features over Flex Attention that may be useful.
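+ +A minimal sketch of the generator hand-off mentioned in the first bullet (the sizes here are hypothetical; per the assert in `distributed_data_generator`, the new batch size must stay divisible by the world size): + +``` +train_loader = distributed_data_generator(args.train_files, batch_size=192, seq_len=2048) +inputs, targets = next(train_loader) # plain iteration: (192 // world_size, 2048) per GPU +inputs, targets = train_loader.send((96, 4096)) # resize mid-run; the batch returned here is already built with the new shape +```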
\ No newline at end of file diff --git a/records/082725_FA3/ba9be2f3-1e6f-4a1a-827e-a47a702c67b0.txt b/records/082725_FA3/ba9be2f3-1e6f-4a1a-827e-a47a702c67b0.txt new file mode 100644 index 000000000..7a5ed0b1c --- /dev/null +++ b/records/082725_FA3/ba9be2f3-1e6f-4a1a-827e-a47a702c67b0.txt @@ -0,0 +1,2808 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# ----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, 
None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & 
(offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = 
C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). 
+ """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, 
op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = 
num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, bm_size) + x = x + self.mlp(norm(x)) + return x + +# 
----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int): + assert input_seq.ndim == 2 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 
012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), + reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +class EOSBatchFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256): + # Precompute EOS positions once per shard + self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 # pointer into eos_idx (start EOS for next step) + self.pos = 0 # logical stream position within this shard + self.world_size = world_size + def seek(self, pos: int): + # Set pointer to the first EOS >= pos + self.i = np.searchsorted(self.eos_idx, pos) + if self.i >= len(self.eos_idx): + raise StopIteration("Seek past last EOS.") + self.pos = pos + def next_batch(self, batch_size_local: int, seq_len: int): + n = len(self.eos_idx) + if self.i >= n: + raise StopIteration("No more EOS in this shard.") + starts = [[] for _ in range(self.world_size)] + idx = self.i + cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1 + for r in range(self.world_size): + for _ in range(batch_size_local): + start = cur + 1 + target = start + seq_len # need seq_len tokens before next EOS + j = np.searchsorted(self.eos_idx, target) + if j >= n: + raise StopIteration("Insufficient EOS ahead; hit tail of 
shard.") + starts[r].append(start) + idx = j + cur = self.eos_idx[idx] # next seq must also start at a new doc + advance = self.eos_idx[idx] - self.pos # move stream to the last end + self.pos += advance + self.i = idx + return starts, advance + + +def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token and sequences don't overlap + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert batch_size % world_size == 0, "Batch size must be divisible by world size" + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + + finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None + if align_to_bos: finder.seek(pos) + + while True: + batch_size_local = batch_size // world_size + num_tokens_global = batch_size * seq_len + + if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + + if align_to_bos: + try: + batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len) + start_idxs = batch_starts[rank] + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. + tokens, pos = _load_data_shard(next(file_iter)), 0 + finder = EOSBatchFinder(tokens, world_size=world_size) + continue + + bufs = [tokens[s: s + seq_len + 1] for s in start_idxs] + buf = torch.stack(bufs, dim=0) + _inputs = buf[:, :-1] + _targets = buf[:, 1:] + else: + batch_span = num_tokens_global + start_pos_local = pos + rank * (batch_size_local * seq_len) + end_pos_local = start_pos_local + (batch_size_local * seq_len) + + buf = tokens[start_pos_local: end_pos_local + 1] + + _inputs = buf[:-1].view(batch_size_local, seq_len) + _targets = buf[1:].view(batch_size_local, seq_len) + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True) + ) + + pos += batch_span + + if new_params is not None: + # makes it possible for generator to recieve new (batch_size, seq_len) via .send() + new_batch_size, new_seq_len = new_params + assert new_batch_size % world_size == 0, "New batch size must be divisible by world size" + batch_size = new_batch_size + seq_len = new_seq_len + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len: int = 1024 * 2 + train_batch_size: int = 24 * 8 + val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size. + # optimization + num_iterations: int = 1695 # number of iterations to run + cooldown_frac: int = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 
0 for only at the end + save_checkpoint: bool = False + # attention masking + bandwidth: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc. + +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr_and_ws(step: int): + x = step / (1 + args.num_iterations) # progress in training + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + ws_idx = int(len(args.ws_schedule) * x) + return lr, args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 60 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +for step in range(warmup_steps): + inputs, targets = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, ws, ws // 2).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## + +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + lr, ws = get_lr_and_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % (world_size * args.val_seq_len) == 0 + val_steps = args.val_tokens // (world_size * args.val_seq_len) + val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, ws, ws // 2) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + 
os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, ws, ws // 2).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * lr + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Wed Aug 27 03:58:09 2025 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.6 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:00:0B.0 Off | Off | +| N/A 30C P0 115W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:00:0C.0 Off | Off | +| N/A 33C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:00:0D.0 Off | Off | +| N/A 34C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:00:0E.0 Off | Off | +| N/A 31C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:00:0F.0 Off | Off | +| N/A 30C P0 110W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:00:10.0 Off | Off | +| N/A 35C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:00:11.0 Off | Off | +| N/A 33C P0 112W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:00:12.0 Off | Off | +| N/A 32C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| ++---------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.02ms +step:1/1695 train_time:516ms step_avg:515.52ms +step:2/1695 train_time:539ms step_avg:269.65ms +step:3/1695 train_time:612ms step_avg:203.90ms +step:4/1695 train_time:704ms step_avg:175.97ms +step:5/1695 train_time:797ms step_avg:159.42ms +step:6/1695 train_time:891ms step_avg:148.48ms +step:7/1695 train_time:984ms step_avg:140.60ms +step:8/1695 train_time:1078ms step_avg:134.78ms +step:9/1695 train_time:1172ms step_avg:130.23ms +step:10/1695 train_time:1265ms step_avg:126.49ms +step:11/1695 train_time:1359ms step_avg:123.52ms +step:12/1695 train_time:1457ms step_avg:121.44ms +step:13/1695 train_time:1555ms step_avg:119.64ms +step:14/1695 train_time:1650ms step_avg:117.89ms +step:15/1695 train_time:1745ms step_avg:116.30ms +step:16/1695 train_time:1839ms step_avg:114.95ms +step:17/1695 train_time:1933ms step_avg:113.72ms +step:18/1695 train_time:2027ms step_avg:112.62ms +step:19/1695 train_time:2122ms step_avg:111.67ms +step:20/1695 train_time:2216ms step_avg:110.82ms +step:21/1695 train_time:2311ms step_avg:110.03ms +step:22/1695 train_time:2405ms step_avg:109.33ms 
+step:23/1695 train_time:2501ms step_avg:108.73ms +step:24/1695 train_time:2597ms step_avg:108.21ms +step:25/1695 train_time:2693ms step_avg:107.73ms +step:26/1695 train_time:2788ms step_avg:107.23ms +step:27/1695 train_time:2882ms step_avg:106.74ms +step:28/1695 train_time:2977ms step_avg:106.33ms +step:29/1695 train_time:3071ms step_avg:105.90ms +step:30/1695 train_time:3165ms step_avg:105.50ms +step:31/1695 train_time:3259ms step_avg:105.14ms +step:32/1695 train_time:3355ms step_avg:104.84ms +step:33/1695 train_time:3449ms step_avg:104.53ms +step:34/1695 train_time:3545ms step_avg:104.26ms +step:35/1695 train_time:3640ms step_avg:104.01ms +step:36/1695 train_time:3736ms step_avg:103.78ms +step:37/1695 train_time:3831ms step_avg:103.53ms +step:38/1695 train_time:3925ms step_avg:103.28ms +step:39/1695 train_time:4019ms step_avg:103.06ms +step:40/1695 train_time:4113ms step_avg:102.84ms +step:41/1695 train_time:4207ms step_avg:102.60ms +step:42/1695 train_time:4301ms step_avg:102.40ms +step:43/1695 train_time:4396ms step_avg:102.23ms +step:44/1695 train_time:4491ms step_avg:102.07ms +step:45/1695 train_time:4587ms step_avg:101.92ms +step:46/1695 train_time:4681ms step_avg:101.76ms +step:47/1695 train_time:4777ms step_avg:101.63ms +step:48/1695 train_time:4873ms step_avg:101.53ms +step:49/1695 train_time:4966ms step_avg:101.35ms +step:50/1695 train_time:5060ms step_avg:101.21ms +step:51/1695 train_time:5155ms step_avg:101.07ms +step:52/1695 train_time:5249ms step_avg:100.94ms +step:53/1695 train_time:5343ms step_avg:100.82ms +step:54/1695 train_time:5439ms step_avg:100.72ms +step:55/1695 train_time:5534ms step_avg:100.62ms +step:56/1695 train_time:5629ms step_avg:100.51ms +step:57/1695 train_time:5723ms step_avg:100.41ms +step:58/1695 train_time:5818ms step_avg:100.32ms +step:59/1695 train_time:5913ms step_avg:100.23ms +step:60/1695 train_time:6007ms step_avg:100.11ms +step:61/1695 train_time:6100ms step_avg:100.00ms +step:62/1695 train_time:6196ms step_avg:99.93ms +step:63/1695 train_time:6290ms step_avg:99.84ms +step:64/1695 train_time:6384ms step_avg:99.75ms +step:65/1695 train_time:6479ms step_avg:99.68ms +step:66/1695 train_time:6573ms step_avg:99.59ms +step:67/1695 train_time:6667ms step_avg:99.51ms +step:68/1695 train_time:6762ms step_avg:99.44ms +step:69/1695 train_time:6856ms step_avg:99.37ms +step:70/1695 train_time:6950ms step_avg:99.29ms +step:71/1695 train_time:7044ms step_avg:99.21ms +step:72/1695 train_time:7139ms step_avg:99.16ms +step:73/1695 train_time:7234ms step_avg:99.10ms +step:74/1695 train_time:7329ms step_avg:99.04ms +step:75/1695 train_time:7423ms step_avg:98.98ms +step:76/1695 train_time:7519ms step_avg:98.94ms +step:77/1695 train_time:7614ms step_avg:98.88ms +step:78/1695 train_time:7709ms step_avg:98.83ms +step:79/1695 train_time:7803ms step_avg:98.77ms +step:80/1695 train_time:7897ms step_avg:98.71ms +step:81/1695 train_time:7991ms step_avg:98.66ms +step:82/1695 train_time:8085ms step_avg:98.60ms +step:83/1695 train_time:8179ms step_avg:98.55ms +step:84/1695 train_time:8274ms step_avg:98.50ms +step:85/1695 train_time:8368ms step_avg:98.45ms +step:86/1695 train_time:8462ms step_avg:98.39ms +step:87/1695 train_time:8558ms step_avg:98.36ms +step:88/1695 train_time:8653ms step_avg:98.33ms +step:89/1695 train_time:8747ms step_avg:98.28ms +step:90/1695 train_time:8841ms step_avg:98.24ms +step:91/1695 train_time:8936ms step_avg:98.20ms +step:92/1695 train_time:9031ms step_avg:98.16ms +step:93/1695 train_time:9125ms step_avg:98.12ms +step:94/1695 train_time:9219ms 
step_avg:98.08ms +step:95/1695 train_time:9313ms step_avg:98.03ms +step:96/1695 train_time:9406ms step_avg:97.98ms +step:97/1695 train_time:9500ms step_avg:97.94ms +step:98/1695 train_time:9596ms step_avg:97.91ms +step:99/1695 train_time:9690ms step_avg:97.88ms +step:100/1695 train_time:9784ms step_avg:97.84ms +step:101/1695 train_time:9878ms step_avg:97.80ms +step:102/1695 train_time:9973ms step_avg:97.78ms +step:103/1695 train_time:10067ms step_avg:97.74ms +step:104/1695 train_time:10162ms step_avg:97.71ms +step:105/1695 train_time:10256ms step_avg:97.67ms +step:106/1695 train_time:10350ms step_avg:97.64ms +step:107/1695 train_time:10444ms step_avg:97.60ms +step:108/1695 train_time:10540ms step_avg:97.59ms +step:109/1695 train_time:10635ms step_avg:97.57ms +step:110/1695 train_time:10731ms step_avg:97.55ms +step:111/1695 train_time:10825ms step_avg:97.52ms +step:112/1695 train_time:10920ms step_avg:97.50ms +step:113/1695 train_time:11014ms step_avg:97.47ms +step:114/1695 train_time:11108ms step_avg:97.44ms +step:115/1695 train_time:11201ms step_avg:97.40ms +step:116/1695 train_time:11297ms step_avg:97.38ms +step:117/1695 train_time:11391ms step_avg:97.36ms +step:118/1695 train_time:11485ms step_avg:97.33ms +step:119/1695 train_time:11580ms step_avg:97.31ms +step:120/1695 train_time:11675ms step_avg:97.29ms +step:121/1695 train_time:11769ms step_avg:97.27ms +step:122/1695 train_time:11863ms step_avg:97.24ms +step:123/1695 train_time:11959ms step_avg:97.22ms +step:124/1695 train_time:12054ms step_avg:97.21ms +step:125/1695 train_time:12148ms step_avg:97.18ms +step:125/1695 val_loss:4.3195 train_time:12239ms step_avg:97.92ms +step:126/1695 train_time:12266ms step_avg:97.35ms +step:127/1695 train_time:12343ms step_avg:97.19ms +step:128/1695 train_time:12442ms step_avg:97.21ms +step:129/1695 train_time:12537ms step_avg:97.19ms +step:130/1695 train_time:12631ms step_avg:97.16ms +step:131/1695 train_time:12725ms step_avg:97.14ms +step:132/1695 train_time:12818ms step_avg:97.11ms +step:133/1695 train_time:12911ms step_avg:97.07ms +step:134/1695 train_time:13005ms step_avg:97.05ms +step:135/1695 train_time:13098ms step_avg:97.02ms +step:136/1695 train_time:13191ms step_avg:97.00ms +step:137/1695 train_time:13287ms step_avg:96.99ms +step:138/1695 train_time:13385ms step_avg:97.00ms +step:139/1695 train_time:13481ms step_avg:96.98ms +step:140/1695 train_time:13575ms step_avg:96.97ms +step:141/1695 train_time:13669ms step_avg:96.94ms +step:142/1695 train_time:13763ms step_avg:96.92ms +step:143/1695 train_time:13856ms step_avg:96.90ms +step:144/1695 train_time:13949ms step_avg:96.87ms +step:145/1695 train_time:14043ms step_avg:96.85ms +step:146/1695 train_time:14136ms step_avg:96.82ms +step:147/1695 train_time:14230ms step_avg:96.80ms +step:148/1695 train_time:14326ms step_avg:96.79ms +step:149/1695 train_time:14422ms step_avg:96.79ms +step:150/1695 train_time:14517ms step_avg:96.78ms +step:151/1695 train_time:14612ms step_avg:96.77ms +step:152/1695 train_time:14707ms step_avg:96.75ms +step:153/1695 train_time:14801ms step_avg:96.74ms +step:154/1695 train_time:14895ms step_avg:96.72ms +step:155/1695 train_time:14989ms step_avg:96.70ms +step:156/1695 train_time:15083ms step_avg:96.69ms +step:157/1695 train_time:15176ms step_avg:96.66ms +step:158/1695 train_time:15270ms step_avg:96.65ms +step:159/1695 train_time:15365ms step_avg:96.63ms +step:160/1695 train_time:15461ms step_avg:96.63ms +step:161/1695 train_time:15556ms step_avg:96.62ms +step:162/1695 train_time:15650ms step_avg:96.60ms +step:163/1695 
train_time:15744ms step_avg:96.59ms
+step:164/1695 train_time:15839ms step_avg:96.58ms
+step:165/1695 train_time:15933ms step_avg:96.56ms
+step:166/1695 train_time:16027ms step_avg:96.55ms
+step:167/1695 train_time:16121ms step_avg:96.53ms
+step:168/1695 train_time:16215ms step_avg:96.52ms
+step:169/1695 train_time:16309ms step_avg:96.50ms
+step:170/1695 train_time:16404ms step_avg:96.50ms
+step:171/1695 train_time:16499ms step_avg:96.49ms
+step:172/1695 train_time:16594ms step_avg:96.47ms
+step:173/1695 train_time:16931ms step_avg:97.87ms
+step:174/1695 train_time:17020ms step_avg:97.82ms
+step:175/1695 train_time:17114ms step_avg:97.79ms
+step:176/1695 train_time:17207ms step_avg:97.77ms
+step:177/1695 train_time:17301ms step_avg:97.74ms
+step:178/1695 train_time:17394ms step_avg:97.72ms
+step:179/1695 train_time:17487ms step_avg:97.69ms
+step:180/1695 train_time:17581ms step_avg:97.67ms
+step:181/1695 train_time:17673ms step_avg:97.64ms
+step:182/1695 train_time:17767ms step_avg:97.62ms
+step:183/1695 train_time:17864ms step_avg:97.62ms
+step:184/1695 train_time:17961ms step_avg:97.61ms
+step:185/1695 train_time:18055ms step_avg:97.60ms
+step:186/1695 train_time:18149ms step_avg:97.58ms
+step:187/1695 train_time:18244ms step_avg:97.56ms
+step:188/1695 train_time:18339ms step_avg:97.55ms
+step:189/1695 train_time:18432ms step_avg:97.52ms
+step:190/1695 train_time:18526ms step_avg:97.50ms
+step:191/1695 train_time:18620ms step_avg:97.49ms
+step:192/1695 train_time:18713ms step_avg:97.46ms
+step:193/1695 train_time:18808ms step_avg:97.45ms
+step:194/1695 train_time:18904ms step_avg:97.44ms
+step:195/1695 train_time:19000ms step_avg:97.43ms
+step:196/1695 train_time:19093ms step_avg:97.42ms
+step:197/1695 train_time:19188ms step_avg:97.40ms
+step:198/1695 train_time:19283ms step_avg:97.39ms
+step:199/1695 train_time:19377ms step_avg:97.37ms
+step:200/1695 train_time:19470ms step_avg:97.35ms
+step:201/1695 train_time:19564ms step_avg:97.33ms
+step:202/1695 train_time:19657ms step_avg:97.31ms
+step:203/1695 train_time:19751ms step_avg:97.30ms
+step:204/1695 train_time:19847ms step_avg:97.29ms
+step:205/1695 train_time:19942ms step_avg:97.28ms
+step:206/1695 train_time:20036ms step_avg:97.26ms
+step:207/1695 train_time:20129ms step_avg:97.24ms
+step:208/1695 train_time:20224ms step_avg:97.23ms
+step:209/1695 train_time:20318ms step_avg:97.21ms
+step:210/1695 train_time:20411ms step_avg:97.20ms
+step:211/1695 train_time:20506ms step_avg:97.18ms
+step:212/1695 train_time:20601ms step_avg:97.17ms
+step:213/1695 train_time:20693ms step_avg:97.15ms
+step:214/1695 train_time:20787ms step_avg:97.14ms
+step:215/1695 train_time:20883ms step_avg:97.13ms
+step:216/1695 train_time:20977ms step_avg:97.12ms
+step:217/1695 train_time:21071ms step_avg:97.10ms
+step:218/1695 train_time:21166ms step_avg:97.09ms
+step:219/1695 train_time:21261ms step_avg:97.08ms
+step:220/1695 train_time:21355ms step_avg:97.07ms
+step:221/1695 train_time:21449ms step_avg:97.05ms
+step:222/1695 train_time:21544ms step_avg:97.04ms
+step:223/1695 train_time:21637ms step_avg:97.03ms
+step:224/1695 train_time:21730ms step_avg:97.01ms
+step:225/1695 train_time:21825ms step_avg:97.00ms
+step:226/1695 train_time:21919ms step_avg:96.99ms
+step:227/1695 train_time:22013ms step_avg:96.97ms
+step:228/1695 train_time:22108ms step_avg:96.97ms
+step:229/1695 train_time:22203ms step_avg:96.96ms
+step:230/1695 train_time:22298ms step_avg:96.95ms
+step:231/1695 train_time:22391ms step_avg:96.93ms
+step:232/1695 train_time:22486ms step_avg:96.92ms
+step:233/1695 train_time:22580ms step_avg:96.91ms
+step:234/1695 train_time:22673ms step_avg:96.89ms
+step:235/1695 train_time:22767ms step_avg:96.88ms
+step:236/1695 train_time:22862ms step_avg:96.87ms
+step:237/1695 train_time:22957ms step_avg:96.86ms
+step:238/1695 train_time:23051ms step_avg:96.85ms
+step:239/1695 train_time:23146ms step_avg:96.85ms
+step:240/1695 train_time:23242ms step_avg:96.84ms
+step:241/1695 train_time:23336ms step_avg:96.83ms
+step:242/1695 train_time:23430ms step_avg:96.82ms
+step:243/1695 train_time:23526ms step_avg:96.81ms
+step:244/1695 train_time:23621ms step_avg:96.81ms
+step:245/1695 train_time:23715ms step_avg:96.79ms
+step:246/1695 train_time:23809ms step_avg:96.78ms
+step:247/1695 train_time:23902ms step_avg:96.77ms
+step:248/1695 train_time:23996ms step_avg:96.76ms
+step:249/1695 train_time:24089ms step_avg:96.74ms
+step:250/1695 train_time:24184ms step_avg:96.73ms
+step:250/1695 val_loss:3.9759 train_time:24277ms step_avg:97.11ms
+step:251/1695 train_time:24301ms step_avg:96.82ms
+step:252/1695 train_time:24381ms step_avg:96.75ms
+step:253/1695 train_time:24478ms step_avg:96.75ms
+step:254/1695 train_time:24574ms step_avg:96.75ms
+step:255/1695 train_time:24668ms step_avg:96.74ms
+step:256/1695 train_time:24760ms step_avg:96.72ms
+step:257/1695 train_time:24854ms step_avg:96.71ms
+step:258/1695 train_time:24948ms step_avg:96.70ms
+step:259/1695 train_time:25041ms step_avg:96.68ms
+step:260/1695 train_time:25134ms step_avg:96.67ms
+step:261/1695 train_time:25228ms step_avg:96.66ms
+step:262/1695 train_time:25322ms step_avg:96.65ms
+step:263/1695 train_time:25418ms step_avg:96.65ms
+step:264/1695 train_time:25514ms step_avg:96.64ms
+step:265/1695 train_time:25609ms step_avg:96.64ms
+step:266/1695 train_time:25703ms step_avg:96.63ms
+step:267/1695 train_time:25796ms step_avg:96.62ms
+step:268/1695 train_time:25890ms step_avg:96.60ms
+step:269/1695 train_time:25983ms step_avg:96.59ms
+step:270/1695 train_time:26076ms step_avg:96.58ms
+step:271/1695 train_time:26169ms step_avg:96.57ms
+step:272/1695 train_time:26264ms step_avg:96.56ms
+step:273/1695 train_time:26357ms step_avg:96.55ms
+step:274/1695 train_time:26453ms step_avg:96.54ms
+step:275/1695 train_time:26549ms step_avg:96.54ms
+step:276/1695 train_time:26643ms step_avg:96.53ms
+step:277/1695 train_time:26737ms step_avg:96.52ms
+step:278/1695 train_time:26831ms step_avg:96.51ms
+step:279/1695 train_time:26924ms step_avg:96.50ms
+step:280/1695 train_time:27017ms step_avg:96.49ms
+step:281/1695 train_time:27111ms step_avg:96.48ms
+step:282/1695 train_time:27205ms step_avg:96.47ms
+step:283/1695 train_time:27298ms step_avg:96.46ms
+step:284/1695 train_time:27393ms step_avg:96.45ms
+step:285/1695 train_time:27488ms step_avg:96.45ms
+step:286/1695 train_time:27582ms step_avg:96.44ms
+step:287/1695 train_time:27677ms step_avg:96.43ms
+step:288/1695 train_time:27772ms step_avg:96.43ms
+step:289/1695 train_time:27868ms step_avg:96.43ms
+step:290/1695 train_time:27961ms step_avg:96.42ms
+step:291/1695 train_time:28055ms step_avg:96.41ms
+step:292/1695 train_time:28149ms step_avg:96.40ms
+step:293/1695 train_time:28242ms step_avg:96.39ms
+step:294/1695 train_time:28336ms step_avg:96.38ms
+step:295/1695 train_time:28431ms step_avg:96.38ms
+step:296/1695 train_time:28526ms step_avg:96.37ms
+step:297/1695 train_time:28619ms step_avg:96.36ms
+step:298/1695 train_time:28713ms step_avg:96.35ms
+step:299/1695 train_time:28809ms step_avg:96.35ms
+step:300/1695 train_time:28903ms step_avg:96.34ms
+step:301/1695 train_time:28997ms step_avg:96.33ms
+step:302/1695 train_time:29091ms step_avg:96.33ms
+step:303/1695 train_time:29186ms step_avg:96.32ms
+step:304/1695 train_time:29279ms step_avg:96.31ms
+step:305/1695 train_time:29374ms step_avg:96.31ms
+step:306/1695 train_time:29469ms step_avg:96.31ms
+step:307/1695 train_time:29565ms step_avg:96.30ms
+step:308/1695 train_time:29659ms step_avg:96.30ms
+step:309/1695 train_time:29753ms step_avg:96.29ms
+step:310/1695 train_time:29848ms step_avg:96.29ms
+step:311/1695 train_time:29942ms step_avg:96.28ms
+step:312/1695 train_time:30036ms step_avg:96.27ms
+step:313/1695 train_time:30131ms step_avg:96.26ms
+step:314/1695 train_time:30224ms step_avg:96.25ms
+step:315/1695 train_time:30317ms step_avg:96.25ms
+step:316/1695 train_time:30412ms step_avg:96.24ms
+step:317/1695 train_time:30508ms step_avg:96.24ms
+step:318/1695 train_time:30602ms step_avg:96.23ms
+step:319/1695 train_time:30696ms step_avg:96.23ms
+step:320/1695 train_time:30790ms step_avg:96.22ms
+step:321/1695 train_time:30884ms step_avg:96.21ms
+step:322/1695 train_time:30978ms step_avg:96.21ms
+step:323/1695 train_time:31073ms step_avg:96.20ms
+step:324/1695 train_time:31168ms step_avg:96.20ms
+step:325/1695 train_time:31261ms step_avg:96.19ms
+step:326/1695 train_time:31354ms step_avg:96.18ms
+step:327/1695 train_time:31448ms step_avg:96.17ms
+step:328/1695 train_time:31543ms step_avg:96.17ms
+step:329/1695 train_time:31637ms step_avg:96.16ms
+step:330/1695 train_time:31733ms step_avg:96.16ms
+step:331/1695 train_time:31828ms step_avg:96.16ms
+step:332/1695 train_time:31921ms step_avg:96.15ms
+step:333/1695 train_time:32015ms step_avg:96.14ms
+step:334/1695 train_time:32109ms step_avg:96.14ms
+step:335/1695 train_time:32203ms step_avg:96.13ms
+step:336/1695 train_time:32296ms step_avg:96.12ms
+step:337/1695 train_time:32390ms step_avg:96.11ms
+step:338/1695 train_time:32483ms step_avg:96.10ms
+step:339/1695 train_time:32577ms step_avg:96.10ms
+step:340/1695 train_time:32672ms step_avg:96.09ms
+step:341/1695 train_time:32767ms step_avg:96.09ms
+step:342/1695 train_time:32861ms step_avg:96.08ms
+step:343/1695 train_time:32956ms step_avg:96.08ms
+step:344/1695 train_time:33050ms step_avg:96.08ms
+step:345/1695 train_time:33388ms step_avg:96.78ms
+step:346/1695 train_time:33462ms step_avg:96.71ms
+step:347/1695 train_time:33554ms step_avg:96.70ms
+step:348/1695 train_time:33647ms step_avg:96.69ms
+step:349/1695 train_time:33740ms step_avg:96.68ms
+step:350/1695 train_time:33833ms step_avg:96.67ms
+step:351/1695 train_time:33927ms step_avg:96.66ms
+step:352/1695 train_time:34019ms step_avg:96.65ms
+step:353/1695 train_time:34112ms step_avg:96.64ms
+step:354/1695 train_time:34205ms step_avg:96.62ms
+step:355/1695 train_time:34300ms step_avg:96.62ms
+step:356/1695 train_time:34397ms step_avg:96.62ms
+step:357/1695 train_time:34494ms step_avg:96.62ms
+step:358/1695 train_time:34589ms step_avg:96.62ms
+step:359/1695 train_time:34682ms step_avg:96.61ms
+step:360/1695 train_time:34775ms step_avg:96.60ms
+step:361/1695 train_time:34868ms step_avg:96.59ms
+step:362/1695 train_time:34961ms step_avg:96.58ms
+step:363/1695 train_time:35055ms step_avg:96.57ms
+step:364/1695 train_time:35148ms step_avg:96.56ms
+step:365/1695 train_time:35243ms step_avg:96.56ms
+step:366/1695 train_time:35338ms step_avg:96.55ms
+step:367/1695 train_time:35434ms step_avg:96.55ms
+step:368/1695 train_time:35530ms step_avg:96.55ms
+step:369/1695 train_time:35624ms step_avg:96.54ms
+step:370/1695 train_time:35717ms step_avg:96.53ms
+step:371/1695 train_time:35811ms step_avg:96.53ms
+step:372/1695 train_time:35905ms step_avg:96.52ms
+step:373/1695 train_time:35998ms step_avg:96.51ms
+step:374/1695 train_time:36092ms step_avg:96.50ms
+step:375/1695 train_time:36187ms step_avg:96.50ms
+step:375/1695 val_loss:3.8237 train_time:36278ms step_avg:96.74ms
+step:376/1695 train_time:36303ms step_avg:96.55ms
+step:377/1695 train_time:36381ms step_avg:96.50ms
+step:378/1695 train_time:36478ms step_avg:96.50ms
+step:379/1695 train_time:36573ms step_avg:96.50ms
+step:380/1695 train_time:36667ms step_avg:96.49ms
+step:381/1695 train_time:36761ms step_avg:96.48ms
+step:382/1695 train_time:36854ms step_avg:96.48ms
+step:383/1695 train_time:36948ms step_avg:96.47ms
+step:384/1695 train_time:37040ms step_avg:96.46ms
+step:385/1695 train_time:37133ms step_avg:96.45ms
+step:386/1695 train_time:37227ms step_avg:96.44ms
+step:387/1695 train_time:37323ms step_avg:96.44ms
+step:388/1695 train_time:37419ms step_avg:96.44ms
+step:389/1695 train_time:37514ms step_avg:96.44ms
+step:390/1695 train_time:37609ms step_avg:96.43ms
+step:391/1695 train_time:37703ms step_avg:96.43ms
+step:392/1695 train_time:37795ms step_avg:96.42ms
+step:393/1695 train_time:37890ms step_avg:96.41ms
+step:394/1695 train_time:37982ms step_avg:96.40ms
+step:395/1695 train_time:38075ms step_avg:96.39ms
+step:396/1695 train_time:38169ms step_avg:96.39ms
+step:397/1695 train_time:38263ms step_avg:96.38ms
+step:398/1695 train_time:38358ms step_avg:96.38ms
+step:399/1695 train_time:38453ms step_avg:96.37ms
+step:400/1695 train_time:38548ms step_avg:96.37ms
+step:401/1695 train_time:38642ms step_avg:96.36ms
+step:402/1695 train_time:38736ms step_avg:96.36ms
+step:403/1695 train_time:38830ms step_avg:96.35ms
+step:404/1695 train_time:38924ms step_avg:96.35ms
+step:405/1695 train_time:39017ms step_avg:96.34ms
+step:406/1695 train_time:39111ms step_avg:96.33ms
+step:407/1695 train_time:39205ms step_avg:96.33ms
+step:408/1695 train_time:39298ms step_avg:96.32ms
+step:409/1695 train_time:39394ms step_avg:96.32ms
+step:410/1695 train_time:39489ms step_avg:96.31ms
+step:411/1695 train_time:39583ms step_avg:96.31ms
+step:412/1695 train_time:39676ms step_avg:96.30ms
+step:413/1695 train_time:39771ms step_avg:96.30ms
+step:414/1695 train_time:39865ms step_avg:96.29ms
+step:415/1695 train_time:39959ms step_avg:96.29ms
+step:416/1695 train_time:40053ms step_avg:96.28ms
+step:417/1695 train_time:40148ms step_avg:96.28ms
+step:418/1695 train_time:40241ms step_avg:96.27ms
+step:419/1695 train_time:40335ms step_avg:96.27ms
+step:420/1695 train_time:40430ms step_avg:96.26ms
+step:421/1695 train_time:40525ms step_avg:96.26ms
+step:422/1695 train_time:40618ms step_avg:96.25ms
+step:423/1695 train_time:40712ms step_avg:96.25ms
+step:424/1695 train_time:40806ms step_avg:96.24ms
+step:425/1695 train_time:40900ms step_avg:96.23ms
+step:426/1695 train_time:40994ms step_avg:96.23ms
+step:427/1695 train_time:41088ms step_avg:96.23ms
+step:428/1695 train_time:41181ms step_avg:96.22ms
+step:429/1695 train_time:41275ms step_avg:96.21ms
+step:430/1695 train_time:41370ms step_avg:96.21ms
+step:431/1695 train_time:41465ms step_avg:96.21ms
+step:432/1695 train_time:41559ms step_avg:96.20ms
+step:433/1695 train_time:41654ms step_avg:96.20ms
+step:434/1695 train_time:41748ms step_avg:96.19ms
+step:435/1695 train_time:41841ms step_avg:96.19ms
+step:436/1695 train_time:41934ms step_avg:96.18ms
+step:437/1695 train_time:42028ms step_avg:96.17ms
+step:438/1695 train_time:42121ms step_avg:96.17ms
+step:439/1695 train_time:42215ms step_avg:96.16ms
+step:440/1695 train_time:42309ms step_avg:96.16ms
+step:441/1695 train_time:42403ms step_avg:96.15ms
+step:442/1695 train_time:42497ms step_avg:96.15ms
+step:443/1695 train_time:42592ms step_avg:96.15ms
+step:444/1695 train_time:42687ms step_avg:96.14ms
+step:445/1695 train_time:42781ms step_avg:96.14ms
+step:446/1695 train_time:42874ms step_avg:96.13ms
+step:447/1695 train_time:42968ms step_avg:96.12ms
+step:448/1695 train_time:43061ms step_avg:96.12ms
+step:449/1695 train_time:43155ms step_avg:96.11ms
+step:450/1695 train_time:43249ms step_avg:96.11ms
+step:451/1695 train_time:43343ms step_avg:96.10ms
+step:452/1695 train_time:43437ms step_avg:96.10ms
+step:453/1695 train_time:43531ms step_avg:96.09ms
+step:454/1695 train_time:43625ms step_avg:96.09ms
+step:455/1695 train_time:43719ms step_avg:96.08ms
+step:456/1695 train_time:43813ms step_avg:96.08ms
+step:457/1695 train_time:43907ms step_avg:96.08ms
+step:458/1695 train_time:44000ms step_avg:96.07ms
+step:459/1695 train_time:44095ms step_avg:96.07ms
+step:460/1695 train_time:44189ms step_avg:96.06ms
+step:461/1695 train_time:44283ms step_avg:96.06ms
+step:462/1695 train_time:44377ms step_avg:96.05ms
+step:463/1695 train_time:44471ms step_avg:96.05ms
+step:464/1695 train_time:44566ms step_avg:96.05ms
+step:465/1695 train_time:44660ms step_avg:96.04ms
+step:466/1695 train_time:44754ms step_avg:96.04ms
+step:467/1695 train_time:44849ms step_avg:96.04ms
+step:468/1695 train_time:44942ms step_avg:96.03ms
+step:469/1695 train_time:45036ms step_avg:96.03ms
+step:470/1695 train_time:45131ms step_avg:96.02ms
+step:471/1695 train_time:45224ms step_avg:96.02ms
+step:472/1695 train_time:45318ms step_avg:96.01ms
+step:473/1695 train_time:45413ms step_avg:96.01ms
+step:474/1695 train_time:45507ms step_avg:96.01ms
+step:475/1695 train_time:45601ms step_avg:96.00ms
+step:476/1695 train_time:45695ms step_avg:96.00ms
+step:477/1695 train_time:45789ms step_avg:95.99ms
+step:478/1695 train_time:45882ms step_avg:95.99ms
+step:479/1695 train_time:45976ms step_avg:95.98ms
+step:480/1695 train_time:46071ms step_avg:95.98ms
+step:481/1695 train_time:46165ms step_avg:95.98ms
+step:482/1695 train_time:46259ms step_avg:95.97ms
+step:483/1695 train_time:46353ms step_avg:95.97ms
+step:484/1695 train_time:46449ms step_avg:95.97ms
+step:485/1695 train_time:46542ms step_avg:95.96ms
+step:486/1695 train_time:46636ms step_avg:95.96ms
+step:487/1695 train_time:46731ms step_avg:95.96ms
+step:488/1695 train_time:46825ms step_avg:95.95ms
+step:489/1695 train_time:46918ms step_avg:95.95ms
+step:490/1695 train_time:47013ms step_avg:95.94ms
+step:491/1695 train_time:47107ms step_avg:95.94ms
+step:492/1695 train_time:47200ms step_avg:95.94ms
+step:493/1695 train_time:47295ms step_avg:95.93ms
+step:494/1695 train_time:47390ms step_avg:95.93ms
+step:495/1695 train_time:47484ms step_avg:95.93ms
+step:496/1695 train_time:47577ms step_avg:95.92ms
+step:497/1695 train_time:47672ms step_avg:95.92ms
+step:498/1695 train_time:47766ms step_avg:95.92ms
+step:499/1695 train_time:47860ms step_avg:95.91ms
+step:500/1695 train_time:47954ms step_avg:95.91ms
+step:500/1695 val_loss:3.7206 train_time:48046ms step_avg:96.09ms
+step:501/1695 train_time:48071ms step_avg:95.95ms
+step:502/1695 train_time:48149ms step_avg:95.91ms
+step:503/1695 train_time:48247ms step_avg:95.92ms
+step:504/1695 train_time:48342ms step_avg:95.92ms
+step:505/1695 train_time:48436ms step_avg:95.91ms
+step:506/1695 train_time:48529ms step_avg:95.91ms
+step:507/1695 train_time:48622ms step_avg:95.90ms
+step:508/1695 train_time:48715ms step_avg:95.90ms
+step:509/1695 train_time:48808ms step_avg:95.89ms
+step:510/1695 train_time:48901ms step_avg:95.88ms
+step:511/1695 train_time:48994ms step_avg:95.88ms
+step:512/1695 train_time:49090ms step_avg:95.88ms
+step:513/1695 train_time:49185ms step_avg:95.88ms
+step:514/1695 train_time:49281ms step_avg:95.88ms
+step:515/1695 train_time:49377ms step_avg:95.88ms
+step:516/1695 train_time:49471ms step_avg:95.87ms
+step:517/1695 train_time:49564ms step_avg:95.87ms
+step:518/1695 train_time:49657ms step_avg:95.86ms
+step:519/1695 train_time:49991ms step_avg:96.32ms
+step:520/1695 train_time:50182ms step_avg:96.50ms
+step:521/1695 train_time:50274ms step_avg:96.50ms
+step:522/1695 train_time:50366ms step_avg:96.49ms
+step:523/1695 train_time:50458ms step_avg:96.48ms
+step:524/1695 train_time:50551ms step_avg:96.47ms
+step:525/1695 train_time:50644ms step_avg:96.46ms
+step:526/1695 train_time:50737ms step_avg:96.46ms
+step:527/1695 train_time:50829ms step_avg:96.45ms
+step:528/1695 train_time:50922ms step_avg:96.44ms
+step:529/1695 train_time:51018ms step_avg:96.44ms
+step:530/1695 train_time:51114ms step_avg:96.44ms
+step:531/1695 train_time:51210ms step_avg:96.44ms
+step:532/1695 train_time:51305ms step_avg:96.44ms
+step:533/1695 train_time:51399ms step_avg:96.43ms
+step:534/1695 train_time:51493ms step_avg:96.43ms
+step:535/1695 train_time:51586ms step_avg:96.42ms
+step:536/1695 train_time:51679ms step_avg:96.42ms
+step:537/1695 train_time:51772ms step_avg:96.41ms
+step:538/1695 train_time:51865ms step_avg:96.40ms
+step:539/1695 train_time:51958ms step_avg:96.40ms
+step:540/1695 train_time:52052ms step_avg:96.39ms
+step:541/1695 train_time:52147ms step_avg:96.39ms
+step:542/1695 train_time:52241ms step_avg:96.39ms
+step:543/1695 train_time:52336ms step_avg:96.38ms
+step:544/1695 train_time:52429ms step_avg:96.38ms
+step:545/1695 train_time:52523ms step_avg:96.37ms
+step:546/1695 train_time:52617ms step_avg:96.37ms
+step:547/1695 train_time:52710ms step_avg:96.36ms
+step:548/1695 train_time:52803ms step_avg:96.36ms
+step:549/1695 train_time:52898ms step_avg:96.35ms
+step:550/1695 train_time:52991ms step_avg:96.35ms
+step:551/1695 train_time:53086ms step_avg:96.34ms
+step:552/1695 train_time:53182ms step_avg:96.34ms
+step:553/1695 train_time:53276ms step_avg:96.34ms
+step:554/1695 train_time:53370ms step_avg:96.34ms
+step:555/1695 train_time:53464ms step_avg:96.33ms
+step:556/1695 train_time:53558ms step_avg:96.33ms
+step:557/1695 train_time:53652ms step_avg:96.32ms
+step:558/1695 train_time:53745ms step_avg:96.32ms
+step:559/1695 train_time:53839ms step_avg:96.31ms
+step:560/1695 train_time:53933ms step_avg:96.31ms
+step:561/1695 train_time:54027ms step_avg:96.30ms
+step:562/1695 train_time:54121ms step_avg:96.30ms
+step:563/1695 train_time:54216ms step_avg:96.30ms
+step:564/1695 train_time:54311ms step_avg:96.30ms
+step:565/1695 train_time:54405ms step_avg:96.29ms
+step:566/1695 train_time:54499ms step_avg:96.29ms
+step:567/1695 train_time:54594ms step_avg:96.29ms
+step:568/1695 train_time:54690ms step_avg:96.28ms
+step:569/1695 train_time:54785ms step_avg:96.28ms
+step:570/1695 train_time:54881ms step_avg:96.28ms
+step:571/1695 train_time:54978ms step_avg:96.28ms
+step:572/1695 train_time:55074ms step_avg:96.28ms
+step:573/1695 train_time:55169ms step_avg:96.28ms
+step:574/1695 train_time:55265ms step_avg:96.28ms
+step:575/1695 train_time:55363ms step_avg:96.28ms
+step:576/1695 train_time:55460ms step_avg:96.28ms
+step:577/1695 train_time:55557ms step_avg:96.29ms
+step:578/1695 train_time:55653ms step_avg:96.29ms
+step:579/1695 train_time:55748ms step_avg:96.28ms
+step:580/1695 train_time:55845ms step_avg:96.28ms
+step:581/1695 train_time:55940ms step_avg:96.28ms
+step:582/1695 train_time:56037ms step_avg:96.28ms
+step:583/1695 train_time:56132ms step_avg:96.28ms
+step:584/1695 train_time:56228ms step_avg:96.28ms
+step:585/1695 train_time:56324ms step_avg:96.28ms
+step:586/1695 train_time:56421ms step_avg:96.28ms
+step:587/1695 train_time:56519ms step_avg:96.28ms
+step:588/1695 train_time:56617ms step_avg:96.29ms
+step:589/1695 train_time:56712ms step_avg:96.29ms
+step:590/1695 train_time:56807ms step_avg:96.28ms
+step:591/1695 train_time:56903ms step_avg:96.28ms
+step:592/1695 train_time:57000ms step_avg:96.28ms
+step:593/1695 train_time:57096ms step_avg:96.28ms
+step:594/1695 train_time:57192ms step_avg:96.28ms
+step:595/1695 train_time:57288ms step_avg:96.28ms
+step:596/1695 train_time:57384ms step_avg:96.28ms
+step:597/1695 train_time:57481ms step_avg:96.28ms
+step:598/1695 train_time:57577ms step_avg:96.28ms
+step:599/1695 train_time:57673ms step_avg:96.28ms
+step:600/1695 train_time:57769ms step_avg:96.28ms
+step:601/1695 train_time:57864ms step_avg:96.28ms
+step:602/1695 train_time:57960ms step_avg:96.28ms
+step:603/1695 train_time:58057ms step_avg:96.28ms
+step:604/1695 train_time:58153ms step_avg:96.28ms
+step:605/1695 train_time:58248ms step_avg:96.28ms
+step:606/1695 train_time:58344ms step_avg:96.28ms
+step:607/1695 train_time:58440ms step_avg:96.28ms
+step:608/1695 train_time:58537ms step_avg:96.28ms
+step:609/1695 train_time:58633ms step_avg:96.28ms
+step:610/1695 train_time:58728ms step_avg:96.28ms
+step:611/1695 train_time:58824ms step_avg:96.27ms
+step:612/1695 train_time:58920ms step_avg:96.27ms
+step:613/1695 train_time:59016ms step_avg:96.27ms
+step:614/1695 train_time:59111ms step_avg:96.27ms
+step:615/1695 train_time:59208ms step_avg:96.27ms
+step:616/1695 train_time:59305ms step_avg:96.27ms
+step:617/1695 train_time:59401ms step_avg:96.27ms
+step:618/1695 train_time:59499ms step_avg:96.28ms
+step:619/1695 train_time:59596ms step_avg:96.28ms
+step:620/1695 train_time:59692ms step_avg:96.28ms
+step:621/1695 train_time:59789ms step_avg:96.28ms
+step:622/1695 train_time:59885ms step_avg:96.28ms
+step:623/1695 train_time:59982ms step_avg:96.28ms
+step:624/1695 train_time:60079ms step_avg:96.28ms
+step:625/1695 train_time:60176ms step_avg:96.28ms
+step:625/1695 val_loss:3.6228 train_time:60270ms step_avg:96.43ms
+step:626/1695 train_time:60294ms step_avg:96.32ms
+step:627/1695 train_time:60378ms step_avg:96.30ms
+step:628/1695 train_time:60473ms step_avg:96.30ms
+step:629/1695 train_time:60570ms step_avg:96.30ms
+step:630/1695 train_time:60665ms step_avg:96.29ms
+step:631/1695 train_time:60761ms step_avg:96.29ms
+step:632/1695 train_time:60855ms step_avg:96.29ms
+step:633/1695 train_time:60949ms step_avg:96.29ms
+step:634/1695 train_time:61044ms step_avg:96.28ms
+step:635/1695 train_time:61139ms step_avg:96.28ms
+step:636/1695 train_time:61237ms step_avg:96.28ms
+step:637/1695 train_time:61335ms step_avg:96.29ms
+step:638/1695 train_time:61432ms step_avg:96.29ms
+step:639/1695 train_time:61530ms step_avg:96.29ms
+step:640/1695 train_time:61627ms step_avg:96.29ms
+step:641/1695 train_time:61723ms step_avg:96.29ms
+step:642/1695 train_time:61818ms step_avg:96.29ms
+step:643/1695 train_time:61912ms step_avg:96.29ms
+step:644/1695 train_time:62007ms step_avg:96.28ms
+step:645/1695 train_time:62103ms step_avg:96.28ms
+step:646/1695 train_time:62199ms step_avg:96.28ms
+step:647/1695 train_time:62297ms step_avg:96.29ms
+step:648/1695 train_time:62394ms step_avg:96.29ms
+step:649/1695 train_time:62491ms step_avg:96.29ms
+step:650/1695 train_time:62588ms step_avg:96.29ms
+step:651/1695 train_time:62686ms step_avg:96.29ms
+step:652/1695 train_time:62783ms step_avg:96.29ms
+step:653/1695 train_time:62879ms step_avg:96.29ms
+step:654/1695 train_time:62973ms step_avg:96.29ms
+step:655/1695 train_time:63068ms step_avg:96.29ms
+step:656/1695 train_time:63166ms step_avg:96.29ms
+step:657/1695 train_time:63264ms step_avg:96.29ms
+step:658/1695 train_time:63362ms step_avg:96.29ms
+step:659/1695 train_time:63459ms step_avg:96.30ms
+step:660/1695 train_time:63555ms step_avg:96.30ms
+step:661/1695 train_time:63652ms step_avg:96.30ms
+step:662/1695 train_time:63747ms step_avg:96.30ms
+step:663/1695 train_time:63844ms step_avg:96.30ms
+step:664/1695 train_time:63939ms step_avg:96.29ms
+step:665/1695 train_time:64034ms step_avg:96.29ms
+step:666/1695 train_time:64130ms step_avg:96.29ms
+step:667/1695 train_time:64227ms step_avg:96.29ms
+step:668/1695 train_time:64325ms step_avg:96.29ms
+step:669/1695 train_time:64422ms step_avg:96.30ms
+step:670/1695 train_time:64518ms step_avg:96.30ms
+step:671/1695 train_time:64614ms step_avg:96.30ms
+step:672/1695 train_time:64710ms step_avg:96.29ms
+step:673/1695 train_time:64806ms step_avg:96.29ms
+step:674/1695 train_time:64902ms step_avg:96.29ms
+step:675/1695 train_time:64998ms step_avg:96.29ms
+step:676/1695 train_time:65093ms step_avg:96.29ms
+step:677/1695 train_time:65190ms step_avg:96.29ms
+step:678/1695 train_time:65287ms step_avg:96.29ms
+step:679/1695 train_time:65384ms step_avg:96.30ms
+step:680/1695 train_time:65482ms step_avg:96.30ms
+step:681/1695 train_time:65578ms step_avg:96.30ms
+step:682/1695 train_time:65674ms step_avg:96.30ms
+step:683/1695 train_time:65769ms step_avg:96.29ms
+step:684/1695 train_time:65866ms step_avg:96.30ms
+step:685/1695 train_time:65962ms step_avg:96.29ms
+step:686/1695 train_time:66058ms step_avg:96.29ms
+step:687/1695 train_time:66153ms step_avg:96.29ms
+step:688/1695 train_time:66249ms step_avg:96.29ms
+step:689/1695 train_time:66345ms step_avg:96.29ms
+step:690/1695 train_time:66441ms step_avg:96.29ms
+step:691/1695 train_time:66884ms step_avg:96.79ms
+step:692/1695 train_time:66970ms step_avg:96.78ms
+step:693/1695 train_time:67064ms step_avg:96.77ms
+step:694/1695 train_time:67159ms step_avg:96.77ms
+step:695/1695 train_time:67254ms step_avg:96.77ms
+step:696/1695 train_time:67348ms step_avg:96.77ms
+step:697/1695 train_time:67443ms step_avg:96.76ms
+step:698/1695 train_time:67538ms step_avg:96.76ms
+step:699/1695 train_time:67633ms step_avg:96.76ms
+step:700/1695 train_time:67727ms step_avg:96.75ms
+step:701/1695 train_time:67826ms step_avg:96.76ms
+step:702/1695 train_time:67927ms step_avg:96.76ms
+step:703/1695 train_time:68025ms step_avg:96.76ms
+step:704/1695 train_time:68122ms step_avg:96.76ms
+step:705/1695 train_time:68218ms step_avg:96.76ms
+step:706/1695 train_time:68313ms step_avg:96.76ms
+step:707/1695 train_time:68408ms step_avg:96.76ms
+step:708/1695 train_time:68503ms step_avg:96.76ms
+step:709/1695 train_time:68598ms step_avg:96.75ms
+step:710/1695 train_time:68693ms step_avg:96.75ms
+step:711/1695 train_time:68789ms step_avg:96.75ms
+step:712/1695 train_time:68885ms step_avg:96.75ms
+step:713/1695 train_time:68982ms step_avg:96.75ms
+step:714/1695 train_time:69079ms step_avg:96.75ms
+step:715/1695 train_time:69175ms step_avg:96.75ms
+step:716/1695 train_time:69271ms step_avg:96.75ms
+step:717/1695 train_time:69366ms step_avg:96.74ms
+step:718/1695 train_time:69461ms step_avg:96.74ms
+step:719/1695 train_time:69556ms step_avg:96.74ms
+step:720/1695 train_time:69652ms step_avg:96.74ms
+step:721/1695 train_time:69748ms step_avg:96.74ms
+step:722/1695 train_time:69845ms step_avg:96.74ms
+step:723/1695 train_time:69942ms step_avg:96.74ms
+step:724/1695 train_time:70038ms step_avg:96.74ms
+step:725/1695 train_time:70134ms step_avg:96.74ms
+step:726/1695 train_time:70230ms step_avg:96.74ms
+step:727/1695 train_time:70328ms step_avg:96.74ms
+step:728/1695 train_time:70425ms step_avg:96.74ms
+step:729/1695 train_time:70520ms step_avg:96.74ms
+step:730/1695 train_time:70615ms step_avg:96.73ms
+step:731/1695 train_time:70710ms step_avg:96.73ms
+step:732/1695 train_time:70806ms step_avg:96.73ms
+step:733/1695 train_time:70904ms step_avg:96.73ms
+step:734/1695 train_time:71000ms step_avg:96.73ms
+step:735/1695 train_time:71097ms step_avg:96.73ms
+step:736/1695 train_time:71193ms step_avg:96.73ms
+step:737/1695 train_time:71289ms step_avg:96.73ms
+step:738/1695 train_time:71386ms step_avg:96.73ms
+step:739/1695 train_time:71483ms step_avg:96.73ms
+step:740/1695 train_time:71579ms step_avg:96.73ms
+step:741/1695 train_time:71674ms step_avg:96.73ms
+step:742/1695 train_time:71769ms step_avg:96.72ms
+step:743/1695 train_time:71866ms step_avg:96.72ms
+step:744/1695 train_time:71963ms step_avg:96.72ms
+step:745/1695 train_time:72059ms step_avg:96.72ms
+step:746/1695 train_time:72155ms step_avg:96.72ms
+step:747/1695 train_time:72251ms step_avg:96.72ms
+step:748/1695 train_time:72348ms step_avg:96.72ms
+step:749/1695 train_time:72444ms step_avg:96.72ms
+step:750/1695 train_time:72540ms step_avg:96.72ms
+step:750/1695 val_loss:3.5691 train_time:72633ms step_avg:96.84ms
+step:751/1695 train_time:72658ms step_avg:96.75ms
+step:752/1695 train_time:72740ms step_avg:96.73ms
+step:753/1695 train_time:72841ms step_avg:96.73ms
+step:754/1695 train_time:72937ms step_avg:96.73ms
+step:755/1695 train_time:73032ms step_avg:96.73ms
+step:756/1695 train_time:73127ms step_avg:96.73ms
+step:757/1695 train_time:73221ms step_avg:96.73ms
+step:758/1695 train_time:73316ms step_avg:96.72ms
+step:759/1695 train_time:73411ms step_avg:96.72ms
+step:760/1695 train_time:73506ms step_avg:96.72ms
+step:761/1695 train_time:73604ms step_avg:96.72ms
+step:762/1695 train_time:73701ms step_avg:96.72ms
+step:763/1695 train_time:73800ms step_avg:96.72ms
+step:764/1695 train_time:73896ms step_avg:96.72ms
+step:765/1695 train_time:73992ms step_avg:96.72ms
+step:766/1695 train_time:74088ms step_avg:96.72ms
+step:767/1695 train_time:74183ms step_avg:96.72ms
+step:768/1695 train_time:74278ms step_avg:96.72ms
+step:769/1695 train_time:74373ms step_avg:96.71ms
+step:770/1695 train_time:74470ms step_avg:96.71ms
+step:771/1695 train_time:74566ms step_avg:96.71ms
+step:772/1695 train_time:74663ms step_avg:96.71ms
+step:773/1695 train_time:74761ms step_avg:96.71ms
+step:774/1695 train_time:74857ms step_avg:96.72ms
+step:775/1695 train_time:74954ms step_avg:96.71ms
+step:776/1695 train_time:75051ms step_avg:96.72ms
+step:777/1695 train_time:75147ms step_avg:96.71ms
+step:778/1695 train_time:75242ms step_avg:96.71ms
+step:779/1695 train_time:75338ms step_avg:96.71ms
+step:780/1695 train_time:75434ms step_avg:96.71ms
+step:781/1695 train_time:75531ms step_avg:96.71ms
+step:782/1695 train_time:75627ms step_avg:96.71ms
+step:783/1695 train_time:75724ms step_avg:96.71ms
+step:784/1695 train_time:75820ms step_avg:96.71ms
+step:785/1695 train_time:75916ms step_avg:96.71ms
+step:786/1695 train_time:76013ms step_avg:96.71ms
+step:787/1695 train_time:76109ms step_avg:96.71ms
+step:788/1695 train_time:76206ms step_avg:96.71ms
+step:789/1695 train_time:76301ms step_avg:96.71ms
+step:790/1695 train_time:76397ms step_avg:96.71ms
+step:791/1695 train_time:76493ms step_avg:96.70ms
+step:792/1695 train_time:76591ms step_avg:96.71ms
+step:793/1695 train_time:76689ms step_avg:96.71ms
+step:794/1695 train_time:76785ms step_avg:96.71ms
+step:795/1695 train_time:76881ms step_avg:96.71ms
+step:796/1695 train_time:76977ms step_avg:96.71ms
+step:797/1695 train_time:77073ms step_avg:96.70ms
+step:798/1695 train_time:77169ms step_avg:96.70ms
+step:799/1695 train_time:77264ms step_avg:96.70ms
+step:800/1695 train_time:77359ms step_avg:96.70ms
+step:801/1695 train_time:77455ms step_avg:96.70ms
+step:802/1695 train_time:77551ms step_avg:96.70ms
+step:803/1695 train_time:77647ms step_avg:96.70ms
+step:804/1695 train_time:77743ms step_avg:96.69ms
+step:805/1695 train_time:77839ms step_avg:96.69ms
+step:806/1695 train_time:77936ms step_avg:96.69ms
+step:807/1695 train_time:78033ms step_avg:96.69ms
+step:808/1695 train_time:78130ms step_avg:96.69ms
+step:809/1695 train_time:78226ms step_avg:96.69ms
+step:810/1695 train_time:78321ms step_avg:96.69ms
+step:811/1695 train_time:78417ms step_avg:96.69ms
+step:812/1695 train_time:78513ms step_avg:96.69ms
+step:813/1695 train_time:78609ms step_avg:96.69ms
+step:814/1695 train_time:78706ms step_avg:96.69ms
+step:815/1695 train_time:78802ms step_avg:96.69ms
+step:816/1695 train_time:78898ms step_avg:96.69ms
+step:817/1695 train_time:78996ms step_avg:96.69ms
+step:818/1695 train_time:79093ms step_avg:96.69ms
+step:819/1695 train_time:79190ms step_avg:96.69ms
+step:820/1695 train_time:79286ms step_avg:96.69ms
+step:821/1695 train_time:79381ms step_avg:96.69ms
+step:822/1695 train_time:79477ms step_avg:96.69ms
+step:823/1695 train_time:79574ms step_avg:96.69ms
+step:824/1695 train_time:79671ms step_avg:96.69ms
+step:825/1695 train_time:79767ms step_avg:96.69ms
+step:826/1695 train_time:79863ms step_avg:96.69ms
+step:827/1695 train_time:79959ms step_avg:96.69ms
+step:828/1695 train_time:80056ms step_avg:96.69ms
+step:829/1695 train_time:80153ms step_avg:96.69ms
+step:830/1695 train_time:80249ms step_avg:96.69ms
+step:831/1695 train_time:80345ms step_avg:96.68ms
+step:832/1695 train_time:80440ms step_avg:96.68ms
+step:833/1695 train_time:80536ms step_avg:96.68ms
+step:834/1695 train_time:80633ms step_avg:96.68ms
+step:835/1695 train_time:80730ms step_avg:96.68ms
+step:836/1695 train_time:80826ms step_avg:96.68ms
+step:837/1695 train_time:80922ms step_avg:96.68ms
+step:838/1695 train_time:81019ms step_avg:96.68ms
+step:839/1695 train_time:81116ms step_avg:96.68ms
+step:840/1695 train_time:81213ms step_avg:96.68ms
+step:841/1695 train_time:81309ms step_avg:96.68ms
+step:842/1695 train_time:81405ms step_avg:96.68ms
+step:843/1695 train_time:81502ms step_avg:96.68ms
+step:844/1695 train_time:81597ms step_avg:96.68ms
+step:845/1695 train_time:81694ms step_avg:96.68ms
+step:846/1695 train_time:81791ms step_avg:96.68ms
+step:847/1695 train_time:81888ms step_avg:96.68ms
+step:848/1695 train_time:81983ms step_avg:96.68ms
+step:849/1695 train_time:82078ms step_avg:96.68ms
+step:850/1695 train_time:82174ms step_avg:96.68ms
+step:851/1695 train_time:82271ms step_avg:96.68ms
+step:852/1695 train_time:82368ms step_avg:96.68ms
+step:853/1695 train_time:82464ms step_avg:96.68ms
+step:854/1695 train_time:82559ms step_avg:96.67ms
+step:855/1695 train_time:82655ms step_avg:96.67ms
+step:856/1695 train_time:82752ms step_avg:96.67ms
+step:857/1695 train_time:82848ms step_avg:96.67ms
+step:858/1695 train_time:82944ms step_avg:96.67ms
+step:859/1695 train_time:83039ms step_avg:96.67ms
+step:860/1695 train_time:83136ms step_avg:96.67ms
+step:861/1695 train_time:83233ms step_avg:96.67ms
+step:862/1695 train_time:83331ms step_avg:96.67ms
+step:863/1695 train_time:83651ms step_avg:96.93ms
+step:864/1695 train_time:83850ms step_avg:97.05ms
+step:865/1695 train_time:83944ms step_avg:97.05ms
+step:866/1695 train_time:84039ms step_avg:97.04ms
+step:867/1695 train_time:84134ms step_avg:97.04ms
+step:868/1695 train_time:84229ms step_avg:97.04ms
+step:869/1695 train_time:84323ms step_avg:97.03ms
+step:870/1695 train_time:84418ms step_avg:97.03ms
+step:871/1695 train_time:84513ms step_avg:97.03ms
+step:872/1695 train_time:84609ms step_avg:97.03ms
+step:873/1695 train_time:84705ms step_avg:97.03ms
+step:874/1695 train_time:84806ms step_avg:97.03ms
+step:875/1695 train_time:84904ms step_avg:97.03ms
+step:875/1695 val_loss:3.5252 train_time:84998ms step_avg:97.14ms
+step:876/1695 train_time:85024ms step_avg:97.06ms
+step:877/1695 train_time:85101ms step_avg:97.04ms
+step:878/1695 train_time:85198ms step_avg:97.04ms
+step:879/1695 train_time:85293ms step_avg:97.03ms
+step:880/1695 train_time:85388ms step_avg:97.03ms
+step:881/1695 train_time:85484ms step_avg:97.03ms
+step:882/1695 train_time:85579ms step_avg:97.03ms
+step:883/1695 train_time:85673ms step_avg:97.03ms
+step:884/1695 train_time:85768ms step_avg:97.02ms
+step:885/1695 train_time:85863ms step_avg:97.02ms
+step:886/1695 train_time:85962ms step_avg:97.02ms
+step:887/1695 train_time:86061ms step_avg:97.02ms
+step:888/1695 train_time:86157ms step_avg:97.02ms
+step:889/1695 train_time:86253ms step_avg:97.02ms
+step:890/1695 train_time:86350ms step_avg:97.02ms
+step:891/1695 train_time:86446ms step_avg:97.02ms
+step:892/1695 train_time:86542ms step_avg:97.02ms
+step:893/1695 train_time:86637ms step_avg:97.02ms
+step:894/1695 train_time:86732ms step_avg:97.02ms
+step:895/1695 train_time:86828ms step_avg:97.01ms
+step:896/1695 train_time:86924ms step_avg:97.01ms
+step:897/1695 train_time:87021ms step_avg:97.01ms
+step:898/1695 train_time:87117ms step_avg:97.01ms
+step:899/1695 train_time:87213ms step_avg:97.01ms
+step:900/1695 train_time:87309ms step_avg:97.01ms
+step:901/1695 train_time:87406ms step_avg:97.01ms
+step:902/1695 train_time:87502ms step_avg:97.01ms
+step:903/1695 train_time:87598ms step_avg:97.01ms
+step:904/1695 train_time:87693ms step_avg:97.01ms
+step:905/1695 train_time:87789ms step_avg:97.00ms
+step:906/1695 train_time:87885ms step_avg:97.00ms
+step:907/1695 train_time:87982ms step_avg:97.00ms
+step:908/1695 train_time:88078ms step_avg:97.00ms
+step:909/1695 train_time:88174ms step_avg:97.00ms
+step:910/1695 train_time:88270ms step_avg:97.00ms
+step:911/1695 train_time:88366ms step_avg:97.00ms
+step:912/1695 train_time:88462ms step_avg:97.00ms
+step:913/1695 train_time:88558ms step_avg:97.00ms
+step:914/1695 train_time:88653ms step_avg:96.99ms
+step:915/1695 train_time:88749ms step_avg:96.99ms
+step:916/1695 train_time:88845ms step_avg:96.99ms
+step:917/1695 train_time:88942ms step_avg:96.99ms
+step:918/1695 train_time:89038ms step_avg:96.99ms
+step:919/1695 train_time:89133ms step_avg:96.99ms
+step:920/1695 train_time:89230ms step_avg:96.99ms
+step:921/1695 train_time:89327ms step_avg:96.99ms
+step:922/1695 train_time:89423ms step_avg:96.99ms
+step:923/1695 train_time:89519ms step_avg:96.99ms
+step:924/1695 train_time:89615ms step_avg:96.99ms
+step:925/1695 train_time:89711ms step_avg:96.98ms
+step:926/1695 train_time:89806ms step_avg:96.98ms
+step:927/1695 train_time:89902ms step_avg:96.98ms
+step:928/1695 train_time:89998ms step_avg:96.98ms
+step:929/1695 train_time:90094ms step_avg:96.98ms
+step:930/1695 train_time:90190ms step_avg:96.98ms
+step:931/1695 train_time:90287ms step_avg:96.98ms
+step:932/1695 train_time:90383ms step_avg:96.98ms
+step:933/1695 train_time:90479ms step_avg:96.98ms
+step:934/1695 train_time:90575ms step_avg:96.98ms
+step:935/1695 train_time:90670ms step_avg:96.97ms
+step:936/1695 train_time:90767ms step_avg:96.97ms
+step:937/1695 train_time:90863ms step_avg:96.97ms
+step:938/1695 train_time:90960ms step_avg:96.97ms
+step:939/1695 train_time:91055ms step_avg:96.97ms
+step:940/1695 train_time:91151ms step_avg:96.97ms
+step:941/1695 train_time:91247ms step_avg:96.97ms
+step:942/1695 train_time:91343ms step_avg:96.97ms
+step:943/1695 train_time:91440ms step_avg:96.97ms
+step:944/1695 train_time:91535ms step_avg:96.97ms
+step:945/1695 train_time:91631ms step_avg:96.96ms
+step:946/1695 train_time:91726ms step_avg:96.96ms
+step:947/1695 train_time:91822ms step_avg:96.96ms
+step:948/1695 train_time:91918ms step_avg:96.96ms
+step:949/1695 train_time:92013ms step_avg:96.96ms
+step:950/1695 train_time:92109ms step_avg:96.96ms
+step:951/1695 train_time:92205ms step_avg:96.96ms
+step:952/1695 train_time:92301ms step_avg:96.95ms
+step:953/1695 train_time:92396ms step_avg:96.95ms
+step:954/1695 train_time:92491ms step_avg:96.95ms
+step:955/1695 train_time:92587ms step_avg:96.95ms
+step:956/1695 train_time:92683ms step_avg:96.95ms
+step:957/1695 train_time:92779ms step_avg:96.95ms
+step:958/1695 train_time:92875ms step_avg:96.95ms
+step:959/1695 train_time:92971ms step_avg:96.95ms
+step:960/1695 train_time:93068ms step_avg:96.95ms
+step:961/1695 train_time:93164ms step_avg:96.95ms
+step:962/1695 train_time:93260ms step_avg:96.94ms
+step:963/1695 train_time:93356ms step_avg:96.94ms
+step:964/1695 train_time:93453ms step_avg:96.94ms
+step:965/1695 train_time:93549ms step_avg:96.94ms
+step:966/1695 train_time:93646ms step_avg:96.94ms
+step:967/1695 train_time:93743ms step_avg:96.94ms
+step:968/1695 train_time:93839ms step_avg:96.94ms
+step:969/1695 train_time:93934ms step_avg:96.94ms
+step:970/1695 train_time:94030ms step_avg:96.94ms
+step:971/1695 train_time:94126ms step_avg:96.94ms
+step:972/1695 train_time:94222ms step_avg:96.94ms
+step:973/1695 train_time:94318ms step_avg:96.94ms
+step:974/1695 train_time:94414ms step_avg:96.93ms
+step:975/1695 train_time:94510ms step_avg:96.93ms
+step:976/1695 train_time:94607ms step_avg:96.93ms
+step:977/1695 train_time:94702ms step_avg:96.93ms
+step:978/1695 train_time:94799ms step_avg:96.93ms
+step:979/1695 train_time:94894ms step_avg:96.93ms
+step:980/1695 train_time:94990ms step_avg:96.93ms
+step:981/1695 train_time:95087ms step_avg:96.93ms
+step:982/1695 train_time:95184ms step_avg:96.93ms
+step:983/1695 train_time:95280ms step_avg:96.93ms
+step:984/1695 train_time:95376ms step_avg:96.93ms
+step:985/1695 train_time:95472ms step_avg:96.93ms
+step:986/1695 train_time:95568ms step_avg:96.92ms
+step:987/1695 train_time:95664ms step_avg:96.92ms
+step:988/1695 train_time:95760ms step_avg:96.92ms
+step:989/1695 train_time:95855ms step_avg:96.92ms
+step:990/1695 train_time:95950ms step_avg:96.92ms
+step:991/1695 train_time:96047ms step_avg:96.92ms
+step:992/1695 train_time:96144ms step_avg:96.92ms
+step:993/1695 train_time:96240ms step_avg:96.92ms
+step:994/1695 train_time:96335ms step_avg:96.92ms
+step:995/1695 train_time:96431ms step_avg:96.92ms
+step:996/1695 train_time:96528ms step_avg:96.92ms
+step:997/1695 train_time:96625ms step_avg:96.92ms
+step:998/1695 train_time:96722ms step_avg:96.92ms
+step:999/1695 train_time:96817ms step_avg:96.91ms
+step:1000/1695 train_time:96912ms step_avg:96.91ms
+step:1000/1695 val_loss:3.4845 train_time:97007ms step_avg:97.01ms
+step:1001/1695 train_time:97031ms step_avg:96.93ms
+step:1002/1695 train_time:97110ms step_avg:96.92ms
+step:1003/1695 train_time:97207ms step_avg:96.92ms
+step:1004/1695 train_time:97304ms step_avg:96.92ms
+step:1005/1695 train_time:97399ms step_avg:96.91ms
+step:1006/1695 train_time:97495ms step_avg:96.91ms
+step:1007/1695 train_time:97589ms step_avg:96.91ms
+step:1008/1695 train_time:97684ms step_avg:96.91ms
+step:1009/1695 train_time:97779ms step_avg:96.91ms
+step:1010/1695 train_time:97874ms step_avg:96.90ms
+step:1011/1695 train_time:97971ms step_avg:96.90ms
+step:1012/1695 train_time:98069ms step_avg:96.91ms
+step:1013/1695 train_time:98165ms step_avg:96.91ms
+step:1014/1695 train_time:98263ms step_avg:96.91ms
+step:1015/1695 train_time:98359ms step_avg:96.91ms
+step:1016/1695 train_time:98454ms step_avg:96.90ms
+step:1017/1695 train_time:98549ms step_avg:96.90ms
+step:1018/1695 train_time:98644ms step_avg:96.90ms
+step:1019/1695 train_time:98740ms step_avg:96.90ms
+step:1020/1695 train_time:98835ms step_avg:96.90ms
+step:1021/1695 train_time:98930ms step_avg:96.89ms
+step:1022/1695 train_time:99026ms step_avg:96.89ms
+step:1023/1695 train_time:99123ms step_avg:96.89ms
+step:1024/1695 train_time:99220ms step_avg:96.89ms
+step:1025/1695 train_time:99317ms step_avg:96.89ms
+step:1026/1695 train_time:99414ms step_avg:96.90ms
+step:1027/1695 train_time:99510ms step_avg:96.89ms
+step:1028/1695 train_time:99605ms step_avg:96.89ms
+step:1029/1695 train_time:99700ms step_avg:96.89ms
+step:1030/1695 train_time:99795ms step_avg:96.89ms
+step:1031/1695 train_time:99891ms step_avg:96.89ms
+step:1032/1695 train_time:99986ms step_avg:96.89ms
+step:1033/1695 train_time:100082ms step_avg:96.88ms
+step:1034/1695 train_time:100179ms step_avg:96.88ms
+step:1035/1695 train_time:100274ms step_avg:96.88ms
+step:1036/1695 train_time:100600ms step_avg:97.10ms
+step:1037/1695 train_time:100773ms step_avg:97.18ms
+step:1038/1695 train_time:100866ms step_avg:97.17ms
+step:1039/1695 train_time:100961ms step_avg:97.17ms
+step:1040/1695 train_time:101056ms step_avg:97.17ms
+step:1041/1695 train_time:101151ms step_avg:97.17ms
+step:1042/1695 train_time:101245ms step_avg:97.16ms
+step:1043/1695 train_time:101341ms step_avg:97.16ms
+step:1044/1695 train_time:101436ms step_avg:97.16ms
+step:1045/1695 train_time:101530ms step_avg:97.16ms
+step:1046/1695 train_time:101626ms step_avg:97.16ms
+step:1047/1695 train_time:101728ms step_avg:97.16ms
+step:1048/1695 train_time:101826ms step_avg:97.16ms
+step:1049/1695 train_time:101923ms step_avg:97.16ms
+step:1050/1695 train_time:102019ms step_avg:97.16ms
+step:1051/1695 train_time:102115ms step_avg:97.16ms
+step:1052/1695 train_time:102210ms step_avg:97.16ms
+step:1053/1695 train_time:102304ms step_avg:97.16ms
+step:1054/1695 train_time:102399ms step_avg:97.15ms
+step:1055/1695 train_time:102494ms step_avg:97.15ms
+step:1056/1695 train_time:102590ms step_avg:97.15ms
+step:1057/1695 train_time:102688ms step_avg:97.15ms
+step:1058/1695 train_time:102784ms step_avg:97.15ms
+step:1059/1695 train_time:102881ms step_avg:97.15ms
+step:1060/1695 train_time:102978ms step_avg:97.15ms
+step:1061/1695 train_time:103075ms step_avg:97.15ms
+step:1062/1695 train_time:103171ms step_avg:97.15ms
+step:1063/1695 train_time:103265ms step_avg:97.15ms
+step:1064/1695 train_time:103360ms step_avg:97.14ms
+step:1065/1695 train_time:103456ms step_avg:97.14ms
+step:1066/1695 train_time:103553ms step_avg:97.14ms
+step:1067/1695 train_time:103650ms step_avg:97.14ms
+step:1068/1695 train_time:103746ms step_avg:97.14ms
+step:1069/1695 train_time:103842ms step_avg:97.14ms
+step:1070/1695 train_time:103939ms step_avg:97.14ms
+step:1071/1695 train_time:104036ms step_avg:97.14ms
+step:1072/1695 train_time:104132ms step_avg:97.14ms
+step:1073/1695 train_time:104227ms step_avg:97.14ms
+step:1074/1695 train_time:104322ms step_avg:97.13ms
+step:1075/1695 train_time:104418ms step_avg:97.13ms
+step:1076/1695 train_time:104514ms step_avg:97.13ms
+step:1077/1695 train_time:104609ms step_avg:97.13ms
+step:1078/1695 train_time:104705ms step_avg:97.13ms
+step:1079/1695 train_time:104802ms step_avg:97.13ms
+step:1080/1695 train_time:104898ms step_avg:97.13ms
+step:1081/1695 train_time:104995ms step_avg:97.13ms
+step:1082/1695 train_time:105091ms step_avg:97.13ms
+step:1083/1695 train_time:105187ms step_avg:97.13ms
+step:1084/1695 train_time:105282ms step_avg:97.12ms
+step:1085/1695 train_time:105378ms step_avg:97.12ms
+step:1086/1695 train_time:105474ms step_avg:97.12ms
+step:1087/1695 train_time:105569ms step_avg:97.12ms
+step:1088/1695 train_time:105664ms step_avg:97.12ms
+step:1089/1695 train_time:105761ms step_avg:97.12ms
+step:1090/1695 train_time:105858ms step_avg:97.12ms
+step:1091/1695 train_time:105955ms step_avg:97.12ms
+step:1092/1695 train_time:106050ms step_avg:97.12ms
+step:1093/1695 train_time:106146ms step_avg:97.11ms
+step:1094/1695 train_time:106241ms step_avg:97.11ms
+step:1095/1695 train_time:106339ms step_avg:97.11ms
+step:1096/1695 train_time:106435ms step_avg:97.11ms
+step:1097/1695 train_time:106531ms step_avg:97.11ms
+step:1098/1695 train_time:106627ms step_avg:97.11ms
+step:1099/1695 train_time:106723ms step_avg:97.11ms
+step:1100/1695 train_time:106819ms step_avg:97.11ms
+step:1101/1695 train_time:106915ms step_avg:97.11ms
+step:1102/1695 train_time:107012ms step_avg:97.11ms
+step:1103/1695 train_time:107107ms step_avg:97.11ms
+step:1104/1695 train_time:107203ms step_avg:97.10ms
+step:1105/1695 train_time:107300ms step_avg:97.10ms
+step:1106/1695 train_time:107396ms step_avg:97.10ms
+step:1107/1695 train_time:107492ms step_avg:97.10ms
+step:1108/1695 train_time:107587ms step_avg:97.10ms
+step:1109/1695 train_time:107683ms step_avg:97.10ms
+step:1110/1695 train_time:107779ms step_avg:97.10ms
+step:1111/1695 train_time:107875ms step_avg:97.10ms
+step:1112/1695 train_time:107971ms step_avg:97.10ms
+step:1113/1695 train_time:108066ms step_avg:97.09ms
+step:1114/1695 train_time:108163ms step_avg:97.09ms
+step:1115/1695 train_time:108259ms step_avg:97.09ms
+step:1116/1695 train_time:108356ms step_avg:97.09ms
+step:1117/1695 train_time:108453ms step_avg:97.09ms
+step:1118/1695 train_time:108549ms step_avg:97.09ms
+step:1119/1695 train_time:108645ms step_avg:97.09ms
+step:1120/1695 train_time:108741ms step_avg:97.09ms
+step:1121/1695 train_time:108838ms step_avg:97.09ms
+step:1122/1695 train_time:108934ms step_avg:97.09ms
+step:1123/1695 train_time:109030ms step_avg:97.09ms
+step:1124/1695 train_time:109126ms step_avg:97.09ms
+step:1125/1695 train_time:109222ms step_avg:97.09ms
+step:1125/1695 val_loss:3.4375 train_time:109315ms step_avg:97.17ms
+step:1126/1695 train_time:109340ms step_avg:97.10ms
+step:1127/1695 train_time:109421ms step_avg:97.09ms
+step:1128/1695 train_time:109520ms step_avg:97.09ms
+step:1129/1695 train_time:109618ms step_avg:97.09ms
+step:1130/1695 train_time:109713ms step_avg:97.09ms
+step:1131/1695 train_time:109808ms step_avg:97.09ms
+step:1132/1695 train_time:109903ms step_avg:97.09ms
+step:1133/1695 train_time:110000ms step_avg:97.09ms
+step:1134/1695 train_time:110096ms step_avg:97.09ms
+step:1135/1695 train_time:110193ms step_avg:97.09ms
+step:1136/1695 train_time:110294ms step_avg:97.09ms
+step:1137/1695 train_time:110395ms step_avg:97.09ms
+step:1138/1695 train_time:110495ms step_avg:97.10ms
+step:1139/1695 train_time:110595ms step_avg:97.10ms
+step:1140/1695 train_time:110691ms step_avg:97.10ms
+step:1141/1695 train_time:110788ms step_avg:97.10ms
+step:1142/1695 train_time:110886ms step_avg:97.10ms
+step:1143/1695 train_time:110983ms step_avg:97.10ms
+step:1144/1695 train_time:111080ms step_avg:97.10ms
+step:1145/1695 train_time:111178ms step_avg:97.10ms
+step:1146/1695 train_time:111276ms step_avg:97.10ms
+step:1147/1695 train_time:111375ms step_avg:97.10ms
+step:1148/1695 train_time:111474ms step_avg:97.10ms
+step:1149/1695 train_time:111572ms step_avg:97.10ms
+step:1150/1695 train_time:111669ms step_avg:97.10ms
+step:1151/1695 train_time:111766ms step_avg:97.10ms
+step:1152/1695 train_time:111863ms step_avg:97.10ms
+step:1153/1695 train_time:111959ms step_avg:97.10ms
+step:1154/1695 train_time:112057ms step_avg:97.10ms
+step:1155/1695 train_time:112154ms step_avg:97.10ms
+step:1156/1695 train_time:112251ms step_avg:97.10ms
+step:1157/1695 train_time:112349ms step_avg:97.10ms
+step:1158/1695 train_time:112448ms step_avg:97.11ms
+step:1159/1695 train_time:112547ms step_avg:97.11ms
+step:1160/1695 train_time:112645ms step_avg:97.11ms
+step:1161/1695 train_time:112744ms step_avg:97.11ms
+step:1162/1695 train_time:112843ms step_avg:97.11ms
+step:1163/1695 train_time:112940ms step_avg:97.11ms
+step:1164/1695 train_time:113037ms step_avg:97.11ms
+step:1165/1695 train_time:113135ms step_avg:97.11ms
+step:1166/1695 train_time:113232ms step_avg:97.11ms
+step:1167/1695 train_time:113330ms step_avg:97.11ms
+step:1168/1695 train_time:113428ms step_avg:97.11ms
+step:1169/1695 train_time:113527ms step_avg:97.11ms
+step:1170/1695 train_time:113626ms step_avg:97.12ms
+step:1171/1695 train_time:113723ms step_avg:97.12ms
+step:1172/1695 train_time:113822ms step_avg:97.12ms
+step:1173/1695 train_time:113919ms step_avg:97.12ms
+step:1174/1695 train_time:114018ms step_avg:97.12ms
+step:1175/1695 train_time:114115ms step_avg:97.12ms
+step:1176/1695 train_time:114212ms step_avg:97.12ms
+step:1177/1695 train_time:114310ms step_avg:97.12ms
+step:1178/1695 train_time:114406ms step_avg:97.12ms
+step:1179/1695 train_time:114504ms step_avg:97.12ms
+step:1180/1695 train_time:114604ms step_avg:97.12ms
+step:1181/1695 train_time:114703ms step_avg:97.12ms
+step:1182/1695 train_time:114801ms step_avg:97.12ms
+step:1183/1695 train_time:114898ms step_avg:97.12ms
+step:1184/1695 train_time:114996ms step_avg:97.13ms
+step:1185/1695 train_time:115095ms step_avg:97.13ms
+step:1186/1695 train_time:115193ms step_avg:97.13ms
+step:1187/1695 train_time:115290ms step_avg:97.13ms
+step:1188/1695 train_time:115388ms step_avg:97.13ms
+step:1189/1695 train_time:115486ms step_avg:97.13ms
+step:1190/1695 train_time:115584ms step_avg:97.13ms
+step:1191/1695 train_time:115682ms step_avg:97.13ms
+step:1192/1695 train_time:115779ms step_avg:97.13ms
+step:1193/1695 train_time:115876ms step_avg:97.13ms
+step:1194/1695 train_time:115973ms step_avg:97.13ms
+step:1195/1695 train_time:116071ms step_avg:97.13ms
+step:1196/1695 train_time:116169ms step_avg:97.13ms
+step:1197/1695 train_time:116266ms step_avg:97.13ms
+step:1198/1695 train_time:116365ms step_avg:97.13ms
+step:1199/1695 train_time:116464ms step_avg:97.13ms
+step:1200/1695 train_time:116562ms step_avg:97.14ms
+step:1201/1695 train_time:116661ms step_avg:97.14ms
+step:1202/1695 train_time:116758ms step_avg:97.14ms
+step:1203/1695 train_time:116856ms step_avg:97.14ms
+step:1204/1695 train_time:116955ms step_avg:97.14ms
+step:1205/1695 train_time:117053ms step_avg:97.14ms
+step:1206/1695 train_time:117150ms step_avg:97.14ms
+step:1207/1695 train_time:117247ms step_avg:97.14ms
+step:1208/1695 train_time:117580ms step_avg:97.33ms
+step:1209/1695 train_time:117762ms step_avg:97.40ms
+step:1210/1695 train_time:117858ms step_avg:97.40ms
+step:1211/1695 train_time:117955ms step_avg:97.40ms
+step:1212/1695 train_time:118051ms step_avg:97.40ms
+step:1213/1695 train_time:118147ms step_avg:97.40ms
+step:1214/1695 train_time:118243ms step_avg:97.40ms
+step:1215/1695 train_time:118340ms step_avg:97.40ms
+step:1216/1695 train_time:118436ms step_avg:97.40ms
+step:1217/1695 train_time:118533ms step_avg:97.40ms
+step:1218/1695 train_time:118636ms step_avg:97.40ms
+step:1219/1695 train_time:118739ms step_avg:97.41ms
+step:1220/1695 train_time:118838ms step_avg:97.41ms
+step:1221/1695 train_time:118935ms step_avg:97.41ms
+step:1222/1695 train_time:119031ms step_avg:97.41ms
+step:1223/1695 train_time:119128ms step_avg:97.41ms
+step:1224/1695 train_time:119225ms step_avg:97.41ms
+step:1225/1695 train_time:119323ms step_avg:97.41ms
+step:1226/1695 train_time:119420ms step_avg:97.41ms
+step:1227/1695 train_time:119519ms step_avg:97.41ms
+step:1228/1695 train_time:119618ms step_avg:97.41ms
+step:1229/1695 train_time:119719ms step_avg:97.41ms
+step:1230/1695 train_time:119818ms step_avg:97.41ms
+step:1231/1695 train_time:119916ms step_avg:97.41ms
+step:1232/1695 train_time:120013ms step_avg:97.41ms
+step:1233/1695 train_time:120109ms step_avg:97.41ms
+step:1234/1695 train_time:120206ms step_avg:97.41ms
+step:1235/1695 train_time:120302ms step_avg:97.41ms
+step:1236/1695 train_time:120399ms step_avg:97.41ms
+step:1237/1695 train_time:120498ms step_avg:97.41ms
+step:1238/1695 train_time:120595ms step_avg:97.41ms
+step:1239/1695 train_time:120694ms step_avg:97.41ms
+step:1240/1695 train_time:120792ms step_avg:97.41ms
+step:1241/1695 train_time:120890ms step_avg:97.41ms
+step:1242/1695 train_time:120987ms step_avg:97.41ms
+step:1243/1695 train_time:121085ms step_avg:97.41ms
+step:1244/1695 train_time:121183ms step_avg:97.41ms
+step:1245/1695 train_time:121280ms step_avg:97.41ms
+step:1246/1695 train_time:121377ms step_avg:97.41ms
+step:1247/1695 train_time:121475ms step_avg:97.41ms
+step:1248/1695 train_time:121573ms step_avg:97.41ms
+step:1249/1695 train_time:121671ms step_avg:97.41ms
+step:1250/1695 train_time:121769ms step_avg:97.41ms
+step:1250/1695 val_loss:3.3901 train_time:121865ms step_avg:97.49ms
+step:1251/1695 train_time:121890ms step_avg:97.43ms
+step:1252/1695 train_time:121970ms step_avg:97.42ms
+step:1253/1695 train_time:122068ms step_avg:97.42ms
+step:1254/1695 train_time:122165ms step_avg:97.42ms
+step:1255/1695 train_time:122262ms step_avg:97.42ms
+step:1256/1695 train_time:122358ms step_avg:97.42ms
+step:1257/1695 train_time:122455ms step_avg:97.42ms
+step:1258/1695 train_time:122552ms step_avg:97.42ms
+step:1259/1695 train_time:122648ms step_avg:97.42ms
+step:1260/1695 train_time:122746ms step_avg:97.42ms
+step:1261/1695 train_time:122850ms step_avg:97.42ms
+step:1262/1695 train_time:122952ms step_avg:97.43ms
+step:1263/1695 train_time:123051ms step_avg:97.43ms
+step:1264/1695 train_time:123148ms step_avg:97.43ms
+step:1265/1695 train_time:123246ms step_avg:97.43ms
+step:1266/1695 train_time:123344ms step_avg:97.43ms
+step:1267/1695 train_time:123440ms step_avg:97.43ms
+step:1268/1695 train_time:123537ms step_avg:97.43ms
+step:1269/1695 train_time:123633ms step_avg:97.43ms
+step:1270/1695 train_time:123732ms step_avg:97.43ms
+step:1271/1695 train_time:123833ms step_avg:97.43ms
+step:1272/1695 train_time:123933ms step_avg:97.43ms
+step:1273/1695 train_time:124031ms step_avg:97.43ms
+step:1274/1695 train_time:124131ms step_avg:97.43ms
+step:1275/1695 train_time:124230ms step_avg:97.44ms
+step:1276/1695 train_time:124329ms step_avg:97.44ms
+step:1277/1695 train_time:124427ms step_avg:97.44ms
+step:1278/1695 train_time:124525ms step_avg:97.44ms
+step:1279/1695 train_time:124622ms step_avg:97.44ms
+step:1280/1695 train_time:124719ms step_avg:97.44ms
+step:1281/1695 train_time:124817ms step_avg:97.44ms
+step:1282/1695 train_time:124915ms step_avg:97.44ms
+step:1283/1695 train_time:125014ms step_avg:97.44ms
+step:1284/1695 train_time:125113ms step_avg:97.44ms
+step:1285/1695 train_time:125211ms step_avg:97.44ms
+step:1286/1695 train_time:125310ms step_avg:97.44ms
+step:1287/1695 train_time:125409ms step_avg:97.44ms
+step:1288/1695 train_time:125508ms step_avg:97.44ms
+step:1289/1695 train_time:125606ms step_avg:97.44ms
+step:1290/1695 train_time:125703ms step_avg:97.44ms
+step:1291/1695 train_time:125801ms step_avg:97.44ms
+step:1292/1695 train_time:125898ms step_avg:97.44ms
+step:1293/1695 train_time:125996ms step_avg:97.44ms
+step:1294/1695 train_time:126094ms step_avg:97.44ms
+step:1295/1695 train_time:126193ms step_avg:97.45ms
+step:1296/1695 train_time:126291ms step_avg:97.45ms
+step:1297/1695 train_time:126390ms step_avg:97.45ms
+step:1298/1695 train_time:126489ms step_avg:97.45ms
+step:1299/1695 train_time:126586ms step_avg:97.45ms
+step:1300/1695 train_time:126684ms step_avg:97.45ms
+step:1301/1695 train_time:126781ms step_avg:97.45ms
+step:1302/1695 train_time:126879ms step_avg:97.45ms
+step:1303/1695 train_time:126976ms step_avg:97.45ms
+step:1304/1695 train_time:127074ms step_avg:97.45ms
+step:1305/1695 train_time:127172ms step_avg:97.45ms
+step:1306/1695 train_time:127271ms step_avg:97.45ms
+step:1307/1695 train_time:127369ms step_avg:97.45ms
+step:1308/1695 train_time:127467ms step_avg:97.45ms
+step:1309/1695 train_time:127566ms step_avg:97.45ms
+step:1310/1695 train_time:127664ms step_avg:97.45ms
+step:1311/1695 train_time:127762ms step_avg:97.45ms
+step:1312/1695 train_time:127860ms step_avg:97.45ms
+step:1313/1695 train_time:127957ms step_avg:97.45ms
+step:1314/1695 train_time:128054ms step_avg:97.45ms
+step:1315/1695 train_time:128151ms step_avg:97.45ms
+step:1316/1695 train_time:128250ms step_avg:97.45ms
+step:1317/1695 train_time:128349ms step_avg:97.46ms
+step:1318/1695 train_time:128448ms step_avg:97.46ms
+step:1319/1695 train_time:128546ms step_avg:97.46ms
+step:1320/1695 train_time:128645ms step_avg:97.46ms
+step:1321/1695 train_time:128743ms step_avg:97.46ms
+step:1322/1695 train_time:128840ms step_avg:97.46ms
+step:1323/1695 train_time:128938ms step_avg:97.46ms
+step:1324/1695 train_time:129035ms step_avg:97.46ms
+step:1325/1695 train_time:129132ms step_avg:97.46ms
+step:1326/1695 train_time:129230ms step_avg:97.46ms
+step:1327/1695 train_time:129328ms step_avg:97.46ms
+step:1328/1695 train_time:129427ms step_avg:97.46ms
+step:1329/1695 train_time:129525ms step_avg:97.46ms
+step:1330/1695 train_time:129623ms step_avg:97.46ms
+step:1331/1695 train_time:129721ms step_avg:97.46ms
+step:1332/1695 train_time:129820ms step_avg:97.46ms
+step:1333/1695 train_time:129917ms step_avg:97.46ms
+step:1334/1695 train_time:130014ms step_avg:97.46ms
+step:1335/1695 train_time:130112ms step_avg:97.46ms
+step:1336/1695 train_time:130209ms step_avg:97.46ms
+step:1337/1695 train_time:130306ms step_avg:97.46ms
+step:1338/1695 train_time:130405ms step_avg:97.46ms
+step:1339/1695 train_time:130503ms step_avg:97.46ms
+step:1340/1695 train_time:130600ms step_avg:97.46ms
+step:1341/1695 train_time:130698ms step_avg:97.46ms
+step:1342/1695 train_time:130795ms step_avg:97.46ms
+step:1343/1695 train_time:130894ms step_avg:97.46ms
+step:1344/1695 train_time:130992ms step_avg:97.46ms
+step:1345/1695 train_time:131090ms step_avg:97.46ms
+step:1346/1695 train_time:131188ms step_avg:97.46ms
+step:1347/1695 train_time:131285ms step_avg:97.46ms
+step:1348/1695 train_time:131382ms step_avg:97.46ms
+step:1349/1695 train_time:131480ms step_avg:97.46ms
+step:1350/1695 train_time:131577ms step_avg:97.46ms
+step:1351/1695 train_time:131675ms step_avg:97.46ms
+step:1352/1695 train_time:131773ms step_avg:97.47ms
+step:1353/1695 train_time:131872ms step_avg:97.47ms
+step:1354/1695 train_time:131970ms step_avg:97.47ms
+step:1355/1695 train_time:132069ms step_avg:97.47ms
+step:1356/1695 train_time:132167ms step_avg:97.47ms
+step:1357/1695 train_time:132264ms step_avg:97.47ms
+step:1358/1695 train_time:132362ms step_avg:97.47ms
+step:1359/1695 train_time:132460ms step_avg:97.47ms
+step:1360/1695 train_time:132556ms step_avg:97.47ms
+step:1361/1695 train_time:132654ms step_avg:97.47ms
+step:1362/1695 train_time:132752ms step_avg:97.47ms
+step:1363/1695 train_time:132851ms step_avg:97.47ms
+step:1364/1695 train_time:132950ms step_avg:97.47ms
+step:1365/1695 train_time:133049ms step_avg:97.47ms
+step:1366/1695 train_time:133147ms step_avg:97.47ms
+step:1367/1695 train_time:133246ms step_avg:97.47ms
+step:1368/1695 train_time:133344ms step_avg:97.47ms
+step:1369/1695 train_time:133443ms step_avg:97.47ms
+step:1370/1695 train_time:133540ms step_avg:97.47ms
+step:1371/1695 train_time:133637ms step_avg:97.47ms
+step:1372/1695 train_time:133734ms step_avg:97.47ms
+step:1373/1695 train_time:133830ms step_avg:97.47ms
+step:1374/1695 train_time:133929ms step_avg:97.47ms
+step:1375/1695 train_time:134027ms step_avg:97.47ms
+step:1375/1695 val_loss:3.3508 train_time:134124ms step_avg:97.54ms
+step:1376/1695 train_time:134149ms step_avg:97.49ms
+step:1377/1695 train_time:134233ms step_avg:97.48ms
+step:1378/1695 train_time:134332ms step_avg:97.48ms
+step:1379/1695 train_time:134430ms step_avg:97.48ms
+step:1380/1695 train_time:134528ms step_avg:97.48ms
+step:1381/1695 train_time:134984ms step_avg:97.74ms
+step:1382/1695 train_time:135059ms step_avg:97.73ms
+step:1383/1695 train_time:135155ms step_avg:97.73ms
+step:1384/1695 train_time:135251ms step_avg:97.72ms
+step:1385/1695 train_time:135348ms step_avg:97.72ms
+step:1386/1695 train_time:135445ms step_avg:97.72ms
+step:1387/1695 train_time:135542ms step_avg:97.72ms
+step:1388/1695 train_time:135638ms step_avg:97.72ms
+step:1389/1695 train_time:135734ms step_avg:97.72ms +step:1390/1695 train_time:135832ms step_avg:97.72ms +step:1391/1695 train_time:135934ms step_avg:97.72ms +step:1392/1695 train_time:136034ms step_avg:97.73ms +step:1393/1695 train_time:136133ms step_avg:97.73ms +step:1394/1695 train_time:136230ms step_avg:97.73ms +step:1395/1695 train_time:136328ms step_avg:97.73ms +step:1396/1695 train_time:136425ms step_avg:97.73ms +step:1397/1695 train_time:136523ms step_avg:97.73ms +step:1398/1695 train_time:136620ms step_avg:97.73ms +step:1399/1695 train_time:136717ms step_avg:97.72ms +step:1400/1695 train_time:136814ms step_avg:97.72ms +step:1401/1695 train_time:136913ms step_avg:97.73ms +step:1402/1695 train_time:137012ms step_avg:97.73ms +step:1403/1695 train_time:137111ms step_avg:97.73ms +step:1404/1695 train_time:137211ms step_avg:97.73ms +step:1405/1695 train_time:137309ms step_avg:97.73ms +step:1406/1695 train_time:137407ms step_avg:97.73ms +step:1407/1695 train_time:137504ms step_avg:97.73ms +step:1408/1695 train_time:137601ms step_avg:97.73ms +step:1409/1695 train_time:137698ms step_avg:97.73ms +step:1410/1695 train_time:137795ms step_avg:97.73ms +step:1411/1695 train_time:137894ms step_avg:97.73ms +step:1412/1695 train_time:137993ms step_avg:97.73ms +step:1413/1695 train_time:138092ms step_avg:97.73ms +step:1414/1695 train_time:138191ms step_avg:97.73ms +step:1415/1695 train_time:138289ms step_avg:97.73ms +step:1416/1695 train_time:138386ms step_avg:97.73ms +step:1417/1695 train_time:138483ms step_avg:97.73ms +step:1418/1695 train_time:138580ms step_avg:97.73ms +step:1419/1695 train_time:138677ms step_avg:97.73ms +step:1420/1695 train_time:138774ms step_avg:97.73ms +step:1421/1695 train_time:138873ms step_avg:97.73ms +step:1422/1695 train_time:138971ms step_avg:97.73ms +step:1423/1695 train_time:139071ms step_avg:97.73ms +step:1424/1695 train_time:139170ms step_avg:97.73ms +step:1425/1695 train_time:139269ms step_avg:97.73ms +step:1426/1695 train_time:139367ms step_avg:97.73ms +step:1427/1695 train_time:139467ms step_avg:97.73ms +step:1428/1695 train_time:139565ms step_avg:97.73ms +step:1429/1695 train_time:139662ms step_avg:97.73ms +step:1430/1695 train_time:139760ms step_avg:97.73ms +step:1431/1695 train_time:139857ms step_avg:97.73ms +step:1432/1695 train_time:139954ms step_avg:97.73ms +step:1433/1695 train_time:140052ms step_avg:97.73ms +step:1434/1695 train_time:140149ms step_avg:97.73ms +step:1435/1695 train_time:140246ms step_avg:97.73ms +step:1436/1695 train_time:140344ms step_avg:97.73ms +step:1437/1695 train_time:140441ms step_avg:97.73ms +step:1438/1695 train_time:140538ms step_avg:97.73ms +step:1439/1695 train_time:140635ms step_avg:97.73ms +step:1440/1695 train_time:140733ms step_avg:97.73ms +step:1441/1695 train_time:140831ms step_avg:97.73ms +step:1442/1695 train_time:140929ms step_avg:97.73ms +step:1443/1695 train_time:141027ms step_avg:97.73ms +step:1444/1695 train_time:141125ms step_avg:97.73ms +step:1445/1695 train_time:141223ms step_avg:97.73ms +step:1446/1695 train_time:141320ms step_avg:97.73ms +step:1447/1695 train_time:141417ms step_avg:97.73ms +step:1448/1695 train_time:141514ms step_avg:97.73ms +step:1449/1695 train_time:141612ms step_avg:97.73ms +step:1450/1695 train_time:141710ms step_avg:97.73ms +step:1451/1695 train_time:141808ms step_avg:97.73ms +step:1452/1695 train_time:141905ms step_avg:97.73ms +step:1453/1695 train_time:142003ms step_avg:97.73ms +step:1454/1695 train_time:142100ms step_avg:97.73ms +step:1455/1695 train_time:142198ms step_avg:97.73ms 
+step:1456/1695 train_time:142295ms step_avg:97.73ms +step:1457/1695 train_time:142393ms step_avg:97.73ms +step:1458/1695 train_time:142491ms step_avg:97.73ms +step:1459/1695 train_time:142590ms step_avg:97.73ms +step:1460/1695 train_time:142687ms step_avg:97.73ms +step:1461/1695 train_time:142784ms step_avg:97.73ms +step:1462/1695 train_time:142881ms step_avg:97.73ms +step:1463/1695 train_time:142979ms step_avg:97.73ms +step:1464/1695 train_time:143076ms step_avg:97.73ms +step:1465/1695 train_time:143174ms step_avg:97.73ms +step:1466/1695 train_time:143271ms step_avg:97.73ms +step:1467/1695 train_time:143369ms step_avg:97.73ms +step:1468/1695 train_time:143468ms step_avg:97.73ms +step:1469/1695 train_time:143568ms step_avg:97.73ms +step:1470/1695 train_time:143665ms step_avg:97.73ms +step:1471/1695 train_time:143763ms step_avg:97.73ms +step:1472/1695 train_time:143860ms step_avg:97.73ms +step:1473/1695 train_time:143957ms step_avg:97.73ms +step:1474/1695 train_time:144054ms step_avg:97.73ms +step:1475/1695 train_time:144151ms step_avg:97.73ms +step:1476/1695 train_time:144248ms step_avg:97.73ms +step:1477/1695 train_time:144346ms step_avg:97.73ms +step:1478/1695 train_time:144444ms step_avg:97.73ms +step:1479/1695 train_time:144542ms step_avg:97.73ms +step:1480/1695 train_time:144640ms step_avg:97.73ms +step:1481/1695 train_time:144737ms step_avg:97.73ms +step:1482/1695 train_time:144834ms step_avg:97.73ms +step:1483/1695 train_time:144931ms step_avg:97.73ms +step:1484/1695 train_time:145029ms step_avg:97.73ms +step:1485/1695 train_time:145127ms step_avg:97.73ms +step:1486/1695 train_time:145225ms step_avg:97.73ms +step:1487/1695 train_time:145322ms step_avg:97.73ms +step:1488/1695 train_time:145420ms step_avg:97.73ms +step:1489/1695 train_time:145518ms step_avg:97.73ms +step:1490/1695 train_time:145616ms step_avg:97.73ms +step:1491/1695 train_time:145714ms step_avg:97.73ms +step:1492/1695 train_time:145811ms step_avg:97.73ms +step:1493/1695 train_time:145909ms step_avg:97.73ms +step:1494/1695 train_time:146007ms step_avg:97.73ms +step:1495/1695 train_time:146105ms step_avg:97.73ms +step:1496/1695 train_time:146203ms step_avg:97.73ms +step:1497/1695 train_time:146300ms step_avg:97.73ms +step:1498/1695 train_time:146397ms step_avg:97.73ms +step:1499/1695 train_time:146494ms step_avg:97.73ms +step:1500/1695 train_time:146592ms step_avg:97.73ms +step:1500/1695 val_loss:3.3185 train_time:146688ms step_avg:97.79ms +step:1501/1695 train_time:146713ms step_avg:97.74ms +step:1502/1695 train_time:146798ms step_avg:97.74ms +step:1503/1695 train_time:146898ms step_avg:97.74ms +step:1504/1695 train_time:146996ms step_avg:97.74ms +step:1505/1695 train_time:147093ms step_avg:97.74ms +step:1506/1695 train_time:147190ms step_avg:97.74ms +step:1507/1695 train_time:147287ms step_avg:97.73ms +step:1508/1695 train_time:147383ms step_avg:97.73ms +step:1509/1695 train_time:147479ms step_avg:97.73ms +step:1510/1695 train_time:147576ms step_avg:97.73ms +step:1511/1695 train_time:147675ms step_avg:97.73ms +step:1512/1695 train_time:147778ms step_avg:97.74ms +step:1513/1695 train_time:147878ms step_avg:97.74ms +step:1514/1695 train_time:147977ms step_avg:97.74ms +step:1515/1695 train_time:148075ms step_avg:97.74ms +step:1516/1695 train_time:148173ms step_avg:97.74ms +step:1517/1695 train_time:148270ms step_avg:97.74ms +step:1518/1695 train_time:148368ms step_avg:97.74ms +step:1519/1695 train_time:148465ms step_avg:97.74ms +step:1520/1695 train_time:148562ms step_avg:97.74ms +step:1521/1695 train_time:148659ms 
step_avg:97.74ms +step:1522/1695 train_time:148758ms step_avg:97.74ms +step:1523/1695 train_time:148858ms step_avg:97.74ms +step:1524/1695 train_time:148957ms step_avg:97.74ms +step:1525/1695 train_time:149056ms step_avg:97.74ms +step:1526/1695 train_time:149154ms step_avg:97.74ms +step:1527/1695 train_time:149252ms step_avg:97.74ms +step:1528/1695 train_time:149349ms step_avg:97.74ms +step:1529/1695 train_time:149446ms step_avg:97.74ms +step:1530/1695 train_time:149544ms step_avg:97.74ms +step:1531/1695 train_time:149641ms step_avg:97.74ms +step:1532/1695 train_time:149739ms step_avg:97.74ms +step:1533/1695 train_time:149836ms step_avg:97.74ms +step:1534/1695 train_time:149935ms step_avg:97.74ms +step:1535/1695 train_time:150033ms step_avg:97.74ms +step:1536/1695 train_time:150131ms step_avg:97.74ms +step:1537/1695 train_time:150228ms step_avg:97.74ms +step:1538/1695 train_time:150325ms step_avg:97.74ms +step:1539/1695 train_time:150423ms step_avg:97.74ms +step:1540/1695 train_time:150520ms step_avg:97.74ms +step:1541/1695 train_time:150617ms step_avg:97.74ms +step:1542/1695 train_time:150715ms step_avg:97.74ms +step:1543/1695 train_time:150815ms step_avg:97.74ms +step:1544/1695 train_time:150914ms step_avg:97.74ms +step:1545/1695 train_time:151013ms step_avg:97.74ms +step:1546/1695 train_time:151111ms step_avg:97.74ms +step:1547/1695 train_time:151209ms step_avg:97.74ms +step:1548/1695 train_time:151307ms step_avg:97.74ms +step:1549/1695 train_time:151404ms step_avg:97.74ms +step:1550/1695 train_time:151502ms step_avg:97.74ms +step:1551/1695 train_time:151599ms step_avg:97.74ms +step:1552/1695 train_time:152044ms step_avg:97.97ms +step:1553/1695 train_time:152117ms step_avg:97.95ms +step:1554/1695 train_time:152213ms step_avg:97.95ms +step:1555/1695 train_time:152309ms step_avg:97.95ms +step:1556/1695 train_time:152406ms step_avg:97.95ms +step:1557/1695 train_time:152502ms step_avg:97.95ms +step:1558/1695 train_time:152598ms step_avg:97.94ms +step:1559/1695 train_time:152694ms step_avg:97.94ms +step:1560/1695 train_time:152791ms step_avg:97.94ms +step:1561/1695 train_time:152888ms step_avg:97.94ms +step:1562/1695 train_time:152990ms step_avg:97.95ms +step:1563/1695 train_time:153092ms step_avg:97.95ms +step:1564/1695 train_time:153194ms step_avg:97.95ms +step:1565/1695 train_time:153294ms step_avg:97.95ms +step:1566/1695 train_time:153391ms step_avg:97.95ms +step:1567/1695 train_time:153490ms step_avg:97.95ms +step:1568/1695 train_time:153587ms step_avg:97.95ms +step:1569/1695 train_time:153684ms step_avg:97.95ms +step:1570/1695 train_time:153780ms step_avg:97.95ms +step:1571/1695 train_time:153877ms step_avg:97.95ms +step:1572/1695 train_time:153976ms step_avg:97.95ms +step:1573/1695 train_time:154077ms step_avg:97.95ms +step:1574/1695 train_time:154176ms step_avg:97.95ms +step:1575/1695 train_time:154276ms step_avg:97.95ms +step:1576/1695 train_time:154375ms step_avg:97.95ms +step:1577/1695 train_time:154473ms step_avg:97.95ms +step:1578/1695 train_time:154571ms step_avg:97.95ms +step:1579/1695 train_time:154669ms step_avg:97.95ms +step:1580/1695 train_time:154766ms step_avg:97.95ms +step:1581/1695 train_time:154863ms step_avg:97.95ms +step:1582/1695 train_time:154961ms step_avg:97.95ms +step:1583/1695 train_time:155059ms step_avg:97.95ms +step:1584/1695 train_time:155156ms step_avg:97.95ms +step:1585/1695 train_time:155255ms step_avg:97.95ms +step:1586/1695 train_time:155354ms step_avg:97.95ms +step:1587/1695 train_time:155454ms step_avg:97.95ms +step:1588/1695 train_time:155553ms 
step_avg:97.96ms +step:1589/1695 train_time:155651ms step_avg:97.96ms +step:1590/1695 train_time:155749ms step_avg:97.96ms +step:1591/1695 train_time:155847ms step_avg:97.96ms +step:1592/1695 train_time:155946ms step_avg:97.96ms +step:1593/1695 train_time:156045ms step_avg:97.96ms +step:1594/1695 train_time:156143ms step_avg:97.96ms +step:1595/1695 train_time:156241ms step_avg:97.96ms +step:1596/1695 train_time:156339ms step_avg:97.96ms +step:1597/1695 train_time:156436ms step_avg:97.96ms +step:1598/1695 train_time:156534ms step_avg:97.96ms +step:1599/1695 train_time:156632ms step_avg:97.96ms +step:1600/1695 train_time:156731ms step_avg:97.96ms +step:1601/1695 train_time:156829ms step_avg:97.96ms +step:1602/1695 train_time:156928ms step_avg:97.96ms +step:1603/1695 train_time:157027ms step_avg:97.96ms +step:1604/1695 train_time:157126ms step_avg:97.96ms +step:1605/1695 train_time:157224ms step_avg:97.96ms +step:1606/1695 train_time:157323ms step_avg:97.96ms +step:1607/1695 train_time:157420ms step_avg:97.96ms +step:1608/1695 train_time:157518ms step_avg:97.96ms +step:1609/1695 train_time:157615ms step_avg:97.96ms +step:1610/1695 train_time:157712ms step_avg:97.96ms +step:1611/1695 train_time:157810ms step_avg:97.96ms +step:1612/1695 train_time:157909ms step_avg:97.96ms +step:1613/1695 train_time:158007ms step_avg:97.96ms +step:1614/1695 train_time:158106ms step_avg:97.96ms +step:1615/1695 train_time:158204ms step_avg:97.96ms +step:1616/1695 train_time:158302ms step_avg:97.96ms +step:1617/1695 train_time:158400ms step_avg:97.96ms +step:1618/1695 train_time:158498ms step_avg:97.96ms +step:1619/1695 train_time:158594ms step_avg:97.96ms +step:1620/1695 train_time:158691ms step_avg:97.96ms +step:1621/1695 train_time:158789ms step_avg:97.96ms +step:1622/1695 train_time:158887ms step_avg:97.96ms +step:1623/1695 train_time:158984ms step_avg:97.96ms +step:1624/1695 train_time:159083ms step_avg:97.96ms +step:1625/1695 train_time:159181ms step_avg:97.96ms +step:1625/1695 val_loss:3.2909 train_time:159277ms step_avg:98.02ms +step:1626/1695 train_time:159302ms step_avg:97.97ms +step:1627/1695 train_time:159385ms step_avg:97.96ms +step:1628/1695 train_time:159484ms step_avg:97.96ms +step:1629/1695 train_time:159582ms step_avg:97.96ms +step:1630/1695 train_time:159678ms step_avg:97.96ms +step:1631/1695 train_time:159775ms step_avg:97.96ms +step:1632/1695 train_time:159872ms step_avg:97.96ms +step:1633/1695 train_time:159970ms step_avg:97.96ms +step:1634/1695 train_time:160066ms step_avg:97.96ms +step:1635/1695 train_time:160163ms step_avg:97.96ms +step:1636/1695 train_time:160263ms step_avg:97.96ms +step:1637/1695 train_time:160363ms step_avg:97.96ms +step:1638/1695 train_time:160463ms step_avg:97.96ms +step:1639/1695 train_time:160561ms step_avg:97.96ms +step:1640/1695 train_time:160658ms step_avg:97.96ms +step:1641/1695 train_time:160756ms step_avg:97.96ms +step:1642/1695 train_time:160853ms step_avg:97.96ms +step:1643/1695 train_time:160949ms step_avg:97.96ms +step:1644/1695 train_time:161046ms step_avg:97.96ms +step:1645/1695 train_time:161142ms step_avg:97.96ms +step:1646/1695 train_time:161240ms step_avg:97.96ms +step:1647/1695 train_time:161339ms step_avg:97.96ms +step:1648/1695 train_time:161438ms step_avg:97.96ms +step:1649/1695 train_time:161536ms step_avg:97.96ms +step:1650/1695 train_time:161634ms step_avg:97.96ms +step:1651/1695 train_time:161732ms step_avg:97.96ms +step:1652/1695 train_time:161831ms step_avg:97.96ms +step:1653/1695 train_time:161930ms step_avg:97.96ms +step:1654/1695 
train_time:162027ms step_avg:97.96ms +step:1655/1695 train_time:162124ms step_avg:97.96ms +step:1656/1695 train_time:162223ms step_avg:97.96ms +step:1657/1695 train_time:162320ms step_avg:97.96ms +step:1658/1695 train_time:162418ms step_avg:97.96ms +step:1659/1695 train_time:162516ms step_avg:97.96ms +step:1660/1695 train_time:162614ms step_avg:97.96ms +step:1661/1695 train_time:162712ms step_avg:97.96ms +step:1662/1695 train_time:162811ms step_avg:97.96ms +step:1663/1695 train_time:162909ms step_avg:97.96ms +step:1664/1695 train_time:163007ms step_avg:97.96ms +step:1665/1695 train_time:163105ms step_avg:97.96ms +step:1666/1695 train_time:163204ms step_avg:97.96ms +step:1667/1695 train_time:163303ms step_avg:97.96ms +step:1668/1695 train_time:163402ms step_avg:97.96ms +step:1669/1695 train_time:163499ms step_avg:97.96ms +step:1670/1695 train_time:163597ms step_avg:97.96ms +step:1671/1695 train_time:163694ms step_avg:97.96ms +step:1672/1695 train_time:163791ms step_avg:97.96ms +step:1673/1695 train_time:163889ms step_avg:97.96ms +step:1674/1695 train_time:163987ms step_avg:97.96ms +step:1675/1695 train_time:164085ms step_avg:97.96ms +step:1676/1695 train_time:164183ms step_avg:97.96ms +step:1677/1695 train_time:164281ms step_avg:97.96ms +step:1678/1695 train_time:164379ms step_avg:97.96ms +step:1679/1695 train_time:164476ms step_avg:97.96ms +step:1680/1695 train_time:164573ms step_avg:97.96ms +step:1681/1695 train_time:164671ms step_avg:97.96ms +step:1682/1695 train_time:164768ms step_avg:97.96ms +step:1683/1695 train_time:164866ms step_avg:97.96ms +step:1684/1695 train_time:164963ms step_avg:97.96ms +step:1685/1695 train_time:165060ms step_avg:97.96ms +step:1686/1695 train_time:165157ms step_avg:97.96ms +step:1687/1695 train_time:165255ms step_avg:97.96ms +step:1688/1695 train_time:165354ms step_avg:97.96ms +step:1689/1695 train_time:165453ms step_avg:97.96ms +step:1690/1695 train_time:165551ms step_avg:97.96ms +step:1691/1695 train_time:165650ms step_avg:97.96ms +step:1692/1695 train_time:165748ms step_avg:97.96ms +step:1693/1695 train_time:165845ms step_avg:97.96ms +step:1694/1695 train_time:165943ms step_avg:97.96ms +step:1695/1695 train_time:166040ms step_avg:97.96ms +step:1695/1695 val_loss:3.2791 train_time:166135ms step_avg:98.01ms +peak memory allocated: 34000 MiB reserved: 49416 MiB diff --git a/records/082725_FA3/bb331245-5e49-4366-b902-6caff64ed8d6.txt b/records/082725_FA3/bb331245-5e49-4366-b902-6caff64ed8d6.txt new file mode 100644 index 000000000..f68fe219a --- /dev/null +++ b/records/082725_FA3/bb331245-5e49-4366-b902-6caff64ed8d6.txt @@ -0,0 +1,2808 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# 
----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + 
batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + 
configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, 
meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr 
https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int): + assert input_seq.ndim == 2 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), + reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + 
nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +class EOSBatchFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256): + # Precompute EOS positions once per shard + self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 # pointer into eos_idx (start EOS for next step) + self.pos = 0 # logical stream position within this shard + self.world_size = world_size + def seek(self, pos: int): + # Set pointer to the first EOS >= pos + self.i = np.searchsorted(self.eos_idx, pos) + if self.i >= len(self.eos_idx): + raise StopIteration("Seek past last EOS.") + self.pos = pos + def next_batch(self, batch_size_local: int, seq_len: int): + n = len(self.eos_idx) + if self.i >= n: + raise StopIteration("No more EOS in this shard.") + starts = [[] for _ in range(self.world_size)] + idx = self.i + cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1 + for r in range(self.world_size): + for _ in range(batch_size_local): + start = cur + 1 + target = start + seq_len # need seq_len tokens before next EOS + j = np.searchsorted(self.eos_idx, target) + if j >= n: + raise StopIteration("Insufficient EOS ahead; hit tail of shard.") + starts[r].append(start) + idx = j + cur = self.eos_idx[idx] # next seq must also start at a new doc + advance = self.eos_idx[idx] - self.pos # move stream to the last end + self.pos += advance + self.i = idx + return starts, advance + + +def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token and sequences don't overlap + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert batch_size % world_size == 0, "Batch size must be divisible by world size" + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + + finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None + if align_to_bos: finder.seek(pos) + + while True: + batch_size_local = batch_size // world_size + num_tokens_global = batch_size * seq_len + + if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + + if align_to_bos: + try: + batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len) + start_idxs = batch_starts[rank] + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. 
+ tokens, pos = _load_data_shard(next(file_iter)), 0 + finder = EOSBatchFinder(tokens, world_size=world_size) + continue + + bufs = [tokens[s: s + seq_len + 1] for s in start_idxs] + buf = torch.stack(bufs, dim=0) + _inputs = buf[:, :-1] + _targets = buf[:, 1:] + else: + batch_span = num_tokens_global + start_pos_local = pos + rank * (batch_size_local * seq_len) + end_pos_local = start_pos_local + (batch_size_local * seq_len) + + buf = tokens[start_pos_local: end_pos_local + 1] + + _inputs = buf[:-1].view(batch_size_local, seq_len) + _targets = buf[1:].view(batch_size_local, seq_len) + + new_params = yield ( + _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True), + _targets.to(device="cuda", dtype=torch.int64, non_blocking=True) + ) + + pos += batch_span + + if new_params is not None: + # makes it possible for generator to receive new (batch_size, seq_len) via .send() + new_batch_size, new_seq_len = new_params + assert new_batch_size % world_size == 0, "New batch size must be divisible by world size" + batch_size = new_batch_size + seq_len = new_seq_len + + +# ----------------------------------------------------------------------------- +# int main + +@dataclass +class Hyperparameters: + # data + train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on + val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on + val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons + train_seq_len: int = 1024 * 2 + train_batch_size: int = 24 * 8 + val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size. + # optimization + num_iterations: int = 1695 # number of iterations to run + cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate + # evaluation and logging + run_id: str = str(uuid.uuid4()) + val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end + save_checkpoint: bool = False + # attention masking + bandwidth: int = 128 + ws_schedule: tuple = (3, 7, 11) + +args = Hyperparameters() + +data_path = os.environ.get("DATA_PATH", ".") +args.train_files = os.path.join(data_path, args.train_files) +args.val_files = os.path.join(data_path, args.val_files) + +# torchrun sets these env variables +rank = int(os.environ["RANK"]) +world_size = int(os.environ["WORLD_SIZE"]) +assert 8 % world_size == 0, "world_size must be a divisor of 8" +grad_accum_steps = 8 // world_size +assert torch.cuda.is_available() +device = torch.device("cuda", int(os.environ["LOCAL_RANK"])) +torch.cuda.set_device(device) +dist.init_process_group(backend="nccl", device_id=device) +dist.barrier() +master_process = (rank == 0) # this process will do logging, checkpointing etc.
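+# distributed_data_generator above uses the generator .send() protocol: `yield` both
+# returns a batch and can receive a new (batch_size, seq_len) tuple. A minimal
+# standalone sketch of that mechanism (hypothetical toy helper, not part of the training path):
+def _send_demo(step: int = 1):
+    n = 0
+    while True:
+        new_step = yield n  # gen.send(x) resumes here with new_step == x
+        if new_step is not None:
+            step = new_step
+        n += step
+# usage: g = _send_demo(); next(g) -> 0; g.send(10) -> 10; subsequent next(g) -> 20, 30, ...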
+ +# begin logging +logfile = None +if master_process: + run_id = args.run_id + os.makedirs("logs", exist_ok=True) + logfile = f"logs/{run_id}.txt" + print(logfile) +def print0(s, console=False): + if master_process: + with open(logfile, "a") as f: + if console: + print(s) + print(s, file=f) + +# begin by printing this file (the Python code) +print0(code) +print0("="*100) +# log information about the hardware/software environment this is running on +print0(f"Running Python {sys.version}") +print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}") +print0(f"Running Triton version {triton.__version__}") + +def nvidia_smi(): + import subprocess # avoid top level import + return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout +print0(nvidia_smi()) +print0("="*100) + +model: nn.Module = GPT( + vocab_size=50257, + num_layers=12, + num_heads=6, + model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len) +).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +for param in model.parameters(): + dist.broadcast(param.detach(), 0) + +# collect the parameters to optimize +hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] +embed_params = [p for n, p in model.named_parameters() if "embed" in n] +scalar_params = [p for p in model.parameters() if p.ndim < 2] +head_params = [model.lm_head.weight] + +# init the optimizer(s) +# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence +# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 +optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0) +optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0) +optimizers = [optimizer1, optimizer2] +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay +def get_lr_and_ws(step: int): + x = step / (1 + args.num_iterations) # progress in training + assert 0 <= x < 1 + lr = 1.0 + if x >= 1 - args.cooldown_frac: + w = (1 - x) / args.cooldown_frac + lr = w * 1.0 + (1 - w) * 0.1 + ws_idx = int(len(args.ws_schedule) * x) + return lr, args.ws_schedule[ws_idx] + +model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True) + +######################################## +# Warmup kernels # +######################################## + +# Warmup the training kernels, then re-initialize the state so we aren't cheating +warmup_steps = 60 +initial_state = dict(model=copy.deepcopy(model.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state +train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +for step in range(warmup_steps): + inputs, targets = next(train_loader) + ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each + model(inputs, targets, ws, ws // 2).backward() + for opt in optimizers: + opt.step() + model.zero_grad(set_to_none=True) +model.load_state_dict(initial_state["model"]) +for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) +del train_loader, initial_state + +######################################## +# Training and validation # +######################################## +
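+# Worked example of get_lr_and_ws above, with the defaults num_iterations=1695,
+# cooldown_frac=0.45, ws_schedule=(3, 7, 11) (a sketch derived from the code):
+# x = step / 1696, so lr holds at 1.0 until x >= 0.55 (step ~933) and then decays
+# linearly toward 0.1, while the long window size steps 3 -> 7 at x = 1/3 (step 566)
+# and 7 -> 11 at x = 2/3 (step 1131); e.g. get_lr_and_ws(0) == (1.0, 3) and
+# get_lr_and_ws(1200) ~= (0.685, 11).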
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + lr, ws = get_lr_and_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % (world_size * args.val_seq_len) == 0 + val_steps = args.val_tokens // (world_size * args.val_seq_len) + val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, ws, ws // 2) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, ws, ws // 2).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * lr + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Wed Aug 27 04:10:32 2025 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.6 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:00:0B.0 Off | Off | +| N/A 29C P0 114W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:00:0C.0 Off | Off | +| N/A 31C P0 112W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:00:0D.0 Off | Off | +| N/A 32C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:00:0E.0 Off | Off | +| N/A 30C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:00:0F.0 Off | Off | +| N/A 29C P0 111W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:00:10.0 Off | Off | +| N/A 33C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:00:11.0 Off | Off | +| N/A 32C P0 110W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:00:12.0 Off | Off | +| N/A 31C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| ++---------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1695 train_time:507ms step_avg:507.01ms +step:2/1695 train_time:531ms step_avg:265.61ms +step:3/1695 train_time:603ms step_avg:201.15ms +step:4/1695 train_time:695ms step_avg:173.81ms +step:5/1695 train_time:789ms step_avg:157.80ms +step:6/1695 train_time:883ms step_avg:147.20ms +step:7/1695 train_time:976ms step_avg:139.42ms +step:8/1695 train_time:1069ms step_avg:133.63ms +step:9/1695 train_time:1162ms step_avg:129.16ms +step:10/1695 train_time:1256ms step_avg:125.61ms +step:11/1695 train_time:1350ms step_avg:122.74ms +step:12/1695 train_time:1446ms step_avg:120.48ms +step:13/1695 train_time:1542ms step_avg:118.60ms +step:14/1695 train_time:1637ms step_avg:116.89ms +step:15/1695 train_time:1731ms step_avg:115.41ms +step:16/1695 train_time:1827ms step_avg:114.16ms +step:17/1695 train_time:1921ms step_avg:112.98ms +step:18/1695 train_time:2014ms step_avg:111.90ms +step:19/1695 train_time:2108ms step_avg:110.97ms +step:20/1695 train_time:2204ms step_avg:110.18ms +step:21/1695 train_time:2297ms step_avg:109.37ms +step:22/1695 train_time:2392ms step_avg:108.71ms 
+step:23/1695 train_time:2487ms step_avg:108.14ms +step:24/1695 train_time:2583ms step_avg:107.62ms +step:25/1695 train_time:2677ms step_avg:107.07ms +step:26/1695 train_time:2771ms step_avg:106.58ms +step:27/1695 train_time:2866ms step_avg:106.14ms +step:28/1695 train_time:2960ms step_avg:105.72ms +step:29/1695 train_time:3054ms step_avg:105.31ms +step:30/1695 train_time:3149ms step_avg:104.96ms +step:31/1695 train_time:3243ms step_avg:104.62ms +step:32/1695 train_time:3337ms step_avg:104.27ms +step:33/1695 train_time:3432ms step_avg:104.01ms +step:34/1695 train_time:3529ms step_avg:103.78ms +step:35/1695 train_time:3624ms step_avg:103.56ms +step:36/1695 train_time:3719ms step_avg:103.30ms +step:37/1695 train_time:3814ms step_avg:103.07ms +step:38/1695 train_time:3909ms step_avg:102.86ms +step:39/1695 train_time:4004ms step_avg:102.66ms +step:40/1695 train_time:4098ms step_avg:102.45ms +step:41/1695 train_time:4192ms step_avg:102.24ms +step:42/1695 train_time:4286ms step_avg:102.04ms +step:43/1695 train_time:4380ms step_avg:101.86ms +step:44/1695 train_time:4474ms step_avg:101.68ms +step:45/1695 train_time:4569ms step_avg:101.53ms +step:46/1695 train_time:4664ms step_avg:101.39ms +step:47/1695 train_time:4757ms step_avg:101.22ms +step:48/1695 train_time:4852ms step_avg:101.07ms +step:49/1695 train_time:4947ms step_avg:100.95ms +step:50/1695 train_time:5041ms step_avg:100.82ms +step:51/1695 train_time:5135ms step_avg:100.68ms +step:52/1695 train_time:5229ms step_avg:100.56ms +step:53/1695 train_time:5324ms step_avg:100.45ms +step:54/1695 train_time:5418ms step_avg:100.33ms +step:55/1695 train_time:5513ms step_avg:100.23ms +step:56/1695 train_time:5608ms step_avg:100.14ms +step:57/1695 train_time:5703ms step_avg:100.05ms +step:58/1695 train_time:5796ms step_avg:99.94ms +step:59/1695 train_time:5891ms step_avg:99.85ms +step:60/1695 train_time:5986ms step_avg:99.76ms +step:61/1695 train_time:6079ms step_avg:99.66ms +step:62/1695 train_time:6173ms step_avg:99.56ms +step:63/1695 train_time:6268ms step_avg:99.50ms +step:64/1695 train_time:6364ms step_avg:99.44ms +step:65/1695 train_time:6458ms step_avg:99.36ms +step:66/1695 train_time:6553ms step_avg:99.29ms +step:67/1695 train_time:6649ms step_avg:99.24ms +step:68/1695 train_time:6745ms step_avg:99.19ms +step:69/1695 train_time:6837ms step_avg:99.08ms +step:70/1695 train_time:6931ms step_avg:99.02ms +step:71/1695 train_time:7026ms step_avg:98.95ms +step:72/1695 train_time:7119ms step_avg:98.87ms +step:73/1695 train_time:7213ms step_avg:98.81ms +step:74/1695 train_time:7307ms step_avg:98.75ms +step:75/1695 train_time:7402ms step_avg:98.69ms +step:76/1695 train_time:7495ms step_avg:98.62ms +step:77/1695 train_time:7590ms step_avg:98.58ms +step:78/1695 train_time:7686ms step_avg:98.53ms +step:79/1695 train_time:7779ms step_avg:98.47ms +step:80/1695 train_time:7873ms step_avg:98.41ms +step:81/1695 train_time:7969ms step_avg:98.38ms +step:82/1695 train_time:8064ms step_avg:98.34ms +step:83/1695 train_time:8158ms step_avg:98.28ms +step:84/1695 train_time:8252ms step_avg:98.23ms +step:85/1695 train_time:8346ms step_avg:98.18ms +step:86/1695 train_time:8439ms step_avg:98.13ms +step:87/1695 train_time:8533ms step_avg:98.08ms +step:88/1695 train_time:8629ms step_avg:98.06ms +step:89/1695 train_time:8724ms step_avg:98.02ms +step:90/1695 train_time:8817ms step_avg:97.97ms +step:91/1695 train_time:8911ms step_avg:97.93ms +step:92/1695 train_time:9006ms step_avg:97.89ms +step:93/1695 train_time:9100ms step_avg:97.85ms +step:94/1695 train_time:9194ms 
step_avg:97.80ms +step:95/1695 train_time:9289ms step_avg:97.77ms +step:96/1695 train_time:9383ms step_avg:97.74ms +step:97/1695 train_time:9476ms step_avg:97.69ms +step:98/1695 train_time:9570ms step_avg:97.66ms +step:99/1695 train_time:9665ms step_avg:97.63ms +step:100/1695 train_time:9760ms step_avg:97.60ms +step:101/1695 train_time:9854ms step_avg:97.56ms +step:102/1695 train_time:9949ms step_avg:97.54ms +step:103/1695 train_time:10043ms step_avg:97.51ms +step:104/1695 train_time:10137ms step_avg:97.47ms +step:105/1695 train_time:10231ms step_avg:97.44ms +step:106/1695 train_time:10326ms step_avg:97.41ms +step:107/1695 train_time:10420ms step_avg:97.38ms +step:108/1695 train_time:10513ms step_avg:97.34ms +step:109/1695 train_time:10608ms step_avg:97.32ms +step:110/1695 train_time:10703ms step_avg:97.30ms +step:111/1695 train_time:10797ms step_avg:97.27ms +step:112/1695 train_time:10891ms step_avg:97.24ms +step:113/1695 train_time:10985ms step_avg:97.21ms +step:114/1695 train_time:11078ms step_avg:97.18ms +step:115/1695 train_time:11172ms step_avg:97.15ms +step:116/1695 train_time:11267ms step_avg:97.13ms +step:117/1695 train_time:11362ms step_avg:97.11ms +step:118/1695 train_time:11456ms step_avg:97.08ms +step:119/1695 train_time:11550ms step_avg:97.06ms +step:120/1695 train_time:11644ms step_avg:97.03ms +step:121/1695 train_time:11737ms step_avg:97.00ms +step:122/1695 train_time:11832ms step_avg:96.98ms +step:123/1695 train_time:11927ms step_avg:96.97ms +step:124/1695 train_time:12022ms step_avg:96.95ms +step:125/1695 train_time:12115ms step_avg:96.92ms +step:125/1695 val_loss:4.3104 train_time:12207ms step_avg:97.66ms +step:126/1695 train_time:12232ms step_avg:97.08ms +step:127/1695 train_time:12310ms step_avg:96.93ms +step:128/1695 train_time:12410ms step_avg:96.96ms +step:129/1695 train_time:12506ms step_avg:96.94ms +step:130/1695 train_time:12599ms step_avg:96.92ms +step:131/1695 train_time:12692ms step_avg:96.89ms +step:132/1695 train_time:12785ms step_avg:96.86ms +step:133/1695 train_time:12878ms step_avg:96.83ms +step:134/1695 train_time:12972ms step_avg:96.80ms +step:135/1695 train_time:13064ms step_avg:96.77ms +step:136/1695 train_time:13157ms step_avg:96.75ms +step:137/1695 train_time:13252ms step_avg:96.73ms +step:138/1695 train_time:13348ms step_avg:96.73ms +step:139/1695 train_time:13444ms step_avg:96.72ms +step:140/1695 train_time:13539ms step_avg:96.70ms +step:141/1695 train_time:13633ms step_avg:96.69ms +step:142/1695 train_time:13727ms step_avg:96.67ms +step:143/1695 train_time:13821ms step_avg:96.65ms +step:144/1695 train_time:13916ms step_avg:96.64ms +step:145/1695 train_time:14010ms step_avg:96.62ms +step:146/1695 train_time:14103ms step_avg:96.59ms +step:147/1695 train_time:14197ms step_avg:96.58ms +step:148/1695 train_time:14293ms step_avg:96.57ms +step:149/1695 train_time:14387ms step_avg:96.56ms +step:150/1695 train_time:14482ms step_avg:96.55ms +step:151/1695 train_time:14578ms step_avg:96.55ms +step:152/1695 train_time:14673ms step_avg:96.53ms +step:153/1695 train_time:14766ms step_avg:96.51ms +step:154/1695 train_time:14860ms step_avg:96.49ms +step:155/1695 train_time:14953ms step_avg:96.47ms +step:156/1695 train_time:15046ms step_avg:96.45ms +step:157/1695 train_time:15140ms step_avg:96.43ms +step:158/1695 train_time:15233ms step_avg:96.41ms +step:159/1695 train_time:15327ms step_avg:96.40ms +step:160/1695 train_time:15422ms step_avg:96.39ms +step:161/1695 train_time:15517ms step_avg:96.38ms +step:162/1695 train_time:15613ms step_avg:96.38ms +step:163/1695 
train_time:15706ms step_avg:96.36ms +step:164/1695 train_time:15800ms step_avg:96.34ms +step:165/1695 train_time:15893ms step_avg:96.32ms +step:166/1695 train_time:15987ms step_avg:96.30ms +step:167/1695 train_time:16080ms step_avg:96.29ms +step:168/1695 train_time:16174ms step_avg:96.27ms +step:169/1695 train_time:16267ms step_avg:96.26ms +step:170/1695 train_time:16361ms step_avg:96.24ms +step:171/1695 train_time:16455ms step_avg:96.23ms +step:172/1695 train_time:16551ms step_avg:96.22ms +step:173/1695 train_time:16932ms step_avg:97.87ms +step:174/1695 train_time:17007ms step_avg:97.74ms +step:175/1695 train_time:17099ms step_avg:97.71ms +step:176/1695 train_time:17192ms step_avg:97.68ms +step:177/1695 train_time:17285ms step_avg:97.65ms +step:178/1695 train_time:17378ms step_avg:97.63ms +step:179/1695 train_time:17472ms step_avg:97.61ms +step:180/1695 train_time:17565ms step_avg:97.58ms +step:181/1695 train_time:17658ms step_avg:97.56ms +step:182/1695 train_time:17752ms step_avg:97.54ms +step:183/1695 train_time:17847ms step_avg:97.53ms +step:184/1695 train_time:17944ms step_avg:97.52ms +step:185/1695 train_time:18041ms step_avg:97.52ms +step:186/1695 train_time:18135ms step_avg:97.50ms +step:187/1695 train_time:18229ms step_avg:97.48ms +step:188/1695 train_time:18322ms step_avg:97.46ms +step:189/1695 train_time:18416ms step_avg:97.44ms +step:190/1695 train_time:18510ms step_avg:97.42ms +step:191/1695 train_time:18602ms step_avg:97.40ms +step:192/1695 train_time:18696ms step_avg:97.38ms +step:193/1695 train_time:18790ms step_avg:97.36ms +step:194/1695 train_time:18885ms step_avg:97.34ms +step:195/1695 train_time:18980ms step_avg:97.33ms +step:196/1695 train_time:19075ms step_avg:97.32ms +step:197/1695 train_time:19169ms step_avg:97.31ms +step:198/1695 train_time:19263ms step_avg:97.29ms +step:199/1695 train_time:19357ms step_avg:97.27ms +step:200/1695 train_time:19451ms step_avg:97.26ms +step:201/1695 train_time:19544ms step_avg:97.23ms +step:202/1695 train_time:19638ms step_avg:97.22ms +step:203/1695 train_time:19733ms step_avg:97.21ms +step:204/1695 train_time:19826ms step_avg:97.19ms +step:205/1695 train_time:19921ms step_avg:97.18ms +step:206/1695 train_time:20016ms step_avg:97.17ms +step:207/1695 train_time:20111ms step_avg:97.15ms +step:208/1695 train_time:20204ms step_avg:97.14ms +step:209/1695 train_time:20299ms step_avg:97.12ms +step:210/1695 train_time:20393ms step_avg:97.11ms +step:211/1695 train_time:20487ms step_avg:97.10ms +step:212/1695 train_time:20581ms step_avg:97.08ms +step:213/1695 train_time:20674ms step_avg:97.06ms +step:214/1695 train_time:20768ms step_avg:97.05ms +step:215/1695 train_time:20862ms step_avg:97.03ms +step:216/1695 train_time:20957ms step_avg:97.02ms +step:217/1695 train_time:21051ms step_avg:97.01ms +step:218/1695 train_time:21145ms step_avg:97.00ms +step:219/1695 train_time:21240ms step_avg:96.98ms +step:220/1695 train_time:21334ms step_avg:96.97ms +step:221/1695 train_time:21428ms step_avg:96.96ms +step:222/1695 train_time:21521ms step_avg:96.94ms +step:223/1695 train_time:21616ms step_avg:96.93ms +step:224/1695 train_time:21710ms step_avg:96.92ms +step:225/1695 train_time:21804ms step_avg:96.91ms +step:226/1695 train_time:21899ms step_avg:96.90ms +step:227/1695 train_time:21994ms step_avg:96.89ms +step:228/1695 train_time:22087ms step_avg:96.87ms +step:229/1695 train_time:22181ms step_avg:96.86ms +step:230/1695 train_time:22276ms step_avg:96.85ms +step:231/1695 train_time:22371ms step_avg:96.85ms +step:232/1695 train_time:22465ms step_avg:96.83ms 
+step:233/1695 train_time:22559ms step_avg:96.82ms +step:234/1695 train_time:22653ms step_avg:96.81ms +step:235/1695 train_time:22747ms step_avg:96.79ms +step:236/1695 train_time:22840ms step_avg:96.78ms +step:237/1695 train_time:22935ms step_avg:96.77ms +step:238/1695 train_time:23029ms step_avg:96.76ms +step:239/1695 train_time:23122ms step_avg:96.75ms +step:240/1695 train_time:23217ms step_avg:96.74ms +step:241/1695 train_time:23312ms step_avg:96.73ms +step:242/1695 train_time:23405ms step_avg:96.72ms +step:243/1695 train_time:23499ms step_avg:96.71ms +step:244/1695 train_time:23593ms step_avg:96.69ms +step:245/1695 train_time:23686ms step_avg:96.68ms +step:246/1695 train_time:23780ms step_avg:96.67ms +step:247/1695 train_time:23875ms step_avg:96.66ms +step:248/1695 train_time:23969ms step_avg:96.65ms +step:249/1695 train_time:24063ms step_avg:96.64ms +step:250/1695 train_time:24157ms step_avg:96.63ms +step:250/1695 val_loss:3.9654 train_time:24251ms step_avg:97.00ms +step:251/1695 train_time:24276ms step_avg:96.72ms +step:252/1695 train_time:24355ms step_avg:96.65ms +step:253/1695 train_time:24453ms step_avg:96.65ms +step:254/1695 train_time:24548ms step_avg:96.65ms +step:255/1695 train_time:24642ms step_avg:96.64ms +step:256/1695 train_time:24735ms step_avg:96.62ms +step:257/1695 train_time:24828ms step_avg:96.61ms +step:258/1695 train_time:24921ms step_avg:96.59ms +step:259/1695 train_time:25014ms step_avg:96.58ms +step:260/1695 train_time:25108ms step_avg:96.57ms +step:261/1695 train_time:25201ms step_avg:96.55ms +step:262/1695 train_time:25295ms step_avg:96.55ms +step:263/1695 train_time:25391ms step_avg:96.54ms +step:264/1695 train_time:25487ms step_avg:96.54ms +step:265/1695 train_time:25582ms step_avg:96.54ms +step:266/1695 train_time:25676ms step_avg:96.52ms +step:267/1695 train_time:25769ms step_avg:96.51ms +step:268/1695 train_time:25862ms step_avg:96.50ms +step:269/1695 train_time:25955ms step_avg:96.49ms +step:270/1695 train_time:26048ms step_avg:96.47ms +step:271/1695 train_time:26141ms step_avg:96.46ms +step:272/1695 train_time:26235ms step_avg:96.45ms +step:273/1695 train_time:26330ms step_avg:96.45ms +step:274/1695 train_time:26426ms step_avg:96.45ms +step:275/1695 train_time:26522ms step_avg:96.44ms +step:276/1695 train_time:26616ms step_avg:96.44ms +step:277/1695 train_time:26710ms step_avg:96.43ms +step:278/1695 train_time:26803ms step_avg:96.42ms +step:279/1695 train_time:26897ms step_avg:96.40ms +step:280/1695 train_time:26990ms step_avg:96.39ms +step:281/1695 train_time:27083ms step_avg:96.38ms +step:282/1695 train_time:27176ms step_avg:96.37ms +step:283/1695 train_time:27270ms step_avg:96.36ms +step:284/1695 train_time:27365ms step_avg:96.36ms +step:285/1695 train_time:27459ms step_avg:96.35ms +step:286/1695 train_time:27553ms step_avg:96.34ms +step:287/1695 train_time:27648ms step_avg:96.34ms +step:288/1695 train_time:27742ms step_avg:96.33ms +step:289/1695 train_time:27836ms step_avg:96.32ms +step:290/1695 train_time:27930ms step_avg:96.31ms +step:291/1695 train_time:28024ms step_avg:96.30ms +step:292/1695 train_time:28118ms step_avg:96.29ms +step:293/1695 train_time:28211ms step_avg:96.28ms +step:294/1695 train_time:28305ms step_avg:96.28ms +step:295/1695 train_time:28399ms step_avg:96.27ms +step:296/1695 train_time:28493ms step_avg:96.26ms +step:297/1695 train_time:28587ms step_avg:96.25ms +step:298/1695 train_time:28682ms step_avg:96.25ms +step:299/1695 train_time:28776ms step_avg:96.24ms +step:300/1695 train_time:28870ms step_avg:96.23ms +step:301/1695 
train_time:28964ms step_avg:96.23ms +step:302/1695 train_time:29057ms step_avg:96.22ms +step:303/1695 train_time:29151ms step_avg:96.21ms +step:304/1695 train_time:29245ms step_avg:96.20ms +step:305/1695 train_time:29340ms step_avg:96.20ms +step:306/1695 train_time:29434ms step_avg:96.19ms +step:307/1695 train_time:29528ms step_avg:96.18ms +step:308/1695 train_time:29623ms step_avg:96.18ms +step:309/1695 train_time:29717ms step_avg:96.17ms +step:310/1695 train_time:29812ms step_avg:96.17ms +step:311/1695 train_time:29907ms step_avg:96.16ms +step:312/1695 train_time:30001ms step_avg:96.16ms +step:313/1695 train_time:30094ms step_avg:96.15ms +step:314/1695 train_time:30188ms step_avg:96.14ms +step:315/1695 train_time:30282ms step_avg:96.13ms +step:316/1695 train_time:30375ms step_avg:96.12ms +step:317/1695 train_time:30469ms step_avg:96.12ms +step:318/1695 train_time:30563ms step_avg:96.11ms +step:319/1695 train_time:30658ms step_avg:96.11ms +step:320/1695 train_time:30751ms step_avg:96.10ms +step:321/1695 train_time:30846ms step_avg:96.09ms +step:322/1695 train_time:30941ms step_avg:96.09ms +step:323/1695 train_time:31034ms step_avg:96.08ms +step:324/1695 train_time:31128ms step_avg:96.07ms +step:325/1695 train_time:31223ms step_avg:96.07ms +step:326/1695 train_time:31317ms step_avg:96.06ms +step:327/1695 train_time:31410ms step_avg:96.06ms +step:328/1695 train_time:31504ms step_avg:96.05ms +step:329/1695 train_time:31597ms step_avg:96.04ms +step:330/1695 train_time:31691ms step_avg:96.03ms +step:331/1695 train_time:31785ms step_avg:96.03ms +step:332/1695 train_time:31881ms step_avg:96.03ms +step:333/1695 train_time:31974ms step_avg:96.02ms +step:334/1695 train_time:32069ms step_avg:96.01ms +step:335/1695 train_time:32163ms step_avg:96.01ms +step:336/1695 train_time:32257ms step_avg:96.00ms +step:337/1695 train_time:32350ms step_avg:95.99ms +step:338/1695 train_time:32446ms step_avg:95.99ms +step:339/1695 train_time:32541ms step_avg:95.99ms +step:340/1695 train_time:32634ms step_avg:95.98ms +step:341/1695 train_time:32728ms step_avg:95.98ms +step:342/1695 train_time:32823ms step_avg:95.97ms +step:343/1695 train_time:32916ms step_avg:95.97ms +step:344/1695 train_time:33010ms step_avg:95.96ms +step:345/1695 train_time:33345ms step_avg:96.65ms +step:346/1695 train_time:33427ms step_avg:96.61ms +step:347/1695 train_time:33519ms step_avg:96.60ms +step:348/1695 train_time:33612ms step_avg:96.59ms +step:349/1695 train_time:33705ms step_avg:96.58ms +step:350/1695 train_time:33798ms step_avg:96.57ms +step:351/1695 train_time:33891ms step_avg:96.55ms +step:352/1695 train_time:33984ms step_avg:96.54ms +step:353/1695 train_time:34077ms step_avg:96.53ms +step:354/1695 train_time:34169ms step_avg:96.52ms +step:355/1695 train_time:34266ms step_avg:96.52ms +step:356/1695 train_time:34363ms step_avg:96.52ms +step:357/1695 train_time:34458ms step_avg:96.52ms +step:358/1695 train_time:34552ms step_avg:96.51ms +step:359/1695 train_time:34646ms step_avg:96.51ms +step:360/1695 train_time:34741ms step_avg:96.50ms +step:361/1695 train_time:34834ms step_avg:96.49ms +step:362/1695 train_time:34927ms step_avg:96.48ms +step:363/1695 train_time:35020ms step_avg:96.47ms +step:364/1695 train_time:35113ms step_avg:96.46ms +step:365/1695 train_time:35207ms step_avg:96.46ms +step:366/1695 train_time:35303ms step_avg:96.45ms +step:367/1695 train_time:35398ms step_avg:96.45ms +step:368/1695 train_time:35492ms step_avg:96.45ms +step:369/1695 train_time:35587ms step_avg:96.44ms +step:370/1695 train_time:35681ms step_avg:96.44ms 
+step:371/1695 train_time:35775ms step_avg:96.43ms +step:372/1695 train_time:35868ms step_avg:96.42ms +step:373/1695 train_time:35961ms step_avg:96.41ms +step:374/1695 train_time:36054ms step_avg:96.40ms +step:375/1695 train_time:36147ms step_avg:96.39ms +step:375/1695 val_loss:3.8151 train_time:36240ms step_avg:96.64ms +step:376/1695 train_time:36265ms step_avg:96.45ms +step:377/1695 train_time:36343ms step_avg:96.40ms +step:378/1695 train_time:36441ms step_avg:96.41ms +step:379/1695 train_time:36535ms step_avg:96.40ms +step:380/1695 train_time:36628ms step_avg:96.39ms +step:381/1695 train_time:36721ms step_avg:96.38ms +step:382/1695 train_time:36814ms step_avg:96.37ms +step:383/1695 train_time:36907ms step_avg:96.36ms +step:384/1695 train_time:36999ms step_avg:96.35ms +step:385/1695 train_time:37092ms step_avg:96.34ms +step:386/1695 train_time:37185ms step_avg:96.33ms +step:387/1695 train_time:37280ms step_avg:96.33ms +step:388/1695 train_time:37376ms step_avg:96.33ms +step:389/1695 train_time:37474ms step_avg:96.33ms +step:390/1695 train_time:37570ms step_avg:96.33ms +step:391/1695 train_time:37665ms step_avg:96.33ms +step:392/1695 train_time:37758ms step_avg:96.32ms +step:393/1695 train_time:37851ms step_avg:96.31ms +step:394/1695 train_time:37945ms step_avg:96.31ms +step:395/1695 train_time:38038ms step_avg:96.30ms +step:396/1695 train_time:38132ms step_avg:96.29ms +step:397/1695 train_time:38226ms step_avg:96.29ms +step:398/1695 train_time:38320ms step_avg:96.28ms +step:399/1695 train_time:38414ms step_avg:96.28ms +step:400/1695 train_time:38510ms step_avg:96.27ms +step:401/1695 train_time:38606ms step_avg:96.27ms +step:402/1695 train_time:38700ms step_avg:96.27ms +step:403/1695 train_time:38793ms step_avg:96.26ms +step:404/1695 train_time:38888ms step_avg:96.26ms +step:405/1695 train_time:38981ms step_avg:96.25ms +step:406/1695 train_time:39074ms step_avg:96.24ms +step:407/1695 train_time:39168ms step_avg:96.24ms +step:408/1695 train_time:39262ms step_avg:96.23ms +step:409/1695 train_time:39356ms step_avg:96.22ms +step:410/1695 train_time:39450ms step_avg:96.22ms +step:411/1695 train_time:39545ms step_avg:96.22ms +step:412/1695 train_time:39639ms step_avg:96.21ms +step:413/1695 train_time:39733ms step_avg:96.21ms +step:414/1695 train_time:39827ms step_avg:96.20ms +step:415/1695 train_time:39920ms step_avg:96.19ms +step:416/1695 train_time:40013ms step_avg:96.19ms +step:417/1695 train_time:40107ms step_avg:96.18ms +step:418/1695 train_time:40200ms step_avg:96.17ms +step:419/1695 train_time:40294ms step_avg:96.17ms +step:420/1695 train_time:40388ms step_avg:96.16ms +step:421/1695 train_time:40482ms step_avg:96.16ms +step:422/1695 train_time:40577ms step_avg:96.15ms +step:423/1695 train_time:40672ms step_avg:96.15ms +step:424/1695 train_time:40767ms step_avg:96.15ms +step:425/1695 train_time:40862ms step_avg:96.15ms +step:426/1695 train_time:40955ms step_avg:96.14ms +step:427/1695 train_time:41049ms step_avg:96.13ms +step:428/1695 train_time:41143ms step_avg:96.13ms +step:429/1695 train_time:41236ms step_avg:96.12ms +step:430/1695 train_time:41330ms step_avg:96.12ms +step:431/1695 train_time:41424ms step_avg:96.11ms +step:432/1695 train_time:41518ms step_avg:96.11ms +step:433/1695 train_time:41612ms step_avg:96.10ms +step:434/1695 train_time:41707ms step_avg:96.10ms +step:435/1695 train_time:41800ms step_avg:96.09ms +step:436/1695 train_time:41894ms step_avg:96.09ms +step:437/1695 train_time:41988ms step_avg:96.08ms +step:438/1695 train_time:42081ms step_avg:96.08ms +step:439/1695 
train_time:42175ms step_avg:96.07ms +step:440/1695 train_time:42269ms step_avg:96.07ms +step:441/1695 train_time:42365ms step_avg:96.06ms +step:442/1695 train_time:42458ms step_avg:96.06ms +step:443/1695 train_time:42553ms step_avg:96.06ms +step:444/1695 train_time:42647ms step_avg:96.05ms +step:445/1695 train_time:42741ms step_avg:96.05ms +step:446/1695 train_time:42835ms step_avg:96.04ms +step:447/1695 train_time:42929ms step_avg:96.04ms +step:448/1695 train_time:43023ms step_avg:96.03ms +step:449/1695 train_time:43116ms step_avg:96.03ms +step:450/1695 train_time:43210ms step_avg:96.02ms +step:451/1695 train_time:43305ms step_avg:96.02ms +step:452/1695 train_time:43398ms step_avg:96.01ms +step:453/1695 train_time:43492ms step_avg:96.01ms +step:454/1695 train_time:43587ms step_avg:96.01ms +step:455/1695 train_time:43680ms step_avg:96.00ms +step:456/1695 train_time:43774ms step_avg:96.00ms +step:457/1695 train_time:43869ms step_avg:95.99ms +step:458/1695 train_time:43964ms step_avg:95.99ms +step:459/1695 train_time:44057ms step_avg:95.99ms +step:460/1695 train_time:44151ms step_avg:95.98ms +step:461/1695 train_time:44245ms step_avg:95.98ms +step:462/1695 train_time:44338ms step_avg:95.97ms +step:463/1695 train_time:44432ms step_avg:95.97ms +step:464/1695 train_time:44526ms step_avg:95.96ms +step:465/1695 train_time:44620ms step_avg:95.96ms +step:466/1695 train_time:44714ms step_avg:95.95ms +step:467/1695 train_time:44808ms step_avg:95.95ms +step:468/1695 train_time:44902ms step_avg:95.95ms +step:469/1695 train_time:44995ms step_avg:95.94ms +step:470/1695 train_time:45090ms step_avg:95.94ms +step:471/1695 train_time:45184ms step_avg:95.93ms +step:472/1695 train_time:45277ms step_avg:95.93ms +step:473/1695 train_time:45371ms step_avg:95.92ms +step:474/1695 train_time:45466ms step_avg:95.92ms +step:475/1695 train_time:45559ms step_avg:95.91ms +step:476/1695 train_time:45653ms step_avg:95.91ms +step:477/1695 train_time:45748ms step_avg:95.91ms +step:478/1695 train_time:45843ms step_avg:95.91ms +step:479/1695 train_time:45936ms step_avg:95.90ms +step:480/1695 train_time:46031ms step_avg:95.90ms +step:481/1695 train_time:46125ms step_avg:95.89ms +step:482/1695 train_time:46218ms step_avg:95.89ms +step:483/1695 train_time:46312ms step_avg:95.88ms +step:484/1695 train_time:46406ms step_avg:95.88ms +step:485/1695 train_time:46499ms step_avg:95.87ms +step:486/1695 train_time:46593ms step_avg:95.87ms +step:487/1695 train_time:46687ms step_avg:95.87ms +step:488/1695 train_time:46780ms step_avg:95.86ms +step:489/1695 train_time:46874ms step_avg:95.86ms +step:490/1695 train_time:46969ms step_avg:95.86ms +step:491/1695 train_time:47064ms step_avg:95.85ms +step:492/1695 train_time:47157ms step_avg:95.85ms +step:493/1695 train_time:47251ms step_avg:95.84ms +step:494/1695 train_time:47345ms step_avg:95.84ms +step:495/1695 train_time:47439ms step_avg:95.84ms +step:496/1695 train_time:47533ms step_avg:95.83ms +step:497/1695 train_time:47628ms step_avg:95.83ms +step:498/1695 train_time:47722ms step_avg:95.83ms +step:499/1695 train_time:47814ms step_avg:95.82ms +step:500/1695 train_time:47908ms step_avg:95.82ms +step:500/1695 val_loss:3.7156 train_time:48001ms step_avg:96.00ms +step:501/1695 train_time:48026ms step_avg:95.86ms +step:502/1695 train_time:48105ms step_avg:95.83ms +step:503/1695 train_time:48205ms step_avg:95.83ms +step:504/1695 train_time:48300ms step_avg:95.83ms +step:505/1695 train_time:48393ms step_avg:95.83ms +step:506/1695 train_time:48486ms step_avg:95.82ms +step:507/1695 train_time:48579ms 
step_avg:95.82ms +step:508/1695 train_time:48673ms step_avg:95.81ms +step:509/1695 train_time:48765ms step_avg:95.81ms +step:510/1695 train_time:48859ms step_avg:95.80ms +step:511/1695 train_time:48952ms step_avg:95.80ms +step:512/1695 train_time:49046ms step_avg:95.79ms +step:513/1695 train_time:49142ms step_avg:95.79ms +step:514/1695 train_time:49239ms step_avg:95.80ms +step:515/1695 train_time:49334ms step_avg:95.79ms +step:516/1695 train_time:49427ms step_avg:95.79ms +step:517/1695 train_time:49520ms step_avg:95.78ms +step:518/1695 train_time:49614ms step_avg:95.78ms +step:519/1695 train_time:50068ms step_avg:96.47ms +step:520/1695 train_time:50139ms step_avg:96.42ms +step:521/1695 train_time:50231ms step_avg:96.41ms +step:522/1695 train_time:50324ms step_avg:96.41ms +step:523/1695 train_time:50417ms step_avg:96.40ms +step:524/1695 train_time:50510ms step_avg:96.39ms +step:525/1695 train_time:50602ms step_avg:96.39ms +step:526/1695 train_time:50695ms step_avg:96.38ms +step:527/1695 train_time:50789ms step_avg:96.37ms +step:528/1695 train_time:50882ms step_avg:96.37ms +step:529/1695 train_time:50977ms step_avg:96.37ms +step:530/1695 train_time:51074ms step_avg:96.37ms +step:531/1695 train_time:51171ms step_avg:96.37ms +step:532/1695 train_time:51264ms step_avg:96.36ms +step:533/1695 train_time:51358ms step_avg:96.36ms +step:534/1695 train_time:51451ms step_avg:96.35ms +step:535/1695 train_time:51543ms step_avg:96.34ms +step:536/1695 train_time:51636ms step_avg:96.34ms +step:537/1695 train_time:51730ms step_avg:96.33ms +step:538/1695 train_time:51823ms step_avg:96.32ms +step:539/1695 train_time:51916ms step_avg:96.32ms +step:540/1695 train_time:52011ms step_avg:96.32ms +step:541/1695 train_time:52105ms step_avg:96.31ms +step:542/1695 train_time:52200ms step_avg:96.31ms +step:543/1695 train_time:52295ms step_avg:96.31ms +step:544/1695 train_time:52389ms step_avg:96.30ms +step:545/1695 train_time:52482ms step_avg:96.30ms +step:546/1695 train_time:52577ms step_avg:96.29ms +step:547/1695 train_time:52671ms step_avg:96.29ms +step:548/1695 train_time:52764ms step_avg:96.28ms +step:549/1695 train_time:52858ms step_avg:96.28ms +step:550/1695 train_time:52951ms step_avg:96.27ms +step:551/1695 train_time:53045ms step_avg:96.27ms +step:552/1695 train_time:53139ms step_avg:96.27ms +step:553/1695 train_time:53233ms step_avg:96.26ms +step:554/1695 train_time:53326ms step_avg:96.26ms +step:555/1695 train_time:53420ms step_avg:96.25ms +step:556/1695 train_time:53514ms step_avg:96.25ms +step:557/1695 train_time:53608ms step_avg:96.24ms +step:558/1695 train_time:53701ms step_avg:96.24ms +step:559/1695 train_time:53795ms step_avg:96.24ms +step:560/1695 train_time:53890ms step_avg:96.23ms +step:561/1695 train_time:53983ms step_avg:96.23ms +step:562/1695 train_time:54077ms step_avg:96.22ms +step:563/1695 train_time:54172ms step_avg:96.22ms +step:564/1695 train_time:54265ms step_avg:96.21ms +step:565/1695 train_time:54359ms step_avg:96.21ms +step:566/1695 train_time:54453ms step_avg:96.21ms +step:567/1695 train_time:54546ms step_avg:96.20ms +step:568/1695 train_time:54641ms step_avg:96.20ms +step:569/1695 train_time:54737ms step_avg:96.20ms +step:570/1695 train_time:54833ms step_avg:96.20ms +step:571/1695 train_time:54929ms step_avg:96.20ms +step:572/1695 train_time:55024ms step_avg:96.20ms +step:573/1695 train_time:55120ms step_avg:96.20ms +step:574/1695 train_time:55216ms step_avg:96.20ms +step:575/1695 train_time:55312ms step_avg:96.19ms +step:576/1695 train_time:55407ms step_avg:96.19ms +step:577/1695 
train_time:55502ms step_avg:96.19ms +step:578/1695 train_time:55599ms step_avg:96.19ms +step:579/1695 train_time:55695ms step_avg:96.19ms +step:580/1695 train_time:55792ms step_avg:96.19ms +step:581/1695 train_time:55887ms step_avg:96.19ms +step:582/1695 train_time:55983ms step_avg:96.19ms +step:583/1695 train_time:56080ms step_avg:96.19ms +step:584/1695 train_time:56177ms step_avg:96.19ms +step:585/1695 train_time:56274ms step_avg:96.19ms +step:586/1695 train_time:56371ms step_avg:96.20ms +step:587/1695 train_time:56467ms step_avg:96.20ms +step:588/1695 train_time:56562ms step_avg:96.19ms +step:589/1695 train_time:56659ms step_avg:96.19ms +step:590/1695 train_time:56755ms step_avg:96.20ms +step:591/1695 train_time:56851ms step_avg:96.20ms +step:592/1695 train_time:56946ms step_avg:96.19ms +step:593/1695 train_time:57041ms step_avg:96.19ms +step:594/1695 train_time:57138ms step_avg:96.19ms +step:595/1695 train_time:57234ms step_avg:96.19ms +step:596/1695 train_time:57332ms step_avg:96.19ms +step:597/1695 train_time:57428ms step_avg:96.19ms +step:598/1695 train_time:57523ms step_avg:96.19ms +step:599/1695 train_time:57619ms step_avg:96.19ms +step:600/1695 train_time:57716ms step_avg:96.19ms +step:601/1695 train_time:57812ms step_avg:96.19ms +step:602/1695 train_time:57906ms step_avg:96.19ms +step:603/1695 train_time:58002ms step_avg:96.19ms +step:604/1695 train_time:58098ms step_avg:96.19ms +step:605/1695 train_time:58194ms step_avg:96.19ms +step:606/1695 train_time:58290ms step_avg:96.19ms +step:607/1695 train_time:58386ms step_avg:96.19ms +step:608/1695 train_time:58482ms step_avg:96.19ms +step:609/1695 train_time:58579ms step_avg:96.19ms +step:610/1695 train_time:58676ms step_avg:96.19ms +step:611/1695 train_time:58773ms step_avg:96.19ms +step:612/1695 train_time:58868ms step_avg:96.19ms +step:613/1695 train_time:58963ms step_avg:96.19ms +step:614/1695 train_time:59059ms step_avg:96.19ms +step:615/1695 train_time:59156ms step_avg:96.19ms +step:616/1695 train_time:59252ms step_avg:96.19ms +step:617/1695 train_time:59348ms step_avg:96.19ms +step:618/1695 train_time:59444ms step_avg:96.19ms +step:619/1695 train_time:59540ms step_avg:96.19ms +step:620/1695 train_time:59636ms step_avg:96.19ms +step:621/1695 train_time:59732ms step_avg:96.19ms +step:622/1695 train_time:59828ms step_avg:96.19ms +step:623/1695 train_time:59923ms step_avg:96.18ms +step:624/1695 train_time:60019ms step_avg:96.18ms +step:625/1695 train_time:60116ms step_avg:96.19ms +step:625/1695 val_loss:3.6216 train_time:60211ms step_avg:96.34ms +step:626/1695 train_time:60235ms step_avg:96.22ms +step:627/1695 train_time:60317ms step_avg:96.20ms +step:628/1695 train_time:60413ms step_avg:96.20ms +step:629/1695 train_time:60508ms step_avg:96.20ms +step:630/1695 train_time:60603ms step_avg:96.19ms +step:631/1695 train_time:60697ms step_avg:96.19ms +step:632/1695 train_time:60792ms step_avg:96.19ms +step:633/1695 train_time:60888ms step_avg:96.19ms +step:634/1695 train_time:60982ms step_avg:96.19ms +step:635/1695 train_time:61078ms step_avg:96.19ms +step:636/1695 train_time:61178ms step_avg:96.19ms +step:637/1695 train_time:61278ms step_avg:96.20ms +step:638/1695 train_time:61377ms step_avg:96.20ms +step:639/1695 train_time:61475ms step_avg:96.20ms +step:640/1695 train_time:61572ms step_avg:96.21ms +step:641/1695 train_time:61668ms step_avg:96.21ms +step:642/1695 train_time:61763ms step_avg:96.20ms +step:643/1695 train_time:61858ms step_avg:96.20ms +step:644/1695 train_time:61954ms step_avg:96.20ms +step:645/1695 train_time:62049ms 
step_avg:96.20ms +step:646/1695 train_time:62144ms step_avg:96.20ms +step:647/1695 train_time:62241ms step_avg:96.20ms +step:648/1695 train_time:62339ms step_avg:96.20ms +step:649/1695 train_time:62436ms step_avg:96.20ms +step:650/1695 train_time:62533ms step_avg:96.21ms +step:651/1695 train_time:62630ms step_avg:96.21ms +step:652/1695 train_time:62725ms step_avg:96.20ms +step:653/1695 train_time:62820ms step_avg:96.20ms +step:654/1695 train_time:62916ms step_avg:96.20ms +step:655/1695 train_time:63011ms step_avg:96.20ms +step:656/1695 train_time:63107ms step_avg:96.20ms +step:657/1695 train_time:63202ms step_avg:96.20ms +step:658/1695 train_time:63298ms step_avg:96.20ms +step:659/1695 train_time:63396ms step_avg:96.20ms +step:660/1695 train_time:63494ms step_avg:96.20ms +step:661/1695 train_time:63591ms step_avg:96.20ms +step:662/1695 train_time:63688ms step_avg:96.21ms +step:663/1695 train_time:63784ms step_avg:96.20ms +step:664/1695 train_time:63879ms step_avg:96.20ms +step:665/1695 train_time:63974ms step_avg:96.20ms +step:666/1695 train_time:64070ms step_avg:96.20ms +step:667/1695 train_time:64165ms step_avg:96.20ms +step:668/1695 train_time:64260ms step_avg:96.20ms +step:669/1695 train_time:64356ms step_avg:96.20ms +step:670/1695 train_time:64453ms step_avg:96.20ms +step:671/1695 train_time:64550ms step_avg:96.20ms +step:672/1695 train_time:64647ms step_avg:96.20ms +step:673/1695 train_time:64742ms step_avg:96.20ms +step:674/1695 train_time:64838ms step_avg:96.20ms +step:675/1695 train_time:64934ms step_avg:96.20ms +step:676/1695 train_time:65030ms step_avg:96.20ms +step:677/1695 train_time:65126ms step_avg:96.20ms +step:678/1695 train_time:65222ms step_avg:96.20ms +step:679/1695 train_time:65318ms step_avg:96.20ms +step:680/1695 train_time:65414ms step_avg:96.20ms +step:681/1695 train_time:65510ms step_avg:96.20ms +step:682/1695 train_time:65606ms step_avg:96.20ms +step:683/1695 train_time:65701ms step_avg:96.20ms +step:684/1695 train_time:65798ms step_avg:96.20ms +step:685/1695 train_time:65894ms step_avg:96.20ms +step:686/1695 train_time:65990ms step_avg:96.20ms +step:687/1695 train_time:66086ms step_avg:96.20ms +step:688/1695 train_time:66181ms step_avg:96.19ms +step:689/1695 train_time:66277ms step_avg:96.19ms +step:690/1695 train_time:66373ms step_avg:96.19ms +step:691/1695 train_time:66817ms step_avg:96.70ms +step:692/1695 train_time:66898ms step_avg:96.67ms +step:693/1695 train_time:66992ms step_avg:96.67ms +step:694/1695 train_time:67087ms step_avg:96.67ms +step:695/1695 train_time:67181ms step_avg:96.66ms +step:696/1695 train_time:67277ms step_avg:96.66ms +step:697/1695 train_time:67371ms step_avg:96.66ms +step:698/1695 train_time:67466ms step_avg:96.66ms +step:699/1695 train_time:67560ms step_avg:96.65ms +step:700/1695 train_time:67656ms step_avg:96.65ms +step:701/1695 train_time:67756ms step_avg:96.66ms +step:702/1695 train_time:67856ms step_avg:96.66ms +step:703/1695 train_time:67953ms step_avg:96.66ms +step:704/1695 train_time:68049ms step_avg:96.66ms +step:705/1695 train_time:68144ms step_avg:96.66ms +step:706/1695 train_time:68238ms step_avg:96.65ms +step:707/1695 train_time:68334ms step_avg:96.65ms +step:708/1695 train_time:68430ms step_avg:96.65ms +step:709/1695 train_time:68524ms step_avg:96.65ms +step:710/1695 train_time:68618ms step_avg:96.65ms +step:711/1695 train_time:68715ms step_avg:96.65ms +step:712/1695 train_time:68813ms step_avg:96.65ms +step:713/1695 train_time:68909ms step_avg:96.65ms +step:714/1695 train_time:69005ms step_avg:96.65ms +step:715/1695 
train_time:69101ms step_avg:96.64ms +step:716/1695 train_time:69196ms step_avg:96.64ms +step:717/1695 train_time:69293ms step_avg:96.64ms +step:718/1695 train_time:69390ms step_avg:96.64ms +step:719/1695 train_time:69485ms step_avg:96.64ms +step:720/1695 train_time:69580ms step_avg:96.64ms +step:721/1695 train_time:69676ms step_avg:96.64ms +step:722/1695 train_time:69774ms step_avg:96.64ms +step:723/1695 train_time:69871ms step_avg:96.64ms +step:724/1695 train_time:69969ms step_avg:96.64ms +step:725/1695 train_time:70065ms step_avg:96.64ms +step:726/1695 train_time:70160ms step_avg:96.64ms +step:727/1695 train_time:70256ms step_avg:96.64ms +step:728/1695 train_time:70353ms step_avg:96.64ms +step:729/1695 train_time:70450ms step_avg:96.64ms +step:730/1695 train_time:70547ms step_avg:96.64ms +step:731/1695 train_time:70642ms step_avg:96.64ms +step:732/1695 train_time:70737ms step_avg:96.64ms +step:733/1695 train_time:70834ms step_avg:96.64ms +step:734/1695 train_time:70933ms step_avg:96.64ms +step:735/1695 train_time:71029ms step_avg:96.64ms +step:736/1695 train_time:71125ms step_avg:96.64ms +step:737/1695 train_time:71220ms step_avg:96.63ms +step:738/1695 train_time:71316ms step_avg:96.63ms +step:739/1695 train_time:71413ms step_avg:96.63ms +step:740/1695 train_time:71508ms step_avg:96.63ms +step:741/1695 train_time:71603ms step_avg:96.63ms +step:742/1695 train_time:71698ms step_avg:96.63ms +step:743/1695 train_time:71794ms step_avg:96.63ms +step:744/1695 train_time:71891ms step_avg:96.63ms +step:745/1695 train_time:71988ms step_avg:96.63ms +step:746/1695 train_time:72083ms step_avg:96.63ms +step:747/1695 train_time:72179ms step_avg:96.63ms +step:748/1695 train_time:72275ms step_avg:96.62ms +step:749/1695 train_time:72371ms step_avg:96.62ms +step:750/1695 train_time:72467ms step_avg:96.62ms +step:750/1695 val_loss:3.5657 train_time:72560ms step_avg:96.75ms +step:751/1695 train_time:72585ms step_avg:96.65ms +step:752/1695 train_time:72667ms step_avg:96.63ms +step:753/1695 train_time:72765ms step_avg:96.63ms +step:754/1695 train_time:72860ms step_avg:96.63ms +step:755/1695 train_time:72956ms step_avg:96.63ms +step:756/1695 train_time:73050ms step_avg:96.63ms +step:757/1695 train_time:73145ms step_avg:96.62ms +step:758/1695 train_time:73239ms step_avg:96.62ms +step:759/1695 train_time:73334ms step_avg:96.62ms +step:760/1695 train_time:73429ms step_avg:96.62ms +step:761/1695 train_time:73526ms step_avg:96.62ms +step:762/1695 train_time:73625ms step_avg:96.62ms +step:763/1695 train_time:73723ms step_avg:96.62ms +step:764/1695 train_time:73819ms step_avg:96.62ms +step:765/1695 train_time:73916ms step_avg:96.62ms +step:766/1695 train_time:74011ms step_avg:96.62ms +step:767/1695 train_time:74106ms step_avg:96.62ms +step:768/1695 train_time:74201ms step_avg:96.62ms +step:769/1695 train_time:74296ms step_avg:96.61ms +step:770/1695 train_time:74391ms step_avg:96.61ms +step:771/1695 train_time:74487ms step_avg:96.61ms +step:772/1695 train_time:74584ms step_avg:96.61ms +step:773/1695 train_time:74682ms step_avg:96.61ms +step:774/1695 train_time:74779ms step_avg:96.61ms +step:775/1695 train_time:74875ms step_avg:96.61ms +step:776/1695 train_time:74971ms step_avg:96.61ms +step:777/1695 train_time:75066ms step_avg:96.61ms +step:778/1695 train_time:75161ms step_avg:96.61ms +step:779/1695 train_time:75257ms step_avg:96.61ms +step:780/1695 train_time:75352ms step_avg:96.61ms +step:781/1695 train_time:75446ms step_avg:96.60ms +step:782/1695 train_time:75542ms step_avg:96.60ms +step:783/1695 train_time:75639ms 
step_avg:96.60ms +step:784/1695 train_time:75736ms step_avg:96.60ms +step:785/1695 train_time:75833ms step_avg:96.60ms +step:786/1695 train_time:75930ms step_avg:96.60ms +step:787/1695 train_time:76024ms step_avg:96.60ms +step:788/1695 train_time:76119ms step_avg:96.60ms +step:789/1695 train_time:76215ms step_avg:96.60ms +step:790/1695 train_time:76309ms step_avg:96.59ms +step:791/1695 train_time:76404ms step_avg:96.59ms +step:792/1695 train_time:76500ms step_avg:96.59ms +step:793/1695 train_time:76597ms step_avg:96.59ms +step:794/1695 train_time:76694ms step_avg:96.59ms +step:795/1695 train_time:76790ms step_avg:96.59ms +step:796/1695 train_time:76886ms step_avg:96.59ms +step:797/1695 train_time:76982ms step_avg:96.59ms +step:798/1695 train_time:77077ms step_avg:96.59ms +step:799/1695 train_time:77173ms step_avg:96.59ms +step:800/1695 train_time:77268ms step_avg:96.58ms +step:801/1695 train_time:77362ms step_avg:96.58ms +step:802/1695 train_time:77458ms step_avg:96.58ms +step:803/1695 train_time:77555ms step_avg:96.58ms +step:804/1695 train_time:77651ms step_avg:96.58ms +step:805/1695 train_time:77747ms step_avg:96.58ms +step:806/1695 train_time:77843ms step_avg:96.58ms +step:807/1695 train_time:77939ms step_avg:96.58ms +step:808/1695 train_time:78036ms step_avg:96.58ms +step:809/1695 train_time:78132ms step_avg:96.58ms +step:810/1695 train_time:78228ms step_avg:96.58ms +step:811/1695 train_time:78322ms step_avg:96.58ms +step:812/1695 train_time:78418ms step_avg:96.57ms +step:813/1695 train_time:78514ms step_avg:96.57ms +step:814/1695 train_time:78611ms step_avg:96.57ms +step:815/1695 train_time:78706ms step_avg:96.57ms +step:816/1695 train_time:78802ms step_avg:96.57ms +step:817/1695 train_time:78899ms step_avg:96.57ms +step:818/1695 train_time:78995ms step_avg:96.57ms +step:819/1695 train_time:79091ms step_avg:96.57ms +step:820/1695 train_time:79186ms step_avg:96.57ms +step:821/1695 train_time:79281ms step_avg:96.57ms +step:822/1695 train_time:79378ms step_avg:96.57ms +step:823/1695 train_time:79474ms step_avg:96.57ms +step:824/1695 train_time:79570ms step_avg:96.57ms +step:825/1695 train_time:79665ms step_avg:96.56ms +step:826/1695 train_time:79761ms step_avg:96.56ms +step:827/1695 train_time:79857ms step_avg:96.56ms +step:828/1695 train_time:79953ms step_avg:96.56ms +step:829/1695 train_time:80049ms step_avg:96.56ms +step:830/1695 train_time:80145ms step_avg:96.56ms +step:831/1695 train_time:80241ms step_avg:96.56ms +step:832/1695 train_time:80337ms step_avg:96.56ms +step:833/1695 train_time:80434ms step_avg:96.56ms +step:834/1695 train_time:80529ms step_avg:96.56ms +step:835/1695 train_time:80624ms step_avg:96.56ms +step:836/1695 train_time:80720ms step_avg:96.55ms +step:837/1695 train_time:80816ms step_avg:96.55ms +step:838/1695 train_time:80912ms step_avg:96.55ms +step:839/1695 train_time:81007ms step_avg:96.55ms +step:840/1695 train_time:81103ms step_avg:96.55ms +step:841/1695 train_time:81199ms step_avg:96.55ms +step:842/1695 train_time:81294ms step_avg:96.55ms +step:843/1695 train_time:81390ms step_avg:96.55ms +step:844/1695 train_time:81485ms step_avg:96.55ms +step:845/1695 train_time:81581ms step_avg:96.55ms +step:846/1695 train_time:81676ms step_avg:96.54ms +step:847/1695 train_time:81772ms step_avg:96.54ms +step:848/1695 train_time:81867ms step_avg:96.54ms +step:849/1695 train_time:81963ms step_avg:96.54ms +step:850/1695 train_time:82059ms step_avg:96.54ms +step:851/1695 train_time:82155ms step_avg:96.54ms +step:852/1695 train_time:82251ms step_avg:96.54ms +step:853/1695 
train_time:82347ms step_avg:96.54ms +step:854/1695 train_time:82443ms step_avg:96.54ms +step:855/1695 train_time:82539ms step_avg:96.54ms +step:856/1695 train_time:82635ms step_avg:96.54ms +step:857/1695 train_time:82730ms step_avg:96.53ms +step:858/1695 train_time:82826ms step_avg:96.53ms +step:859/1695 train_time:82921ms step_avg:96.53ms +step:860/1695 train_time:83017ms step_avg:96.53ms +step:861/1695 train_time:83113ms step_avg:96.53ms +step:862/1695 train_time:83208ms step_avg:96.53ms +step:863/1695 train_time:83631ms step_avg:96.91ms +step:864/1695 train_time:83735ms step_avg:96.92ms +step:865/1695 train_time:83829ms step_avg:96.91ms +step:866/1695 train_time:83923ms step_avg:96.91ms +step:867/1695 train_time:84018ms step_avg:96.91ms +step:868/1695 train_time:84113ms step_avg:96.90ms +step:869/1695 train_time:84207ms step_avg:96.90ms +step:870/1695 train_time:84301ms step_avg:96.90ms +step:871/1695 train_time:84397ms step_avg:96.90ms +step:872/1695 train_time:84492ms step_avg:96.89ms +step:873/1695 train_time:84593ms step_avg:96.90ms +step:874/1695 train_time:84691ms step_avg:96.90ms +step:875/1695 train_time:84788ms step_avg:96.90ms +step:875/1695 val_loss:3.5240 train_time:84881ms step_avg:97.01ms +step:876/1695 train_time:84907ms step_avg:96.93ms +step:877/1695 train_time:84991ms step_avg:96.91ms +step:878/1695 train_time:85089ms step_avg:96.91ms +step:879/1695 train_time:85186ms step_avg:96.91ms +step:880/1695 train_time:85282ms step_avg:96.91ms +step:881/1695 train_time:85377ms step_avg:96.91ms +step:882/1695 train_time:85472ms step_avg:96.91ms +step:883/1695 train_time:85567ms step_avg:96.90ms +step:884/1695 train_time:85663ms step_avg:96.90ms +step:885/1695 train_time:85757ms step_avg:96.90ms +step:886/1695 train_time:85854ms step_avg:96.90ms +step:887/1695 train_time:85952ms step_avg:96.90ms +step:888/1695 train_time:86049ms step_avg:96.90ms +step:889/1695 train_time:86145ms step_avg:96.90ms +step:890/1695 train_time:86242ms step_avg:96.90ms +step:891/1695 train_time:86339ms step_avg:96.90ms +step:892/1695 train_time:86435ms step_avg:96.90ms +step:893/1695 train_time:86530ms step_avg:96.90ms +step:894/1695 train_time:86625ms step_avg:96.90ms +step:895/1695 train_time:86720ms step_avg:96.89ms +step:896/1695 train_time:86816ms step_avg:96.89ms +step:897/1695 train_time:86913ms step_avg:96.89ms +step:898/1695 train_time:87009ms step_avg:96.89ms +step:899/1695 train_time:87105ms step_avg:96.89ms +step:900/1695 train_time:87201ms step_avg:96.89ms +step:901/1695 train_time:87298ms step_avg:96.89ms +step:902/1695 train_time:87394ms step_avg:96.89ms +step:903/1695 train_time:87489ms step_avg:96.89ms +step:904/1695 train_time:87584ms step_avg:96.88ms +step:905/1695 train_time:87679ms step_avg:96.88ms +step:906/1695 train_time:87776ms step_avg:96.88ms +step:907/1695 train_time:87872ms step_avg:96.88ms +step:908/1695 train_time:87968ms step_avg:96.88ms +step:909/1695 train_time:88065ms step_avg:96.88ms +step:910/1695 train_time:88162ms step_avg:96.88ms +step:911/1695 train_time:88259ms step_avg:96.88ms +step:912/1695 train_time:88354ms step_avg:96.88ms +step:913/1695 train_time:88449ms step_avg:96.88ms +step:914/1695 train_time:88544ms step_avg:96.88ms +step:915/1695 train_time:88640ms step_avg:96.87ms +step:916/1695 train_time:88736ms step_avg:96.87ms +step:917/1695 train_time:88831ms step_avg:96.87ms +step:918/1695 train_time:88927ms step_avg:96.87ms +step:919/1695 train_time:89023ms step_avg:96.87ms +step:920/1695 train_time:89119ms step_avg:96.87ms +step:921/1695 train_time:89216ms 
step_avg:96.87ms +step:922/1695 train_time:89312ms step_avg:96.87ms +step:923/1695 train_time:89407ms step_avg:96.87ms +step:924/1695 train_time:89504ms step_avg:96.87ms +step:925/1695 train_time:89600ms step_avg:96.86ms +step:926/1695 train_time:89696ms step_avg:96.86ms +step:927/1695 train_time:89792ms step_avg:96.86ms +step:928/1695 train_time:89887ms step_avg:96.86ms +step:929/1695 train_time:89983ms step_avg:96.86ms +step:930/1695 train_time:90080ms step_avg:96.86ms +step:931/1695 train_time:90177ms step_avg:96.86ms +step:932/1695 train_time:90273ms step_avg:96.86ms +step:933/1695 train_time:90369ms step_avg:96.86ms +step:934/1695 train_time:90465ms step_avg:96.86ms +step:935/1695 train_time:90561ms step_avg:96.86ms +step:936/1695 train_time:90657ms step_avg:96.86ms +step:937/1695 train_time:90753ms step_avg:96.86ms +step:938/1695 train_time:90848ms step_avg:96.85ms +step:939/1695 train_time:90944ms step_avg:96.85ms +step:940/1695 train_time:91040ms step_avg:96.85ms +step:941/1695 train_time:91136ms step_avg:96.85ms +step:942/1695 train_time:91232ms step_avg:96.85ms +step:943/1695 train_time:91329ms step_avg:96.85ms +step:944/1695 train_time:91424ms step_avg:96.85ms +step:945/1695 train_time:91520ms step_avg:96.85ms +step:946/1695 train_time:91616ms step_avg:96.85ms +step:947/1695 train_time:91711ms step_avg:96.84ms +step:948/1695 train_time:91807ms step_avg:96.84ms +step:949/1695 train_time:91904ms step_avg:96.84ms +step:950/1695 train_time:92001ms step_avg:96.84ms +step:951/1695 train_time:92098ms step_avg:96.84ms +step:952/1695 train_time:92194ms step_avg:96.84ms +step:953/1695 train_time:92290ms step_avg:96.84ms +step:954/1695 train_time:92385ms step_avg:96.84ms +step:955/1695 train_time:92481ms step_avg:96.84ms +step:956/1695 train_time:92578ms step_avg:96.84ms +step:957/1695 train_time:92675ms step_avg:96.84ms +step:958/1695 train_time:92770ms step_avg:96.84ms +step:959/1695 train_time:92866ms step_avg:96.84ms +step:960/1695 train_time:92963ms step_avg:96.84ms +step:961/1695 train_time:93060ms step_avg:96.84ms +step:962/1695 train_time:93158ms step_avg:96.84ms +step:963/1695 train_time:93254ms step_avg:96.84ms +step:964/1695 train_time:93349ms step_avg:96.83ms +step:965/1695 train_time:93444ms step_avg:96.83ms +step:966/1695 train_time:93540ms step_avg:96.83ms +step:967/1695 train_time:93636ms step_avg:96.83ms +step:968/1695 train_time:93733ms step_avg:96.83ms +step:969/1695 train_time:93828ms step_avg:96.83ms +step:970/1695 train_time:93924ms step_avg:96.83ms +step:971/1695 train_time:94021ms step_avg:96.83ms +step:972/1695 train_time:94119ms step_avg:96.83ms +step:973/1695 train_time:94215ms step_avg:96.83ms +step:974/1695 train_time:94310ms step_avg:96.83ms +step:975/1695 train_time:94406ms step_avg:96.83ms +step:976/1695 train_time:94501ms step_avg:96.82ms +step:977/1695 train_time:94597ms step_avg:96.82ms +step:978/1695 train_time:94693ms step_avg:96.82ms +step:979/1695 train_time:94788ms step_avg:96.82ms +step:980/1695 train_time:94884ms step_avg:96.82ms +step:981/1695 train_time:94980ms step_avg:96.82ms +step:982/1695 train_time:95077ms step_avg:96.82ms +step:983/1695 train_time:95173ms step_avg:96.82ms +step:984/1695 train_time:95268ms step_avg:96.82ms +step:985/1695 train_time:95364ms step_avg:96.82ms +step:986/1695 train_time:95460ms step_avg:96.82ms +step:987/1695 train_time:95556ms step_avg:96.81ms +step:988/1695 train_time:95652ms step_avg:96.81ms +step:989/1695 train_time:95747ms step_avg:96.81ms +step:990/1695 train_time:95844ms step_avg:96.81ms +step:991/1695 
train_time:95940ms step_avg:96.81ms +step:992/1695 train_time:96037ms step_avg:96.81ms +step:993/1695 train_time:96133ms step_avg:96.81ms +step:994/1695 train_time:96229ms step_avg:96.81ms +step:995/1695 train_time:96325ms step_avg:96.81ms +step:996/1695 train_time:96422ms step_avg:96.81ms +step:997/1695 train_time:96518ms step_avg:96.81ms +step:998/1695 train_time:96613ms step_avg:96.81ms +step:999/1695 train_time:96708ms step_avg:96.80ms +step:1000/1695 train_time:96804ms step_avg:96.80ms +step:1000/1695 val_loss:3.4845 train_time:96898ms step_avg:96.90ms +step:1001/1695 train_time:96924ms step_avg:96.83ms +step:1002/1695 train_time:97001ms step_avg:96.81ms +step:1003/1695 train_time:97099ms step_avg:96.81ms +step:1004/1695 train_time:97195ms step_avg:96.81ms +step:1005/1695 train_time:97291ms step_avg:96.81ms +step:1006/1695 train_time:97386ms step_avg:96.81ms +step:1007/1695 train_time:97481ms step_avg:96.80ms +step:1008/1695 train_time:97576ms step_avg:96.80ms +step:1009/1695 train_time:97672ms step_avg:96.80ms +step:1010/1695 train_time:97766ms step_avg:96.80ms +step:1011/1695 train_time:97863ms step_avg:96.80ms +step:1012/1695 train_time:97961ms step_avg:96.80ms +step:1013/1695 train_time:98057ms step_avg:96.80ms +step:1014/1695 train_time:98153ms step_avg:96.80ms +step:1015/1695 train_time:98249ms step_avg:96.80ms +step:1016/1695 train_time:98346ms step_avg:96.80ms +step:1017/1695 train_time:98440ms step_avg:96.79ms +step:1018/1695 train_time:98535ms step_avg:96.79ms +step:1019/1695 train_time:98632ms step_avg:96.79ms +step:1020/1695 train_time:98728ms step_avg:96.79ms +step:1021/1695 train_time:98823ms step_avg:96.79ms +step:1022/1695 train_time:98919ms step_avg:96.79ms +step:1023/1695 train_time:99016ms step_avg:96.79ms +step:1024/1695 train_time:99114ms step_avg:96.79ms +step:1025/1695 train_time:99211ms step_avg:96.79ms +step:1026/1695 train_time:99307ms step_avg:96.79ms +step:1027/1695 train_time:99402ms step_avg:96.79ms +step:1028/1695 train_time:99497ms step_avg:96.79ms +step:1029/1695 train_time:99593ms step_avg:96.79ms +step:1030/1695 train_time:99690ms step_avg:96.79ms +step:1031/1695 train_time:99786ms step_avg:96.79ms +step:1032/1695 train_time:99882ms step_avg:96.78ms +step:1033/1695 train_time:99978ms step_avg:96.78ms +step:1034/1695 train_time:100075ms step_avg:96.78ms +step:1035/1695 train_time:100172ms step_avg:96.78ms +step:1036/1695 train_time:100506ms step_avg:97.01ms +step:1037/1695 train_time:100695ms step_avg:97.10ms +step:1038/1695 train_time:100788ms step_avg:97.10ms +step:1039/1695 train_time:100882ms step_avg:97.10ms +step:1040/1695 train_time:100977ms step_avg:97.09ms +step:1041/1695 train_time:101073ms step_avg:97.09ms +step:1042/1695 train_time:101168ms step_avg:97.09ms +step:1043/1695 train_time:101262ms step_avg:97.09ms +step:1044/1695 train_time:101357ms step_avg:97.09ms +step:1045/1695 train_time:101452ms step_avg:97.08ms +step:1046/1695 train_time:101548ms step_avg:97.08ms +step:1047/1695 train_time:101649ms step_avg:97.09ms +step:1048/1695 train_time:101747ms step_avg:97.09ms +step:1049/1695 train_time:101842ms step_avg:97.09ms +step:1050/1695 train_time:101937ms step_avg:97.08ms +step:1051/1695 train_time:102034ms step_avg:97.08ms +step:1052/1695 train_time:102130ms step_avg:97.08ms +step:1053/1695 train_time:102225ms step_avg:97.08ms +step:1054/1695 train_time:102320ms step_avg:97.08ms +step:1055/1695 train_time:102415ms step_avg:97.08ms +step:1056/1695 train_time:102511ms step_avg:97.07ms +step:1057/1695 train_time:102610ms step_avg:97.08ms 
+step:1058/1695 train_time:102707ms step_avg:97.08ms +step:1059/1695 train_time:102804ms step_avg:97.08ms +step:1060/1695 train_time:102900ms step_avg:97.08ms +step:1061/1695 train_time:102996ms step_avg:97.07ms +step:1062/1695 train_time:103092ms step_avg:97.07ms +step:1063/1695 train_time:103187ms step_avg:97.07ms +step:1064/1695 train_time:103282ms step_avg:97.07ms +step:1065/1695 train_time:103377ms step_avg:97.07ms +step:1066/1695 train_time:103473ms step_avg:97.07ms +step:1067/1695 train_time:103569ms step_avg:97.07ms +step:1068/1695 train_time:103666ms step_avg:97.07ms +step:1069/1695 train_time:103762ms step_avg:97.06ms +step:1070/1695 train_time:103858ms step_avg:97.06ms +step:1071/1695 train_time:103954ms step_avg:97.06ms +step:1072/1695 train_time:104050ms step_avg:97.06ms +step:1073/1695 train_time:104145ms step_avg:97.06ms +step:1074/1695 train_time:104241ms step_avg:97.06ms +step:1075/1695 train_time:104336ms step_avg:97.06ms +step:1076/1695 train_time:104432ms step_avg:97.06ms +step:1077/1695 train_time:104527ms step_avg:97.05ms +step:1078/1695 train_time:104624ms step_avg:97.05ms +step:1079/1695 train_time:104719ms step_avg:97.05ms +step:1080/1695 train_time:104817ms step_avg:97.05ms +step:1081/1695 train_time:104915ms step_avg:97.05ms +step:1082/1695 train_time:105011ms step_avg:97.05ms +step:1083/1695 train_time:105107ms step_avg:97.05ms +step:1084/1695 train_time:105203ms step_avg:97.05ms +step:1085/1695 train_time:105298ms step_avg:97.05ms +step:1086/1695 train_time:105394ms step_avg:97.05ms +step:1087/1695 train_time:105489ms step_avg:97.05ms +step:1088/1695 train_time:105585ms step_avg:97.05ms +step:1089/1695 train_time:105680ms step_avg:97.04ms +step:1090/1695 train_time:105776ms step_avg:97.04ms +step:1091/1695 train_time:105873ms step_avg:97.04ms +step:1092/1695 train_time:105970ms step_avg:97.04ms +step:1093/1695 train_time:106065ms step_avg:97.04ms +step:1094/1695 train_time:106160ms step_avg:97.04ms +step:1095/1695 train_time:106255ms step_avg:97.04ms +step:1096/1695 train_time:106351ms step_avg:97.04ms +step:1097/1695 train_time:106448ms step_avg:97.04ms +step:1098/1695 train_time:106544ms step_avg:97.03ms +step:1099/1695 train_time:106640ms step_avg:97.03ms +step:1100/1695 train_time:106735ms step_avg:97.03ms +step:1101/1695 train_time:106832ms step_avg:97.03ms +step:1102/1695 train_time:106929ms step_avg:97.03ms +step:1103/1695 train_time:107026ms step_avg:97.03ms +step:1104/1695 train_time:107121ms step_avg:97.03ms +step:1105/1695 train_time:107216ms step_avg:97.03ms +step:1106/1695 train_time:107312ms step_avg:97.03ms +step:1107/1695 train_time:107408ms step_avg:97.03ms +step:1108/1695 train_time:107503ms step_avg:97.02ms +step:1109/1695 train_time:107599ms step_avg:97.02ms +step:1110/1695 train_time:107695ms step_avg:97.02ms +step:1111/1695 train_time:107792ms step_avg:97.02ms +step:1112/1695 train_time:107889ms step_avg:97.02ms +step:1113/1695 train_time:107985ms step_avg:97.02ms +step:1114/1695 train_time:108080ms step_avg:97.02ms +step:1115/1695 train_time:108176ms step_avg:97.02ms +step:1116/1695 train_time:108272ms step_avg:97.02ms +step:1117/1695 train_time:108369ms step_avg:97.02ms +step:1118/1695 train_time:108464ms step_avg:97.02ms +step:1119/1695 train_time:108560ms step_avg:97.02ms +step:1120/1695 train_time:108655ms step_avg:97.01ms +step:1121/1695 train_time:108752ms step_avg:97.01ms +step:1122/1695 train_time:108848ms step_avg:97.01ms +step:1123/1695 train_time:108944ms step_avg:97.01ms +step:1124/1695 train_time:109040ms step_avg:97.01ms 
+step:1125/1695 train_time:109136ms step_avg:97.01ms +step:1125/1695 val_loss:3.4374 train_time:109230ms step_avg:97.09ms +step:1126/1695 train_time:109255ms step_avg:97.03ms +step:1127/1695 train_time:109339ms step_avg:97.02ms +step:1128/1695 train_time:109436ms step_avg:97.02ms +step:1129/1695 train_time:109533ms step_avg:97.02ms +step:1130/1695 train_time:109628ms step_avg:97.02ms +step:1131/1695 train_time:109724ms step_avg:97.01ms +step:1132/1695 train_time:109818ms step_avg:97.01ms +step:1133/1695 train_time:109915ms step_avg:97.01ms +step:1134/1695 train_time:110012ms step_avg:97.01ms +step:1135/1695 train_time:110108ms step_avg:97.01ms +step:1136/1695 train_time:110207ms step_avg:97.01ms +step:1137/1695 train_time:110306ms step_avg:97.02ms +step:1138/1695 train_time:110406ms step_avg:97.02ms +step:1139/1695 train_time:110505ms step_avg:97.02ms +step:1140/1695 train_time:110602ms step_avg:97.02ms +step:1141/1695 train_time:110699ms step_avg:97.02ms +step:1142/1695 train_time:110796ms step_avg:97.02ms +step:1143/1695 train_time:110894ms step_avg:97.02ms +step:1144/1695 train_time:110992ms step_avg:97.02ms +step:1145/1695 train_time:111089ms step_avg:97.02ms +step:1146/1695 train_time:111188ms step_avg:97.02ms +step:1147/1695 train_time:111286ms step_avg:97.02ms +step:1148/1695 train_time:111384ms step_avg:97.02ms +step:1149/1695 train_time:111483ms step_avg:97.03ms +step:1150/1695 train_time:111581ms step_avg:97.03ms +step:1151/1695 train_time:111678ms step_avg:97.03ms +step:1152/1695 train_time:111776ms step_avg:97.03ms +step:1153/1695 train_time:111873ms step_avg:97.03ms +step:1154/1695 train_time:111970ms step_avg:97.03ms +step:1155/1695 train_time:112067ms step_avg:97.03ms +step:1156/1695 train_time:112165ms step_avg:97.03ms +step:1157/1695 train_time:112264ms step_avg:97.03ms +step:1158/1695 train_time:112361ms step_avg:97.03ms +step:1159/1695 train_time:112460ms step_avg:97.03ms +step:1160/1695 train_time:112558ms step_avg:97.03ms +step:1161/1695 train_time:112656ms step_avg:97.03ms +step:1162/1695 train_time:112754ms step_avg:97.03ms +step:1163/1695 train_time:112852ms step_avg:97.04ms +step:1164/1695 train_time:112949ms step_avg:97.04ms +step:1165/1695 train_time:113046ms step_avg:97.04ms +step:1166/1695 train_time:113143ms step_avg:97.04ms +step:1167/1695 train_time:113241ms step_avg:97.04ms +step:1168/1695 train_time:113339ms step_avg:97.04ms +step:1169/1695 train_time:113437ms step_avg:97.04ms +step:1170/1695 train_time:113535ms step_avg:97.04ms +step:1171/1695 train_time:113634ms step_avg:97.04ms +step:1172/1695 train_time:113733ms step_avg:97.04ms +step:1173/1695 train_time:113830ms step_avg:97.04ms +step:1174/1695 train_time:113927ms step_avg:97.04ms +step:1175/1695 train_time:114025ms step_avg:97.04ms +step:1176/1695 train_time:114121ms step_avg:97.04ms +step:1177/1695 train_time:114219ms step_avg:97.04ms +step:1178/1695 train_time:114317ms step_avg:97.04ms +step:1179/1695 train_time:114416ms step_avg:97.05ms +step:1180/1695 train_time:114515ms step_avg:97.05ms +step:1181/1695 train_time:114613ms step_avg:97.05ms +step:1182/1695 train_time:114712ms step_avg:97.05ms +step:1183/1695 train_time:114809ms step_avg:97.05ms +step:1184/1695 train_time:114908ms step_avg:97.05ms +step:1185/1695 train_time:115005ms step_avg:97.05ms +step:1186/1695 train_time:115102ms step_avg:97.05ms +step:1187/1695 train_time:115199ms step_avg:97.05ms +step:1188/1695 train_time:115296ms step_avg:97.05ms +step:1189/1695 train_time:115395ms step_avg:97.05ms +step:1190/1695 train_time:115494ms 
step_avg:97.05ms +step:1191/1695 train_time:115593ms step_avg:97.06ms +step:1192/1695 train_time:115692ms step_avg:97.06ms +step:1193/1695 train_time:115790ms step_avg:97.06ms +step:1194/1695 train_time:115888ms step_avg:97.06ms +step:1195/1695 train_time:115986ms step_avg:97.06ms +step:1196/1695 train_time:116084ms step_avg:97.06ms +step:1197/1695 train_time:116183ms step_avg:97.06ms +step:1198/1695 train_time:116280ms step_avg:97.06ms +step:1199/1695 train_time:116378ms step_avg:97.06ms +step:1200/1695 train_time:116476ms step_avg:97.06ms +step:1201/1695 train_time:116574ms step_avg:97.06ms +step:1202/1695 train_time:116672ms step_avg:97.07ms +step:1203/1695 train_time:116770ms step_avg:97.07ms +step:1204/1695 train_time:116868ms step_avg:97.07ms +step:1205/1695 train_time:116966ms step_avg:97.07ms +step:1206/1695 train_time:117064ms step_avg:97.07ms +step:1207/1695 train_time:117162ms step_avg:97.07ms +step:1208/1695 train_time:117508ms step_avg:97.27ms +step:1209/1695 train_time:117691ms step_avg:97.35ms +step:1210/1695 train_time:117787ms step_avg:97.34ms +step:1211/1695 train_time:117883ms step_avg:97.34ms +step:1212/1695 train_time:117979ms step_avg:97.34ms +step:1213/1695 train_time:118076ms step_avg:97.34ms +step:1214/1695 train_time:118174ms step_avg:97.34ms +step:1215/1695 train_time:118270ms step_avg:97.34ms +step:1216/1695 train_time:118367ms step_avg:97.34ms +step:1217/1695 train_time:118463ms step_avg:97.34ms +step:1218/1695 train_time:118566ms step_avg:97.34ms +step:1219/1695 train_time:118667ms step_avg:97.35ms +step:1220/1695 train_time:118767ms step_avg:97.35ms +step:1221/1695 train_time:118863ms step_avg:97.35ms +step:1222/1695 train_time:118959ms step_avg:97.35ms +step:1223/1695 train_time:119056ms step_avg:97.35ms +step:1224/1695 train_time:119152ms step_avg:97.35ms +step:1225/1695 train_time:119249ms step_avg:97.35ms +step:1226/1695 train_time:119346ms step_avg:97.35ms +step:1227/1695 train_time:119443ms step_avg:97.35ms +step:1228/1695 train_time:119541ms step_avg:97.35ms +step:1229/1695 train_time:119640ms step_avg:97.35ms +step:1230/1695 train_time:119740ms step_avg:97.35ms +step:1231/1695 train_time:119838ms step_avg:97.35ms +step:1232/1695 train_time:119936ms step_avg:97.35ms +step:1233/1695 train_time:120033ms step_avg:97.35ms +step:1234/1695 train_time:120130ms step_avg:97.35ms +step:1235/1695 train_time:120227ms step_avg:97.35ms +step:1236/1695 train_time:120323ms step_avg:97.35ms +step:1237/1695 train_time:120420ms step_avg:97.35ms +step:1238/1695 train_time:120517ms step_avg:97.35ms +step:1239/1695 train_time:120616ms step_avg:97.35ms +step:1240/1695 train_time:120716ms step_avg:97.35ms +step:1241/1695 train_time:120815ms step_avg:97.35ms +step:1242/1695 train_time:120914ms step_avg:97.35ms +step:1243/1695 train_time:121012ms step_avg:97.36ms +step:1244/1695 train_time:121110ms step_avg:97.35ms +step:1245/1695 train_time:121206ms step_avg:97.35ms +step:1246/1695 train_time:121304ms step_avg:97.35ms +step:1247/1695 train_time:121400ms step_avg:97.35ms +step:1248/1695 train_time:121498ms step_avg:97.35ms +step:1249/1695 train_time:121596ms step_avg:97.35ms +step:1250/1695 train_time:121696ms step_avg:97.36ms +step:1250/1695 val_loss:3.3886 train_time:121792ms step_avg:97.43ms +step:1251/1695 train_time:121818ms step_avg:97.38ms +step:1252/1695 train_time:121899ms step_avg:97.36ms +step:1253/1695 train_time:121997ms step_avg:97.36ms +step:1254/1695 train_time:122094ms step_avg:97.36ms +step:1255/1695 train_time:122190ms step_avg:97.36ms +step:1256/1695 
train_time:122287ms step_avg:97.36ms +step:1257/1695 train_time:122383ms step_avg:97.36ms +step:1258/1695 train_time:122480ms step_avg:97.36ms +step:1259/1695 train_time:122576ms step_avg:97.36ms +step:1260/1695 train_time:122673ms step_avg:97.36ms +step:1261/1695 train_time:122774ms step_avg:97.36ms +step:1262/1695 train_time:122874ms step_avg:97.36ms +step:1263/1695 train_time:122972ms step_avg:97.36ms +step:1264/1695 train_time:123070ms step_avg:97.37ms +step:1265/1695 train_time:123167ms step_avg:97.37ms +step:1266/1695 train_time:123265ms step_avg:97.37ms +step:1267/1695 train_time:123361ms step_avg:97.36ms +step:1268/1695 train_time:123458ms step_avg:97.36ms +step:1269/1695 train_time:123554ms step_avg:97.36ms +step:1270/1695 train_time:123650ms step_avg:97.36ms +step:1271/1695 train_time:123749ms step_avg:97.36ms +step:1272/1695 train_time:123849ms step_avg:97.37ms +step:1273/1695 train_time:123947ms step_avg:97.37ms +step:1274/1695 train_time:124045ms step_avg:97.37ms +step:1275/1695 train_time:124143ms step_avg:97.37ms +step:1276/1695 train_time:124242ms step_avg:97.37ms +step:1277/1695 train_time:124340ms step_avg:97.37ms +step:1278/1695 train_time:124437ms step_avg:97.37ms +step:1279/1695 train_time:124534ms step_avg:97.37ms +step:1280/1695 train_time:124632ms step_avg:97.37ms +step:1281/1695 train_time:124730ms step_avg:97.37ms +step:1282/1695 train_time:124828ms step_avg:97.37ms +step:1283/1695 train_time:124926ms step_avg:97.37ms +step:1284/1695 train_time:125024ms step_avg:97.37ms +step:1285/1695 train_time:125122ms step_avg:97.37ms +step:1286/1695 train_time:125220ms step_avg:97.37ms +step:1287/1695 train_time:125318ms step_avg:97.37ms +step:1288/1695 train_time:125415ms step_avg:97.37ms +step:1289/1695 train_time:125512ms step_avg:97.37ms +step:1290/1695 train_time:125609ms step_avg:97.37ms +step:1291/1695 train_time:125707ms step_avg:97.37ms +step:1292/1695 train_time:125806ms step_avg:97.37ms +step:1293/1695 train_time:125904ms step_avg:97.37ms +step:1294/1695 train_time:126003ms step_avg:97.37ms +step:1295/1695 train_time:126102ms step_avg:97.38ms +step:1296/1695 train_time:126200ms step_avg:97.38ms +step:1297/1695 train_time:126297ms step_avg:97.38ms +step:1298/1695 train_time:126395ms step_avg:97.38ms +step:1299/1695 train_time:126492ms step_avg:97.38ms +step:1300/1695 train_time:126589ms step_avg:97.38ms +step:1301/1695 train_time:126686ms step_avg:97.38ms +step:1302/1695 train_time:126785ms step_avg:97.38ms +step:1303/1695 train_time:126883ms step_avg:97.38ms +step:1304/1695 train_time:126981ms step_avg:97.38ms +step:1305/1695 train_time:127079ms step_avg:97.38ms +step:1306/1695 train_time:127178ms step_avg:97.38ms +step:1307/1695 train_time:127276ms step_avg:97.38ms +step:1308/1695 train_time:127373ms step_avg:97.38ms +step:1309/1695 train_time:127470ms step_avg:97.38ms +step:1310/1695 train_time:127568ms step_avg:97.38ms +step:1311/1695 train_time:127665ms step_avg:97.38ms +step:1312/1695 train_time:127764ms step_avg:97.38ms +step:1313/1695 train_time:127862ms step_avg:97.38ms +step:1314/1695 train_time:127961ms step_avg:97.38ms +step:1315/1695 train_time:128059ms step_avg:97.38ms +step:1316/1695 train_time:128158ms step_avg:97.38ms +step:1317/1695 train_time:128255ms step_avg:97.38ms +step:1318/1695 train_time:128352ms step_avg:97.38ms +step:1319/1695 train_time:128449ms step_avg:97.38ms +step:1320/1695 train_time:128546ms step_avg:97.38ms +step:1321/1695 train_time:128644ms step_avg:97.38ms +step:1322/1695 train_time:128743ms step_avg:97.38ms +step:1323/1695 
train_time:128841ms step_avg:97.39ms +step:1324/1695 train_time:128939ms step_avg:97.39ms +step:1325/1695 train_time:129038ms step_avg:97.39ms +step:1326/1695 train_time:129136ms step_avg:97.39ms +step:1327/1695 train_time:129233ms step_avg:97.39ms +step:1328/1695 train_time:129332ms step_avg:97.39ms +step:1329/1695 train_time:129429ms step_avg:97.39ms +step:1330/1695 train_time:129525ms step_avg:97.39ms +step:1331/1695 train_time:129623ms step_avg:97.39ms +step:1332/1695 train_time:129721ms step_avg:97.39ms +step:1333/1695 train_time:129820ms step_avg:97.39ms +step:1334/1695 train_time:129917ms step_avg:97.39ms +step:1335/1695 train_time:130015ms step_avg:97.39ms +step:1336/1695 train_time:130112ms step_avg:97.39ms +step:1337/1695 train_time:130209ms step_avg:97.39ms +step:1338/1695 train_time:130307ms step_avg:97.39ms +step:1339/1695 train_time:130405ms step_avg:97.39ms +step:1340/1695 train_time:130503ms step_avg:97.39ms +step:1341/1695 train_time:130601ms step_avg:97.39ms +step:1342/1695 train_time:130699ms step_avg:97.39ms +step:1343/1695 train_time:130797ms step_avg:97.39ms +step:1344/1695 train_time:130894ms step_avg:97.39ms +step:1345/1695 train_time:130991ms step_avg:97.39ms +step:1346/1695 train_time:131088ms step_avg:97.39ms +step:1347/1695 train_time:131185ms step_avg:97.39ms +step:1348/1695 train_time:131283ms step_avg:97.39ms +step:1349/1695 train_time:131381ms step_avg:97.39ms +step:1350/1695 train_time:131481ms step_avg:97.39ms +step:1351/1695 train_time:131579ms step_avg:97.39ms +step:1352/1695 train_time:131676ms step_avg:97.39ms +step:1353/1695 train_time:131774ms step_avg:97.39ms +step:1354/1695 train_time:131871ms step_avg:97.39ms +step:1355/1695 train_time:131969ms step_avg:97.39ms +step:1356/1695 train_time:132066ms step_avg:97.39ms +step:1357/1695 train_time:132164ms step_avg:97.39ms +step:1358/1695 train_time:132262ms step_avg:97.39ms +step:1359/1695 train_time:132360ms step_avg:97.40ms +step:1360/1695 train_time:132459ms step_avg:97.40ms +step:1361/1695 train_time:132558ms step_avg:97.40ms +step:1362/1695 train_time:132655ms step_avg:97.40ms +step:1363/1695 train_time:132753ms step_avg:97.40ms +step:1364/1695 train_time:132850ms step_avg:97.40ms +step:1365/1695 train_time:132948ms step_avg:97.40ms +step:1366/1695 train_time:133046ms step_avg:97.40ms +step:1367/1695 train_time:133144ms step_avg:97.40ms +step:1368/1695 train_time:133242ms step_avg:97.40ms +step:1369/1695 train_time:133340ms step_avg:97.40ms +step:1370/1695 train_time:133439ms step_avg:97.40ms +step:1371/1695 train_time:133537ms step_avg:97.40ms +step:1372/1695 train_time:133634ms step_avg:97.40ms +step:1373/1695 train_time:133732ms step_avg:97.40ms +step:1374/1695 train_time:133829ms step_avg:97.40ms +step:1375/1695 train_time:133926ms step_avg:97.40ms +step:1375/1695 val_loss:3.3508 train_time:134022ms step_avg:97.47ms +step:1376/1695 train_time:134049ms step_avg:97.42ms +step:1377/1695 train_time:134131ms step_avg:97.41ms +step:1378/1695 train_time:134229ms step_avg:97.41ms +step:1379/1695 train_time:134327ms step_avg:97.41ms +step:1380/1695 train_time:134424ms step_avg:97.41ms +step:1381/1695 train_time:134877ms step_avg:97.67ms +step:1382/1695 train_time:134952ms step_avg:97.65ms +step:1383/1695 train_time:135047ms step_avg:97.65ms +step:1384/1695 train_time:135144ms step_avg:97.65ms +step:1385/1695 train_time:135241ms step_avg:97.65ms +step:1386/1695 train_time:135338ms step_avg:97.65ms +step:1387/1695 train_time:135434ms step_avg:97.65ms +step:1388/1695 train_time:135530ms step_avg:97.64ms 
+step:1389/1695 train_time:135626ms step_avg:97.64ms +step:1390/1695 train_time:135723ms step_avg:97.64ms +step:1391/1695 train_time:135831ms step_avg:97.65ms +step:1392/1695 train_time:135931ms step_avg:97.65ms +step:1393/1695 train_time:136030ms step_avg:97.65ms +step:1394/1695 train_time:136128ms step_avg:97.65ms +step:1395/1695 train_time:136225ms step_avg:97.65ms +step:1396/1695 train_time:136322ms step_avg:97.65ms +step:1397/1695 train_time:136419ms step_avg:97.65ms +step:1398/1695 train_time:136516ms step_avg:97.65ms +step:1399/1695 train_time:136613ms step_avg:97.65ms +step:1400/1695 train_time:136711ms step_avg:97.65ms +step:1401/1695 train_time:136810ms step_avg:97.65ms +step:1402/1695 train_time:136910ms step_avg:97.65ms +step:1403/1695 train_time:137008ms step_avg:97.65ms +step:1404/1695 train_time:137106ms step_avg:97.65ms +step:1405/1695 train_time:137204ms step_avg:97.65ms +step:1406/1695 train_time:137301ms step_avg:97.65ms +step:1407/1695 train_time:137398ms step_avg:97.65ms +step:1408/1695 train_time:137494ms step_avg:97.65ms +step:1409/1695 train_time:137591ms step_avg:97.65ms +step:1410/1695 train_time:137688ms step_avg:97.65ms +step:1411/1695 train_time:137787ms step_avg:97.65ms +step:1412/1695 train_time:137887ms step_avg:97.65ms +step:1413/1695 train_time:137987ms step_avg:97.66ms +step:1414/1695 train_time:138085ms step_avg:97.66ms +step:1415/1695 train_time:138183ms step_avg:97.66ms +step:1416/1695 train_time:138281ms step_avg:97.66ms +step:1417/1695 train_time:138378ms step_avg:97.66ms +step:1418/1695 train_time:138476ms step_avg:97.66ms +step:1419/1695 train_time:138574ms step_avg:97.66ms +step:1420/1695 train_time:138669ms step_avg:97.65ms +step:1421/1695 train_time:138766ms step_avg:97.65ms +step:1422/1695 train_time:138866ms step_avg:97.66ms +step:1423/1695 train_time:138965ms step_avg:97.66ms +step:1424/1695 train_time:139065ms step_avg:97.66ms +step:1425/1695 train_time:139163ms step_avg:97.66ms +step:1426/1695 train_time:139260ms step_avg:97.66ms +step:1427/1695 train_time:139359ms step_avg:97.66ms +step:1428/1695 train_time:139456ms step_avg:97.66ms +step:1429/1695 train_time:139553ms step_avg:97.66ms +step:1430/1695 train_time:139650ms step_avg:97.66ms +step:1431/1695 train_time:139748ms step_avg:97.66ms +step:1432/1695 train_time:139845ms step_avg:97.66ms +step:1433/1695 train_time:139945ms step_avg:97.66ms +step:1434/1695 train_time:140045ms step_avg:97.66ms +step:1435/1695 train_time:140144ms step_avg:97.66ms +step:1436/1695 train_time:140243ms step_avg:97.66ms +step:1437/1695 train_time:140341ms step_avg:97.66ms +step:1438/1695 train_time:140440ms step_avg:97.66ms +step:1439/1695 train_time:140539ms step_avg:97.66ms +step:1440/1695 train_time:140637ms step_avg:97.66ms +step:1441/1695 train_time:140733ms step_avg:97.66ms +step:1442/1695 train_time:140830ms step_avg:97.66ms +step:1443/1695 train_time:140927ms step_avg:97.66ms +step:1444/1695 train_time:141024ms step_avg:97.66ms +step:1445/1695 train_time:141124ms step_avg:97.66ms +step:1446/1695 train_time:141222ms step_avg:97.66ms +step:1447/1695 train_time:141321ms step_avg:97.66ms +step:1448/1695 train_time:141421ms step_avg:97.67ms +step:1449/1695 train_time:141519ms step_avg:97.67ms +step:1450/1695 train_time:141618ms step_avg:97.67ms +step:1451/1695 train_time:141716ms step_avg:97.67ms +step:1452/1695 train_time:141813ms step_avg:97.67ms +step:1453/1695 train_time:141909ms step_avg:97.67ms +step:1454/1695 train_time:142006ms step_avg:97.67ms +step:1455/1695 train_time:142104ms step_avg:97.67ms 
+step:1456/1695 train_time:142202ms step_avg:97.67ms +step:1457/1695 train_time:142302ms step_avg:97.67ms +step:1458/1695 train_time:142401ms step_avg:97.67ms +step:1459/1695 train_time:142500ms step_avg:97.67ms +step:1460/1695 train_time:142599ms step_avg:97.67ms +step:1461/1695 train_time:142697ms step_avg:97.67ms +step:1462/1695 train_time:142795ms step_avg:97.67ms +step:1463/1695 train_time:142892ms step_avg:97.67ms +step:1464/1695 train_time:142989ms step_avg:97.67ms +step:1465/1695 train_time:143087ms step_avg:97.67ms +step:1466/1695 train_time:143185ms step_avg:97.67ms +step:1467/1695 train_time:143283ms step_avg:97.67ms +step:1468/1695 train_time:143382ms step_avg:97.67ms +step:1469/1695 train_time:143481ms step_avg:97.67ms +step:1470/1695 train_time:143579ms step_avg:97.67ms +step:1471/1695 train_time:143678ms step_avg:97.67ms +step:1472/1695 train_time:143775ms step_avg:97.67ms +step:1473/1695 train_time:143872ms step_avg:97.67ms +step:1474/1695 train_time:143968ms step_avg:97.67ms +step:1475/1695 train_time:144065ms step_avg:97.67ms +step:1476/1695 train_time:144163ms step_avg:97.67ms +step:1477/1695 train_time:144261ms step_avg:97.67ms +step:1478/1695 train_time:144359ms step_avg:97.67ms +step:1479/1695 train_time:144458ms step_avg:97.67ms +step:1480/1695 train_time:144557ms step_avg:97.67ms +step:1481/1695 train_time:144654ms step_avg:97.67ms +step:1482/1695 train_time:144751ms step_avg:97.67ms +step:1483/1695 train_time:144849ms step_avg:97.67ms +step:1484/1695 train_time:144946ms step_avg:97.67ms +step:1485/1695 train_time:145044ms step_avg:97.67ms +step:1486/1695 train_time:145141ms step_avg:97.67ms +step:1487/1695 train_time:145238ms step_avg:97.67ms +step:1488/1695 train_time:145335ms step_avg:97.67ms +step:1489/1695 train_time:145433ms step_avg:97.67ms +step:1490/1695 train_time:145532ms step_avg:97.67ms +step:1491/1695 train_time:145630ms step_avg:97.67ms +step:1492/1695 train_time:145728ms step_avg:97.67ms +step:1493/1695 train_time:145826ms step_avg:97.67ms +step:1494/1695 train_time:145924ms step_avg:97.67ms +step:1495/1695 train_time:146023ms step_avg:97.67ms +step:1496/1695 train_time:146121ms step_avg:97.67ms +step:1497/1695 train_time:146218ms step_avg:97.67ms +step:1498/1695 train_time:146316ms step_avg:97.67ms +step:1499/1695 train_time:146414ms step_avg:97.67ms +step:1500/1695 train_time:146512ms step_avg:97.67ms +step:1500/1695 val_loss:3.3173 train_time:146608ms step_avg:97.74ms +step:1501/1695 train_time:146635ms step_avg:97.69ms +step:1502/1695 train_time:146715ms step_avg:97.68ms +step:1503/1695 train_time:146815ms step_avg:97.68ms +step:1504/1695 train_time:146912ms step_avg:97.68ms +step:1505/1695 train_time:147009ms step_avg:97.68ms +step:1506/1695 train_time:147105ms step_avg:97.68ms +step:1507/1695 train_time:147202ms step_avg:97.68ms +step:1508/1695 train_time:147299ms step_avg:97.68ms +step:1509/1695 train_time:147395ms step_avg:97.68ms +step:1510/1695 train_time:147491ms step_avg:97.68ms +step:1511/1695 train_time:147591ms step_avg:97.68ms +step:1512/1695 train_time:147692ms step_avg:97.68ms +step:1513/1695 train_time:147792ms step_avg:97.68ms +step:1514/1695 train_time:147890ms step_avg:97.68ms +step:1515/1695 train_time:147987ms step_avg:97.68ms +step:1516/1695 train_time:148084ms step_avg:97.68ms +step:1517/1695 train_time:148181ms step_avg:97.68ms +step:1518/1695 train_time:148278ms step_avg:97.68ms +step:1519/1695 train_time:148375ms step_avg:97.68ms +step:1520/1695 train_time:148472ms step_avg:97.68ms +step:1521/1695 train_time:148570ms 
step_avg:97.68ms +step:1522/1695 train_time:148669ms step_avg:97.68ms +step:1523/1695 train_time:148769ms step_avg:97.68ms +step:1524/1695 train_time:148867ms step_avg:97.68ms +step:1525/1695 train_time:148965ms step_avg:97.68ms +step:1526/1695 train_time:149062ms step_avg:97.68ms +step:1527/1695 train_time:149159ms step_avg:97.68ms +step:1528/1695 train_time:149256ms step_avg:97.68ms +step:1529/1695 train_time:149352ms step_avg:97.68ms +step:1530/1695 train_time:149448ms step_avg:97.68ms +step:1531/1695 train_time:149546ms step_avg:97.68ms +step:1532/1695 train_time:149645ms step_avg:97.68ms +step:1533/1695 train_time:149745ms step_avg:97.68ms +step:1534/1695 train_time:149844ms step_avg:97.68ms +step:1535/1695 train_time:149943ms step_avg:97.68ms +step:1536/1695 train_time:150041ms step_avg:97.68ms +step:1537/1695 train_time:150138ms step_avg:97.68ms +step:1538/1695 train_time:150235ms step_avg:97.68ms +step:1539/1695 train_time:150333ms step_avg:97.68ms +step:1540/1695 train_time:150429ms step_avg:97.68ms +step:1541/1695 train_time:150527ms step_avg:97.68ms +step:1542/1695 train_time:150625ms step_avg:97.68ms +step:1543/1695 train_time:150724ms step_avg:97.68ms +step:1544/1695 train_time:150823ms step_avg:97.68ms +step:1545/1695 train_time:150922ms step_avg:97.68ms +step:1546/1695 train_time:151020ms step_avg:97.68ms +step:1547/1695 train_time:151118ms step_avg:97.68ms +step:1548/1695 train_time:151214ms step_avg:97.68ms +step:1549/1695 train_time:151311ms step_avg:97.68ms +step:1550/1695 train_time:151407ms step_avg:97.68ms +step:1551/1695 train_time:151505ms step_avg:97.68ms +step:1552/1695 train_time:151855ms step_avg:97.84ms +step:1553/1695 train_time:152033ms step_avg:97.90ms +step:1554/1695 train_time:152130ms step_avg:97.90ms +step:1555/1695 train_time:152226ms step_avg:97.89ms +step:1556/1695 train_time:152323ms step_avg:97.89ms +step:1557/1695 train_time:152420ms step_avg:97.89ms +step:1558/1695 train_time:152516ms step_avg:97.89ms +step:1559/1695 train_time:152612ms step_avg:97.89ms +step:1560/1695 train_time:152708ms step_avg:97.89ms +step:1561/1695 train_time:152804ms step_avg:97.89ms +step:1562/1695 train_time:152908ms step_avg:97.89ms +step:1563/1695 train_time:153011ms step_avg:97.90ms +step:1564/1695 train_time:153110ms step_avg:97.90ms +step:1565/1695 train_time:153207ms step_avg:97.90ms +step:1566/1695 train_time:153305ms step_avg:97.90ms +step:1567/1695 train_time:153401ms step_avg:97.89ms +step:1568/1695 train_time:153499ms step_avg:97.89ms +step:1569/1695 train_time:153596ms step_avg:97.89ms +step:1570/1695 train_time:153694ms step_avg:97.89ms +step:1571/1695 train_time:153791ms step_avg:97.89ms +step:1572/1695 train_time:153889ms step_avg:97.89ms +step:1573/1695 train_time:153989ms step_avg:97.89ms +step:1574/1695 train_time:154087ms step_avg:97.90ms +step:1575/1695 train_time:154185ms step_avg:97.90ms +step:1576/1695 train_time:154283ms step_avg:97.90ms +step:1577/1695 train_time:154380ms step_avg:97.89ms +step:1578/1695 train_time:154477ms step_avg:97.89ms +step:1579/1695 train_time:154574ms step_avg:97.89ms +step:1580/1695 train_time:154672ms step_avg:97.89ms +step:1581/1695 train_time:154769ms step_avg:97.89ms +step:1582/1695 train_time:154867ms step_avg:97.89ms +step:1583/1695 train_time:154967ms step_avg:97.89ms +step:1584/1695 train_time:155066ms step_avg:97.90ms +step:1585/1695 train_time:155164ms step_avg:97.90ms +step:1586/1695 train_time:155262ms step_avg:97.90ms +step:1587/1695 train_time:155360ms step_avg:97.90ms +step:1588/1695 train_time:155457ms 
step_avg:97.89ms +step:1589/1695 train_time:155554ms step_avg:97.89ms +step:1590/1695 train_time:155651ms step_avg:97.89ms +step:1591/1695 train_time:155749ms step_avg:97.89ms +step:1592/1695 train_time:155846ms step_avg:97.89ms +step:1593/1695 train_time:155945ms step_avg:97.89ms +step:1594/1695 train_time:156044ms step_avg:97.89ms +step:1595/1695 train_time:156143ms step_avg:97.90ms +step:1596/1695 train_time:156240ms step_avg:97.89ms +step:1597/1695 train_time:156338ms step_avg:97.89ms +step:1598/1695 train_time:156435ms step_avg:97.89ms +step:1599/1695 train_time:156532ms step_avg:97.89ms +step:1600/1695 train_time:156629ms step_avg:97.89ms +step:1601/1695 train_time:156727ms step_avg:97.89ms +step:1602/1695 train_time:156825ms step_avg:97.89ms +step:1603/1695 train_time:156923ms step_avg:97.89ms +step:1604/1695 train_time:157022ms step_avg:97.89ms +step:1605/1695 train_time:157121ms step_avg:97.89ms +step:1606/1695 train_time:157220ms step_avg:97.90ms +step:1607/1695 train_time:157318ms step_avg:97.90ms +step:1608/1695 train_time:157416ms step_avg:97.90ms +step:1609/1695 train_time:157515ms step_avg:97.90ms +step:1610/1695 train_time:157611ms step_avg:97.90ms +step:1611/1695 train_time:157709ms step_avg:97.90ms +step:1612/1695 train_time:157806ms step_avg:97.89ms +step:1613/1695 train_time:157904ms step_avg:97.89ms +step:1614/1695 train_time:158003ms step_avg:97.90ms +step:1615/1695 train_time:158102ms step_avg:97.90ms +step:1616/1695 train_time:158200ms step_avg:97.90ms +step:1617/1695 train_time:158299ms step_avg:97.90ms +step:1618/1695 train_time:158398ms step_avg:97.90ms +step:1619/1695 train_time:158495ms step_avg:97.90ms +step:1620/1695 train_time:158593ms step_avg:97.90ms +step:1621/1695 train_time:158691ms step_avg:97.90ms +step:1622/1695 train_time:158787ms step_avg:97.90ms +step:1623/1695 train_time:158885ms step_avg:97.90ms +step:1624/1695 train_time:158983ms step_avg:97.90ms +step:1625/1695 train_time:159082ms step_avg:97.90ms +step:1625/1695 val_loss:3.2899 train_time:159178ms step_avg:97.96ms +step:1626/1695 train_time:159206ms step_avg:97.91ms +step:1627/1695 train_time:159288ms step_avg:97.90ms +step:1628/1695 train_time:159387ms step_avg:97.90ms +step:1629/1695 train_time:159485ms step_avg:97.90ms +step:1630/1695 train_time:159582ms step_avg:97.90ms +step:1631/1695 train_time:159679ms step_avg:97.90ms +step:1632/1695 train_time:159776ms step_avg:97.90ms +step:1633/1695 train_time:159872ms step_avg:97.90ms +step:1634/1695 train_time:159968ms step_avg:97.90ms +step:1635/1695 train_time:160065ms step_avg:97.90ms +step:1636/1695 train_time:160166ms step_avg:97.90ms +step:1637/1695 train_time:160267ms step_avg:97.90ms +step:1638/1695 train_time:160367ms step_avg:97.90ms +step:1639/1695 train_time:160466ms step_avg:97.90ms +step:1640/1695 train_time:160563ms step_avg:97.90ms +step:1641/1695 train_time:160661ms step_avg:97.90ms +step:1642/1695 train_time:160759ms step_avg:97.90ms +step:1643/1695 train_time:160856ms step_avg:97.90ms +step:1644/1695 train_time:160953ms step_avg:97.90ms +step:1645/1695 train_time:161050ms step_avg:97.90ms +step:1646/1695 train_time:161148ms step_avg:97.90ms +step:1647/1695 train_time:161247ms step_avg:97.90ms +step:1648/1695 train_time:161346ms step_avg:97.90ms +step:1649/1695 train_time:161445ms step_avg:97.90ms +step:1650/1695 train_time:161544ms step_avg:97.91ms +step:1651/1695 train_time:161641ms step_avg:97.90ms +step:1652/1695 train_time:161739ms step_avg:97.91ms +step:1653/1695 train_time:161838ms step_avg:97.91ms +step:1654/1695 
train_time:161935ms step_avg:97.91ms +step:1655/1695 train_time:162032ms step_avg:97.90ms +step:1656/1695 train_time:162128ms step_avg:97.90ms +step:1657/1695 train_time:162226ms step_avg:97.90ms +step:1658/1695 train_time:162326ms step_avg:97.90ms +step:1659/1695 train_time:162424ms step_avg:97.90ms +step:1660/1695 train_time:162522ms step_avg:97.90ms +step:1661/1695 train_time:162620ms step_avg:97.90ms +step:1662/1695 train_time:162717ms step_avg:97.90ms +step:1663/1695 train_time:162815ms step_avg:97.90ms +step:1664/1695 train_time:162913ms step_avg:97.90ms +step:1665/1695 train_time:163010ms step_avg:97.90ms +step:1666/1695 train_time:163107ms step_avg:97.90ms +step:1667/1695 train_time:163205ms step_avg:97.90ms +step:1668/1695 train_time:163305ms step_avg:97.90ms +step:1669/1695 train_time:163404ms step_avg:97.91ms +step:1670/1695 train_time:163503ms step_avg:97.91ms +step:1671/1695 train_time:163601ms step_avg:97.91ms +step:1672/1695 train_time:163699ms step_avg:97.91ms +step:1673/1695 train_time:163797ms step_avg:97.91ms +step:1674/1695 train_time:163896ms step_avg:97.91ms +step:1675/1695 train_time:163994ms step_avg:97.91ms +step:1676/1695 train_time:164090ms step_avg:97.91ms +step:1677/1695 train_time:164187ms step_avg:97.91ms +step:1678/1695 train_time:164285ms step_avg:97.91ms +step:1679/1695 train_time:164384ms step_avg:97.91ms +step:1680/1695 train_time:164482ms step_avg:97.91ms +step:1681/1695 train_time:164581ms step_avg:97.91ms +step:1682/1695 train_time:164679ms step_avg:97.91ms +step:1683/1695 train_time:164777ms step_avg:97.91ms +step:1684/1695 train_time:164875ms step_avg:97.91ms +step:1685/1695 train_time:164972ms step_avg:97.91ms +step:1686/1695 train_time:165068ms step_avg:97.91ms +step:1687/1695 train_time:165167ms step_avg:97.91ms +step:1688/1695 train_time:165265ms step_avg:97.91ms +step:1689/1695 train_time:165363ms step_avg:97.91ms +step:1690/1695 train_time:165460ms step_avg:97.91ms +step:1691/1695 train_time:165558ms step_avg:97.91ms +step:1692/1695 train_time:165656ms step_avg:97.91ms +step:1693/1695 train_time:165754ms step_avg:97.91ms +step:1694/1695 train_time:165851ms step_avg:97.91ms +step:1695/1695 train_time:165949ms step_avg:97.90ms +step:1695/1695 val_loss:3.2780 train_time:166044ms step_avg:97.96ms +peak memory allocated: 34000 MiB reserved: 49636 MiB diff --git a/records/082725_FA3/be1069a9-64f4-4316-bd26-4a7f5b697509.txt b/records/082725_FA3/be1069a9-64f4-4316-bd26-4a7f5b697509.txt new file mode 100644 index 000000000..789ccc0d1 --- /dev/null +++ b/records/082725_FA3/be1069a9-64f4-4316-bd26-4a7f5b697509.txt @@ -0,0 +1,2808 @@ +import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +from dataclasses import dataclass +from functools import lru_cache +from pathlib import Path + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +#torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 + +# 
----------------------------------------------------------------------------- +# Custom operators: FP8 matmul by @YouJiacheng + +@torch.library.custom_op("nanogpt::mm", mutates_args=()) +def mm_op(x: Tensor, w: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor, Tensor]: + @torch.compile + def impl(x: Tensor, w: Tensor): + assert x.is_contiguous() and w.is_contiguous() + x_f8 = x.div(x_s).to(torch.float8_e4m3fn) + w_f8 = w.div(w_s).to(torch.float8_e4m3fn) + out = torch._scaled_mm( + x_f8, + w_f8.T, + out_dtype=torch.bfloat16, + scale_a=x.new_tensor(x_s, dtype=torch.float32), + scale_b=x.new_tensor(w_s, dtype=torch.float32), + use_fast_accum=True, + ) + return out, x_f8, w_f8 + + return impl(x, w) + +@mm_op.register_fake +def _(x: Tensor, w: Tensor, *_): + assert x.ndim == w.ndim == 2 + assert x.shape[1] == w.shape[1] + assert x.device == w.device + assert x.is_contiguous() and w.is_contiguous() + return x @ w.T, x.to(torch.float8_e4m3fn), w.to(torch.float8_e4m3fn) + +@torch.library.custom_op("nanogpt::mm_backward", mutates_args=()) +def mm_backward_op(g: Tensor, x_f8: Tensor, w_f8: Tensor, x_s: float, w_s: float, grad_s: float) -> tuple[Tensor, Tensor]: + @torch.compile + def impl(grad: Tensor, x_f8: Tensor, w_f8: Tensor): + assert grad.is_contiguous() + x_inv_s = grad.new_tensor(x_s, dtype=torch.float32) + w_inv_s = grad.new_tensor(w_s, dtype=torch.float32) + grad_inv_s = grad.new_tensor(grad_s, dtype=torch.float32) + grad_f8 = grad.div(grad_s).to(torch.float8_e5m2) + grad_x = torch._scaled_mm( + grad_f8, + w_f8.T.contiguous().T, + out_dtype=torch.bfloat16, + scale_a=grad_inv_s, + scale_b=w_inv_s, + use_fast_accum=False, + ) + # faster than grad_f8_t @ x_f8, for (d_out, d_in) == (50304, 768) + grad_w = torch._scaled_mm( + x_f8.T.contiguous(), + grad_f8.T.contiguous().T, + out_dtype=torch.float32, + scale_a=x_inv_s, + scale_b=grad_inv_s, + use_fast_accum=False, + ).T + return grad_x, grad_w + + return impl(g, x_f8, w_f8) + +@mm_backward_op.register_fake +def _(g: Tensor, x_f8: Tensor, w_f8: Tensor, *_): + return x_f8.to(torch.bfloat16), w_f8.T.contiguous().T.to(torch.float32) + +def backward(ctx, grad_out: Tensor, *_): + x_f8, w_f8 = ctx.saved_tensors + x_s, w_s, grad_s = ctx.scales + grad_x, grad_w = torch.ops.nanogpt.mm_backward( + grad_out, x_f8, w_f8, x_s, w_s, grad_s + ) + return grad_x, grad_w, None, None, None + +def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): + *_, x_s, w_s, grad_s = inputs + _, x_f8, w_f8 = output + ctx.save_for_backward(x_f8, w_f8) + ctx.scales = x_s, w_s, grad_s + ctx.set_materialize_grads(False) + +mm_op.register_autograd(backward, setup_context=setup_context) + +# ----------------------------------------------------------------------------- +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + 
batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + 
configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha + accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): + """ + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, 
meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() + if G.size(-2) > G.size(-1): + X = X.mT + + # Ensure spectral norm is at most 1 + X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + + # Perform the NS iterations + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies + + if G.size(-2) > G.size(-1): + X = X.mT + return X + +# ----------------------------------------------------------------------------- +# Muon optimizer + +class Muon(torch.optim.Optimizer): + """ + Muon - MomentUm Orthogonalized by Newton-schulz + + https://kellerjordan.github.io/posts/muon/ + + Muon internally runs standard SGD-momentum, and then performs an orthogonalization post- + processing step, in which each 2D parameter's update is replaced with the nearest orthogonal + matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has + the advantage that it can be stably run in bfloat16 on the GPU. + + Warning: This optimizer should not be used for the embedding layer, the final fully connected layer, + or any {0,1}-D parameters; those should all be optimized by a standard method (e.g., AdamW). + """ + def __init__(self, params, lr=0.02, weight_decay=0.01, momentum=0.95): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + + @torch.no_grad() + def step(self): + # Efficient systems-wise implementation of step developed by @YouJiacheng, + # @KonstantinWilleke, @alexrgilbert, @adricarda, @tuttyfrutyee, @vdlad, + # @ryanyang0, and @vagrawal. 
+ rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + grad = torch.empty_like(params[-1]) + grad_pad = [param.grad for param in params] + [torch.zeros_like(params[-1])] * world_size + for base_i in range(0, len(params), world_size): + if base_i + rank < len(params): + grad = params[base_i + rank].grad + # This gives strange dynamo warnings + reduce_scatter_futures.append(dist.reduce_scatter(grad, grad_pad[base_i:base_i + world_size], op=dist.ReduceOp.AVG, async_op=True).get_future()) + + idx = 0 + for group in self.param_groups: + params: list[Tensor] = group["params"] + params_pad = params + [torch.empty_like(params[-1])] * world_size + momentum = group["momentum"] + for base_i in range(0, len(params), world_size): + reduce_scatter_futures[idx].wait() + if base_i + rank < len(params): + p = params[base_i + rank] + grad = p.grad + eff_lr = group["lr"] * max(1, p.size(-2) / p.size(-1)) ** 0.5 * getattr(p, "lr_mul", 1.0) + eff_weight_decay = group["lr"] * group["weight_decay"] * getattr(p, "wd_mul", 1.0) + state = self.state[p] + if len(state) == 0: + state["momentum_buffer"] = torch.zeros_like(grad) + momentum_buffer = state["momentum_buffer"] + p.mul_(1 - eff_weight_decay) + momentum_buffer.lerp_(grad, 1 - momentum) + grad = grad.lerp_(momentum_buffer, momentum) + v = newton_schulz_triton(grad) + p.add_(other=v, alpha=-eff_lr) + idx += 1 + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +class DistAdam(torch.optim.Optimizer): + def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + params = list(params) + sizes = {p.shape for p in params} + # create one buffer per unique parameter-size + param_groups = [] + for size in sizes: + group_params = [p for p in params if p.shape == size] + param_groups.append(dict(params=group_params)) + super().__init__(param_groups, defaults) + # DistributedAdam implementation by @vagrawal + + @torch.compile + @torch.no_grad() + def step(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + reduce_scatter_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] + grad_slices = [] + for group in self.param_groups: + params: list[Tensor] = group["params"] + for base_i in range(len(params)): + grad = params[base_i].grad + rank_size = grad.shape[0] // world_size + grad_slice = torch.empty_like(grad[:rank_size]) + reduce_scatter_futures.append(dist.reduce_scatter_tensor(grad_slice, grad, op=dist.ReduceOp.AVG, async_op=True).get_future()) + grad_slices.append(grad_slice) + + idx = 0 + for group in self.param_groups: + beta1, beta2 = group['betas'] + eps = group['eps'] + wd = group['weight_decay'] + params = group['params'] + for base in range(len(params)): + reduce_scatter_futures[idx].wait() + p = params[base] + rank_size = p.shape[0] // world_size + p_slice = p[rank * rank_size:(rank + 1) * rank_size] + lr = group['lr'] * getattr(p, "lr_mul", 1.0) + state = self.state[p] + g_slice = grad_slices[idx] + # State init + if not state: + state['step'] = torch.tensor(0, dtype=torch.int64, device=p.device) + state['exp_avg'] = torch.zeros_like(p_slice) + 
state['exp_avg_sq'] = torch.zeros_like(p_slice) + exp_avg = state['exp_avg'] + exp_avg_sq = state['exp_avg_sq'] + state['step'] += 1 + t = state['step'] + # weight decay + if wd != 0: + eff_weight_decay = lr * wd * getattr(p, "wd_mul", 1.0) + p_slice.mul_(1 - eff_weight_decay) + # update running averages + exp_avg.mul_(beta1).add_(g_slice, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(g_slice, g_slice, value=1 - beta2) + # bias corrections + bias1 = 1 - beta1 ** t + bias2 = 1 - beta2 ** t + # compute step + denom = exp_avg_sq.sqrt().add_(eps) + step_size = lr * (torch.sqrt(bias2) / bias1) + update = exp_avg.div(denom).mul_(step_size) + p_slice.add_(other=update, alpha=-1.0) + idx += 1 + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() + +# ----------------------------------------------------------------------------- +# PyTorch nn.Module definitions for the model + +def norm(x: Tensor): + return F.rms_norm(x, (x.size(-1),)) + +class CastedLinear(nn.Linear): + def __init__(self, in_features: int, out_features: int, use_fp8=False, x_s=1.0, w_s=1.0, grad_s=1.0): + super().__init__(in_features, out_features, bias=False) + self.use_fp8 = use_fp8 + self.x_s = x_s + self.w_s = w_s + self.grad_s = grad_s + + def reset_parameters(self) -> None: + std = 0.5 * (self.in_features ** -0.5) # 0.5 is a bit better than the default 1/sqrt(3) + bound = (3 ** 0.5) * std + with torch.no_grad(): + self.weight.uniform_(-bound, bound) + + def forward(self, x: Tensor): + if self.use_fp8 and self.training: + _x = x.flatten(0, -2) + out: Tensor = torch.ops.nanogpt.mm(_x, self.weight, x_s=self.x_s, w_s=self.w_s, grad_s=self.grad_s)[0] + return out.reshape(*x.shape[:-1], -1) + else: + return F.linear(x, self.weight.type_as(x)) + +class Rotary(nn.Module): + def __init__(self, dim: int, max_seq_len: int): + super().__init__() + # half-truncate RoPE by @YouJiacheng (w/ base freq tuning) + angular_freq = (1 / 1024) ** torch.linspace(0, 1, steps=dim//4, dtype=torch.float32) + angular_freq = torch.cat([angular_freq, angular_freq.new_zeros(dim//4)]) + t = torch.arange(max_seq_len, dtype=torch.float32) + theta = torch.einsum("i,j -> ij", t, angular_freq) + self.cos = nn.Buffer(theta.cos(), persistent=False) + self.sin = nn.Buffer(theta.sin(), persistent=False) + + def forward(self, x_BTHD: Tensor): + assert self.cos.size(0) >= x_BTHD.size(-3) + cos, sin = self.cos[None, :x_BTHD.size(-3), None, :], self.sin[None, :x_BTHD.size(-3), None, :] + x1, x2 = x_BTHD.to(dtype=torch.float32).chunk(2, dim=-1) + y1 = x1 * cos + x2 * sin + y2 = x1 * (-sin) + x2 * cos + return torch.cat((y1, y2), 3).type_as(x_BTHD) + +class CausalSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): + super().__init__() + self.num_heads = num_heads + self.head_dim = head_dim + hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng + # https://x.com/hi_tysam/status/1879699187107033311 + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero + self.rotary = Rotary(head_dim, max_seq_len) + # scale the attention logits by 
given constant, instead of the default head_dim**-0.5, by @leloykun + # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 + self.attn_scale = 0.12 + + # sparse gated attention to enable context based no-op by @classiclarryd + self.attn_gate_dim = 12 + self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads) + self.attn_gate.weight.detach().zero_() + + def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int): + B, T = x.size(0), x.size(1) # batch size, sequence length + + q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2) + q, k = norm(q), norm(k) # QK norm @Grad62304977 + q, k = self.rotary(q), self.rotary(k) + if ve is not None: + v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977 + else: # skip mid-layers token value embeddings by @YouJiacheng + v = lambdas[0] * v + + y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal + y = y.view(B, T, self.num_heads, self.head_dim) + y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1) + y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side + y = F.linear(y, self.qkvo_w[3].type_as(y)) + return y + +class MLP(nn.Module): + def __init__(self, dim: int): + super().__init__() + hdim = 4 * dim + # make both matrices have the same shape because optimizer sorts params by shape + # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size + self.c_fc = nn.Parameter(torch.empty(dim, hdim)) + self.c_proj = nn.Parameter(torch.empty(dim, hdim)) + std = 0.5 * (dim ** -0.5) + bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng + with torch.no_grad(): + self.c_fc.uniform_(-bound, bound) + self.c_proj.zero_() # zero init suggested by @Grad62304977 + + def forward(self, x: Tensor): + x = F.linear(x, self.c_fc.T.type_as(x)) + x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977 + x = F.linear(x, self.c_proj.type_as(x)) + return x + +class Block(nn.Module): + def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int): + super().__init__() + # skip attention of blocks.7 (the 8th layer) by @YouJiacheng + self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None + self.mlp = MLP(dim) + + def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int): + x = lambdas[0] * x + lambdas[1] * x0 + if self.attn is not None: + x = x + self.attn(norm(x), ve, sa_lambdas, bm_size) + x = x + self.mlp(norm(x)) + return x + +# ----------------------------------------------------------------------------- +# The main model + +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + +class GPT(nn.Module): + def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim: int, max_seq_len: int): + super().__init__() + vocab_size = next_multiple_of_n(vocab_size, n=128) + self.embed = nn.Embedding(vocab_size, model_dim) + # token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual implementation following https://arxiv.org/abs/2410.17897 + # value embedding code simplification inspired by @ragulpr 
https://github.com/KellerJordan/modded-nanogpt/pull/78 + self.value_embeds = nn.ModuleList([nn.Embedding(vocab_size, model_dim) for _ in range(3)]) + self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)]) + # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. + # suggested to me by @Grad62304977. this originates from Karpathy's experiments. + use_fp8 = not os.environ.get("DISABLE_FP8", False) + self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448) + self.lm_head.weight.detach().zero_() # @Grad62304977 + # Add learnable skip connection weights for decoder layers + assert num_layers % 2 == 0 + pad = (-num_layers * 5) % dist.get_world_size() + self.scalars = nn.Parameter(torch.cat([ + torch.ones(num_layers), # skip_weights + *[torch.tensor([1.0, 0.0]) for _ in range(num_layers)], # block lambdas + *[torch.tensor([0.5, 0.5]) for _ in range(num_layers)], # SA lambdas + torch.ones(pad), + ])) + # set learning rates + for param in self.embed.parameters(): + param.lr_mul = 75. + for param in self.value_embeds.parameters(): + param.lr_mul = 75. + self.lm_head.weight.lr_mul = 1.0 + self.scalars.lr_mul = 5.0 + + + def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int): + assert input_seq.ndim == 2 + + ve = [value_embed(input_seq) for value_embed in self.value_embeds] + # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure + ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]] + assert len(ve) == len(self.blocks) + + long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth + bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm] + assert len(bm_sizes) == len(self.blocks) + + x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977 + + # U-net design by @brendanh0gan + skip_connections = [] + skip_weights = self.scalars[:(len(self.blocks) // 2)] + lambdas = self.scalars[1 * len(self.blocks): 3 * len(self.blocks)].view(-1, 2) + sa_lambdas = self.scalars[3 * len(self.blocks): 5 * len(self.blocks)].view(-1, 2) + + n = len(self.blocks) // 2 + + for i in range(len(self.blocks)): + if i >= n: + x = x + skip_weights[i - n] * skip_connections.pop() + x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i]) + if i < n: + skip_connections.append(x) + + x = norm(x) + logits = self.lm_head(x).float() + # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1) + logits = 30 * torch.sigmoid(logits / 7.5) + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), + reduction="sum" if self.training else "mean") + return loss + +# ----------------------------------------------------------------------------- +# Distributed data loader + +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) # header is 256 int32 + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) # number of tokens (claimed) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) # avoid pin_memory copy by @YouJiacheng + f.seek(256 * 4) + 
nbytes = f.readinto(tokens.numpy()) # avoid bytes->array copy by @YouJiacheng + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +class EOSBatchFinder: + # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd + def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256): + # Precompute EOS positions once per shard + self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy() + self.i = 0 # pointer into eos_idx (start EOS for next step) + self.pos = 0 # logical stream position within this shard + self.world_size = world_size + def seek(self, pos: int): + # Set pointer to the first EOS >= pos + self.i = np.searchsorted(self.eos_idx, pos) + if self.i >= len(self.eos_idx): + raise StopIteration("Seek past last EOS.") + self.pos = pos + def next_batch(self, batch_size_local: int, seq_len: int): + n = len(self.eos_idx) + if self.i >= n: + raise StopIteration("No more EOS in this shard.") + starts = [[] for _ in range(self.world_size)] + idx = self.i + cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1 + for r in range(self.world_size): + for _ in range(batch_size_local): + start = cur + 1 + target = start + seq_len # need seq_len tokens before next EOS + j = np.searchsorted(self.eos_idx, target) + if j >= n: + raise StopIteration("Insufficient EOS ahead; hit tail of shard.") + starts[r].append(start) + idx = j + cur = self.eos_idx[idx] # next seq must also start at a new doc + advance = self.eos_idx[idx] - self.pos # move stream to the last end + self.pos += advance + self.i = idx + return starts, advance + + +def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True): + # align_to_bos: each sequence begins with Beginning of Sequence token and sequences don't overlap + rank = dist.get_rank() if dist.is_initialized() else 0 + world_size = dist.get_world_size() if dist.is_initialized() else 1 + assert batch_size % world_size == 0, "Batch size must be divisible by world size" + + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + if not files: + raise FileNotFoundError(f"No files found for pattern: {filename_pattern}") + + file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + + finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None + if align_to_bos: finder.seek(pos) + + while True: + batch_size_local = batch_size // world_size + num_tokens_global = batch_size * seq_len + + if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + + if align_to_bos: + try: + batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len) + start_idxs = batch_starts[rank] + except StopIteration: + # This shard is exhausted, load the next one in the next loop iteration. 
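+                # (the replacement finder below starts at i=0, pos=0, matching the
+                # reset pos, so BOS alignment carries over into the fresh shard)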
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+                finder = EOSBatchFinder(tokens, world_size=world_size)
+                continue
+
+            bufs = [tokens[s: s + seq_len + 1] for s in start_idxs]
+            buf = torch.stack(bufs, dim=0)
+            _inputs = buf[:, :-1]
+            _targets = buf[:, 1:]
+        else:
+            batch_span = num_tokens_global
+            start_pos_local = pos + rank * (batch_size_local * seq_len)
+            end_pos_local = start_pos_local + (batch_size_local * seq_len)
+
+            buf = tokens[start_pos_local: end_pos_local + 1]
+
+            _inputs = buf[:-1].view(batch_size_local, seq_len)
+            _targets = buf[1:].view(batch_size_local, seq_len)
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True)
+        )
+
+        pos += batch_span
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (batch_size, seq_len) via .send()
+            new_batch_size, new_seq_len = new_params
+            assert new_batch_size % world_size == 0, "New batch size must be divisible by world size"
+            batch_size = new_batch_size
+            seq_len = new_seq_len
+
+
+# -----------------------------------------------------------------------------
+# int main
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_seq_len: int = 1024 * 2
+    train_batch_size: int = 24 * 8
+    val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size.
+    # optimization
+    num_iterations: int = 1695 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
+    # evaluation and logging
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    bandwidth: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
+args = Hyperparameters()
+
+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
+# torchrun sets these env variables
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
+assert torch.cuda.is_available()
+device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
+torch.cuda.set_device(device)
+dist.init_process_group(backend="nccl", device_id=device)
+dist.barrier()
+master_process = (rank == 0) # this process will do logging, checkpointing etc.
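+
+# A minimal sketch (comments only, never executed) of the EOS-alignment invariant
+# the loader above provides, traced on a toy tensor; the values here are
+# illustrative, not taken from this run:
+#   toy = torch.tensor([50256, 5, 6, 50256, 7, 8, 9, 50256])
+#   finder = EOSBatchFinder(toy, world_size=1)
+#   starts, advance = finder.next_batch(batch_size_local=1, seq_len=2)
+#   # -> starts == [[1]], advance == 3: the sequence [5, 6] begins right after
+#   #    the first EOS, and the stream advances to the EOS closing that document.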
+
+# begin logging
+logfile = None
+if master_process:
+    run_id = args.run_id
+    os.makedirs("logs", exist_ok=True)
+    logfile = f"logs/{run_id}.txt"
+    print(logfile)
+def print0(s, console=False):
+    if master_process:
+        with open(logfile, "a") as f:
+            if console:
+                print(s)
+            print(s, file=f)
+
+# begin by printing this file (the Python code)
+print0(code)
+print0("="*100)
+# log information about the hardware/software environment this is running on
+print0(f"Running Python {sys.version}")
+print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
+def nvidia_smi():
+    import subprocess # avoid top level import
+    return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
+print0(nvidia_smi())
+print0("="*100)
+
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_seq_len, args.val_seq_len)
+).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+
+# collect the parameters to optimize
+hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+scalar_params = [p for p in model.parameters() if p.ndim < 2]
+head_params = [model.lm_head.weight]
+
+# init the optimizer(s)
+# small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = DistAdam(scalar_params + head_params + embed_params, lr=0.008, betas=(0.8, 0.95), eps=1e-10, weight_decay=0.0)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, weight_decay=0.0)
+optimizers = [optimizer1, optimizer2]
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr_and_ws(step: int):
+    x = step / (1 + args.num_iterations) # progress in training
+    assert 0 <= x < 1
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
+        w = (1 - x) / args.cooldown_frac
+        lr = w * 1.0 + (1 - w) * 0.1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return lr, args.ws_schedule[ws_idx]
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+
+# Warmup the training kernels, then re-initialize the state so we aren't cheating
+warmup_steps = 60
+initial_state = dict(model=copy.deepcopy(model.state_dict()),
+                     optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len)
+for step in range(warmup_steps):
+    inputs, targets = next(train_loader)
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, ws, ws // 2).backward()
+    for opt in optimizers:
+        opt.step()
+    model.zero_grad(set_to_none=True)
+model.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del train_loader, initial_state
+
+########################################
+#        Training and validation       #
+########################################
+
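+# For reference, the schedule above evaluated at a few steps (plain arithmetic on
+# the defaults; approximate values):
+#   get_lr_and_ws(0)    -> (1.0, 3)     # constant-lr phase, shortest window
+#   get_lr_and_ws(850)  -> (1.0, 7)     # still pre-cooldown (x < 0.55), middle window
+#   get_lr_and_ws(1600) -> (~0.21, 11)  # linear cooldown toward 0.1, longest window
+# The returned ws is multiplied by args.bandwidth (128 tokens) inside GPT.forward,
+# and ws // 2 is passed alongside it as the short window for the non-"long" layers.
+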
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len) +training_time_ms = 0 +# start the clock +torch.cuda.synchronize() +t0 = time.perf_counter() +# begin training +train_steps = args.num_iterations +for step in range(train_steps + 1): + last_step = (step == train_steps) + lr, ws = get_lr_and_ws(step) + + # --------------- VALIDATION SECTION ----------------- + if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + # stop the clock + torch.cuda.synchronize() + training_time_ms += 1000 * (time.perf_counter() - t0) + model.eval() + assert args.val_tokens % (world_size * args.val_seq_len) == 0 + val_steps = args.val_tokens // (world_size * args.val_seq_len) + val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False) + val_loss = 0 + with torch.no_grad(): + for _ in range(val_steps): + inputs, targets = next(val_loader) + val_loss += model(inputs, targets, ws, ws // 2) + val_loss /= val_steps + del val_loader + dist.all_reduce(val_loss, op=dist.ReduceOp.AVG) + print0(f"step:{step}/{train_steps} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/max(step, 1):.2f}ms", console=True) + model.train() + # start the clock again + torch.cuda.synchronize() + t0 = time.perf_counter() + + if last_step: + if master_process and args.save_checkpoint: + log = dict(step=step, code=code, model=model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers]) + os.makedirs(f"logs/{run_id}", exist_ok=True) + torch.save(log, f"logs/{run_id}/state_step{step:06d}.pt") + # the last step only has the validation loop, so break to avoid training + break + + # --------------- TRAINING SECTION ----------------- + for _ in range(grad_accum_steps): + inputs, targets = next(train_loader) + model(inputs, targets, ws, ws // 2).backward() + # set optimization hyperparameters + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * lr + for group in optimizer2.param_groups: + frac = min(step / 300, 1) # momentum warmup for muon + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + # step the optimizers + for opt in optimizers: + opt.step() + # null the gradients + model.zero_grad(set_to_none=True) + # logging + approx_training_time_ms = training_time_ms + 1000 * (time.perf_counter() - t0) + print0(f"step:{step+1}/{train_steps} train_time:{approx_training_time_ms:.0f}ms step_avg:{approx_training_time_ms/(step + 1):.2f}ms", console=True) + +print0(f"peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) +dist.destroy_process_group() +==================================================================================================== +Running Python 3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0] +Running PyTorch 2.9.0.dev20250718+cu126 compiled for CUDA 12.6 +Running Triton version 3.4.0 +Wed Aug 27 03:47:47 2025 ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.6 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:00:0B.0 Off | Off | +| N/A 32C P0 116W / 700W | 5858MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 1 NVIDIA H100 80GB HBM3 On | 00000000:00:0C.0 Off | Off | +| N/A 36C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 2 NVIDIA H100 80GB HBM3 On | 00000000:00:0D.0 Off | Off | +| N/A 37C P0 116W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 3 NVIDIA H100 80GB HBM3 On | 00000000:00:0E.0 Off | Off | +| N/A 32C P0 114W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 4 NVIDIA H100 80GB HBM3 On | 00000000:00:0F.0 Off | Off | +| N/A 32C P0 112W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 5 NVIDIA H100 80GB HBM3 On | 00000000:00:10.0 Off | Off | +| N/A 38C P0 118W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 6 NVIDIA H100 80GB HBM3 On | 00000000:00:11.0 Off | Off | +| N/A 36C P0 113W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ +| 7 NVIDIA H100 80GB HBM3 On | 00000000:00:12.0 Off | Off | +| N/A 33C P0 115W / 700W | 1519MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| ++---------------------------------------------------------------------------------------+ + +==================================================================================================== +step:0/1695 val_loss:10.8258 train_time:0ms step_avg:0.03ms +step:1/1695 train_time:520ms step_avg:519.82ms +step:2/1695 train_time:546ms step_avg:272.89ms +step:3/1695 train_time:615ms step_avg:204.90ms +step:4/1695 train_time:707ms step_avg:176.66ms +step:5/1695 train_time:800ms step_avg:159.97ms +step:6/1695 train_time:892ms step_avg:148.74ms +step:7/1695 train_time:986ms step_avg:140.81ms +step:8/1695 train_time:1079ms step_avg:134.91ms +step:9/1695 train_time:1173ms step_avg:130.29ms +step:10/1695 train_time:1266ms step_avg:126.65ms +step:11/1695 train_time:1360ms step_avg:123.62ms +step:12/1695 train_time:1455ms step_avg:121.29ms +step:13/1695 train_time:1553ms step_avg:119.48ms +step:14/1695 train_time:1649ms step_avg:117.78ms +step:15/1695 train_time:1745ms step_avg:116.36ms +step:16/1695 train_time:1840ms step_avg:115.01ms +step:17/1695 train_time:1933ms step_avg:113.72ms +step:18/1695 train_time:2027ms step_avg:112.62ms +step:19/1695 train_time:2121ms step_avg:111.64ms +step:20/1695 train_time:2215ms step_avg:110.74ms +step:21/1695 train_time:2309ms step_avg:109.95ms +step:22/1695 train_time:2405ms step_avg:109.30ms 
+step:23/1695 train_time:2501ms step_avg:108.76ms +step:24/1695 train_time:2596ms step_avg:108.18ms +step:25/1695 train_time:2692ms step_avg:107.66ms +step:26/1695 train_time:2787ms step_avg:107.19ms +step:27/1695 train_time:2882ms step_avg:106.75ms +step:28/1695 train_time:2976ms step_avg:106.30ms +step:29/1695 train_time:3070ms step_avg:105.87ms +step:30/1695 train_time:3164ms step_avg:105.48ms +step:31/1695 train_time:3258ms step_avg:105.10ms +step:32/1695 train_time:3352ms step_avg:104.76ms +step:33/1695 train_time:3448ms step_avg:104.49ms +step:34/1695 train_time:3546ms step_avg:104.28ms +step:35/1695 train_time:3641ms step_avg:104.04ms +step:36/1695 train_time:3735ms step_avg:103.76ms +step:37/1695 train_time:3830ms step_avg:103.51ms +step:38/1695 train_time:3925ms step_avg:103.28ms +step:39/1695 train_time:4019ms step_avg:103.05ms +step:40/1695 train_time:4113ms step_avg:102.83ms +step:41/1695 train_time:4208ms step_avg:102.63ms +step:42/1695 train_time:4302ms step_avg:102.43ms +step:43/1695 train_time:4396ms step_avg:102.22ms +step:44/1695 train_time:4490ms step_avg:102.06ms +step:45/1695 train_time:4587ms step_avg:101.92ms +step:46/1695 train_time:4682ms step_avg:101.78ms +step:47/1695 train_time:4776ms step_avg:101.62ms +step:48/1695 train_time:4871ms step_avg:101.47ms +step:49/1695 train_time:4965ms step_avg:101.33ms +step:50/1695 train_time:5061ms step_avg:101.22ms +step:51/1695 train_time:5154ms step_avg:101.05ms +step:52/1695 train_time:5248ms step_avg:100.93ms +step:53/1695 train_time:5343ms step_avg:100.82ms +step:54/1695 train_time:5439ms step_avg:100.73ms +step:55/1695 train_time:5533ms step_avg:100.59ms +step:56/1695 train_time:5628ms step_avg:100.50ms +step:57/1695 train_time:5723ms step_avg:100.41ms +step:58/1695 train_time:5819ms step_avg:100.33ms +step:59/1695 train_time:5913ms step_avg:100.22ms +step:60/1695 train_time:6008ms step_avg:100.13ms +step:61/1695 train_time:6101ms step_avg:100.02ms +step:62/1695 train_time:6195ms step_avg:99.91ms +step:63/1695 train_time:6289ms step_avg:99.82ms +step:64/1695 train_time:6384ms step_avg:99.75ms +step:65/1695 train_time:6479ms step_avg:99.68ms +step:66/1695 train_time:6573ms step_avg:99.59ms +step:67/1695 train_time:6668ms step_avg:99.52ms +step:68/1695 train_time:6762ms step_avg:99.45ms +step:69/1695 train_time:6856ms step_avg:99.37ms +step:70/1695 train_time:6950ms step_avg:99.29ms +step:71/1695 train_time:7044ms step_avg:99.21ms +step:72/1695 train_time:7138ms step_avg:99.14ms +step:73/1695 train_time:7232ms step_avg:99.07ms +step:74/1695 train_time:7327ms step_avg:99.02ms +step:75/1695 train_time:7423ms step_avg:98.97ms +step:76/1695 train_time:7518ms step_avg:98.92ms +step:77/1695 train_time:7613ms step_avg:98.87ms +step:78/1695 train_time:7709ms step_avg:98.83ms +step:79/1695 train_time:7803ms step_avg:98.78ms +step:80/1695 train_time:7897ms step_avg:98.72ms +step:81/1695 train_time:7991ms step_avg:98.65ms +step:82/1695 train_time:8085ms step_avg:98.60ms +step:83/1695 train_time:8180ms step_avg:98.56ms +step:84/1695 train_time:8274ms step_avg:98.50ms +step:85/1695 train_time:8368ms step_avg:98.45ms +step:86/1695 train_time:8463ms step_avg:98.40ms +step:87/1695 train_time:8556ms step_avg:98.35ms +step:88/1695 train_time:8651ms step_avg:98.31ms +step:89/1695 train_time:8747ms step_avg:98.29ms +step:90/1695 train_time:8843ms step_avg:98.26ms +step:91/1695 train_time:8938ms step_avg:98.22ms +step:92/1695 train_time:9031ms step_avg:98.16ms +step:93/1695 train_time:9125ms step_avg:98.12ms +step:94/1695 train_time:9220ms 
step_avg:98.08ms +step:95/1695 train_time:9314ms step_avg:98.04ms +step:96/1695 train_time:9408ms step_avg:98.00ms +step:97/1695 train_time:9503ms step_avg:97.97ms +step:98/1695 train_time:9597ms step_avg:97.93ms +step:99/1695 train_time:9691ms step_avg:97.89ms +step:100/1695 train_time:9787ms step_avg:97.87ms +step:101/1695 train_time:9881ms step_avg:97.84ms +step:102/1695 train_time:9975ms step_avg:97.80ms +step:103/1695 train_time:10069ms step_avg:97.76ms +step:104/1695 train_time:10164ms step_avg:97.74ms +step:105/1695 train_time:10258ms step_avg:97.70ms +step:106/1695 train_time:10352ms step_avg:97.66ms +step:107/1695 train_time:10447ms step_avg:97.64ms +step:108/1695 train_time:10543ms step_avg:97.62ms +step:109/1695 train_time:10637ms step_avg:97.59ms +step:110/1695 train_time:10732ms step_avg:97.56ms +step:111/1695 train_time:10827ms step_avg:97.54ms +step:112/1695 train_time:10922ms step_avg:97.52ms +step:113/1695 train_time:11016ms step_avg:97.49ms +step:114/1695 train_time:11110ms step_avg:97.46ms +step:115/1695 train_time:11205ms step_avg:97.44ms +step:116/1695 train_time:11299ms step_avg:97.41ms +step:117/1695 train_time:11393ms step_avg:97.38ms +step:118/1695 train_time:11488ms step_avg:97.35ms +step:119/1695 train_time:11583ms step_avg:97.33ms +step:120/1695 train_time:11677ms step_avg:97.31ms +step:121/1695 train_time:11771ms step_avg:97.28ms +step:122/1695 train_time:11867ms step_avg:97.27ms +step:123/1695 train_time:11961ms step_avg:97.24ms +step:124/1695 train_time:12054ms step_avg:97.21ms +step:125/1695 train_time:12149ms step_avg:97.19ms +step:125/1695 val_loss:4.3107 train_time:12241ms step_avg:97.93ms +step:126/1695 train_time:12268ms step_avg:97.36ms +step:127/1695 train_time:12344ms step_avg:97.20ms +step:128/1695 train_time:12448ms step_avg:97.25ms +step:129/1695 train_time:12544ms step_avg:97.24ms +step:130/1695 train_time:12638ms step_avg:97.21ms +step:131/1695 train_time:12730ms step_avg:97.18ms +step:132/1695 train_time:12824ms step_avg:97.15ms +step:133/1695 train_time:12918ms step_avg:97.13ms +step:134/1695 train_time:13011ms step_avg:97.10ms +step:135/1695 train_time:13105ms step_avg:97.07ms +step:136/1695 train_time:13198ms step_avg:97.04ms +step:137/1695 train_time:13292ms step_avg:97.02ms +step:138/1695 train_time:13389ms step_avg:97.02ms +step:139/1695 train_time:13485ms step_avg:97.01ms +step:140/1695 train_time:13580ms step_avg:97.00ms +step:141/1695 train_time:13675ms step_avg:96.98ms +step:142/1695 train_time:13770ms step_avg:96.97ms +step:143/1695 train_time:13862ms step_avg:96.94ms +step:144/1695 train_time:13955ms step_avg:96.91ms +step:145/1695 train_time:14049ms step_avg:96.89ms +step:146/1695 train_time:14144ms step_avg:96.87ms +step:147/1695 train_time:14238ms step_avg:96.86ms +step:148/1695 train_time:14333ms step_avg:96.84ms +step:149/1695 train_time:14427ms step_avg:96.83ms +step:150/1695 train_time:14524ms step_avg:96.82ms +step:151/1695 train_time:14617ms step_avg:96.80ms +step:152/1695 train_time:14711ms step_avg:96.78ms +step:153/1695 train_time:14805ms step_avg:96.76ms +step:154/1695 train_time:14899ms step_avg:96.75ms +step:155/1695 train_time:14992ms step_avg:96.72ms +step:156/1695 train_time:15085ms step_avg:96.70ms +step:157/1695 train_time:15180ms step_avg:96.69ms +step:158/1695 train_time:15274ms step_avg:96.67ms +step:159/1695 train_time:15368ms step_avg:96.66ms +step:160/1695 train_time:15463ms step_avg:96.65ms +step:161/1695 train_time:15558ms step_avg:96.63ms +step:162/1695 train_time:15652ms step_avg:96.62ms +step:163/1695 
train_time:15746ms step_avg:96.60ms +step:164/1695 train_time:15841ms step_avg:96.59ms +step:165/1695 train_time:15935ms step_avg:96.57ms +step:166/1695 train_time:16029ms step_avg:96.56ms +step:167/1695 train_time:16124ms step_avg:96.55ms +step:168/1695 train_time:16217ms step_avg:96.53ms +step:169/1695 train_time:16313ms step_avg:96.52ms +step:170/1695 train_time:16406ms step_avg:96.51ms +step:171/1695 train_time:16501ms step_avg:96.50ms +step:172/1695 train_time:16595ms step_avg:96.48ms +step:173/1695 train_time:16938ms step_avg:97.91ms +step:174/1695 train_time:17053ms step_avg:98.00ms +step:175/1695 train_time:17146ms step_avg:97.98ms +step:176/1695 train_time:17239ms step_avg:97.95ms +step:177/1695 train_time:17332ms step_avg:97.92ms +step:178/1695 train_time:17426ms step_avg:97.90ms +step:179/1695 train_time:17518ms step_avg:97.87ms +step:180/1695 train_time:17611ms step_avg:97.84ms +step:181/1695 train_time:17705ms step_avg:97.82ms +step:182/1695 train_time:17798ms step_avg:97.79ms +step:183/1695 train_time:17897ms step_avg:97.80ms +step:184/1695 train_time:17993ms step_avg:97.79ms +step:185/1695 train_time:18087ms step_avg:97.77ms +step:186/1695 train_time:18181ms step_avg:97.75ms +step:187/1695 train_time:18275ms step_avg:97.73ms +step:188/1695 train_time:18369ms step_avg:97.71ms +step:189/1695 train_time:18462ms step_avg:97.68ms +step:190/1695 train_time:18555ms step_avg:97.66ms +step:191/1695 train_time:18648ms step_avg:97.63ms +step:192/1695 train_time:18743ms step_avg:97.62ms +step:193/1695 train_time:18839ms step_avg:97.61ms +step:194/1695 train_time:18934ms step_avg:97.60ms +step:195/1695 train_time:19028ms step_avg:97.58ms +step:196/1695 train_time:19122ms step_avg:97.56ms +step:197/1695 train_time:19216ms step_avg:97.55ms +step:198/1695 train_time:19310ms step_avg:97.53ms +step:199/1695 train_time:19404ms step_avg:97.51ms +step:200/1695 train_time:19498ms step_avg:97.49ms +step:201/1695 train_time:19591ms step_avg:97.47ms +step:202/1695 train_time:19684ms step_avg:97.45ms +step:203/1695 train_time:19779ms step_avg:97.43ms +step:204/1695 train_time:19873ms step_avg:97.42ms +step:205/1695 train_time:19967ms step_avg:97.40ms +step:206/1695 train_time:20062ms step_avg:97.39ms +step:207/1695 train_time:20157ms step_avg:97.38ms +step:208/1695 train_time:20251ms step_avg:97.36ms +step:209/1695 train_time:20345ms step_avg:97.34ms +step:210/1695 train_time:20439ms step_avg:97.33ms +step:211/1695 train_time:20532ms step_avg:97.31ms +step:212/1695 train_time:20625ms step_avg:97.29ms +step:213/1695 train_time:20719ms step_avg:97.27ms +step:214/1695 train_time:20814ms step_avg:97.26ms +step:215/1695 train_time:20908ms step_avg:97.25ms +step:216/1695 train_time:21002ms step_avg:97.23ms +step:217/1695 train_time:21097ms step_avg:97.22ms +step:218/1695 train_time:21191ms step_avg:97.21ms +step:219/1695 train_time:21285ms step_avg:97.19ms +step:220/1695 train_time:21380ms step_avg:97.18ms +step:221/1695 train_time:21474ms step_avg:97.17ms +step:222/1695 train_time:21568ms step_avg:97.15ms +step:223/1695 train_time:21664ms step_avg:97.15ms +step:224/1695 train_time:21756ms step_avg:97.13ms +step:225/1695 train_time:21850ms step_avg:97.11ms +step:226/1695 train_time:21945ms step_avg:97.10ms +step:227/1695 train_time:22040ms step_avg:97.09ms +step:228/1695 train_time:22134ms step_avg:97.08ms +step:229/1695 train_time:22228ms step_avg:97.06ms +step:230/1695 train_time:22322ms step_avg:97.05ms +step:231/1695 train_time:22417ms step_avg:97.04ms +step:232/1695 train_time:22510ms step_avg:97.02ms 
+step:233/1695 train_time:22604ms step_avg:97.01ms +step:234/1695 train_time:22699ms step_avg:97.00ms +step:235/1695 train_time:22792ms step_avg:96.99ms +step:236/1695 train_time:22886ms step_avg:96.97ms +step:237/1695 train_time:22981ms step_avg:96.96ms +step:238/1695 train_time:23075ms step_avg:96.95ms +step:239/1695 train_time:23170ms step_avg:96.94ms +step:240/1695 train_time:23264ms step_avg:96.93ms +step:241/1695 train_time:23358ms step_avg:96.92ms +step:242/1695 train_time:23451ms step_avg:96.91ms +step:243/1695 train_time:23546ms step_avg:96.90ms +step:244/1695 train_time:23641ms step_avg:96.89ms +step:245/1695 train_time:23735ms step_avg:96.88ms +step:246/1695 train_time:23829ms step_avg:96.87ms +step:247/1695 train_time:23923ms step_avg:96.85ms +step:248/1695 train_time:24018ms step_avg:96.85ms +step:249/1695 train_time:24112ms step_avg:96.83ms +step:250/1695 train_time:24206ms step_avg:96.82ms +step:250/1695 val_loss:3.9776 train_time:24298ms step_avg:97.19ms +step:251/1695 train_time:24328ms step_avg:96.92ms +step:252/1695 train_time:24399ms step_avg:96.82ms +step:253/1695 train_time:24498ms step_avg:96.83ms +step:254/1695 train_time:24592ms step_avg:96.82ms +step:255/1695 train_time:24685ms step_avg:96.81ms +step:256/1695 train_time:24778ms step_avg:96.79ms +step:257/1695 train_time:24870ms step_avg:96.77ms +step:258/1695 train_time:24964ms step_avg:96.76ms +step:259/1695 train_time:25057ms step_avg:96.74ms +step:260/1695 train_time:25150ms step_avg:96.73ms +step:261/1695 train_time:25243ms step_avg:96.72ms +step:262/1695 train_time:25338ms step_avg:96.71ms +step:263/1695 train_time:25434ms step_avg:96.71ms +step:264/1695 train_time:25529ms step_avg:96.70ms +step:265/1695 train_time:25625ms step_avg:96.70ms +step:266/1695 train_time:25719ms step_avg:96.69ms +step:267/1695 train_time:25812ms step_avg:96.68ms +step:268/1695 train_time:25905ms step_avg:96.66ms +step:269/1695 train_time:25998ms step_avg:96.65ms +step:270/1695 train_time:26091ms step_avg:96.63ms +step:271/1695 train_time:26185ms step_avg:96.62ms +step:272/1695 train_time:26279ms step_avg:96.61ms +step:273/1695 train_time:26373ms step_avg:96.60ms +step:274/1695 train_time:26468ms step_avg:96.60ms +step:275/1695 train_time:26563ms step_avg:96.59ms +step:276/1695 train_time:26658ms step_avg:96.59ms +step:277/1695 train_time:26751ms step_avg:96.57ms +step:278/1695 train_time:26845ms step_avg:96.56ms +step:279/1695 train_time:26939ms step_avg:96.56ms +step:280/1695 train_time:27032ms step_avg:96.54ms +step:281/1695 train_time:27125ms step_avg:96.53ms +step:282/1695 train_time:27219ms step_avg:96.52ms +step:283/1695 train_time:27313ms step_avg:96.51ms +step:284/1695 train_time:27407ms step_avg:96.50ms +step:285/1695 train_time:27502ms step_avg:96.50ms +step:286/1695 train_time:27597ms step_avg:96.49ms +step:287/1695 train_time:27691ms step_avg:96.49ms +step:288/1695 train_time:27785ms step_avg:96.48ms +step:289/1695 train_time:27879ms step_avg:96.47ms +step:290/1695 train_time:27973ms step_avg:96.46ms +step:291/1695 train_time:28067ms step_avg:96.45ms +step:292/1695 train_time:28160ms step_avg:96.44ms +step:293/1695 train_time:28254ms step_avg:96.43ms +step:294/1695 train_time:28347ms step_avg:96.42ms +step:295/1695 train_time:28442ms step_avg:96.41ms +step:296/1695 train_time:28537ms step_avg:96.41ms +step:297/1695 train_time:28632ms step_avg:96.40ms +step:298/1695 train_time:28726ms step_avg:96.40ms +step:299/1695 train_time:28821ms step_avg:96.39ms +step:300/1695 train_time:28915ms step_avg:96.38ms +step:301/1695 
train_time:29008ms step_avg:96.37ms +step:302/1695 train_time:29101ms step_avg:96.36ms +step:303/1695 train_time:29195ms step_avg:96.35ms +step:304/1695 train_time:29289ms step_avg:96.34ms +step:305/1695 train_time:29383ms step_avg:96.34ms +step:306/1695 train_time:29478ms step_avg:96.33ms +step:307/1695 train_time:29572ms step_avg:96.32ms +step:308/1695 train_time:29667ms step_avg:96.32ms +step:309/1695 train_time:29761ms step_avg:96.31ms +step:310/1695 train_time:29855ms step_avg:96.31ms +step:311/1695 train_time:29949ms step_avg:96.30ms +step:312/1695 train_time:30044ms step_avg:96.29ms +step:313/1695 train_time:30138ms step_avg:96.29ms +step:314/1695 train_time:30231ms step_avg:96.28ms +step:315/1695 train_time:30325ms step_avg:96.27ms +step:316/1695 train_time:30419ms step_avg:96.26ms +step:317/1695 train_time:30513ms step_avg:96.26ms +step:318/1695 train_time:30607ms step_avg:96.25ms +step:319/1695 train_time:30702ms step_avg:96.24ms +step:320/1695 train_time:30796ms step_avg:96.24ms +step:321/1695 train_time:30890ms step_avg:96.23ms +step:322/1695 train_time:30984ms step_avg:96.22ms +step:323/1695 train_time:31078ms step_avg:96.22ms +step:324/1695 train_time:31171ms step_avg:96.21ms +step:325/1695 train_time:31265ms step_avg:96.20ms +step:326/1695 train_time:31359ms step_avg:96.19ms +step:327/1695 train_time:31452ms step_avg:96.18ms +step:328/1695 train_time:31545ms step_avg:96.17ms +step:329/1695 train_time:31640ms step_avg:96.17ms +step:330/1695 train_time:31735ms step_avg:96.17ms +step:331/1695 train_time:31829ms step_avg:96.16ms +step:332/1695 train_time:31923ms step_avg:96.15ms +step:333/1695 train_time:32016ms step_avg:96.14ms +step:334/1695 train_time:32109ms step_avg:96.14ms +step:335/1695 train_time:32203ms step_avg:96.13ms +step:336/1695 train_time:32298ms step_avg:96.12ms +step:337/1695 train_time:32392ms step_avg:96.12ms +step:338/1695 train_time:32486ms step_avg:96.11ms +step:339/1695 train_time:32580ms step_avg:96.11ms +step:340/1695 train_time:32674ms step_avg:96.10ms +step:341/1695 train_time:32767ms step_avg:96.09ms +step:342/1695 train_time:32862ms step_avg:96.09ms +step:343/1695 train_time:32957ms step_avg:96.08ms +step:344/1695 train_time:33050ms step_avg:96.08ms +step:345/1695 train_time:33394ms step_avg:96.79ms +step:346/1695 train_time:33465ms step_avg:96.72ms +step:347/1695 train_time:33557ms step_avg:96.71ms +step:348/1695 train_time:33650ms step_avg:96.70ms +step:349/1695 train_time:33744ms step_avg:96.69ms +step:350/1695 train_time:33837ms step_avg:96.68ms +step:351/1695 train_time:33930ms step_avg:96.67ms +step:352/1695 train_time:34023ms step_avg:96.66ms +step:353/1695 train_time:34116ms step_avg:96.65ms +step:354/1695 train_time:34209ms step_avg:96.64ms +step:355/1695 train_time:34305ms step_avg:96.63ms +step:356/1695 train_time:34401ms step_avg:96.63ms +step:357/1695 train_time:34497ms step_avg:96.63ms +step:358/1695 train_time:34591ms step_avg:96.62ms +step:359/1695 train_time:34685ms step_avg:96.61ms +step:360/1695 train_time:34779ms step_avg:96.61ms +step:361/1695 train_time:34871ms step_avg:96.60ms +step:362/1695 train_time:34965ms step_avg:96.59ms +step:363/1695 train_time:35060ms step_avg:96.58ms +step:364/1695 train_time:35153ms step_avg:96.57ms +step:365/1695 train_time:35246ms step_avg:96.56ms +step:366/1695 train_time:35341ms step_avg:96.56ms +step:367/1695 train_time:35436ms step_avg:96.56ms +step:368/1695 train_time:35530ms step_avg:96.55ms +step:369/1695 train_time:35625ms step_avg:96.54ms +step:370/1695 train_time:35719ms step_avg:96.54ms 
+step:371/1695 train_time:35812ms step_avg:96.53ms +step:372/1695 train_time:35905ms step_avg:96.52ms +step:373/1695 train_time:35999ms step_avg:96.51ms +step:374/1695 train_time:36092ms step_avg:96.50ms +step:375/1695 train_time:36185ms step_avg:96.49ms +step:375/1695 val_loss:3.8206 train_time:36277ms step_avg:96.74ms +step:376/1695 train_time:36303ms step_avg:96.55ms +step:377/1695 train_time:36376ms step_avg:96.49ms +step:378/1695 train_time:36477ms step_avg:96.50ms +step:379/1695 train_time:36572ms step_avg:96.50ms +step:380/1695 train_time:36667ms step_avg:96.49ms +step:381/1695 train_time:36760ms step_avg:96.48ms +step:382/1695 train_time:36852ms step_avg:96.47ms +step:383/1695 train_time:36946ms step_avg:96.46ms +step:384/1695 train_time:37039ms step_avg:96.45ms +step:385/1695 train_time:37131ms step_avg:96.44ms +step:386/1695 train_time:37224ms step_avg:96.44ms +step:387/1695 train_time:37319ms step_avg:96.43ms +step:388/1695 train_time:37415ms step_avg:96.43ms +step:389/1695 train_time:37511ms step_avg:96.43ms +step:390/1695 train_time:37607ms step_avg:96.43ms +step:391/1695 train_time:37701ms step_avg:96.42ms +step:392/1695 train_time:37794ms step_avg:96.41ms +step:393/1695 train_time:37888ms step_avg:96.41ms +step:394/1695 train_time:37982ms step_avg:96.40ms +step:395/1695 train_time:38075ms step_avg:96.39ms +step:396/1695 train_time:38168ms step_avg:96.39ms +step:397/1695 train_time:38263ms step_avg:96.38ms +step:398/1695 train_time:38358ms step_avg:96.38ms +step:399/1695 train_time:38452ms step_avg:96.37ms +step:400/1695 train_time:38548ms step_avg:96.37ms +step:401/1695 train_time:38643ms step_avg:96.37ms +step:402/1695 train_time:38737ms step_avg:96.36ms +step:403/1695 train_time:38830ms step_avg:96.35ms +step:404/1695 train_time:38924ms step_avg:96.35ms +step:405/1695 train_time:39017ms step_avg:96.34ms +step:406/1695 train_time:39110ms step_avg:96.33ms +step:407/1695 train_time:39203ms step_avg:96.32ms +step:408/1695 train_time:39296ms step_avg:96.31ms +step:409/1695 train_time:39391ms step_avg:96.31ms +step:410/1695 train_time:39486ms step_avg:96.31ms +step:411/1695 train_time:39580ms step_avg:96.30ms +step:412/1695 train_time:39673ms step_avg:96.29ms +step:413/1695 train_time:39768ms step_avg:96.29ms +step:414/1695 train_time:39863ms step_avg:96.29ms +step:415/1695 train_time:39957ms step_avg:96.28ms +step:416/1695 train_time:40050ms step_avg:96.27ms +step:417/1695 train_time:40144ms step_avg:96.27ms +step:418/1695 train_time:40237ms step_avg:96.26ms +step:419/1695 train_time:40331ms step_avg:96.25ms +step:420/1695 train_time:40425ms step_avg:96.25ms +step:421/1695 train_time:40519ms step_avg:96.25ms +step:422/1695 train_time:40613ms step_avg:96.24ms +step:423/1695 train_time:40707ms step_avg:96.23ms +step:424/1695 train_time:40802ms step_avg:96.23ms +step:425/1695 train_time:40895ms step_avg:96.22ms +step:426/1695 train_time:40990ms step_avg:96.22ms +step:427/1695 train_time:41085ms step_avg:96.22ms +step:428/1695 train_time:41178ms step_avg:96.21ms +step:429/1695 train_time:41272ms step_avg:96.20ms +step:430/1695 train_time:41365ms step_avg:96.20ms +step:431/1695 train_time:41459ms step_avg:96.19ms +step:432/1695 train_time:41553ms step_avg:96.19ms +step:433/1695 train_time:41648ms step_avg:96.18ms +step:434/1695 train_time:41743ms step_avg:96.18ms +step:435/1695 train_time:41837ms step_avg:96.18ms +step:436/1695 train_time:41930ms step_avg:96.17ms +step:437/1695 train_time:42025ms step_avg:96.17ms +step:438/1695 train_time:42119ms step_avg:96.16ms +step:439/1695 
train_time:42212ms step_avg:96.15ms +step:440/1695 train_time:42305ms step_avg:96.15ms +step:441/1695 train_time:42399ms step_avg:96.14ms +step:442/1695 train_time:42492ms step_avg:96.14ms +step:443/1695 train_time:42587ms step_avg:96.13ms +step:444/1695 train_time:42680ms step_avg:96.13ms +step:445/1695 train_time:42774ms step_avg:96.12ms +step:446/1695 train_time:42868ms step_avg:96.12ms +step:447/1695 train_time:42963ms step_avg:96.11ms +step:448/1695 train_time:43057ms step_avg:96.11ms +step:449/1695 train_time:43150ms step_avg:96.10ms +step:450/1695 train_time:43245ms step_avg:96.10ms +step:451/1695 train_time:43338ms step_avg:96.09ms +step:452/1695 train_time:43432ms step_avg:96.09ms +step:453/1695 train_time:43526ms step_avg:96.08ms +step:454/1695 train_time:43621ms step_avg:96.08ms +step:455/1695 train_time:43715ms step_avg:96.08ms +step:456/1695 train_time:43809ms step_avg:96.07ms +step:457/1695 train_time:43903ms step_avg:96.07ms +step:458/1695 train_time:43997ms step_avg:96.06ms +step:459/1695 train_time:44091ms step_avg:96.06ms +step:460/1695 train_time:44185ms step_avg:96.05ms +step:461/1695 train_time:44278ms step_avg:96.05ms +step:462/1695 train_time:44372ms step_avg:96.04ms +step:463/1695 train_time:44466ms step_avg:96.04ms +step:464/1695 train_time:44560ms step_avg:96.03ms +step:465/1695 train_time:44654ms step_avg:96.03ms +step:466/1695 train_time:44748ms step_avg:96.03ms +step:467/1695 train_time:44842ms step_avg:96.02ms +step:468/1695 train_time:44935ms step_avg:96.02ms +step:469/1695 train_time:45031ms step_avg:96.01ms +step:470/1695 train_time:45125ms step_avg:96.01ms +step:471/1695 train_time:45218ms step_avg:96.00ms +step:472/1695 train_time:45312ms step_avg:96.00ms +step:473/1695 train_time:45406ms step_avg:96.00ms +step:474/1695 train_time:45499ms step_avg:95.99ms +step:475/1695 train_time:45594ms step_avg:95.99ms +step:476/1695 train_time:45689ms step_avg:95.99ms +step:477/1695 train_time:45784ms step_avg:95.98ms +step:478/1695 train_time:45877ms step_avg:95.98ms +step:479/1695 train_time:45971ms step_avg:95.97ms +step:480/1695 train_time:46065ms step_avg:95.97ms +step:481/1695 train_time:46159ms step_avg:95.96ms +step:482/1695 train_time:46252ms step_avg:95.96ms +step:483/1695 train_time:46347ms step_avg:95.96ms +step:484/1695 train_time:46442ms step_avg:95.95ms +step:485/1695 train_time:46535ms step_avg:95.95ms +step:486/1695 train_time:46629ms step_avg:95.95ms +step:487/1695 train_time:46723ms step_avg:95.94ms +step:488/1695 train_time:46818ms step_avg:95.94ms +step:489/1695 train_time:46911ms step_avg:95.93ms +step:490/1695 train_time:47004ms step_avg:95.93ms +step:491/1695 train_time:47098ms step_avg:95.92ms +step:492/1695 train_time:47191ms step_avg:95.92ms +step:493/1695 train_time:47286ms step_avg:95.91ms +step:494/1695 train_time:47381ms step_avg:95.91ms +step:495/1695 train_time:47474ms step_avg:95.91ms +step:496/1695 train_time:47568ms step_avg:95.90ms +step:497/1695 train_time:47663ms step_avg:95.90ms +step:498/1695 train_time:47756ms step_avg:95.90ms +step:499/1695 train_time:47851ms step_avg:95.89ms +step:500/1695 train_time:47944ms step_avg:95.89ms +step:500/1695 val_loss:3.7169 train_time:48035ms step_avg:96.07ms +step:501/1695 train_time:48064ms step_avg:95.94ms +step:502/1695 train_time:48136ms step_avg:95.89ms +step:503/1695 train_time:48238ms step_avg:95.90ms +step:504/1695 train_time:48335ms step_avg:95.90ms +step:505/1695 train_time:48428ms step_avg:95.90ms +step:506/1695 train_time:48521ms step_avg:95.89ms +step:507/1695 train_time:48614ms 
step_avg:95.89ms +step:508/1695 train_time:48707ms step_avg:95.88ms +step:509/1695 train_time:48800ms step_avg:95.87ms +step:510/1695 train_time:48893ms step_avg:95.87ms +step:511/1695 train_time:48986ms step_avg:95.86ms +step:512/1695 train_time:49080ms step_avg:95.86ms +step:513/1695 train_time:49177ms step_avg:95.86ms +step:514/1695 train_time:49275ms step_avg:95.87ms +step:515/1695 train_time:49371ms step_avg:95.87ms +step:516/1695 train_time:49465ms step_avg:95.86ms +step:517/1695 train_time:49558ms step_avg:95.86ms +step:518/1695 train_time:49652ms step_avg:95.85ms +step:519/1695 train_time:49984ms step_avg:96.31ms +step:520/1695 train_time:50159ms step_avg:96.46ms +step:521/1695 train_time:50250ms step_avg:96.45ms +step:522/1695 train_time:50343ms step_avg:96.44ms +step:523/1695 train_time:50436ms step_avg:96.44ms +step:524/1695 train_time:50529ms step_avg:96.43ms +step:525/1695 train_time:50621ms step_avg:96.42ms +step:526/1695 train_time:50714ms step_avg:96.41ms +step:527/1695 train_time:50806ms step_avg:96.41ms +step:528/1695 train_time:50898ms step_avg:96.40ms +step:529/1695 train_time:50992ms step_avg:96.39ms +step:530/1695 train_time:51091ms step_avg:96.40ms +step:531/1695 train_time:51188ms step_avg:96.40ms +step:532/1695 train_time:51282ms step_avg:96.40ms +step:533/1695 train_time:51376ms step_avg:96.39ms +step:534/1695 train_time:51470ms step_avg:96.39ms +step:535/1695 train_time:51564ms step_avg:96.38ms +step:536/1695 train_time:51656ms step_avg:96.37ms +step:537/1695 train_time:51750ms step_avg:96.37ms +step:538/1695 train_time:51842ms step_avg:96.36ms +step:539/1695 train_time:51935ms step_avg:96.35ms +step:540/1695 train_time:52032ms step_avg:96.36ms +step:541/1695 train_time:52126ms step_avg:96.35ms +step:542/1695 train_time:52220ms step_avg:96.35ms +step:543/1695 train_time:52316ms step_avg:96.35ms +step:544/1695 train_time:52411ms step_avg:96.34ms +step:545/1695 train_time:52505ms step_avg:96.34ms +step:546/1695 train_time:52598ms step_avg:96.33ms +step:547/1695 train_time:52691ms step_avg:96.33ms +step:548/1695 train_time:52784ms step_avg:96.32ms +step:549/1695 train_time:52877ms step_avg:96.32ms +step:550/1695 train_time:52971ms step_avg:96.31ms +step:551/1695 train_time:53066ms step_avg:96.31ms +step:552/1695 train_time:53161ms step_avg:96.31ms +step:553/1695 train_time:53255ms step_avg:96.30ms +step:554/1695 train_time:53352ms step_avg:96.30ms +step:555/1695 train_time:53446ms step_avg:96.30ms +step:556/1695 train_time:53539ms step_avg:96.29ms +step:557/1695 train_time:53633ms step_avg:96.29ms +step:558/1695 train_time:53727ms step_avg:96.29ms +step:559/1695 train_time:53820ms step_avg:96.28ms +step:560/1695 train_time:53914ms step_avg:96.27ms +step:561/1695 train_time:54009ms step_avg:96.27ms +step:562/1695 train_time:54102ms step_avg:96.27ms +step:563/1695 train_time:54196ms step_avg:96.26ms +step:564/1695 train_time:54291ms step_avg:96.26ms +step:565/1695 train_time:54385ms step_avg:96.26ms +step:566/1695 train_time:54479ms step_avg:96.25ms +step:567/1695 train_time:54573ms step_avg:96.25ms +step:568/1695 train_time:54670ms step_avg:96.25ms +step:569/1695 train_time:54765ms step_avg:96.25ms +step:570/1695 train_time:54859ms step_avg:96.24ms +step:571/1695 train_time:54955ms step_avg:96.24ms +step:572/1695 train_time:55052ms step_avg:96.24ms +step:573/1695 train_time:55149ms step_avg:96.25ms +step:574/1695 train_time:55245ms step_avg:96.25ms +step:575/1695 train_time:55344ms step_avg:96.25ms +step:576/1695 train_time:55437ms step_avg:96.25ms +step:577/1695 
train_time:55534ms step_avg:96.25ms +step:578/1695 train_time:55631ms step_avg:96.25ms +step:579/1695 train_time:55727ms step_avg:96.25ms +step:580/1695 train_time:55823ms step_avg:96.25ms +step:581/1695 train_time:55918ms step_avg:96.24ms +step:582/1695 train_time:56015ms step_avg:96.25ms +step:583/1695 train_time:56111ms step_avg:96.25ms +step:584/1695 train_time:56209ms step_avg:96.25ms +step:585/1695 train_time:56306ms step_avg:96.25ms +step:586/1695 train_time:56402ms step_avg:96.25ms +step:587/1695 train_time:56497ms step_avg:96.25ms +step:588/1695 train_time:56593ms step_avg:96.25ms +step:589/1695 train_time:56689ms step_avg:96.25ms +step:590/1695 train_time:56786ms step_avg:96.25ms +step:591/1695 train_time:56882ms step_avg:96.25ms +step:592/1695 train_time:56978ms step_avg:96.25ms +step:593/1695 train_time:57074ms step_avg:96.25ms +step:594/1695 train_time:57172ms step_avg:96.25ms +step:595/1695 train_time:57269ms step_avg:96.25ms +step:596/1695 train_time:57365ms step_avg:96.25ms +step:597/1695 train_time:57461ms step_avg:96.25ms +step:598/1695 train_time:57555ms step_avg:96.25ms +step:599/1695 train_time:57652ms step_avg:96.25ms +step:600/1695 train_time:57749ms step_avg:96.25ms +step:601/1695 train_time:57846ms step_avg:96.25ms +step:602/1695 train_time:57942ms step_avg:96.25ms +step:603/1695 train_time:58037ms step_avg:96.25ms +step:604/1695 train_time:58133ms step_avg:96.25ms +step:605/1695 train_time:58229ms step_avg:96.25ms +step:606/1695 train_time:58326ms step_avg:96.25ms +step:607/1695 train_time:58421ms step_avg:96.25ms +step:608/1695 train_time:58516ms step_avg:96.24ms +step:609/1695 train_time:58613ms step_avg:96.25ms +step:610/1695 train_time:58710ms step_avg:96.25ms +step:611/1695 train_time:58807ms step_avg:96.25ms +step:612/1695 train_time:58903ms step_avg:96.25ms +step:613/1695 train_time:58998ms step_avg:96.24ms +step:614/1695 train_time:59095ms step_avg:96.25ms +step:615/1695 train_time:59191ms step_avg:96.25ms +step:616/1695 train_time:59288ms step_avg:96.25ms +step:617/1695 train_time:59384ms step_avg:96.25ms +step:618/1695 train_time:59479ms step_avg:96.24ms +step:619/1695 train_time:59577ms step_avg:96.25ms +step:620/1695 train_time:59674ms step_avg:96.25ms +step:621/1695 train_time:59772ms step_avg:96.25ms +step:622/1695 train_time:59868ms step_avg:96.25ms +step:623/1695 train_time:59964ms step_avg:96.25ms +step:624/1695 train_time:60059ms step_avg:96.25ms +step:625/1695 train_time:60155ms step_avg:96.25ms +step:625/1695 val_loss:3.6208 train_time:60249ms step_avg:96.40ms +step:626/1695 train_time:60275ms step_avg:96.29ms +step:627/1695 train_time:60358ms step_avg:96.26ms +step:628/1695 train_time:60456ms step_avg:96.27ms +step:629/1695 train_time:60551ms step_avg:96.27ms +step:630/1695 train_time:60646ms step_avg:96.26ms +step:631/1695 train_time:60741ms step_avg:96.26ms +step:632/1695 train_time:60836ms step_avg:96.26ms +step:633/1695 train_time:60930ms step_avg:96.26ms +step:634/1695 train_time:61025ms step_avg:96.25ms +step:635/1695 train_time:61122ms step_avg:96.25ms +step:636/1695 train_time:61217ms step_avg:96.25ms +step:637/1695 train_time:61315ms step_avg:96.26ms +step:638/1695 train_time:61413ms step_avg:96.26ms +step:639/1695 train_time:61509ms step_avg:96.26ms +step:640/1695 train_time:61606ms step_avg:96.26ms +step:641/1695 train_time:61702ms step_avg:96.26ms +step:642/1695 train_time:61797ms step_avg:96.26ms +step:643/1695 train_time:61892ms step_avg:96.26ms +step:644/1695 train_time:61988ms step_avg:96.25ms +step:645/1695 train_time:62084ms 
step_avg:96.25ms +step:646/1695 train_time:62181ms step_avg:96.26ms +step:647/1695 train_time:62278ms step_avg:96.26ms +step:648/1695 train_time:62376ms step_avg:96.26ms +step:649/1695 train_time:62472ms step_avg:96.26ms +step:650/1695 train_time:62568ms step_avg:96.26ms +step:651/1695 train_time:62664ms step_avg:96.26ms +step:652/1695 train_time:62760ms step_avg:96.26ms +step:653/1695 train_time:62855ms step_avg:96.26ms +step:654/1695 train_time:62950ms step_avg:96.25ms +step:655/1695 train_time:63045ms step_avg:96.25ms +step:656/1695 train_time:63141ms step_avg:96.25ms +step:657/1695 train_time:63238ms step_avg:96.25ms +step:658/1695 train_time:63335ms step_avg:96.25ms +step:659/1695 train_time:63431ms step_avg:96.25ms +step:660/1695 train_time:63529ms step_avg:96.26ms +step:661/1695 train_time:63626ms step_avg:96.26ms +step:662/1695 train_time:63721ms step_avg:96.26ms +step:663/1695 train_time:63816ms step_avg:96.25ms +step:664/1695 train_time:63911ms step_avg:96.25ms +step:665/1695 train_time:64007ms step_avg:96.25ms +step:666/1695 train_time:64103ms step_avg:96.25ms +step:667/1695 train_time:64199ms step_avg:96.25ms +step:668/1695 train_time:64295ms step_avg:96.25ms +step:669/1695 train_time:64390ms step_avg:96.25ms +step:670/1695 train_time:64487ms step_avg:96.25ms +step:671/1695 train_time:64585ms step_avg:96.25ms +step:672/1695 train_time:64682ms step_avg:96.25ms +step:673/1695 train_time:64778ms step_avg:96.25ms +step:674/1695 train_time:64873ms step_avg:96.25ms +step:675/1695 train_time:64969ms step_avg:96.25ms +step:676/1695 train_time:65065ms step_avg:96.25ms +step:677/1695 train_time:65161ms step_avg:96.25ms +step:678/1695 train_time:65257ms step_avg:96.25ms +step:679/1695 train_time:65353ms step_avg:96.25ms +step:680/1695 train_time:65449ms step_avg:96.25ms +step:681/1695 train_time:65545ms step_avg:96.25ms +step:682/1695 train_time:65642ms step_avg:96.25ms +step:683/1695 train_time:65738ms step_avg:96.25ms +step:684/1695 train_time:65833ms step_avg:96.25ms +step:685/1695 train_time:65929ms step_avg:96.25ms +step:686/1695 train_time:66024ms step_avg:96.25ms +step:687/1695 train_time:66120ms step_avg:96.25ms +step:688/1695 train_time:66216ms step_avg:96.24ms +step:689/1695 train_time:66311ms step_avg:96.24ms +step:690/1695 train_time:66408ms step_avg:96.24ms +step:691/1695 train_time:66766ms step_avg:96.62ms +step:692/1695 train_time:66929ms step_avg:96.72ms +step:693/1695 train_time:67024ms step_avg:96.72ms +step:694/1695 train_time:67118ms step_avg:96.71ms +step:695/1695 train_time:67212ms step_avg:96.71ms +step:696/1695 train_time:67307ms step_avg:96.71ms +step:697/1695 train_time:67402ms step_avg:96.70ms +step:698/1695 train_time:67496ms step_avg:96.70ms +step:699/1695 train_time:67590ms step_avg:96.70ms +step:700/1695 train_time:67686ms step_avg:96.69ms +step:701/1695 train_time:67789ms step_avg:96.70ms +step:702/1695 train_time:67889ms step_avg:96.71ms +step:703/1695 train_time:67986ms step_avg:96.71ms +step:704/1695 train_time:68082ms step_avg:96.71ms +step:705/1695 train_time:68176ms step_avg:96.70ms +step:706/1695 train_time:68272ms step_avg:96.70ms +step:707/1695 train_time:68368ms step_avg:96.70ms +step:708/1695 train_time:68463ms step_avg:96.70ms +step:709/1695 train_time:68558ms step_avg:96.70ms +step:710/1695 train_time:68653ms step_avg:96.69ms +step:711/1695 train_time:68750ms step_avg:96.70ms +step:712/1695 train_time:68848ms step_avg:96.70ms +step:713/1695 train_time:68946ms step_avg:96.70ms +step:714/1695 train_time:69043ms step_avg:96.70ms +step:715/1695 
train_time:69140ms step_avg:96.70ms +step:716/1695 train_time:69235ms step_avg:96.70ms +step:717/1695 train_time:69330ms step_avg:96.69ms +step:718/1695 train_time:69426ms step_avg:96.69ms +step:719/1695 train_time:69522ms step_avg:96.69ms +step:720/1695 train_time:69617ms step_avg:96.69ms +step:721/1695 train_time:69713ms step_avg:96.69ms +step:722/1695 train_time:69810ms step_avg:96.69ms +step:723/1695 train_time:69907ms step_avg:96.69ms +step:724/1695 train_time:70003ms step_avg:96.69ms +step:725/1695 train_time:70100ms step_avg:96.69ms +step:726/1695 train_time:70195ms step_avg:96.69ms +step:727/1695 train_time:70291ms step_avg:96.69ms +step:728/1695 train_time:70387ms step_avg:96.69ms +step:729/1695 train_time:70482ms step_avg:96.68ms +step:730/1695 train_time:70578ms step_avg:96.68ms +step:731/1695 train_time:70673ms step_avg:96.68ms +step:732/1695 train_time:70769ms step_avg:96.68ms +step:733/1695 train_time:70865ms step_avg:96.68ms +step:734/1695 train_time:70963ms step_avg:96.68ms +step:735/1695 train_time:71059ms step_avg:96.68ms +step:736/1695 train_time:71155ms step_avg:96.68ms +step:737/1695 train_time:71250ms step_avg:96.68ms +step:738/1695 train_time:71345ms step_avg:96.67ms +step:739/1695 train_time:71441ms step_avg:96.67ms +step:740/1695 train_time:71537ms step_avg:96.67ms +step:741/1695 train_time:71632ms step_avg:96.67ms +step:742/1695 train_time:71728ms step_avg:96.67ms +step:743/1695 train_time:71824ms step_avg:96.67ms +step:744/1695 train_time:71920ms step_avg:96.67ms +step:745/1695 train_time:72016ms step_avg:96.67ms +step:746/1695 train_time:72111ms step_avg:96.66ms +step:747/1695 train_time:72208ms step_avg:96.66ms +step:748/1695 train_time:72303ms step_avg:96.66ms +step:749/1695 train_time:72399ms step_avg:96.66ms +step:750/1695 train_time:72495ms step_avg:96.66ms +step:750/1695 val_loss:3.5658 train_time:72587ms step_avg:96.78ms +step:751/1695 train_time:72614ms step_avg:96.69ms +step:752/1695 train_time:72692ms step_avg:96.67ms +step:753/1695 train_time:72794ms step_avg:96.67ms +step:754/1695 train_time:72890ms step_avg:96.67ms +step:755/1695 train_time:72986ms step_avg:96.67ms +step:756/1695 train_time:73081ms step_avg:96.67ms +step:757/1695 train_time:73176ms step_avg:96.67ms +step:758/1695 train_time:73271ms step_avg:96.66ms +step:759/1695 train_time:73365ms step_avg:96.66ms +step:760/1695 train_time:73460ms step_avg:96.66ms +step:761/1695 train_time:73556ms step_avg:96.66ms +step:762/1695 train_time:73653ms step_avg:96.66ms +step:763/1695 train_time:73751ms step_avg:96.66ms +step:764/1695 train_time:73849ms step_avg:96.66ms +step:765/1695 train_time:73945ms step_avg:96.66ms +step:766/1695 train_time:74040ms step_avg:96.66ms +step:767/1695 train_time:74135ms step_avg:96.66ms +step:768/1695 train_time:74231ms step_avg:96.66ms +step:769/1695 train_time:74327ms step_avg:96.65ms +step:770/1695 train_time:74421ms step_avg:96.65ms +step:771/1695 train_time:74516ms step_avg:96.65ms +step:772/1695 train_time:74614ms step_avg:96.65ms +step:773/1695 train_time:74712ms step_avg:96.65ms +step:774/1695 train_time:74809ms step_avg:96.65ms +step:775/1695 train_time:74908ms step_avg:96.65ms +step:776/1695 train_time:75004ms step_avg:96.65ms +step:777/1695 train_time:75099ms step_avg:96.65ms +step:778/1695 train_time:75194ms step_avg:96.65ms +step:779/1695 train_time:75290ms step_avg:96.65ms +step:780/1695 train_time:75386ms step_avg:96.65ms +step:781/1695 train_time:75482ms step_avg:96.65ms +step:782/1695 train_time:75578ms step_avg:96.65ms +step:783/1695 train_time:75675ms 
step_avg:96.65ms +step:784/1695 train_time:75772ms step_avg:96.65ms +step:785/1695 train_time:75870ms step_avg:96.65ms +step:786/1695 train_time:75967ms step_avg:96.65ms +step:787/1695 train_time:76062ms step_avg:96.65ms +step:788/1695 train_time:76158ms step_avg:96.65ms +step:789/1695 train_time:76253ms step_avg:96.65ms +step:790/1695 train_time:76350ms step_avg:96.65ms +step:791/1695 train_time:76445ms step_avg:96.64ms +step:792/1695 train_time:76541ms step_avg:96.64ms +step:793/1695 train_time:76638ms step_avg:96.64ms +step:794/1695 train_time:76735ms step_avg:96.64ms +step:795/1695 train_time:76832ms step_avg:96.64ms +step:796/1695 train_time:76930ms step_avg:96.65ms +step:797/1695 train_time:77026ms step_avg:96.65ms +step:798/1695 train_time:77121ms step_avg:96.64ms +step:799/1695 train_time:77216ms step_avg:96.64ms +step:800/1695 train_time:77312ms step_avg:96.64ms +step:801/1695 train_time:77411ms step_avg:96.64ms +step:802/1695 train_time:77506ms step_avg:96.64ms +step:803/1695 train_time:77602ms step_avg:96.64ms +step:804/1695 train_time:77697ms step_avg:96.64ms +step:805/1695 train_time:77795ms step_avg:96.64ms +step:806/1695 train_time:77893ms step_avg:96.64ms +step:807/1695 train_time:77990ms step_avg:96.64ms +step:808/1695 train_time:78087ms step_avg:96.64ms +step:809/1695 train_time:78183ms step_avg:96.64ms +step:810/1695 train_time:78281ms step_avg:96.64ms +step:811/1695 train_time:78374ms step_avg:96.64ms +step:812/1695 train_time:78471ms step_avg:96.64ms +step:813/1695 train_time:78568ms step_avg:96.64ms +step:814/1695 train_time:78663ms step_avg:96.64ms +step:815/1695 train_time:78759ms step_avg:96.64ms +step:816/1695 train_time:78855ms step_avg:96.64ms +step:817/1695 train_time:78952ms step_avg:96.64ms +step:818/1695 train_time:79048ms step_avg:96.64ms +step:819/1695 train_time:79144ms step_avg:96.64ms +step:820/1695 train_time:79239ms step_avg:96.63ms +step:821/1695 train_time:79336ms step_avg:96.63ms +step:822/1695 train_time:79431ms step_avg:96.63ms +step:823/1695 train_time:79527ms step_avg:96.63ms +step:824/1695 train_time:79623ms step_avg:96.63ms +step:825/1695 train_time:79719ms step_avg:96.63ms +step:826/1695 train_time:79816ms step_avg:96.63ms +step:827/1695 train_time:79912ms step_avg:96.63ms +step:828/1695 train_time:80008ms step_avg:96.63ms +step:829/1695 train_time:80103ms step_avg:96.63ms +step:830/1695 train_time:80199ms step_avg:96.62ms +step:831/1695 train_time:80295ms step_avg:96.62ms +step:832/1695 train_time:80391ms step_avg:96.62ms +step:833/1695 train_time:80487ms step_avg:96.62ms +step:834/1695 train_time:80584ms step_avg:96.62ms +step:835/1695 train_time:80680ms step_avg:96.62ms +step:836/1695 train_time:80775ms step_avg:96.62ms +step:837/1695 train_time:80872ms step_avg:96.62ms +step:838/1695 train_time:80968ms step_avg:96.62ms +step:839/1695 train_time:81064ms step_avg:96.62ms +step:840/1695 train_time:81160ms step_avg:96.62ms +step:841/1695 train_time:81257ms step_avg:96.62ms +step:842/1695 train_time:81352ms step_avg:96.62ms +step:843/1695 train_time:81450ms step_avg:96.62ms +step:844/1695 train_time:81546ms step_avg:96.62ms +step:845/1695 train_time:81643ms step_avg:96.62ms +step:846/1695 train_time:81738ms step_avg:96.62ms +step:847/1695 train_time:81834ms step_avg:96.62ms +step:848/1695 train_time:81931ms step_avg:96.62ms +step:849/1695 train_time:82026ms step_avg:96.62ms +step:850/1695 train_time:82122ms step_avg:96.61ms +step:851/1695 train_time:82218ms step_avg:96.61ms +step:852/1695 train_time:82313ms step_avg:96.61ms +step:853/1695 
train_time:82409ms step_avg:96.61ms +step:854/1695 train_time:82506ms step_avg:96.61ms +step:855/1695 train_time:82602ms step_avg:96.61ms +step:856/1695 train_time:82698ms step_avg:96.61ms +step:857/1695 train_time:82795ms step_avg:96.61ms +step:858/1695 train_time:82891ms step_avg:96.61ms +step:859/1695 train_time:82987ms step_avg:96.61ms +step:860/1695 train_time:83083ms step_avg:96.61ms +step:861/1695 train_time:83180ms step_avg:96.61ms +step:862/1695 train_time:83275ms step_avg:96.61ms +step:863/1695 train_time:83625ms step_avg:96.90ms +step:864/1695 train_time:83802ms step_avg:96.99ms +step:865/1695 train_time:83896ms step_avg:96.99ms +step:866/1695 train_time:83991ms step_avg:96.99ms +step:867/1695 train_time:84086ms step_avg:96.99ms +step:868/1695 train_time:84180ms step_avg:96.98ms +step:869/1695 train_time:84275ms step_avg:96.98ms +step:870/1695 train_time:84370ms step_avg:96.98ms +step:871/1695 train_time:84465ms step_avg:96.97ms +step:872/1695 train_time:84559ms step_avg:96.97ms +step:873/1695 train_time:84660ms step_avg:96.98ms +step:874/1695 train_time:84758ms step_avg:96.98ms +step:875/1695 train_time:84857ms step_avg:96.98ms +step:875/1695 val_loss:3.5251 train_time:84952ms step_avg:97.09ms +step:876/1695 train_time:84978ms step_avg:97.01ms +step:877/1695 train_time:85058ms step_avg:96.99ms +step:878/1695 train_time:85158ms step_avg:96.99ms +step:879/1695 train_time:85256ms step_avg:96.99ms +step:880/1695 train_time:85352ms step_avg:96.99ms +step:881/1695 train_time:85447ms step_avg:96.99ms +step:882/1695 train_time:85541ms step_avg:96.99ms +step:883/1695 train_time:85636ms step_avg:96.98ms +step:884/1695 train_time:85731ms step_avg:96.98ms +step:885/1695 train_time:85825ms step_avg:96.98ms +step:886/1695 train_time:85920ms step_avg:96.98ms +step:887/1695 train_time:86020ms step_avg:96.98ms +step:888/1695 train_time:86118ms step_avg:96.98ms +step:889/1695 train_time:86217ms step_avg:96.98ms +step:890/1695 train_time:86314ms step_avg:96.98ms +step:891/1695 train_time:86411ms step_avg:96.98ms +step:892/1695 train_time:86505ms step_avg:96.98ms +step:893/1695 train_time:86600ms step_avg:96.98ms +step:894/1695 train_time:86695ms step_avg:96.97ms +step:895/1695 train_time:86790ms step_avg:96.97ms +step:896/1695 train_time:86886ms step_avg:96.97ms +step:897/1695 train_time:86982ms step_avg:96.97ms +step:898/1695 train_time:87080ms step_avg:96.97ms +step:899/1695 train_time:87177ms step_avg:96.97ms +step:900/1695 train_time:87276ms step_avg:96.97ms +step:901/1695 train_time:87374ms step_avg:96.97ms +step:902/1695 train_time:87471ms step_avg:96.97ms +step:903/1695 train_time:87567ms step_avg:96.97ms +step:904/1695 train_time:87661ms step_avg:96.97ms +step:905/1695 train_time:87756ms step_avg:96.97ms +step:906/1695 train_time:87852ms step_avg:96.97ms +step:907/1695 train_time:87948ms step_avg:96.97ms +step:908/1695 train_time:88044ms step_avg:96.96ms +step:909/1695 train_time:88140ms step_avg:96.96ms +step:910/1695 train_time:88237ms step_avg:96.96ms +step:911/1695 train_time:88336ms step_avg:96.97ms +step:912/1695 train_time:88433ms step_avg:96.97ms +step:913/1695 train_time:88528ms step_avg:96.96ms +step:914/1695 train_time:88624ms step_avg:96.96ms +step:915/1695 train_time:88718ms step_avg:96.96ms +step:916/1695 train_time:88816ms step_avg:96.96ms +step:917/1695 train_time:88914ms step_avg:96.96ms +step:918/1695 train_time:89010ms step_avg:96.96ms +step:919/1695 train_time:89107ms step_avg:96.96ms +step:920/1695 train_time:89203ms step_avg:96.96ms +step:921/1695 train_time:89299ms 
step_avg:96.96ms +step:922/1695 train_time:89396ms step_avg:96.96ms +step:923/1695 train_time:89493ms step_avg:96.96ms +step:924/1695 train_time:89590ms step_avg:96.96ms +step:925/1695 train_time:89686ms step_avg:96.96ms +step:926/1695 train_time:89782ms step_avg:96.96ms +step:927/1695 train_time:89878ms step_avg:96.96ms +step:928/1695 train_time:89974ms step_avg:96.95ms +step:929/1695 train_time:90071ms step_avg:96.95ms +step:930/1695 train_time:90167ms step_avg:96.95ms +step:931/1695 train_time:90262ms step_avg:96.95ms +step:932/1695 train_time:90358ms step_avg:96.95ms +step:933/1695 train_time:90455ms step_avg:96.95ms +step:934/1695 train_time:90551ms step_avg:96.95ms +step:935/1695 train_time:90648ms step_avg:96.95ms +step:936/1695 train_time:90743ms step_avg:96.95ms +step:937/1695 train_time:90839ms step_avg:96.95ms +step:938/1695 train_time:90935ms step_avg:96.95ms +step:939/1695 train_time:91030ms step_avg:96.94ms +step:940/1695 train_time:91126ms step_avg:96.94ms +step:941/1695 train_time:91221ms step_avg:96.94ms +step:942/1695 train_time:91317ms step_avg:96.94ms +step:943/1695 train_time:91413ms step_avg:96.94ms +step:944/1695 train_time:91510ms step_avg:96.94ms +step:945/1695 train_time:91607ms step_avg:96.94ms +step:946/1695 train_time:91703ms step_avg:96.94ms +step:947/1695 train_time:91798ms step_avg:96.94ms +step:948/1695 train_time:91894ms step_avg:96.93ms +step:949/1695 train_time:91989ms step_avg:96.93ms +step:950/1695 train_time:92085ms step_avg:96.93ms +step:951/1695 train_time:92181ms step_avg:96.93ms +step:952/1695 train_time:92277ms step_avg:96.93ms +step:953/1695 train_time:92373ms step_avg:96.93ms +step:954/1695 train_time:92469ms step_avg:96.93ms +step:955/1695 train_time:92566ms step_avg:96.93ms +step:956/1695 train_time:92662ms step_avg:96.93ms +step:957/1695 train_time:92758ms step_avg:96.93ms +step:958/1695 train_time:92854ms step_avg:96.92ms +step:959/1695 train_time:92950ms step_avg:96.92ms +step:960/1695 train_time:93046ms step_avg:96.92ms +step:961/1695 train_time:93142ms step_avg:96.92ms +step:962/1695 train_time:93238ms step_avg:96.92ms +step:963/1695 train_time:93334ms step_avg:96.92ms +step:964/1695 train_time:93431ms step_avg:96.92ms +step:965/1695 train_time:93526ms step_avg:96.92ms +step:966/1695 train_time:93621ms step_avg:96.92ms +step:967/1695 train_time:93718ms step_avg:96.92ms +step:968/1695 train_time:93815ms step_avg:96.92ms +step:969/1695 train_time:93911ms step_avg:96.92ms +step:970/1695 train_time:94008ms step_avg:96.92ms +step:971/1695 train_time:94104ms step_avg:96.91ms +step:972/1695 train_time:94200ms step_avg:96.91ms +step:973/1695 train_time:94295ms step_avg:96.91ms +step:974/1695 train_time:94392ms step_avg:96.91ms +step:975/1695 train_time:94489ms step_avg:96.91ms +step:976/1695 train_time:94585ms step_avg:96.91ms +step:977/1695 train_time:94681ms step_avg:96.91ms +step:978/1695 train_time:94778ms step_avg:96.91ms +step:979/1695 train_time:94874ms step_avg:96.91ms +step:980/1695 train_time:94970ms step_avg:96.91ms +step:981/1695 train_time:95066ms step_avg:96.91ms +step:982/1695 train_time:95161ms step_avg:96.91ms +step:983/1695 train_time:95257ms step_avg:96.90ms +step:984/1695 train_time:95353ms step_avg:96.90ms +step:985/1695 train_time:95450ms step_avg:96.90ms +step:986/1695 train_time:95547ms step_avg:96.90ms +step:987/1695 train_time:95644ms step_avg:96.90ms +step:988/1695 train_time:95739ms step_avg:96.90ms +step:989/1695 train_time:95835ms step_avg:96.90ms +step:990/1695 train_time:95932ms step_avg:96.90ms +step:991/1695 
train_time:96028ms step_avg:96.90ms +step:992/1695 train_time:96123ms step_avg:96.90ms +step:993/1695 train_time:96218ms step_avg:96.90ms +step:994/1695 train_time:96314ms step_avg:96.90ms +step:995/1695 train_time:96411ms step_avg:96.90ms +step:996/1695 train_time:96508ms step_avg:96.90ms +step:997/1695 train_time:96605ms step_avg:96.90ms +step:998/1695 train_time:96701ms step_avg:96.89ms +step:999/1695 train_time:96796ms step_avg:96.89ms +step:1000/1695 train_time:96893ms step_avg:96.89ms +step:1000/1695 val_loss:3.4830 train_time:96988ms step_avg:96.99ms +step:1001/1695 train_time:97014ms step_avg:96.92ms +step:1002/1695 train_time:97096ms step_avg:96.90ms +step:1003/1695 train_time:97195ms step_avg:96.90ms +step:1004/1695 train_time:97291ms step_avg:96.90ms +step:1005/1695 train_time:97387ms step_avg:96.90ms +step:1006/1695 train_time:97482ms step_avg:96.90ms +step:1007/1695 train_time:97577ms step_avg:96.90ms +step:1008/1695 train_time:97673ms step_avg:96.90ms +step:1009/1695 train_time:97768ms step_avg:96.90ms +step:1010/1695 train_time:97862ms step_avg:96.89ms +step:1011/1695 train_time:97959ms step_avg:96.89ms +step:1012/1695 train_time:98058ms step_avg:96.90ms +step:1013/1695 train_time:98157ms step_avg:96.90ms +step:1014/1695 train_time:98256ms step_avg:96.90ms +step:1015/1695 train_time:98354ms step_avg:96.90ms +step:1016/1695 train_time:98450ms step_avg:96.90ms +step:1017/1695 train_time:98545ms step_avg:96.90ms +step:1018/1695 train_time:98640ms step_avg:96.90ms +step:1019/1695 train_time:98736ms step_avg:96.89ms +step:1020/1695 train_time:98831ms step_avg:96.89ms +step:1021/1695 train_time:98927ms step_avg:96.89ms +step:1022/1695 train_time:99025ms step_avg:96.89ms +step:1023/1695 train_time:99121ms step_avg:96.89ms +step:1024/1695 train_time:99218ms step_avg:96.89ms +step:1025/1695 train_time:99316ms step_avg:96.89ms +step:1026/1695 train_time:99413ms step_avg:96.89ms +step:1027/1695 train_time:99510ms step_avg:96.89ms +step:1028/1695 train_time:99605ms step_avg:96.89ms +step:1029/1695 train_time:99700ms step_avg:96.89ms +step:1030/1695 train_time:99795ms step_avg:96.89ms +step:1031/1695 train_time:99891ms step_avg:96.89ms +step:1032/1695 train_time:99987ms step_avg:96.89ms +step:1033/1695 train_time:100085ms step_avg:96.89ms +step:1034/1695 train_time:100181ms step_avg:96.89ms +step:1035/1695 train_time:100277ms step_avg:96.89ms +step:1036/1695 train_time:100616ms step_avg:97.12ms +step:1037/1695 train_time:100781ms step_avg:97.19ms +step:1038/1695 train_time:100876ms step_avg:97.18ms +step:1039/1695 train_time:100971ms step_avg:97.18ms +step:1040/1695 train_time:101066ms step_avg:97.18ms +step:1041/1695 train_time:101161ms step_avg:97.18ms +step:1042/1695 train_time:101256ms step_avg:97.17ms +step:1043/1695 train_time:101350ms step_avg:97.17ms +step:1044/1695 train_time:101444ms step_avg:97.17ms +step:1045/1695 train_time:101539ms step_avg:97.17ms +step:1046/1695 train_time:101641ms step_avg:97.17ms +step:1047/1695 train_time:101740ms step_avg:97.17ms +step:1048/1695 train_time:101838ms step_avg:97.17ms +step:1049/1695 train_time:101934ms step_avg:97.17ms +step:1050/1695 train_time:102031ms step_avg:97.17ms +step:1051/1695 train_time:102126ms step_avg:97.17ms +step:1052/1695 train_time:102220ms step_avg:97.17ms +step:1053/1695 train_time:102315ms step_avg:97.17ms +step:1054/1695 train_time:102411ms step_avg:97.16ms +step:1055/1695 train_time:102506ms step_avg:97.16ms +step:1056/1695 train_time:102603ms step_avg:97.16ms +step:1057/1695 train_time:102700ms step_avg:97.16ms 
+step:1058/1695 train_time:102797ms step_avg:97.16ms +step:1059/1695 train_time:102894ms step_avg:97.16ms +step:1060/1695 train_time:102991ms step_avg:97.16ms +step:1061/1695 train_time:103087ms step_avg:97.16ms +step:1062/1695 train_time:103182ms step_avg:97.16ms +step:1063/1695 train_time:103277ms step_avg:97.16ms +step:1064/1695 train_time:103373ms step_avg:97.16ms +step:1065/1695 train_time:103469ms step_avg:97.15ms +step:1066/1695 train_time:103565ms step_avg:97.15ms +step:1067/1695 train_time:103661ms step_avg:97.15ms +step:1068/1695 train_time:103756ms step_avg:97.15ms +step:1069/1695 train_time:103854ms step_avg:97.15ms +step:1070/1695 train_time:103951ms step_avg:97.15ms +step:1071/1695 train_time:104048ms step_avg:97.15ms +step:1072/1695 train_time:104144ms step_avg:97.15ms +step:1073/1695 train_time:104239ms step_avg:97.15ms +step:1074/1695 train_time:104335ms step_avg:97.15ms +step:1075/1695 train_time:104431ms step_avg:97.14ms +step:1076/1695 train_time:104527ms step_avg:97.14ms +step:1077/1695 train_time:104623ms step_avg:97.14ms +step:1078/1695 train_time:104719ms step_avg:97.14ms +step:1079/1695 train_time:104816ms step_avg:97.14ms +step:1080/1695 train_time:104912ms step_avg:97.14ms +step:1081/1695 train_time:105009ms step_avg:97.14ms +step:1082/1695 train_time:105105ms step_avg:97.14ms +step:1083/1695 train_time:105200ms step_avg:97.14ms +step:1084/1695 train_time:105296ms step_avg:97.14ms +step:1085/1695 train_time:105391ms step_avg:97.13ms +step:1086/1695 train_time:105487ms step_avg:97.13ms +step:1087/1695 train_time:105583ms step_avg:97.13ms +step:1088/1695 train_time:105678ms step_avg:97.13ms +step:1089/1695 train_time:105775ms step_avg:97.13ms +step:1090/1695 train_time:105871ms step_avg:97.13ms +step:1091/1695 train_time:105967ms step_avg:97.13ms +step:1092/1695 train_time:106063ms step_avg:97.13ms +step:1093/1695 train_time:106158ms step_avg:97.13ms +step:1094/1695 train_time:106254ms step_avg:97.12ms +step:1095/1695 train_time:106350ms step_avg:97.12ms +step:1096/1695 train_time:106446ms step_avg:97.12ms +step:1097/1695 train_time:106541ms step_avg:97.12ms +step:1098/1695 train_time:106636ms step_avg:97.12ms +step:1099/1695 train_time:106732ms step_avg:97.12ms +step:1100/1695 train_time:106829ms step_avg:97.12ms +step:1101/1695 train_time:106925ms step_avg:97.12ms +step:1102/1695 train_time:107021ms step_avg:97.12ms +step:1103/1695 train_time:107117ms step_avg:97.11ms +step:1104/1695 train_time:107213ms step_avg:97.11ms +step:1105/1695 train_time:107309ms step_avg:97.11ms +step:1106/1695 train_time:107405ms step_avg:97.11ms +step:1107/1695 train_time:107501ms step_avg:97.11ms +step:1108/1695 train_time:107597ms step_avg:97.11ms +step:1109/1695 train_time:107693ms step_avg:97.11ms +step:1110/1695 train_time:107790ms step_avg:97.11ms +step:1111/1695 train_time:107886ms step_avg:97.11ms +step:1112/1695 train_time:107982ms step_avg:97.11ms +step:1113/1695 train_time:108078ms step_avg:97.11ms +step:1114/1695 train_time:108174ms step_avg:97.10ms +step:1115/1695 train_time:108272ms step_avg:97.10ms +step:1116/1695 train_time:108369ms step_avg:97.10ms +step:1117/1695 train_time:108463ms step_avg:97.10ms +step:1118/1695 train_time:108558ms step_avg:97.10ms +step:1119/1695 train_time:108653ms step_avg:97.10ms +step:1120/1695 train_time:108749ms step_avg:97.10ms +step:1121/1695 train_time:108846ms step_avg:97.10ms +step:1122/1695 train_time:108942ms step_avg:97.10ms +step:1123/1695 train_time:109037ms step_avg:97.09ms +step:1124/1695 train_time:109133ms step_avg:97.09ms 
+step:1125/1695 train_time:109229ms step_avg:97.09ms +step:1125/1695 val_loss:3.4364 train_time:109322ms step_avg:97.18ms +step:1126/1695 train_time:109349ms step_avg:97.11ms +step:1127/1695 train_time:109426ms step_avg:97.10ms +step:1128/1695 train_time:109523ms step_avg:97.09ms +step:1129/1695 train_time:109619ms step_avg:97.09ms +step:1130/1695 train_time:109715ms step_avg:97.09ms +step:1131/1695 train_time:109810ms step_avg:97.09ms +step:1132/1695 train_time:109905ms step_avg:97.09ms +step:1133/1695 train_time:110001ms step_avg:97.09ms +step:1134/1695 train_time:110098ms step_avg:97.09ms +step:1135/1695 train_time:110195ms step_avg:97.09ms +step:1136/1695 train_time:110294ms step_avg:97.09ms +step:1137/1695 train_time:110397ms step_avg:97.09ms +step:1138/1695 train_time:110496ms step_avg:97.10ms +step:1139/1695 train_time:110596ms step_avg:97.10ms +step:1140/1695 train_time:110695ms step_avg:97.10ms +step:1141/1695 train_time:110793ms step_avg:97.10ms +step:1142/1695 train_time:110891ms step_avg:97.10ms +step:1143/1695 train_time:110988ms step_avg:97.10ms +step:1144/1695 train_time:111085ms step_avg:97.10ms +step:1145/1695 train_time:111182ms step_avg:97.10ms +step:1146/1695 train_time:111280ms step_avg:97.10ms +step:1147/1695 train_time:111379ms step_avg:97.10ms +step:1148/1695 train_time:111478ms step_avg:97.11ms +step:1149/1695 train_time:111576ms step_avg:97.11ms +step:1150/1695 train_time:111674ms step_avg:97.11ms +step:1151/1695 train_time:111773ms step_avg:97.11ms +step:1152/1695 train_time:111871ms step_avg:97.11ms +step:1153/1695 train_time:111969ms step_avg:97.11ms +step:1154/1695 train_time:112065ms step_avg:97.11ms +step:1155/1695 train_time:112162ms step_avg:97.11ms +step:1156/1695 train_time:112260ms step_avg:97.11ms +step:1157/1695 train_time:112358ms step_avg:97.11ms +step:1158/1695 train_time:112458ms step_avg:97.11ms +step:1159/1695 train_time:112557ms step_avg:97.12ms +step:1160/1695 train_time:112656ms step_avg:97.12ms +step:1161/1695 train_time:112755ms step_avg:97.12ms +step:1162/1695 train_time:112852ms step_avg:97.12ms +step:1163/1695 train_time:112951ms step_avg:97.12ms +step:1164/1695 train_time:113048ms step_avg:97.12ms +step:1165/1695 train_time:113144ms step_avg:97.12ms +step:1166/1695 train_time:113242ms step_avg:97.12ms +step:1167/1695 train_time:113340ms step_avg:97.12ms +step:1168/1695 train_time:113437ms step_avg:97.12ms +step:1169/1695 train_time:113536ms step_avg:97.12ms +step:1170/1695 train_time:113634ms step_avg:97.12ms +step:1171/1695 train_time:113733ms step_avg:97.12ms +step:1172/1695 train_time:113832ms step_avg:97.13ms +step:1173/1695 train_time:113931ms step_avg:97.13ms +step:1174/1695 train_time:114029ms step_avg:97.13ms +step:1175/1695 train_time:114127ms step_avg:97.13ms +step:1176/1695 train_time:114224ms step_avg:97.13ms +step:1177/1695 train_time:114322ms step_avg:97.13ms +step:1178/1695 train_time:114419ms step_avg:97.13ms +step:1179/1695 train_time:114517ms step_avg:97.13ms +step:1180/1695 train_time:114615ms step_avg:97.13ms +step:1181/1695 train_time:114713ms step_avg:97.13ms +step:1182/1695 train_time:114810ms step_avg:97.13ms +step:1183/1695 train_time:114908ms step_avg:97.13ms +step:1184/1695 train_time:115005ms step_avg:97.13ms +step:1185/1695 train_time:115103ms step_avg:97.13ms +step:1186/1695 train_time:115200ms step_avg:97.13ms +step:1187/1695 train_time:115298ms step_avg:97.13ms +step:1188/1695 train_time:115397ms step_avg:97.14ms +step:1189/1695 train_time:115495ms step_avg:97.14ms +step:1190/1695 train_time:115593ms 
step_avg:97.14ms +step:1191/1695 train_time:115691ms step_avg:97.14ms +step:1192/1695 train_time:115789ms step_avg:97.14ms +step:1193/1695 train_time:115887ms step_avg:97.14ms +step:1194/1695 train_time:115984ms step_avg:97.14ms +step:1195/1695 train_time:116082ms step_avg:97.14ms +step:1196/1695 train_time:116180ms step_avg:97.14ms +step:1197/1695 train_time:116279ms step_avg:97.14ms +step:1198/1695 train_time:116377ms step_avg:97.14ms +step:1199/1695 train_time:116476ms step_avg:97.14ms +step:1200/1695 train_time:116574ms step_avg:97.14ms +step:1201/1695 train_time:116672ms step_avg:97.15ms +step:1202/1695 train_time:116770ms step_avg:97.15ms +step:1203/1695 train_time:116868ms step_avg:97.15ms +step:1204/1695 train_time:116965ms step_avg:97.15ms +step:1205/1695 train_time:117062ms step_avg:97.15ms +step:1206/1695 train_time:117160ms step_avg:97.15ms +step:1207/1695 train_time:117259ms step_avg:97.15ms +step:1208/1695 train_time:117635ms step_avg:97.38ms +step:1209/1695 train_time:117773ms step_avg:97.41ms +step:1210/1695 train_time:117868ms step_avg:97.41ms +step:1211/1695 train_time:117964ms step_avg:97.41ms +step:1212/1695 train_time:118061ms step_avg:97.41ms +step:1213/1695 train_time:118157ms step_avg:97.41ms +step:1214/1695 train_time:118253ms step_avg:97.41ms +step:1215/1695 train_time:118350ms step_avg:97.41ms +step:1216/1695 train_time:118446ms step_avg:97.41ms +step:1217/1695 train_time:118542ms step_avg:97.41ms +step:1218/1695 train_time:118646ms step_avg:97.41ms +step:1219/1695 train_time:118750ms step_avg:97.42ms +step:1220/1695 train_time:118849ms step_avg:97.42ms +step:1221/1695 train_time:118947ms step_avg:97.42ms +step:1222/1695 train_time:119044ms step_avg:97.42ms +step:1223/1695 train_time:119140ms step_avg:97.42ms +step:1224/1695 train_time:119238ms step_avg:97.42ms +step:1225/1695 train_time:119335ms step_avg:97.42ms +step:1226/1695 train_time:119432ms step_avg:97.42ms +step:1227/1695 train_time:119529ms step_avg:97.42ms +step:1228/1695 train_time:119626ms step_avg:97.42ms +step:1229/1695 train_time:119726ms step_avg:97.42ms +step:1230/1695 train_time:119824ms step_avg:97.42ms +step:1231/1695 train_time:119923ms step_avg:97.42ms +step:1232/1695 train_time:120021ms step_avg:97.42ms +step:1233/1695 train_time:120119ms step_avg:97.42ms +step:1234/1695 train_time:120216ms step_avg:97.42ms +step:1235/1695 train_time:120313ms step_avg:97.42ms +step:1236/1695 train_time:120410ms step_avg:97.42ms +step:1237/1695 train_time:120506ms step_avg:97.42ms +step:1238/1695 train_time:120604ms step_avg:97.42ms +step:1239/1695 train_time:120703ms step_avg:97.42ms +step:1240/1695 train_time:120801ms step_avg:97.42ms +step:1241/1695 train_time:120900ms step_avg:97.42ms +step:1242/1695 train_time:120998ms step_avg:97.42ms +step:1243/1695 train_time:121096ms step_avg:97.42ms +step:1244/1695 train_time:121194ms step_avg:97.42ms +step:1245/1695 train_time:121292ms step_avg:97.42ms +step:1246/1695 train_time:121388ms step_avg:97.42ms +step:1247/1695 train_time:121485ms step_avg:97.42ms +step:1248/1695 train_time:121583ms step_avg:97.42ms +step:1249/1695 train_time:121682ms step_avg:97.42ms +step:1250/1695 train_time:121780ms step_avg:97.42ms +step:1250/1695 val_loss:3.3889 train_time:121876ms step_avg:97.50ms +step:1251/1695 train_time:121915ms step_avg:97.45ms +step:1252/1695 train_time:121984ms step_avg:97.43ms +step:1253/1695 train_time:122082ms step_avg:97.43ms +step:1254/1695 train_time:122178ms step_avg:97.43ms +step:1255/1695 train_time:122274ms step_avg:97.43ms +step:1256/1695 
train_time:122371ms step_avg:97.43ms +step:1257/1695 train_time:122467ms step_avg:97.43ms +step:1258/1695 train_time:122564ms step_avg:97.43ms +step:1259/1695 train_time:122660ms step_avg:97.43ms +step:1260/1695 train_time:122759ms step_avg:97.43ms +step:1261/1695 train_time:122864ms step_avg:97.43ms +step:1262/1695 train_time:122963ms step_avg:97.43ms +step:1263/1695 train_time:123061ms step_avg:97.44ms +step:1264/1695 train_time:123158ms step_avg:97.43ms +step:1265/1695 train_time:123255ms step_avg:97.43ms +step:1266/1695 train_time:123352ms step_avg:97.43ms +step:1267/1695 train_time:123449ms step_avg:97.43ms +step:1268/1695 train_time:123546ms step_avg:97.43ms +step:1269/1695 train_time:123642ms step_avg:97.43ms +step:1270/1695 train_time:123739ms step_avg:97.43ms +step:1271/1695 train_time:123838ms step_avg:97.43ms +step:1272/1695 train_time:123937ms step_avg:97.43ms +step:1273/1695 train_time:124036ms step_avg:97.44ms +step:1274/1695 train_time:124135ms step_avg:97.44ms +step:1275/1695 train_time:124233ms step_avg:97.44ms +step:1276/1695 train_time:124329ms step_avg:97.44ms +step:1277/1695 train_time:124426ms step_avg:97.44ms +step:1278/1695 train_time:124524ms step_avg:97.44ms +step:1279/1695 train_time:124619ms step_avg:97.44ms +step:1280/1695 train_time:124717ms step_avg:97.43ms +step:1281/1695 train_time:124815ms step_avg:97.44ms +step:1282/1695 train_time:124914ms step_avg:97.44ms +step:1283/1695 train_time:125012ms step_avg:97.44ms +step:1284/1695 train_time:125111ms step_avg:97.44ms +step:1285/1695 train_time:125208ms step_avg:97.44ms +step:1286/1695 train_time:125306ms step_avg:97.44ms +step:1287/1695 train_time:125403ms step_avg:97.44ms +step:1288/1695 train_time:125499ms step_avg:97.44ms +step:1289/1695 train_time:125597ms step_avg:97.44ms +step:1290/1695 train_time:125693ms step_avg:97.44ms +step:1291/1695 train_time:125793ms step_avg:97.44ms +step:1292/1695 train_time:125892ms step_avg:97.44ms +step:1293/1695 train_time:125993ms step_avg:97.44ms +step:1294/1695 train_time:126092ms step_avg:97.44ms +step:1295/1695 train_time:126191ms step_avg:97.44ms +step:1296/1695 train_time:126289ms step_avg:97.45ms +step:1297/1695 train_time:126387ms step_avg:97.45ms +step:1298/1695 train_time:126485ms step_avg:97.45ms +step:1299/1695 train_time:126581ms step_avg:97.45ms +step:1300/1695 train_time:126679ms step_avg:97.45ms +step:1301/1695 train_time:126776ms step_avg:97.44ms +step:1302/1695 train_time:126874ms step_avg:97.45ms +step:1303/1695 train_time:126975ms step_avg:97.45ms +step:1304/1695 train_time:127073ms step_avg:97.45ms +step:1305/1695 train_time:127173ms step_avg:97.45ms +step:1306/1695 train_time:127270ms step_avg:97.45ms +step:1307/1695 train_time:127369ms step_avg:97.45ms +step:1308/1695 train_time:127467ms step_avg:97.45ms +step:1309/1695 train_time:127564ms step_avg:97.45ms +step:1310/1695 train_time:127661ms step_avg:97.45ms +step:1311/1695 train_time:127759ms step_avg:97.45ms +step:1312/1695 train_time:127855ms step_avg:97.45ms +step:1313/1695 train_time:127953ms step_avg:97.45ms +step:1314/1695 train_time:128051ms step_avg:97.45ms +step:1315/1695 train_time:128149ms step_avg:97.45ms +step:1316/1695 train_time:128246ms step_avg:97.45ms +step:1317/1695 train_time:128343ms step_avg:97.45ms +step:1318/1695 train_time:128440ms step_avg:97.45ms +step:1319/1695 train_time:128538ms step_avg:97.45ms +step:1320/1695 train_time:128636ms step_avg:97.45ms +step:1321/1695 train_time:128734ms step_avg:97.45ms +step:1322/1695 train_time:128833ms step_avg:97.45ms +step:1323/1695 
train_time:128932ms step_avg:97.45ms +step:1324/1695 train_time:129029ms step_avg:97.45ms +step:1325/1695 train_time:129127ms step_avg:97.45ms +step:1326/1695 train_time:129225ms step_avg:97.45ms +step:1327/1695 train_time:129322ms step_avg:97.45ms +step:1328/1695 train_time:129419ms step_avg:97.45ms +step:1329/1695 train_time:129517ms step_avg:97.45ms +step:1330/1695 train_time:129615ms step_avg:97.45ms +step:1331/1695 train_time:129713ms step_avg:97.46ms +step:1332/1695 train_time:129811ms step_avg:97.46ms +step:1333/1695 train_time:129910ms step_avg:97.46ms +step:1334/1695 train_time:130007ms step_avg:97.46ms +step:1335/1695 train_time:130105ms step_avg:97.46ms +step:1336/1695 train_time:130204ms step_avg:97.46ms +step:1337/1695 train_time:130300ms step_avg:97.46ms +step:1338/1695 train_time:130398ms step_avg:97.46ms +step:1339/1695 train_time:130495ms step_avg:97.46ms +step:1340/1695 train_time:130593ms step_avg:97.46ms +step:1341/1695 train_time:130692ms step_avg:97.46ms +step:1342/1695 train_time:130790ms step_avg:97.46ms +step:1343/1695 train_time:130888ms step_avg:97.46ms +step:1344/1695 train_time:130986ms step_avg:97.46ms +step:1345/1695 train_time:131084ms step_avg:97.46ms +step:1346/1695 train_time:131182ms step_avg:97.46ms +step:1347/1695 train_time:131280ms step_avg:97.46ms +step:1348/1695 train_time:131377ms step_avg:97.46ms +step:1349/1695 train_time:131475ms step_avg:97.46ms +step:1350/1695 train_time:131572ms step_avg:97.46ms +step:1351/1695 train_time:131672ms step_avg:97.46ms +step:1352/1695 train_time:131770ms step_avg:97.46ms +step:1353/1695 train_time:131869ms step_avg:97.46ms +step:1354/1695 train_time:131966ms step_avg:97.46ms +step:1355/1695 train_time:132064ms step_avg:97.46ms +step:1356/1695 train_time:132161ms step_avg:97.46ms +step:1357/1695 train_time:132258ms step_avg:97.46ms +step:1358/1695 train_time:132356ms step_avg:97.46ms +step:1359/1695 train_time:132454ms step_avg:97.46ms +step:1360/1695 train_time:132552ms step_avg:97.46ms +step:1361/1695 train_time:132650ms step_avg:97.46ms +step:1362/1695 train_time:132748ms step_avg:97.47ms +step:1363/1695 train_time:132845ms step_avg:97.47ms +step:1364/1695 train_time:132942ms step_avg:97.46ms +step:1365/1695 train_time:133038ms step_avg:97.46ms +step:1366/1695 train_time:133136ms step_avg:97.46ms +step:1367/1695 train_time:133235ms step_avg:97.46ms +step:1368/1695 train_time:133333ms step_avg:97.47ms +step:1369/1695 train_time:133430ms step_avg:97.47ms +step:1370/1695 train_time:133529ms step_avg:97.47ms +step:1371/1695 train_time:133630ms step_avg:97.47ms +step:1372/1695 train_time:133725ms step_avg:97.47ms +step:1373/1695 train_time:133823ms step_avg:97.47ms +step:1374/1695 train_time:133921ms step_avg:97.47ms +step:1375/1695 train_time:134023ms step_avg:97.47ms +step:1375/1695 val_loss:3.3505 train_time:134113ms step_avg:97.54ms +step:1376/1695 train_time:134162ms step_avg:97.50ms +step:1377/1695 train_time:134219ms step_avg:97.47ms +step:1378/1695 train_time:134319ms step_avg:97.47ms +step:1379/1695 train_time:134416ms step_avg:97.47ms +step:1380/1695 train_time:134512ms step_avg:97.47ms +step:1381/1695 train_time:134884ms step_avg:97.67ms +step:1382/1695 train_time:135043ms step_avg:97.72ms +step:1383/1695 train_time:135139ms step_avg:97.71ms +step:1384/1695 train_time:135235ms step_avg:97.71ms +step:1385/1695 train_time:135332ms step_avg:97.71ms +step:1386/1695 train_time:135428ms step_avg:97.71ms +step:1387/1695 train_time:135526ms step_avg:97.71ms +step:1388/1695 train_time:135621ms step_avg:97.71ms 
+step:1389/1695 train_time:135716ms step_avg:97.71ms +step:1390/1695 train_time:135813ms step_avg:97.71ms +step:1391/1695 train_time:135916ms step_avg:97.71ms +step:1392/1695 train_time:136020ms step_avg:97.72ms +step:1393/1695 train_time:136121ms step_avg:97.72ms +step:1394/1695 train_time:136218ms step_avg:97.72ms +step:1395/1695 train_time:136315ms step_avg:97.72ms +step:1396/1695 train_time:136412ms step_avg:97.72ms +step:1397/1695 train_time:136509ms step_avg:97.72ms +step:1398/1695 train_time:136607ms step_avg:97.72ms +step:1399/1695 train_time:136704ms step_avg:97.72ms +step:1400/1695 train_time:136801ms step_avg:97.71ms +step:1401/1695 train_time:136899ms step_avg:97.72ms +step:1402/1695 train_time:136997ms step_avg:97.72ms +step:1403/1695 train_time:137096ms step_avg:97.72ms +step:1404/1695 train_time:137194ms step_avg:97.72ms +step:1405/1695 train_time:137293ms step_avg:97.72ms +step:1406/1695 train_time:137390ms step_avg:97.72ms +step:1407/1695 train_time:137487ms step_avg:97.72ms +step:1408/1695 train_time:137584ms step_avg:97.72ms +step:1409/1695 train_time:137681ms step_avg:97.72ms +step:1410/1695 train_time:137778ms step_avg:97.72ms +step:1411/1695 train_time:137876ms step_avg:97.72ms +step:1412/1695 train_time:137975ms step_avg:97.72ms +step:1413/1695 train_time:138075ms step_avg:97.72ms +step:1414/1695 train_time:138173ms step_avg:97.72ms +step:1415/1695 train_time:138272ms step_avg:97.72ms +step:1416/1695 train_time:138368ms step_avg:97.72ms +step:1417/1695 train_time:138466ms step_avg:97.72ms +step:1418/1695 train_time:138563ms step_avg:97.72ms +step:1419/1695 train_time:138661ms step_avg:97.72ms +step:1420/1695 train_time:138757ms step_avg:97.72ms +step:1421/1695 train_time:138854ms step_avg:97.72ms +step:1422/1695 train_time:138953ms step_avg:97.72ms +step:1423/1695 train_time:139053ms step_avg:97.72ms +step:1424/1695 train_time:139153ms step_avg:97.72ms +step:1425/1695 train_time:139252ms step_avg:97.72ms +step:1426/1695 train_time:139350ms step_avg:97.72ms +step:1427/1695 train_time:139448ms step_avg:97.72ms +step:1428/1695 train_time:139546ms step_avg:97.72ms +step:1429/1695 train_time:139644ms step_avg:97.72ms +step:1430/1695 train_time:139742ms step_avg:97.72ms +step:1431/1695 train_time:139839ms step_avg:97.72ms +step:1432/1695 train_time:139938ms step_avg:97.72ms +step:1433/1695 train_time:140036ms step_avg:97.72ms +step:1434/1695 train_time:140134ms step_avg:97.72ms +step:1435/1695 train_time:140232ms step_avg:97.72ms +step:1436/1695 train_time:140330ms step_avg:97.72ms +step:1437/1695 train_time:140428ms step_avg:97.72ms +step:1438/1695 train_time:140526ms step_avg:97.72ms +step:1439/1695 train_time:140623ms step_avg:97.72ms +step:1440/1695 train_time:140720ms step_avg:97.72ms +step:1441/1695 train_time:140818ms step_avg:97.72ms +step:1442/1695 train_time:140917ms step_avg:97.72ms +step:1443/1695 train_time:141014ms step_avg:97.72ms +step:1444/1695 train_time:141113ms step_avg:97.72ms +step:1445/1695 train_time:141213ms step_avg:97.73ms +step:1446/1695 train_time:141312ms step_avg:97.73ms +step:1447/1695 train_time:141411ms step_avg:97.73ms +step:1448/1695 train_time:141508ms step_avg:97.73ms +step:1449/1695 train_time:141607ms step_avg:97.73ms +step:1450/1695 train_time:141706ms step_avg:97.73ms +step:1451/1695 train_time:141804ms step_avg:97.73ms +step:1452/1695 train_time:141902ms step_avg:97.73ms +step:1453/1695 train_time:141999ms step_avg:97.73ms +step:1454/1695 train_time:142097ms step_avg:97.73ms +step:1455/1695 train_time:142194ms step_avg:97.73ms 
+step:1456/1695 train_time:142292ms step_avg:97.73ms +step:1457/1695 train_time:142390ms step_avg:97.73ms +step:1458/1695 train_time:142488ms step_avg:97.73ms +step:1459/1695 train_time:142588ms step_avg:97.73ms +step:1460/1695 train_time:142685ms step_avg:97.73ms +step:1461/1695 train_time:142784ms step_avg:97.73ms +step:1462/1695 train_time:142881ms step_avg:97.73ms +step:1463/1695 train_time:142978ms step_avg:97.73ms +step:1464/1695 train_time:143076ms step_avg:97.73ms +step:1465/1695 train_time:143173ms step_avg:97.73ms +step:1466/1695 train_time:143272ms step_avg:97.73ms +step:1467/1695 train_time:143369ms step_avg:97.73ms +step:1468/1695 train_time:143467ms step_avg:97.73ms +step:1469/1695 train_time:143564ms step_avg:97.73ms +step:1470/1695 train_time:143661ms step_avg:97.73ms +step:1471/1695 train_time:143759ms step_avg:97.73ms +step:1472/1695 train_time:143856ms step_avg:97.73ms +step:1473/1695 train_time:143956ms step_avg:97.73ms +step:1474/1695 train_time:144053ms step_avg:97.73ms +step:1475/1695 train_time:144149ms step_avg:97.73ms +step:1476/1695 train_time:144248ms step_avg:97.73ms +step:1477/1695 train_time:144346ms step_avg:97.73ms +step:1478/1695 train_time:144445ms step_avg:97.73ms +step:1479/1695 train_time:144541ms step_avg:97.73ms +step:1480/1695 train_time:144639ms step_avg:97.73ms +step:1481/1695 train_time:144737ms step_avg:97.73ms +step:1482/1695 train_time:144835ms step_avg:97.73ms +step:1483/1695 train_time:144933ms step_avg:97.73ms +step:1484/1695 train_time:145031ms step_avg:97.73ms +step:1485/1695 train_time:145128ms step_avg:97.73ms +step:1486/1695 train_time:145225ms step_avg:97.73ms +step:1487/1695 train_time:145323ms step_avg:97.73ms +step:1488/1695 train_time:145421ms step_avg:97.73ms +step:1489/1695 train_time:145519ms step_avg:97.73ms +step:1490/1695 train_time:145616ms step_avg:97.73ms +step:1491/1695 train_time:145714ms step_avg:97.73ms +step:1492/1695 train_time:145813ms step_avg:97.73ms +step:1493/1695 train_time:145910ms step_avg:97.73ms +step:1494/1695 train_time:146008ms step_avg:97.73ms +step:1495/1695 train_time:146105ms step_avg:97.73ms +step:1496/1695 train_time:146203ms step_avg:97.73ms +step:1497/1695 train_time:146299ms step_avg:97.73ms +step:1498/1695 train_time:146396ms step_avg:97.73ms +step:1499/1695 train_time:146495ms step_avg:97.73ms +step:1500/1695 train_time:146593ms step_avg:97.73ms +step:1500/1695 val_loss:3.3176 train_time:146690ms step_avg:97.79ms +step:1501/1695 train_time:146740ms step_avg:97.76ms +step:1502/1695 train_time:146800ms step_avg:97.74ms +step:1503/1695 train_time:146898ms step_avg:97.74ms +step:1504/1695 train_time:146995ms step_avg:97.74ms +step:1505/1695 train_time:147094ms step_avg:97.74ms +step:1506/1695 train_time:147190ms step_avg:97.74ms +step:1507/1695 train_time:147287ms step_avg:97.74ms +step:1508/1695 train_time:147383ms step_avg:97.73ms +step:1509/1695 train_time:147481ms step_avg:97.73ms +step:1510/1695 train_time:147577ms step_avg:97.73ms +step:1511/1695 train_time:147677ms step_avg:97.73ms +step:1512/1695 train_time:147778ms step_avg:97.74ms +step:1513/1695 train_time:147878ms step_avg:97.74ms +step:1514/1695 train_time:147975ms step_avg:97.74ms +step:1515/1695 train_time:148073ms step_avg:97.74ms +step:1516/1695 train_time:148170ms step_avg:97.74ms +step:1517/1695 train_time:148269ms step_avg:97.74ms +step:1518/1695 train_time:148367ms step_avg:97.74ms +step:1519/1695 train_time:148463ms step_avg:97.74ms +step:1520/1695 train_time:148560ms step_avg:97.74ms +step:1521/1695 train_time:148658ms 
step_avg:97.74ms +step:1522/1695 train_time:148758ms step_avg:97.74ms +step:1523/1695 train_time:148857ms step_avg:97.74ms +step:1524/1695 train_time:148956ms step_avg:97.74ms +step:1525/1695 train_time:149055ms step_avg:97.74ms +step:1526/1695 train_time:149152ms step_avg:97.74ms +step:1527/1695 train_time:149250ms step_avg:97.74ms +step:1528/1695 train_time:149347ms step_avg:97.74ms +step:1529/1695 train_time:149444ms step_avg:97.74ms +step:1530/1695 train_time:149541ms step_avg:97.74ms +step:1531/1695 train_time:149638ms step_avg:97.74ms +step:1532/1695 train_time:149735ms step_avg:97.74ms +step:1533/1695 train_time:149835ms step_avg:97.74ms +step:1534/1695 train_time:149933ms step_avg:97.74ms +step:1535/1695 train_time:150032ms step_avg:97.74ms +step:1536/1695 train_time:150129ms step_avg:97.74ms +step:1537/1695 train_time:150227ms step_avg:97.74ms +step:1538/1695 train_time:150324ms step_avg:97.74ms +step:1539/1695 train_time:150421ms step_avg:97.74ms +step:1540/1695 train_time:150519ms step_avg:97.74ms +step:1541/1695 train_time:150616ms step_avg:97.74ms +step:1542/1695 train_time:150715ms step_avg:97.74ms +step:1543/1695 train_time:150813ms step_avg:97.74ms +step:1544/1695 train_time:150912ms step_avg:97.74ms +step:1545/1695 train_time:151010ms step_avg:97.74ms +step:1546/1695 train_time:151108ms step_avg:97.74ms +step:1547/1695 train_time:151206ms step_avg:97.74ms +step:1548/1695 train_time:151304ms step_avg:97.74ms +step:1549/1695 train_time:151401ms step_avg:97.74ms +step:1550/1695 train_time:151498ms step_avg:97.74ms +step:1551/1695 train_time:151596ms step_avg:97.74ms +step:1552/1695 train_time:151941ms step_avg:97.90ms +step:1553/1695 train_time:152117ms step_avg:97.95ms +step:1554/1695 train_time:152212ms step_avg:97.95ms +step:1555/1695 train_time:152308ms step_avg:97.95ms +step:1556/1695 train_time:152403ms step_avg:97.95ms +step:1557/1695 train_time:152499ms step_avg:97.94ms +step:1558/1695 train_time:152596ms step_avg:97.94ms +step:1559/1695 train_time:152693ms step_avg:97.94ms +step:1560/1695 train_time:152790ms step_avg:97.94ms +step:1561/1695 train_time:152886ms step_avg:97.94ms +step:1562/1695 train_time:152987ms step_avg:97.94ms +step:1563/1695 train_time:153089ms step_avg:97.95ms +step:1564/1695 train_time:153189ms step_avg:97.95ms +step:1565/1695 train_time:153285ms step_avg:97.95ms +step:1566/1695 train_time:153382ms step_avg:97.95ms +step:1567/1695 train_time:153479ms step_avg:97.94ms +step:1568/1695 train_time:153576ms step_avg:97.94ms +step:1569/1695 train_time:153673ms step_avg:97.94ms +step:1570/1695 train_time:153769ms step_avg:97.94ms +step:1571/1695 train_time:153866ms step_avg:97.94ms +step:1572/1695 train_time:153964ms step_avg:97.94ms +step:1573/1695 train_time:154064ms step_avg:97.94ms +step:1574/1695 train_time:154164ms step_avg:97.94ms +step:1575/1695 train_time:154262ms step_avg:97.94ms +step:1576/1695 train_time:154360ms step_avg:97.94ms +step:1577/1695 train_time:154457ms step_avg:97.94ms +step:1578/1695 train_time:154554ms step_avg:97.94ms +step:1579/1695 train_time:154651ms step_avg:97.94ms +step:1580/1695 train_time:154748ms step_avg:97.94ms +step:1581/1695 train_time:154845ms step_avg:97.94ms +step:1582/1695 train_time:154943ms step_avg:97.94ms +step:1583/1695 train_time:155042ms step_avg:97.94ms +step:1584/1695 train_time:155142ms step_avg:97.94ms +step:1585/1695 train_time:155239ms step_avg:97.94ms +step:1586/1695 train_time:155337ms step_avg:97.94ms +step:1587/1695 train_time:155435ms step_avg:97.94ms +step:1588/1695 train_time:155532ms 
step_avg:97.94ms +step:1589/1695 train_time:155630ms step_avg:97.94ms +step:1590/1695 train_time:155726ms step_avg:97.94ms +step:1591/1695 train_time:155823ms step_avg:97.94ms +step:1592/1695 train_time:155921ms step_avg:97.94ms +step:1593/1695 train_time:156019ms step_avg:97.94ms +step:1594/1695 train_time:156119ms step_avg:97.94ms +step:1595/1695 train_time:156218ms step_avg:97.94ms +step:1596/1695 train_time:156317ms step_avg:97.94ms +step:1597/1695 train_time:156416ms step_avg:97.94ms +step:1598/1695 train_time:156513ms step_avg:97.94ms +step:1599/1695 train_time:156610ms step_avg:97.94ms +step:1600/1695 train_time:156707ms step_avg:97.94ms +step:1601/1695 train_time:156804ms step_avg:97.94ms +step:1602/1695 train_time:156901ms step_avg:97.94ms +step:1603/1695 train_time:156999ms step_avg:97.94ms +step:1604/1695 train_time:157097ms step_avg:97.94ms +step:1605/1695 train_time:157196ms step_avg:97.94ms +step:1606/1695 train_time:157295ms step_avg:97.94ms +step:1607/1695 train_time:157393ms step_avg:97.94ms +step:1608/1695 train_time:157491ms step_avg:97.94ms +step:1609/1695 train_time:157590ms step_avg:97.94ms +step:1610/1695 train_time:157687ms step_avg:97.94ms +step:1611/1695 train_time:157784ms step_avg:97.94ms +step:1612/1695 train_time:157881ms step_avg:97.94ms +step:1613/1695 train_time:157978ms step_avg:97.94ms +step:1614/1695 train_time:158077ms step_avg:97.94ms +step:1615/1695 train_time:158176ms step_avg:97.94ms +step:1616/1695 train_time:158274ms step_avg:97.94ms +step:1617/1695 train_time:158373ms step_avg:97.94ms +step:1618/1695 train_time:158471ms step_avg:97.94ms +step:1619/1695 train_time:158569ms step_avg:97.94ms +step:1620/1695 train_time:158666ms step_avg:97.94ms +step:1621/1695 train_time:158765ms step_avg:97.94ms +step:1622/1695 train_time:158861ms step_avg:97.94ms +step:1623/1695 train_time:158958ms step_avg:97.94ms +step:1624/1695 train_time:159057ms step_avg:97.94ms +step:1625/1695 train_time:159155ms step_avg:97.94ms +step:1625/1695 val_loss:3.2898 train_time:159251ms step_avg:98.00ms +step:1626/1695 train_time:159278ms step_avg:97.96ms +step:1627/1695 train_time:159359ms step_avg:97.95ms +step:1628/1695 train_time:159459ms step_avg:97.95ms +step:1629/1695 train_time:159560ms step_avg:97.95ms +step:1630/1695 train_time:159658ms step_avg:97.95ms +step:1631/1695 train_time:159755ms step_avg:97.95ms +step:1632/1695 train_time:159852ms step_avg:97.95ms +step:1633/1695 train_time:159950ms step_avg:97.95ms +step:1634/1695 train_time:160046ms step_avg:97.95ms +step:1635/1695 train_time:160142ms step_avg:97.95ms +step:1636/1695 train_time:160242ms step_avg:97.95ms +step:1637/1695 train_time:160342ms step_avg:97.95ms +step:1638/1695 train_time:160441ms step_avg:97.95ms +step:1639/1695 train_time:160539ms step_avg:97.95ms +step:1640/1695 train_time:160639ms step_avg:97.95ms +step:1641/1695 train_time:160737ms step_avg:97.95ms +step:1642/1695 train_time:160835ms step_avg:97.95ms +step:1643/1695 train_time:160933ms step_avg:97.95ms +step:1644/1695 train_time:161031ms step_avg:97.95ms +step:1645/1695 train_time:161128ms step_avg:97.95ms +step:1646/1695 train_time:161225ms step_avg:97.95ms +step:1647/1695 train_time:161323ms step_avg:97.95ms +step:1648/1695 train_time:161421ms step_avg:97.95ms +step:1649/1695 train_time:161519ms step_avg:97.95ms +step:1650/1695 train_time:161618ms step_avg:97.95ms +step:1651/1695 train_time:161717ms step_avg:97.95ms +step:1652/1695 train_time:161816ms step_avg:97.95ms +step:1653/1695 train_time:161914ms step_avg:97.95ms +step:1654/1695 
train_time:162010ms step_avg:97.95ms +step:1655/1695 train_time:162107ms step_avg:97.95ms +step:1656/1695 train_time:162205ms step_avg:97.95ms +step:1657/1695 train_time:162302ms step_avg:97.95ms +step:1658/1695 train_time:162399ms step_avg:97.95ms +step:1659/1695 train_time:162496ms step_avg:97.95ms +step:1660/1695 train_time:162595ms step_avg:97.95ms +step:1661/1695 train_time:162693ms step_avg:97.95ms +step:1662/1695 train_time:162791ms step_avg:97.95ms +step:1663/1695 train_time:162888ms step_avg:97.95ms +step:1664/1695 train_time:162985ms step_avg:97.95ms +step:1665/1695 train_time:163083ms step_avg:97.95ms +step:1666/1695 train_time:163181ms step_avg:97.95ms +step:1667/1695 train_time:163280ms step_avg:97.95ms +step:1668/1695 train_time:163379ms step_avg:97.95ms +step:1669/1695 train_time:163476ms step_avg:97.95ms +step:1670/1695 train_time:163574ms step_avg:97.95ms +step:1671/1695 train_time:163672ms step_avg:97.95ms +step:1672/1695 train_time:163769ms step_avg:97.95ms +step:1673/1695 train_time:163867ms step_avg:97.95ms +step:1674/1695 train_time:163964ms step_avg:97.95ms +step:1675/1695 train_time:164062ms step_avg:97.95ms +step:1676/1695 train_time:164160ms step_avg:97.95ms +step:1677/1695 train_time:164258ms step_avg:97.95ms +step:1678/1695 train_time:164359ms step_avg:97.95ms +step:1679/1695 train_time:164455ms step_avg:97.95ms +step:1680/1695 train_time:164553ms step_avg:97.95ms +step:1681/1695 train_time:164651ms step_avg:97.95ms +step:1682/1695 train_time:164750ms step_avg:97.95ms +step:1683/1695 train_time:164849ms step_avg:97.95ms +step:1684/1695 train_time:164947ms step_avg:97.95ms +step:1685/1695 train_time:165044ms step_avg:97.95ms +step:1686/1695 train_time:165141ms step_avg:97.95ms +step:1687/1695 train_time:165239ms step_avg:97.95ms +step:1688/1695 train_time:165336ms step_avg:97.95ms +step:1689/1695 train_time:165435ms step_avg:97.95ms +step:1690/1695 train_time:165533ms step_avg:97.95ms +step:1691/1695 train_time:165631ms step_avg:97.95ms +step:1692/1695 train_time:165728ms step_avg:97.95ms +step:1693/1695 train_time:165825ms step_avg:97.95ms +step:1694/1695 train_time:165922ms step_avg:97.95ms +step:1695/1695 train_time:166021ms step_avg:97.95ms +step:1695/1695 val_loss:3.2782 train_time:166117ms step_avg:98.00ms +peak memory allocated: 34001 MiB reserved: 49716 MiB diff --git a/requirements.txt b/requirements.txt index fe83bb138..f97ac89a2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,5 @@ numpy tqdm torch huggingface-hub +triton +flash-attn \ No newline at end of file diff --git a/train_gpt.py b/train_gpt.py index 57ccce211..bbb431bed 100644 --- a/train_gpt.py +++ b/train_gpt.py @@ -7,7 +7,7 @@ import copy import glob from dataclasses import dataclass -from functools import lru_cache, partial # Added partial for hook registration +from functools import lru_cache from pathlib import Path os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" @@ -16,9 +16,13 @@ from torch import Tensor, nn import torch.nn.functional as F import torch.distributed as dist -# use of FlexAttention contributed by @KoszarskyB -from torch.nn.attention.flex_attention import BlockMask, flex_attention #torch._inductor.config.coordinate_descent_tuning = True # we have banned this flag for new records because it causes compilation to take 30min +import numpy as np +import triton +import triton.language as tl +from flash_attn_interface import flash_attn_func +import torch._dynamo as dynamo +dynamo.config.recompile_limit = 64 # 
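# Note on recompile_limit: with torch.compile(dynamic=False) specializing on plain
# Python ints (see newton_schulz_triton below, and the ws_long/ws_short ints passed
# into GPT.forward), each distinct window-size value presumably triggers a fresh
# specialization, so the cap is raised well above dynamo's default to keep the
# window-size schedule from falling back to eager.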
----------------------------------------------------------------------------- # Custom operators: FP8 matmul by @YouJiacheng @@ -102,37 +106,287 @@ def setup_context(ctx: torch.autograd.function.FunctionCtx, inputs, output): mm_op.register_autograd(backward, setup_context=setup_context) # ----------------------------------------------------------------------------- -# Muon optimizer +# Triton kernel for symmetric matrix multiplication by @byronxu99 + +def _get_autotune_configs(): + return [ + triton.Config( + { + "BLOCK_SIZE_M": bm, + "BLOCK_SIZE_N": bn, + "BLOCK_SIZE_K": bk, + "GROUP_SIZE_M": 8, + "LOWER_UPPER": 1, + }, + num_stages=stages, + num_warps=warps, + ) + for bm in [64, 128] + for bn in [64, 128, 256] + for bk in [64, 128] + for stages, warps in [(3, 4), (3, 8), (4, 4)] + if bm // bn <= 2 and bn // bm <= 2 + ] + +@triton.jit +def _pid_to_block( + pid, + M, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, +): + # Split output matrix into blocks of size (BLOCK_SIZE_M, BLOCK_SIZE_N) + num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) + num_pid_n = tl.cdiv(M, BLOCK_SIZE_N) + + # Map PID to a single matrix in batch + batch_idx = pid // (num_pid_m * num_pid_n) + pid = pid % (num_pid_m * num_pid_n) + + # Map PID to 2D grid of blocks + pid_m = pid // num_pid_n + pid_n = pid % num_pid_n + pid_m, pid_n = tl.swizzle2d(pid_m, pid_n, num_pid_m, num_pid_n, GROUP_SIZE_M) + + m_idx = pid_m * BLOCK_SIZE_M + n_idx = pid_n * BLOCK_SIZE_N + return batch_idx, m_idx, n_idx + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "K", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_1_kernel( + A_ptr, C_ptr, + M, K, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(K, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, 
:] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_1(A: torch.Tensor, out: torch.Tensor): + """ + Launch Triton kernel to compute C = A @ A.T + """ + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert out.size(-2) == M, "Output matrix has incorrect shape" + assert out.size(-1) == M, "Output matrix has incorrect shape" -@torch.compile -def zeropower_via_newtonschulz5(G: Tensor, steps: int) -> Tensor: + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_1_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + K=K, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + ) + return out + +@triton.autotune( + configs=_get_autotune_configs(), + key=["M", "a_stride_r", "a_stride_c", "c_stride_r", "c_stride_c"], +) +@triton.jit +def ns_line_2_kernel( + A_ptr, C_ptr, + M, + a_stride_b, a_stride_r, a_stride_c, + c_stride_b, c_stride_r, c_stride_c, + alpha, beta, + BLOCK_SIZE_M: tl.constexpr, + BLOCK_SIZE_N: tl.constexpr, + BLOCK_SIZE_K: tl.constexpr, + GROUP_SIZE_M: tl.constexpr, + LOWER_UPPER: tl.constexpr, +): + # This is mostly duplicated from ns_line_1_kernel, but also loads and adds a block of A + # Performance is slightly slower than ns_line_1_kernel, so we use two separate kernels + pid = tl.program_id(axis=0) + batch_idx, m_idx, n_idx = _pid_to_block( + pid, M, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M + ) + + # Skip blocks that don't need to be computed + skip_block_below_diag = (LOWER_UPPER == 0) and (n_idx + BLOCK_SIZE_N <= m_idx) + skip_block_above_diag = (LOWER_UPPER != 0) and (m_idx + BLOCK_SIZE_M <= n_idx) + if skip_block_below_diag or skip_block_above_diag: + return + + # Index into one matrix of batch + A_ptr += batch_idx * a_stride_b + C_ptr += batch_idx * c_stride_b + + # Create pointer arrays for A and A.T + offs_m = (m_idx + tl.arange(0, BLOCK_SIZE_M)) % M + offs_n = (n_idx + tl.arange(0, BLOCK_SIZE_N)) % M + offs_k = tl.arange(0, BLOCK_SIZE_K) + a_ptrs = A_ptr + (offs_m[:, None] * a_stride_r + offs_k[None, :] * a_stride_c) + at_ptrs = A_ptr + (offs_k[:, None] * a_stride_c + offs_n[None, :] * a_stride_r) + + accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) + + # Accumulate over blocks of K + for k in tl.range(0, tl.cdiv(M, BLOCK_SIZE_K)): + a = tl.load(a_ptrs, mask=offs_k[None, :] < M - k * BLOCK_SIZE_K, other=0.0) + at = tl.load(at_ptrs, mask=offs_k[:, None] < M - k * BLOCK_SIZE_K, other=0.0) + accumulator = tl.dot(a, at, accumulator) + a_ptrs += BLOCK_SIZE_K * a_stride_c + at_ptrs += BLOCK_SIZE_K * a_stride_c + + # Load block of A to add (corresponds to the current block of C) + offs_am = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_an = n_idx + tl.arange(0, BLOCK_SIZE_N) + a_add_ptrs = A_ptr + (offs_am[:, None] * a_stride_r + offs_an[None, :] * a_stride_c) + a_add_mask = (offs_am[:, None] < M) & (offs_an[None, :] < M) + a_add = tl.load(a_add_ptrs, mask=a_add_mask, other=0.0).to(tl.float32) + + # Apply alpha and beta + accumulator *= alpha 
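# At this point `accumulator` holds one fp32 (BLOCK_SIZE_M, BLOCK_SIZE_N) tile of
# A @ A.T, already scaled by alpha; adding beta * A in-register below fuses the
# epilogue of C = alpha * A @ A.T + beta * A into the matmul kernel, avoiding a
# separate elementwise pass over global memory.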
+ accumulator += a_add * beta + + out_dtype = C_ptr.dtype.element_ty + output = accumulator.to(out_dtype) + + # Store block of C + offs_cm = m_idx + tl.arange(0, BLOCK_SIZE_M) + offs_cn = n_idx + tl.arange(0, BLOCK_SIZE_N) + c_ptrs = C_ptr + (offs_cm[:, None] * c_stride_r + offs_cn[None, :] * c_stride_c) + c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < M) + tl.store(c_ptrs, output, mask=c_mask) + + # Store block of C mirrored across the diagonal + c_ptrs_t = C_ptr + (offs_cn[:, None] * c_stride_r + offs_cm[None, :] * c_stride_c) + c_mask_t = (offs_cn[:, None] < M) & (offs_cm[None, :] < M) + tl.store(c_ptrs_t, output.T, mask=c_mask_t) + +def ns_line_2(A: torch.Tensor, alpha: float, beta: float, out: torch.Tensor): """ - Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a - quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose - of minimizing steps, it turns out to be empirically effective to keep increasing the slope at - zero even beyond the point where the iteration no longer converges all the way to one everywhere - on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T - where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model - performance at all relative to UV^T, where USV^T = G is the SVD. + Launch Triton kernel to compute C = alpha * A @ A.T + beta * A """ - assert G.ndim >= 2 # batched Muon implementation by @scottjmaddox, and put into practice in the record by @YouJiacheng - a, b, c = (3.4445, -4.7750, 2.0315) - X = G + assert A.ndim == 2 or A.ndim == 3 + M, K = A.shape[-2:] + assert M == K, "Input matrix must be square" + assert out.size(-2) == M + assert out.size(-1) == M + + batch_size = A.size(0) if A.ndim == 3 else 1 + input_batch_stride = A.stride(0) if A.ndim == 3 else 0 + output_batch_stride = out.stride(0) if out.ndim == 3 else 0 + + grid = lambda meta: ( + batch_size * triton.cdiv(M, meta["BLOCK_SIZE_M"]) * triton.cdiv(M, meta["BLOCK_SIZE_N"]), + ) + ns_line_2_kernel[grid]( + A_ptr=A, + C_ptr=out, + M=M, + a_stride_b=input_batch_stride, + a_stride_r=A.stride(-2), + a_stride_c=A.stride(-1), + c_stride_b=output_batch_stride, + c_stride_r=out.stride(-2), + c_stride_c=out.stride(-1), + alpha=alpha, + beta=beta, + ) + return out + +@torch.compile(dynamic=False, fullgraph=True) # Must use dynamic=False or else it's much slower +def newton_schulz_triton(G: torch.Tensor): + a, b, c = (3.4445, -4.7750, 2.0315) + X = G.bfloat16() if G.size(-2) > G.size(-1): X = X.mT # Ensure spectral norm is at most 1 X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) + + # Allocate buffers + X = X.contiguous() + A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype) + B = torch.empty_like(A) + C = torch.empty_like(X) + + ns_line_3 = torch.baddbmm if X.ndim > 2 else torch.addmm + # Perform the NS iterations - for _ in range(steps): - A = X @ X.mT - B = b * A + c * A @ A # quintic computation strategy adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng - X = a * X + B @ X + for _ in range(5): + ns_line_1(X, out=A) # A = X @ X.mT + ns_line_2(A, alpha=c, beta=b, out=B) # B = b * A + c * A @ A + ns_line_3(X, B, X, beta=a, out=C) # C = a * X + B @ X + X, C = C, X # Swap references to avoid unnecessary copies if G.size(-2) > G.size(-1): X = X.mT return X +# ----------------------------------------------------------------------------- +# Muon optimizer + class Muon(torch.optim.Optimizer): """ Muon - MomentUm 
Orthogonalized by Newton-schulz @@ -166,7 +420,7 @@ def step(self): rank = dist.get_rank() world_size = dist.get_world_size() reduce_scatter_futures: list[torch.Future] = [] - all_reduce_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] for group in self.param_groups: params: list[Tensor] = group["params"] grad = torch.empty_like(params[-1]) @@ -196,11 +450,11 @@ def step(self): p.mul_(1 - eff_weight_decay) momentum_buffer.lerp_(grad, 1 - momentum) grad = grad.lerp_(momentum_buffer, momentum) - v = zeropower_via_newtonschulz5(grad.bfloat16(), 5) + v = newton_schulz_triton(grad) p.add_(other=v, alpha=-eff_lr) idx += 1 - all_reduce_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) - torch.futures.collect_all(all_reduce_futures).wait() + all_gather_futures.append(dist.all_gather(params_pad[base_i:base_i + world_size], params_pad[base_i + rank], async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() class DistAdam(torch.optim.Optimizer): def __init__(self, params, lr: float = 1e-3, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-8, weight_decay: float = 0.01): @@ -221,11 +475,10 @@ def step(self): rank = dist.get_rank() world_size = dist.get_world_size() reduce_scatter_futures: list[torch.Future] = [] - all_reduce_futures: list[torch.Future] = [] + all_gather_futures: list[torch.Future] = [] grad_slices = [] for group in self.param_groups: params: list[Tensor] = group["params"] - grad = torch.empty_like(params[-1]) for base_i in range(len(params)): grad = params[base_i].grad rank_size = grad.shape[0] // world_size @@ -272,8 +525,8 @@ def step(self): update = exp_avg.div(denom).mul_(step_size) p_slice.add_(other=update, alpha=-1.0) idx += 1 - all_reduce_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) - torch.futures.collect_all(all_reduce_futures).wait() + all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future()) + torch.futures.collect_all(all_gather_futures).wait() # ----------------------------------------------------------------------------- # PyTorch nn.Module definitions for the model @@ -328,45 +581,61 @@ def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128): self.num_heads = num_heads self.head_dim = head_dim hdim = num_heads * head_dim + assert hdim == dim, "num_heads * head_dim must equal model_dim" std = 0.5 * (dim ** -0.5) bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng # https://x.com/hi_tysam/status/1879699187107033311 - self.qkv_w = nn.Parameter(torch.empty(3, hdim, dim).uniform_(-bound, bound)) + self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim)) + with torch.no_grad(): + self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights + self.qkvo_w[3].zero_() # init output weights to zero self.rotary = Rotary(head_dim, max_seq_len) - self.c_proj = CastedLinear(hdim, dim) - self.c_proj.weight.detach().zero_() # zero init suggested by @Grad62304977 # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283 self.attn_scale = 0.12 - def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask): + # sparse gated attention to enable context based no-op 
@@ -221,11 +475,10 @@ def step(self):
         rank = dist.get_rank()
         world_size = dist.get_world_size()
         reduce_scatter_futures: list[torch.Future] = []
-        all_reduce_futures: list[torch.Future] = []
+        all_gather_futures: list[torch.Future] = []
         grad_slices = []
         for group in self.param_groups:
             params: list[Tensor] = group["params"]
-            grad = torch.empty_like(params[-1])
             for base_i in range(len(params)):
                 grad = params[base_i].grad
                 rank_size = grad.shape[0] // world_size
@@ -272,8 +525,8 @@ def step(self):
                 update = exp_avg.div(denom).mul_(step_size)
                 p_slice.add_(other=update, alpha=-1.0)
                 idx += 1
-            all_reduce_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future())
-        torch.futures.collect_all(all_reduce_futures).wait()
+            all_gather_futures.append(dist.all_gather_into_tensor(p, p_slice, async_op=True).get_future())
+        torch.futures.collect_all(all_gather_futures).wait()

 # -----------------------------------------------------------------------------
 # PyTorch nn.Module definitions for the model
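# The communication pattern shared by both optimizers above, in miniature: reduce-scatter
# the gradient so each rank owns a 1/world_size row-slice of every parameter, update only
# that slice, then all-gather the updated slices back. A minimal synchronous sketch (the
# real step overlaps these collectives via async_op=True futures, and the SGD-style
# update is a stand-in for the actual Adam/Muon math):
def sharded_step_sketch(p: Tensor, lr: float):
    rank, world_size = dist.get_rank(), dist.get_world_size()
    rank_size = p.shape[0] // world_size
    grad_slice = torch.empty_like(p.grad[:rank_size])
    dist.reduce_scatter_tensor(grad_slice, p.grad, op=dist.ReduceOp.AVG)  # each rank receives its averaged slice
    p_slice = p.data[rank * rank_size:(rank + 1) * rank_size]
    p_slice.add_(grad_slice, alpha=-lr)  # local update on the owned slice only
    dist.all_gather_into_tensor(p.data, p_slice)  # reassemble the full parameter on every rank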
@@ -328,45 +581,61 @@ def __init__(self, dim: int, num_heads: int, max_seq_len: int, head_dim=128):
         self.num_heads = num_heads
         self.head_dim = head_dim
         hdim = num_heads * head_dim
+        assert hdim == dim, "num_heads * head_dim must equal model_dim"
         std = 0.5 * (dim ** -0.5)
         bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
         # merged QKV weights: suggested by many, implemented by @fernbear.bsky.social, and further improved by @YouJiacheng
         # https://x.com/hi_tysam/status/1879699187107033311
-        self.qkv_w = nn.Parameter(torch.empty(3, hdim, dim).uniform_(-bound, bound))
+        self.qkvo_w = nn.Parameter(torch.empty(4, hdim, dim))
+        with torch.no_grad():
+            self.qkvo_w[:3].uniform_(-bound, bound) # init QKV weights
+            self.qkvo_w[3].zero_() # init output weights to zero
         self.rotary = Rotary(head_dim, max_seq_len)
-        self.c_proj = CastedLinear(hdim, dim)
-        self.c_proj.weight.detach().zero_() # zero init suggested by @Grad62304977
         # scale the attention logits by given constant, instead of the default head_dim**-0.5, by @leloykun
         # inspired by learnable scalars used by @brendanh0gan https://x.com/hi_tysam/status/1879693583898591283
         self.attn_scale = 0.12
-    def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, block_mask: BlockMask):
+        # sparse gated attention to enable context-based no-op by @classiclarryd
+        self.attn_gate_dim = 12
+        self.attn_gate = CastedLinear(self.attn_gate_dim, num_heads)
+        self.attn_gate.weight.detach().zero_()
+
+    def forward(self, x: Tensor, ve: Tensor | None, lambdas: Tensor, bm_size: int):
         B, T = x.size(0), x.size(1) # batch size, sequence length
-        assert B == 1, "Must use batch size = 1 for FlexAttention"
-        q, k, v = F.linear(x, self.qkv_w.flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2)
+
+        q, k, v = F.linear(x, self.qkvo_w[:3].flatten(end_dim=1).type_as(x)).view(B, T, 3 * self.num_heads, self.head_dim).chunk(3, dim=-2)
         q, k = norm(q), norm(k) # QK norm @Grad62304977
         q, k = self.rotary(q), self.rotary(k)
         if ve is not None:
             v = lambdas[0] * v + lambdas[1] * ve.view_as(v) # @KoszarskyB & @Grad62304977
         else: # skip mid-layers token value embeddings by @YouJiacheng
             v = lambdas[0] * v
-        y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask, scale=self.attn_scale).transpose(1, 2)
+
+        y = flash_attn_func(q, k, v, softmax_scale=self.attn_scale, window_size=(bm_size, 0)) # use flash_attn over flex_attn @varunneal
+        y = y.view(B, T, self.num_heads, self.head_dim)
+        y = y * torch.sigmoid(self.attn_gate(x[..., :self.attn_gate_dim])).view(B, T, self.num_heads, 1)
         y = y.contiguous().view(B, T, self.num_heads * self.head_dim) # re-assemble all head outputs side by side
-        y = self.c_proj(y)
+        y = F.linear(y, self.qkvo_w[3].type_as(y))
         return y

 class MLP(nn.Module):
     def __init__(self, dim: int):
         super().__init__()
         hdim = 4 * dim
-        self.c_fc = CastedLinear(dim, hdim)
-        self.c_proj = CastedLinear(hdim, dim)
-        self.c_proj.weight.detach().zero_() # zero init suggested by @Grad62304977
+        # make both matrices have the same shape because optimizer sorts params by shape
+        # 2 matrices x 12 layers = 24 total, which is divisible by 8 GPU world size
+        self.c_fc = nn.Parameter(torch.empty(dim, hdim))
+        self.c_proj = nn.Parameter(torch.empty(dim, hdim))
+        std = 0.5 * (dim ** -0.5)
+        bound = (3 ** 0.5) * std # improved init scale by @YouJiacheng
+        with torch.no_grad():
+            self.c_fc.uniform_(-bound, bound)
+            self.c_proj.zero_() # zero init suggested by @Grad62304977

     def forward(self, x: Tensor):
-        x = self.c_fc(x)
+        x = F.linear(x, self.c_fc.T.type_as(x))
         x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
-        x = self.c_proj(x)
+        x = F.linear(x, self.c_proj.type_as(x))
         return x

 class Block(nn.Module):
@@ -376,10 +645,10 @@ def __init__(self, dim: int, num_heads: int, max_seq_len: int, layer_idx: int):
         self.attn = CausalSelfAttention(dim, num_heads, max_seq_len) if layer_idx != 7 else None
         self.mlp = MLP(dim)

-    def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, block_mask: BlockMask):
+    def forward(self, x: Tensor, ve: Tensor | None, x0: Tensor, lambdas: Tensor, sa_lambdas: Tensor, bm_size: int):
         x = lambdas[0] * x + lambdas[1] * x0
         if self.attn is not None:
-            x = x + self.attn(norm(x), ve, sa_lambdas, block_mask)
+            x = x + self.attn(norm(x), ve, sa_lambdas, bm_size)
         x = x + self.mlp(norm(x))
         return x
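# Masking semantics of the flash_attn_func call above: window_size=(bm_size, 0) lets
# query position i attend to key positions j with i - bm_size <= j <= i (a causal
# left-window band). A naive reference for checking on small inputs; illustrative only,
# since flash_attn computes this fused without materializing the score matrix:
def windowed_attention_reference(q: Tensor, k: Tensor, v: Tensor, scale: float, window: int) -> Tensor:
    B, T, H, D = q.shape  # same (B, T, num_heads, head_dim) layout as the forward above
    scores = torch.einsum("bqhd,bkhd->bhqk", q.float(), k.float()) * scale
    i = torch.arange(T, device=q.device)[:, None]
    j = torch.arange(T, device=q.device)[None, :]
    scores = scores.masked_fill(~((j <= i) & (j >= i - window)), float("-inf"))
    return torch.einsum("bhqk,bkhd->bqhd", scores.softmax(dim=-1), v.float())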
@@ -400,7 +669,8 @@ def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim:
         self.blocks = nn.ModuleList([Block(model_dim, num_heads, max_seq_len, i) for i in range(num_layers)])
         # there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency.
         # suggested to me by @Grad62304977. this originates from Karpathy's experiments.
-        self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=True, x_s=(model_dim**0.5)/448, w_s=24/448, grad_s=1/448)
+        use_fp8 = not os.environ.get("DISABLE_FP8", False)
+        self.lm_head = CastedLinear(model_dim, vocab_size, use_fp8=use_fp8, x_s=(model_dim**0.5)/448, w_s=2**-9, grad_s=1/448)
         self.lm_head.weight.detach().zero_() # @Grad62304977
         # Add learnable skip connection weights for decoder layers
         assert num_layers % 2 == 0
@@ -416,62 +686,23 @@ def __init__(self, vocab_size: int, num_layers: int, num_heads: int, model_dim:
             param.lr_mul = 75.
         for param in self.value_embeds.parameters():
             param.lr_mul = 75.
-        self.lm_head.weight.lr_mul = 27.5
+        self.lm_head.weight.lr_mul = 1.0
         self.scalars.lr_mul = 5.0

-    def create_blockmasks(self, input_seq: Tensor, sliding_window_num_blocks: Tensor):
-        BLOCK_SIZE = 128
-        docs = (input_seq == 50256).cumsum(0)
-
-        def document_causal(b, h, q_idx, kv_idx):
-            causal_mask = q_idx >= kv_idx
-            document_mask = docs[q_idx] == docs[kv_idx]
-            return causal_mask & document_mask
-
-        def dense_to_ordered(dense_blockmask: Tensor):
-            num_blocks = dense_blockmask.sum(dim=-1, dtype=torch.int32)
-            indices = dense_blockmask.argsort(dim=-1, descending=False, stable=True).flip(-1).to(torch.int32)
-            return num_blocks[None, None].contiguous(), indices[None, None].contiguous()
-
-        # manual block mask creation by @YouJiacheng
-        assert len(input_seq) % BLOCK_SIZE == 0
-        NUM_BLOCKS = len(input_seq) // BLOCK_SIZE
-        block_idx = torch.arange(NUM_BLOCKS, dtype=torch.int32, device="cuda")
-        causal_blockmask_any = block_idx[:, None] >= block_idx
-        causal_blockmask_all = block_idx[:, None] > block_idx
-        docs_low = docs.view(-1, BLOCK_SIZE)[:, 0].contiguous()
-        docs_high = docs.view(-1, BLOCK_SIZE)[:, -1].contiguous()
-        document_blockmask_any = (docs_low[:, None] <= docs_high) & (docs_high[:, None] >= docs_low)
-        document_blockmask_all = (docs_low[:, None] == docs_high) & (docs_high[:, None] == docs_low)
-        blockmask_any = causal_blockmask_any & document_blockmask_any
-        blockmask_all = causal_blockmask_all & document_blockmask_all
-        partial_kv_num_blocks, partial_kv_indices = dense_to_ordered(blockmask_any & ~blockmask_all)
-        full_kv_num_blocks, full_kv_indices = dense_to_ordered(blockmask_all)
-        def build_bm(window_size_blocks: Tensor) -> BlockMask:
-            return BlockMask.from_kv_blocks(
-                torch.clamp_max(partial_kv_num_blocks, torch.clamp_min(window_size_blocks - full_kv_num_blocks, 1)),
-                partial_kv_indices,
-                torch.clamp_max(full_kv_num_blocks, window_size_blocks - 1),
-                full_kv_indices,
-                BLOCK_SIZE=BLOCK_SIZE,
-                mask_mod=document_causal,
-            )
-        # Long-short SWA block masks by @leloykun & @YouJiacheng, adapted from suggestion by @Grad62304977, following Gemma 2 paper
-        return build_bm(sliding_window_num_blocks), build_bm(sliding_window_num_blocks // 2)
-
-    def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor):
-        assert input_seq.ndim == 1
+
+    def forward(self, input_seq: Tensor, target_seq: Tensor, ws_long: int, ws_short: int):
+        assert input_seq.ndim == 2
         ve = [value_embed(input_seq) for value_embed in self.value_embeds]
         # 012 ... 012 structure on token value embeddings by @YouJiacheng, improved on @leloykun's U-net structure
         ve = [ve[0], ve[1], ve[2]] + [None] * (len(self.blocks) - 6) + [ve[0], ve[1], ve[2]]
         assert len(ve) == len(self.blocks)

-        long_bm, short_bm = self.create_blockmasks(input_seq, sliding_window_num_blocks)
-        block_masks = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
-        assert len(block_masks) == len(self.blocks)
+        long_bm, short_bm = ws_long * args.bandwidth, ws_short * args.bandwidth
+        bm_sizes = [long_bm, short_bm, short_bm, short_bm, long_bm, short_bm, short_bm, long_bm, short_bm, short_bm, short_bm, long_bm]
+        assert len(bm_sizes) == len(self.blocks)

-        x = x0 = norm(self.embed(input_seq)[None])
+        x = x0 = norm(self.embed(input_seq)) # use of norm here by @Grad62304977

         # U-net design by @brendanh0gan
         skip_connections = []
@@ -484,15 +715,16 @@ def forward(self, input_seq: Tensor, target_seq: Tensor, sliding_window_num_blocks: Tensor):
         for i in range(len(self.blocks)):
             if i >= n:
                 x = x + skip_weights[i - n] * skip_connections.pop()
-            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], block_masks[i])
+            x = self.blocks[i](x, ve[i], x0, lambdas[i], sa_lambdas[i], bm_sizes[i])
             if i < n:
                 skip_connections.append(x)

         x = norm(x)
         logits = self.lm_head(x).float()
         # @Grad62304977 added tanh softcapping following Gemma 2 paper, @KoszarskyB reduced it from 30 to 15, @YouJiacheng shifted it by +15 (2*sigmoid(2*x)=tanh(x)+1)
-        logits = 30 * torch.sigmoid(logits / (7.5 * x.size(-1)**0.5))
-        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq, reduction="sum" if self.training else "mean")
+        logits = 30 * torch.sigmoid(logits / 7.5)
+        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1),
+                               reduction="sum" if self.training else "mean")
         return loss

 # -----------------------------------------------------------------------------
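# Sanity check for the softcap above: since 2*sigmoid(2*x) = tanh(x) + 1, the expression
# 30*sigmoid(z/7.5) equals 15*tanh(z/15) + 15, i.e. a tanh softcap at 15 shifted into
# the range (0, 30). The divisor is now a plain 7.5 rather than 7.5*sqrt(model_dim);
# presumably the sqrt(model_dim) factor moves into the lm_head scaling, though the
# identity below holds either way. Quick numeric check, illustrative only:
def _softcap_identity_check():
    z = torch.linspace(-100.0, 100.0, steps=1001)
    assert torch.allclose(30 * torch.sigmoid(z / 7.5), 15 * torch.tanh(z / 15) + 15, atol=1e-5)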
@@ -510,44 +742,104 @@ def _load_data_shard(file: Path):
     assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
     return tokens

-# find world_size starting indices, such that each begins with token 50256 and local_batches don't overlap
-def find_batch_starts(tokens: Tensor, pos: int, local_batch_size: int, max_batch_span: int):
-    boundary_mask = tokens[pos : pos + max_batch_span] == 50256
-    boundary_positions = torch.nonzero(boundary_mask, as_tuple=False).squeeze(-1) + pos
-    start = boundary_positions[0].item()
-    starts = []
-    for i in range(1, len(boundary_positions)):
-        end = boundary_positions[i].item()
-        if end - start >= local_batch_size:
-            starts.append(start) # append start once end pos is confirmed
-            if len(starts) == dist.get_world_size():
-                return starts, end - pos
-            start = end
-    assert False # increase max_batch_span if necessary
-
-def distributed_data_generator(filename_pattern: str, batch_size: int, align_to_bos: bool):
-    rank = dist.get_rank()
-    world_size = dist.get_world_size()
+class EOSBatchFinder:
+    # Helper for getting sequences that start at the beginning of documents by @varunneal based on work by @classiclarryd
+    def __init__(self, tokens: Tensor, world_size: int = 1, eos_id: int = 50256):
+        # Precompute EOS positions once per shard
+        self.eos_idx = (tokens == eos_id).nonzero(as_tuple=True)[0].to(torch.int64).cpu().numpy()
+        self.i = 0 # pointer into eos_idx (start EOS for next step)
+        self.pos = 0 # logical stream position within this shard
+        self.world_size = world_size
+
+    def seek(self, pos: int):
+        # Set pointer to the first EOS >= pos
+        self.i = np.searchsorted(self.eos_idx, pos)
+        if self.i >= len(self.eos_idx):
+            raise StopIteration("Seek past last EOS.")
+        self.pos = pos
+
+    def next_batch(self, batch_size_local: int, seq_len: int):
+        n = len(self.eos_idx)
+        if self.i >= n:
+            raise StopIteration("No more EOS in this shard.")
+        starts = [[] for _ in range(self.world_size)]
+        idx = self.i
+        cur = self.eos_idx[idx] # EOS that ends the "previous" document; next doc starts at cur+1
+        for r in range(self.world_size):
+            for _ in range(batch_size_local):
+                start = cur + 1
+                target = start + seq_len # need seq_len tokens before next EOS
+                j = np.searchsorted(self.eos_idx, target)
+                if j >= n:
+                    raise StopIteration("Insufficient EOS ahead; hit tail of shard.")
+                starts[r].append(start)
+                idx = j
+                cur = self.eos_idx[idx] # next seq must also start at a new doc
+        advance = self.eos_idx[idx] - self.pos # move stream to the last end
+        self.pos += advance
+        self.i = idx
+        return starts, advance
+
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, seq_len: int, align_to_bos: bool = True):
+    # align_to_bos: each sequence begins with a Beginning of Sequence token and sequences don't overlap
+    rank = dist.get_rank() if dist.is_initialized() else 0
+    world_size = dist.get_world_size() if dist.is_initialized() else 1
+    assert batch_size % world_size == 0, "Batch size must be divisible by world size"
+
     files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
-    assert batch_size % world_size == 0
-    local_batch_size = batch_size // world_size
-    file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    if not files:
+        raise FileNotFoundError(f"No files found for pattern: {filename_pattern}")
+
+    file_iter = iter(files) # Use itertools.cycle(files) for multi-epoch training
     tokens, pos = _load_data_shard(next(file_iter)), 0
-    max_batch_span = 2 * batch_size if align_to_bos else batch_size # provide buffer to handle samples up to length local_batch_size
+
+    finder = EOSBatchFinder(tokens, world_size=world_size) if align_to_bos else None
+    if align_to_bos: finder.seek(pos)
+
     while True:
-        if pos + max_batch_span + 1 >= len(tokens):
+        batch_size_local = batch_size // world_size
+        num_tokens_global = batch_size * seq_len
+
+        if not align_to_bos and pos + num_tokens_global + 1 >= len(tokens):
             tokens, pos = _load_data_shard(next(file_iter)), 0
+
         if align_to_bos:
-            batch_starts, batch_span = find_batch_starts(tokens, pos, local_batch_size, max_batch_span)
-            start_idx = batch_starts[rank]
+            try:
+                batch_starts, batch_span = finder.next_batch(batch_size_local, seq_len)
+                start_idxs = batch_starts[rank]
+            except StopIteration:
+                # This shard is exhausted, load the next one in the next loop iteration.
+                tokens, pos = _load_data_shard(next(file_iter)), 0
+                finder = EOSBatchFinder(tokens, world_size=world_size)
+                continue
+
+            bufs = [tokens[s: s + seq_len + 1] for s in start_idxs]
+            buf = torch.stack(bufs, dim=0)
+            _inputs = buf[:, :-1]
+            _targets = buf[:, 1:]
         else:
-            batch_span = batch_size
-            start_idx = pos + rank * local_batch_size
-        buf = tokens[start_idx:][:local_batch_size + 1]
-        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
-        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+            batch_span = num_tokens_global
+            start_pos_local = pos + rank * (batch_size_local * seq_len)
+            end_pos_local = start_pos_local + (batch_size_local * seq_len)

+            buf = tokens[start_pos_local: end_pos_local + 1]
+
+            _inputs = buf[:-1].view(batch_size_local, seq_len)
+            _targets = buf[1:].view(batch_size_local, seq_len)
+
+        new_params = yield (
+            _inputs.to(device="cuda", dtype=torch.int32, non_blocking=True),
+            _targets.to(device="cuda", dtype=torch.int64, non_blocking=True)
+        )
         pos += batch_span
-        yield inputs, targets
+
+        if new_params is not None:
+            # makes it possible for the generator to receive new (batch_size, seq_len) via .send()
+            new_batch_size, new_seq_len = new_params
+            assert new_batch_size % world_size == 0, "New batch size must be divisible by world size"
+            batch_size = new_batch_size
+            seq_len = new_seq_len

 # -----------------------------------------------------------------------------
 # int main
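# Usage of the .send() hook above (hypothetical values, for illustration): the training
# loop can iterate normally with next(), and resize the stream mid-run by sending a new
# (batch_size, seq_len) pair; the batch returned by .send() is already produced with the
# new shape, because the update takes effect before the next yield.
#
#   loader = distributed_data_generator(args.train_files, 192, 2048)
#   inputs, targets = next(loader)             # (24, 2048) per rank on an 8-GPU run
#   inputs, targets = loader.send((96, 4096))  # subsequent batches are (12, 4096) per rank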
@@ -555,23 +847,34 @@ def distributed_data_generator(filename_pattern: str, batch_size: int, align_to_bos: bool):
 @dataclass
 class Hyperparameters:
     # data
-    train_files = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
-    val_files = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
-    val_tokens = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
-    train_seq_len = 48*1024 # FlexAttention sequence length
-    val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    train_files: str = "data/fineweb10B/fineweb_train_*.bin" # input .bin to train on
+    val_files: str = "data/fineweb10B/fineweb_val_*.bin" # input .bin to eval validation loss on
+    val_tokens: int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
+    train_seq_len: int = 1024 * 2
+    train_batch_size: int = 24 * 8
+    val_seq_len: int = 4 * 64 * 1024 # Validation will be done with batch size = world_size.
     # optimization
-    num_iterations = 1750 # number of iterations to run
-    cooldown_frac = 0.45 # fraction of training spent cooling down the learning rate
+    num_iterations: int = 1695 # number of iterations to run
+    cooldown_frac: float = 0.45 # fraction of training spent cooling down the learning rate
     # evaluation and logging
-    val_loss_every = 125 # every how many steps to evaluate val loss? 0 for only at the end
-    save_checkpoint = False
+    run_id: str = str(uuid.uuid4())
+    val_loss_every: int = 125 # every how many steps to evaluate val loss? 0 for only at the end
+    save_checkpoint: bool = False
+    # attention masking
+    bandwidth: int = 128
+    ws_schedule: tuple = (3, 7, 11)
+
 args = Hyperparameters()

+data_path = os.environ.get("DATA_PATH", ".")
+args.train_files = os.path.join(data_path, args.train_files)
+args.val_files = os.path.join(data_path, args.val_files)
+
 # torchrun sets these env variables
 rank = int(os.environ["RANK"])
 world_size = int(os.environ["WORLD_SIZE"])
-assert world_size == 8 # this code is designed for 8xH100
+assert 8 % world_size == 0, "world_size must be a divisor of 8"
+grad_accum_steps = 8 // world_size
 assert torch.cuda.is_available()
 device = torch.device("cuda", int(os.environ["LOCAL_RANK"]))
 torch.cuda.set_device(device)
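# Tokens per optimizer step are unchanged from the FlexAttention configuration this
# replaces: 24*8 = 192 sequences of length 2048 is 393,216 tokens, exactly matching the
# old 8 ranks x 48*1024-token stream. Illustrative arithmetic:
assert 24 * 8 * (1024 * 2) == 8 * (48 * 1024) == 393_216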
@@ -582,7 +885,7 @@ class Hyperparameters:
 # begin logging
 logfile = None
 if master_process:
-    run_id = uuid.uuid4()
+    run_id = args.run_id
     os.makedirs("logs", exist_ok=True)
     logfile = f"logs/{run_id}.txt"
     print(logfile)
@@ -599,13 +902,21 @@ def print0(s, console=False):
 # log information about the hardware/software environment this is running on
 print0(f"Running Python {sys.version}")
 print0(f"Running PyTorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}")
+print0(f"Running Triton version {triton.__version__}")
+
 def nvidia_smi():
     import subprocess # avoid top level import
     return subprocess.run(["nvidia-smi"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True).stdout
 print0(nvidia_smi())
 print0("="*100)

-model: nn.Module = GPT(vocab_size=50257, num_layers=12, num_heads=6, model_dim=768, max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+model: nn.Module = GPT(
+    vocab_size=50257,
+    num_layers=12,
+    num_heads=6,
+    model_dim=768,
+    max_seq_len=max(args.train_seq_len, args.val_seq_len)
+).cuda()
 for m in model.modules():
     if isinstance(m, nn.Embedding):
         m.bfloat16()
@@ -629,41 +940,31 @@ def nvidia_smi():
     group["initial_lr"] = group["lr"]

 # learning rate schedule: stable then decay
-def get_lr(step: int):
-    x = step / args.num_iterations # progress in training
+def get_lr_and_ws(step: int):
+    x = step / (1 + args.num_iterations) # progress in training
     assert 0 <= x < 1
-    if x < 1 - args.cooldown_frac:
-        return 1.0
-    else:
+    lr = 1.0
+    if x >= 1 - args.cooldown_frac:
         w = (1 - x) / args.cooldown_frac
-        return w * 1.0 + (1 - w) * 0.1
-
-# attention window size schedule: linearly increase
-@lru_cache(1)
-def get_window_size_blocks_helper(window_size: int):
-    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
-def get_window_size_blocks(step: int):
-    x = step / args.num_iterations # progress in training
-    assert 0 <= x <= 1
-    # Linearly increase the block-wise sliding window size over training 128 -> 1792
-    # increase by @fernbear.bsky.social; block-wise by @YouJiacheng
-    window_size = next_multiple_of_n(1728 * x, n=128)
-    return get_window_size_blocks_helper(window_size)
-
-model: nn.Module = torch.compile(model, dynamic=False)
+        lr = w * 1.0 + (1 - w) * 0.1
+    ws_idx = int(len(args.ws_schedule) * x)
+    return lr, args.ws_schedule[ws_idx]
+
+model: nn.Module = torch.compile(model, dynamic=False, fullgraph=True)

 ########################################
 #            Warmup kernels            #
 ########################################

 # Warmup the training kernels, then re-initialize the state so we aren't cheating
-warmup_steps = 10
+warmup_steps = 60
 initial_state = dict(model=copy.deepcopy(model.state_dict()), optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]) # save the initial state
-train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, align_to_bos=True)
-for _ in range(warmup_steps):
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len)
+for step in range(warmup_steps):
     inputs, targets = next(train_loader)
-    model(inputs, targets, get_window_size_blocks(1)).backward()
+    ws = args.ws_schedule[step % len(args.ws_schedule)] # each window size is a new graph, need to warm up each
+    model(inputs, targets, ws, ws // 2).backward()
     for opt in optimizers:
         opt.step()
     model.zero_grad(set_to_none=True)
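# How the combined schedule behaves with num_iterations=1695, cooldown_frac=0.45, and
# ws_schedule=(3, 7, 11): lr holds at 1.0 for the first ~55% of training and then decays
# linearly toward 0.1, while the window-size index steps 3 -> 7 -> 11 in equal thirds.
# For example (values rounded):
#   get_lr_and_ws(0)    -> (1.0,   3)
#   get_lr_and_ws(600)  -> (1.0,   7)   # x ~ 0.354
#   get_lr_and_ws(1200) -> (0.685, 11)  # x ~ 0.708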
@@ -676,7 +977,7 @@ def get_window_size_blocks(step: int):
 ########################################
 #      Training and validation         #
 ########################################

-train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, align_to_bos=True)
+train_loader = distributed_data_generator(args.train_files, args.train_batch_size, args.train_seq_len)
 training_time_ms = 0
 # start the clock
 torch.cuda.synchronize()
@@ -685,6 +986,7 @@ def get_window_size_blocks(step: int):
 train_steps = args.num_iterations
 for step in range(train_steps + 1):
     last_step = (step == train_steps)
+    lr, ws = get_lr_and_ws(step)

     # --------------- VALIDATION SECTION -----------------
     if last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
@@ -692,15 +994,14 @@ def get_window_size_blocks(step: int):
         torch.cuda.synchronize()
         training_time_ms += 1000 * (time.perf_counter() - t0)
         model.eval()
-        val_batch_size = world_size * args.val_seq_len
-        assert args.val_tokens % val_batch_size == 0
-        val_steps = args.val_tokens // val_batch_size
-        val_loader = distributed_data_generator(args.val_files, val_batch_size, align_to_bos=False)
+        assert args.val_tokens % (world_size * args.val_seq_len) == 0
+        val_steps = args.val_tokens // (world_size * args.val_seq_len)
+        val_loader = distributed_data_generator(args.val_files, world_size, args.val_seq_len, align_to_bos=False)
         val_loss = 0
         with torch.no_grad():
             for _ in range(val_steps):
                 inputs, targets = next(val_loader)
-                val_loss += model(inputs, targets, get_window_size_blocks(step))
+                val_loss += model(inputs, targets, ws, ws // 2)
         val_loss /= val_steps
         del val_loader
         dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
@@ -719,12 +1020,13 @@ def get_window_size_blocks(step: int):
             break

     # --------------- TRAINING SECTION -----------------
-    inputs, targets = next(train_loader)
-    model(inputs, targets, get_window_size_blocks(step)).backward()
+    for _ in range(grad_accum_steps):
+        inputs, targets = next(train_loader)
+        model(inputs, targets, ws, ws // 2).backward()
     # set optimization hyperparameters
     for opt in optimizers:
         for group in opt.param_groups:
-            group["lr"] = group["initial_lr"] * get_lr(step)
+            group["lr"] = group["initial_lr"] * lr
     for group in optimizer2.param_groups:
         frac = min(step / 300, 1) # momentum warmup for muon
         group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
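# The Muon momentum warmup at the end of the loop ramps linearly over the first 300
# steps and then holds: step 0 -> 0.85, step 150 -> 0.90, step >= 300 -> 0.95.
# Equivalent closed form of the two lines above, for reference:
def muon_momentum(step: int) -> float:
    frac = min(step / 300, 1)
    return (1 - frac) * 0.85 + frac * 0.95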