diff --git a/document/core/appendix/gen-index-instructions.py b/document/core/appendix/gen-index-instructions.py index 5fcefc943..3ed07b747 100755 --- a/document/core/appendix/gen-index-instructions.py +++ b/document/core/appendix/gen-index-instructions.py @@ -446,10 +446,10 @@ def Instruction(name, opcode, type=None, validation=None, execution=None, operat Instruction(r'\I16X8.\BITMASK', r'\hex{FD}~~132', r'[\V128] \to [\I32]', r'valid-simd-bitmask', r'exec-simd-bitmask'), Instruction(r'\I16X8.\NARROW\K{\_i16x8\_s}', r'\hex{FD}~~133', r'[\V128~\V128] \to [\V128]', r'valid-vbinop', r'exec-simd-narrow'), Instruction(r'\I16X8.\NARROW\K{\_i16x8\_u}', r'\hex{FD}~~134', r'[\V128~\V128] \to [\V128]', r'valid-vbinop', r'exec-simd-narrow'), - Instruction(r'\I16X8.\WIDEN\K{\_low\_i8x16\_s}', r'\hex{FD}~~135', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I16X8.\WIDEN\K{\_high\_i8x16\_s}', r'\hex{FD}~~136', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I16X8.\WIDEN\K{\_low\_i8x16\_u}', r'\hex{FD}~~137', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I16X8.\WIDEN\K{\_high\_i8x16\_u}', r'\hex{FD}~~138', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), + Instruction(r'\I16X8.\VEXTEND\K{\_low\_i8x16\_s}', r'\hex{FD}~~135', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I16X8.\VEXTEND\K{\_high\_i8x16\_s}', r'\hex{FD}~~136', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I16X8.\VEXTEND\K{\_low\_i8x16\_u}', r'\hex{FD}~~137', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I16X8.\VEXTEND\K{\_high\_i8x16\_u}', r'\hex{FD}~~138', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), Instruction(r'\I16X8.\VSHL', r'\hex{FD}~~139', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishl'), Instruction(r'\I16X8.\VSHR\K{\_s}', r'\hex{FD}~~140', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishr_s'), Instruction(r'\I16X8.\VSHR\K{\_u}', r'\hex{FD}~~141', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishr_u'), @@ -474,10 +474,10 @@ def Instruction(name, opcode, type=None, validation=None, execution=None, operat Instruction(r'\I32X4.\VNEG', r'\hex{FD}~~161', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-vunop', r'op-ineg'), Instruction(r'\I32X4.\ALLTRUE', r'\hex{FD}~~163', r'[\V128] \to [\I32]', r'valid-vitestop', r'exec-vitestop'), Instruction(r'\I32X4.\BITMASK', r'\hex{FD}~~164', r'[\V128] \to [\I32]', r'valid-simd-bitmask', r'exec-simd-bitmask'), - Instruction(r'\I32X4.\WIDEN\K{\_low\_i16x8\_s}', r'\hex{FD}~~167', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I32X4.\WIDEN\K{\_high\_i16x8\_s}', r'\hex{FD}~~168', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I32X4.\WIDEN\K{\_low\_i16x8\_u}', r'\hex{FD}~~169', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I32X4.\WIDEN\K{\_high\_i16x8\_u}', r'\hex{FD}~~170', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), + Instruction(r'\I32X4.\VEXTEND\K{\_low\_i16x8\_s}', r'\hex{FD}~~167', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I32X4.\VEXTEND\K{\_high\_i16x8\_s}', r'\hex{FD}~~168', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I32X4.\VEXTEND\K{\_low\_i16x8\_u}', r'\hex{FD}~~169', r'[\V128] \to [\V128]', r'valid-vunop', 
r'exec-simd-extend'), + Instruction(r'\I32X4.\VEXTEND\K{\_high\_i16x8\_u}', r'\hex{FD}~~170', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), Instruction(r'\I32X4.\VSHL', r'\hex{FD}~~171', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishl'), Instruction(r'\I32X4.\VSHR\K{\_s}', r'\hex{FD}~~172', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishr_s'), Instruction(r'\I32X4.\VSHR\K{\_u}', r'\hex{FD}~~173', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishr_u'), @@ -495,10 +495,10 @@ def Instruction(name, opcode, type=None, validation=None, execution=None, operat Instruction(r'\I64X2.\VABS', r'\hex{FD}~~162', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-vunop', r'op-iabs'), Instruction(r'\I64X2.\VNEG', r'\hex{FD}~~193', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-vunop', r'op-ineg'), Instruction(r'\I64X2.\BITMASK', r'\hex{FD}~~196', r'[\V128] \to [\I32]', r'valid-simd-bitmask', r'exec-simd-bitmask'), - Instruction(r'\I64X2.\WIDEN\K{\_low\_i32x4\_s}', r'\hex{FD}~~199', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I64X2.\WIDEN\K{\_high\_i32x4\_s}', r'\hex{FD}~~200', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I64X2.\WIDEN\K{\_low\_i32x4\_u}', r'\hex{FD}~~201', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), - Instruction(r'\I64X2.\WIDEN\K{\_high\_i32x4\_u}', r'\hex{FD}~~202', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-widen'), + Instruction(r'\I64X2.\VEXTEND\K{\_low\_i32x4\_s}', r'\hex{FD}~~199', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I64X2.\VEXTEND\K{\_high\_i32x4\_s}', r'\hex{FD}~~200', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I64X2.\VEXTEND\K{\_low\_i32x4\_u}', r'\hex{FD}~~201', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), + Instruction(r'\I64X2.\VEXTEND\K{\_high\_i32x4\_u}', r'\hex{FD}~~202', r'[\V128] \to [\V128]', r'valid-vunop', r'exec-simd-extend'), Instruction(r'\I64X2.\VSHL', r'\hex{FD}~~203', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishl'), Instruction(r'\I64X2.\VSHR\K{\_s}', r'\hex{FD}~~204', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishr_s'), Instruction(r'\I64X2.\VSHR\K{\_u}', r'\hex{FD}~~205', r'[\V128~\I32] \to [\V128]', r'valid-vshiftop', r'exec-vshiftop', r'op-ishr_u'), diff --git a/document/core/appendix/index-instructions.rst b/document/core/appendix/index-instructions.rst index 949bec12e..e9d3f1569 100644 --- a/document/core/appendix/index-instructions.rst +++ b/document/core/appendix/index-instructions.rst @@ -394,10 +394,10 @@ Instruction Binary Opcode Type :math:`\I16X8.\BITMASK` :math:`\hex{FD}~~132` :math:`[\V128] \to [\I32]` :ref:`validation ` :ref:`execution ` :math:`\I16X8.\NARROW\K{\_i16x8\_s}` :math:`\hex{FD}~~133` :math:`[\V128~\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` :math:`\I16X8.\NARROW\K{\_i16x8\_u}` :math:`\hex{FD}~~134` :math:`[\V128~\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I16X8.\WIDEN\K{\_low\_i8x16\_s}` :math:`\hex{FD}~~135` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I16X8.\WIDEN\K{\_high\_i8x16\_s}` :math:`\hex{FD}~~136` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I16X8.\WIDEN\K{\_low\_i8x16\_u}` :math:`\hex{FD}~~137` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I16X8.\WIDEN\K{\_high\_i8x16\_u}` 
:math:`\hex{FD}~~138` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I16X8.\VEXTEND\K{\_low\_i8x16\_s}` :math:`\hex{FD}~~135` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I16X8.\VEXTEND\K{\_high\_i8x16\_s}` :math:`\hex{FD}~~136` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I16X8.\VEXTEND\K{\_low\_i8x16\_u}` :math:`\hex{FD}~~137` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I16X8.\VEXTEND\K{\_high\_i8x16\_u}` :math:`\hex{FD}~~138` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` :math:`\I16X8.\VSHL` :math:`\hex{FD}~~139` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I16X8.\VSHR\K{\_s}` :math:`\hex{FD}~~140` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I16X8.\VSHR\K{\_u}` :math:`\hex{FD}~~141` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` @@ -422,10 +422,10 @@ Instruction Binary Opcode Type :math:`\I32X4.\VNEG` :math:`\hex{FD}~~161` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I32X4.\ALLTRUE` :math:`\hex{FD}~~163` :math:`[\V128] \to [\I32]` :ref:`validation ` :ref:`execution ` :math:`\I32X4.\BITMASK` :math:`\hex{FD}~~164` :math:`[\V128] \to [\I32]` :ref:`validation ` :ref:`execution ` -:math:`\I32X4.\WIDEN\K{\_low\_i16x8\_s}` :math:`\hex{FD}~~167` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I32X4.\WIDEN\K{\_high\_i16x8\_s}` :math:`\hex{FD}~~168` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I32X4.\WIDEN\K{\_low\_i16x8\_u}` :math:`\hex{FD}~~169` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I32X4.\WIDEN\K{\_high\_i16x8\_u}` :math:`\hex{FD}~~170` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I32X4.\VEXTEND\K{\_low\_i16x8\_s}` :math:`\hex{FD}~~167` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I32X4.\VEXTEND\K{\_high\_i16x8\_s}` :math:`\hex{FD}~~168` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I32X4.\VEXTEND\K{\_low\_i16x8\_u}` :math:`\hex{FD}~~169` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I32X4.\VEXTEND\K{\_high\_i16x8\_u}` :math:`\hex{FD}~~170` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` :math:`\I32X4.\VSHL` :math:`\hex{FD}~~171` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I32X4.\VSHR\K{\_s}` :math:`\hex{FD}~~172` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I32X4.\VSHR\K{\_u}` :math:`\hex{FD}~~173` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` @@ -443,10 +443,10 @@ Instruction Binary Opcode Type :math:`\I64X2.\VABS` :math:`\hex{FD}~~162` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I64X2.\VNEG` :math:`\hex{FD}~~193` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I64X2.\BITMASK` :math:`\hex{FD}~~196` :math:`[\V128] \to [\I32]` :ref:`validation ` :ref:`execution ` -:math:`\I64X2.\WIDEN\K{\_low\_i32x4\_s}` :math:`\hex{FD}~~199` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I64X2.\WIDEN\K{\_high\_i32x4\_s}` :math:`\hex{FD}~~200` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` 
-:math:`\I64X2.\WIDEN\K{\_low\_i32x4\_u}` :math:`\hex{FD}~~201` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` -:math:`\I64X2.\WIDEN\K{\_high\_i32x4\_u}` :math:`\hex{FD}~~202` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I64X2.\VEXTEND\K{\_low\_i32x4\_s}` :math:`\hex{FD}~~199` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I64X2.\VEXTEND\K{\_high\_i32x4\_s}` :math:`\hex{FD}~~200` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I64X2.\VEXTEND\K{\_low\_i32x4\_u}` :math:`\hex{FD}~~201` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` +:math:`\I64X2.\VEXTEND\K{\_high\_i32x4\_u}` :math:`\hex{FD}~~202` :math:`[\V128] \to [\V128]` :ref:`validation ` :ref:`execution ` :math:`\I64X2.\VSHL` :math:`\hex{FD}~~203` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I64X2.\VSHR\K{\_s}` :math:`\hex{FD}~~204` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` :math:`\I64X2.\VSHR\K{\_u}` :math:`\hex{FD}~~205` :math:`[\V128~\I32] \to [\V128]` :ref:`validation ` :ref:`execution `, :ref:`operator ` diff --git a/document/core/binary/instructions.rst b/document/core/binary/instructions.rst index 742434e5e..7a411158c 100644 --- a/document/core/binary/instructions.rst +++ b/document/core/binary/instructions.rst @@ -640,10 +640,10 @@ All other SIMD instructions are plain opcodes without any immediates. \hex{FD}~~132{:}\Bu32 &\Rightarrow& \I16X8.\BITMASK \\ &&|& \hex{FD}~~133{:}\Bu32 &\Rightarrow& \I16X8.\NARROW\K{\_i32x4\_s} \\ &&|& \hex{FD}~~134{:}\Bu32 &\Rightarrow& \I16X8.\NARROW\K{\_i32x4\_u} \\ &&|& - \hex{FD}~~135{:}\Bu32 &\Rightarrow& \I16X8.\WIDEN\K{\_low\_i8x16\_s} \\ &&|& - \hex{FD}~~136{:}\Bu32 &\Rightarrow& \I16X8.\WIDEN\K{\_high\_i8x16\_s} \\ &&|& - \hex{FD}~~137{:}\Bu32 &\Rightarrow& \I16X8.\WIDEN\K{\_low\_i8x16\_u} \\ &&|& - \hex{FD}~~138{:}\Bu32 &\Rightarrow& \I16X8.\WIDEN\K{\_high\_i8x16\_u} \\ &&|& + \hex{FD}~~135{:}\Bu32 &\Rightarrow& \I16X8.\VEXTEND\K{\_low\_i8x16\_s} \\ &&|& + \hex{FD}~~136{:}\Bu32 &\Rightarrow& \I16X8.\VEXTEND\K{\_high\_i8x16\_s} \\ &&|& + \hex{FD}~~137{:}\Bu32 &\Rightarrow& \I16X8.\VEXTEND\K{\_low\_i8x16\_u} \\ &&|& + \hex{FD}~~138{:}\Bu32 &\Rightarrow& \I16X8.\VEXTEND\K{\_high\_i8x16\_u} \\ &&|& \hex{FD}~~139{:}\Bu32 &\Rightarrow& \I16X8.\VSHL \\ &&|& \hex{FD}~~140{:}\Bu32 &\Rightarrow& \I16X8.\VSHR\K{\_s} \\ &&|& \hex{FD}~~141{:}\Bu32 &\Rightarrow& \I16X8.\VSHR\K{\_u} \\ &&|& @@ -673,10 +673,10 @@ All other SIMD instructions are plain opcodes without any immediates. 
\hex{FD}~~161{:}\Bu32 &\Rightarrow& \I32X4.\VNEG \\ &&|& \hex{FD}~~163{:}\Bu32 &\Rightarrow& \I32X4.\ALLTRUE \\ &&|& \hex{FD}~~164{:}\Bu32 &\Rightarrow& \I32X4.\BITMASK \\ &&|& - \hex{FD}~~167{:}\Bu32 &\Rightarrow& \I32X4.\WIDEN\K{\_low\_i16x8\_s} \\ &&|& - \hex{FD}~~168{:}\Bu32 &\Rightarrow& \I32X4.\WIDEN\K{\_high\_i16x8\_s} \\ &&|& - \hex{FD}~~169{:}\Bu32 &\Rightarrow& \I32X4.\WIDEN\K{\_low\_i16x8\_u} \\ &&|& - \hex{FD}~~170{:}\Bu32 &\Rightarrow& \I32X4.\WIDEN\K{\_high\_i16x8\_u} \\ &&|& + \hex{FD}~~167{:}\Bu32 &\Rightarrow& \I32X4.\VEXTEND\K{\_low\_i16x8\_s} \\ &&|& + \hex{FD}~~168{:}\Bu32 &\Rightarrow& \I32X4.\VEXTEND\K{\_high\_i16x8\_s} \\ &&|& + \hex{FD}~~169{:}\Bu32 &\Rightarrow& \I32X4.\VEXTEND\K{\_low\_i16x8\_u} \\ &&|& + \hex{FD}~~170{:}\Bu32 &\Rightarrow& \I32X4.\VEXTEND\K{\_high\_i16x8\_u} \\ &&|& \hex{FD}~~171{:}\Bu32 &\Rightarrow& \I32X4.\VSHL \\ &&|& \hex{FD}~~172{:}\Bu32 &\Rightarrow& \I32X4.\VSHR\K{\_s} \\ &&|& \hex{FD}~~173{:}\Bu32 &\Rightarrow& \I32X4.\VSHR\K{\_u} \\ &&|& @@ -699,10 +699,10 @@ All other SIMD instructions are plain opcodes without any immediates. \hex{FD}~~162{:}\Bu32 &\Rightarrow& \I64X2.\VABS \\ &&|& \hex{FD}~~193{:}\Bu32 &\Rightarrow& \I64X2.\VNEG \\ &&|& \hex{FD}~~196{:}\Bu32 &\Rightarrow& \I64X2.\BITMASK \\ &&|& - \hex{FD}~~199{:}\Bu32 &\Rightarrow& \I64X2.\WIDEN\K{\_low\_i32x4\_s} \\ &&|& - \hex{FD}~~200{:}\Bu32 &\Rightarrow& \I64X2.\WIDEN\K{\_high\_i32x4\_s} \\ &&|& - \hex{FD}~~201{:}\Bu32 &\Rightarrow& \I64X2.\WIDEN\K{\_low\_i32x4\_u} \\ &&|& - \hex{FD}~~202{:}\Bu32 &\Rightarrow& \I64X2.\WIDEN\K{\_high\_i32x4\_u} \\ &&|& + \hex{FD}~~199{:}\Bu32 &\Rightarrow& \I64X2.\VEXTEND\K{\_low\_i32x4\_s} \\ &&|& + \hex{FD}~~200{:}\Bu32 &\Rightarrow& \I64X2.\VEXTEND\K{\_high\_i32x4\_s} \\ &&|& + \hex{FD}~~201{:}\Bu32 &\Rightarrow& \I64X2.\VEXTEND\K{\_low\_i32x4\_u} \\ &&|& + \hex{FD}~~202{:}\Bu32 &\Rightarrow& \I64X2.\VEXTEND\K{\_high\_i32x4\_u} \\ &&|& \hex{FD}~~203{:}\Bu32 &\Rightarrow& \I64X2.\VSHL \\ &&|& \hex{FD}~~204{:}\Bu32 &\Rightarrow& \I64X2.\VSHR\K{\_s} \\ &&|& \hex{FD}~~205{:}\Bu32 &\Rightarrow& \I64X2.\VSHR\K{\_u} \\ &&|& diff --git a/document/core/exec/instructions.rst b/document/core/exec/instructions.rst index a0d7b0d71..9a900de96 100644 --- a/document/core/exec/instructions.rst +++ b/document/core/exec/instructions.rst @@ -671,9 +671,9 @@ SIMD instructions are defined in terms of generic numeric operators applied lane \end{array} -.. _exec-simd-widen: +.. _exec-simd-extend: -:math:`t_2\K{x}N\K{.}\WIDEN\_\K{low}\_t_1\K{x}M\_\sx` +:math:`t_2\K{x}N\K{.}\VEXTEND\_\K{low}\_t_1\K{x}M\_\sx` ..................................................... 1. Assert: due to :ref:`validation `, a value of :ref:`value type ` |V128| is on the top of the stack. @@ -689,7 +689,7 @@ SIMD instructions are defined in terms of generic numeric operators applied lane .. math:: \begin{array}{l} \begin{array}{lcl@{\qquad}l} - (\V128\K{.}\VCONST~c_1)~t_2\K{x}N\K{.}\WIDEN\_\K{low}\_t_1\K{x}M\_\sx &\stepto& (\V128\K{.}\VCONST~c) \\ + (\V128\K{.}\VCONST~c_1)~t_2\K{x}N\K{.}\VEXTEND\_\K{low}\_t_1\K{x}M\_\sx &\stepto& (\V128\K{.}\VCONST~c) \\ \end{array} \\ \qquad \begin{array}[t]{@{}r@{~}l@{}} @@ -699,7 +699,7 @@ SIMD instructions are defined in terms of generic numeric operators applied lane \end{array} -:math:`t_2\K{x}N\K{.}\WIDEN\_\K{high}\_t_1\K{x}M\_\sx` +:math:`t_2\K{x}N\K{.}\VEXTEND\_\K{high}\_t_1\K{x}M\_\sx` ...................................................... 1. Assert: due to :ref:`validation `, a value of :ref:`value type ` |V128| is on the top of the stack. 
@@ -715,7 +715,7 @@ SIMD instructions are defined in terms of generic numeric operators applied lane .. math:: \begin{array}{l} \begin{array}{lcl@{\qquad}l} - (\V128\K{.}\VCONST~c_1)~t_2\K{x}N\K{.}\WIDEN\_\K{high}\_t_1\K{x}M\_\sx &\stepto& (\V128\K{.}\VCONST~c) \\ + (\V128\K{.}\VCONST~c_1)~t_2\K{x}N\K{.}\VEXTEND\_\K{high}\_t_1\K{x}M\_\sx &\stepto& (\V128\K{.}\VCONST~c) \\ \end{array} \\ \qquad \begin{array}[t]{@{}r@{~}l@{}} diff --git a/document/core/syntax/instructions.rst b/document/core/syntax/instructions.rst index ba2376214..4dbd2b546 100644 --- a/document/core/syntax/instructions.rst +++ b/document/core/syntax/instructions.rst @@ -242,12 +242,12 @@ SIMD instructions provide basic operations over :ref:`values ` of \ishape\K{.}\BITMASK \\ &&|& \K{i8x16.}\NARROW\K{\_i16x8\_}\sx ~|~ \K{i16x8.}\NARROW\K{\_i32x4\_}\sx \\&&|& - \K{i16x8.}\WIDEN\K{\_low}\K{\_i8x16\_}\sx ~|~ - \K{i32x4.}\WIDEN\K{\_low}\K{\_i16x8\_}\sx \\&&|& - \K{i64x2.}\WIDEN\K{\_low}\K{\_i32x4\_}\sx \\&&|& - \K{i16x8.}\WIDEN\K{\_high}\K{\_i8x16\_}\sx ~|~ - \K{i32x4.}\WIDEN\K{\_high}\K{\_i16x8\_}\sx \\&&|& - \K{i64x2.}\WIDEN\K{\_high}\K{\_i32x4\_}\sx \\&&|& + \K{i16x8.}\VEXTEND\K{\_low}\K{\_i8x16\_}\sx ~|~ + \K{i32x4.}\VEXTEND\K{\_low}\K{\_i16x8\_}\sx \\&&|& + \K{i64x2.}\VEXTEND\K{\_low}\K{\_i32x4\_}\sx \\&&|& + \K{i16x8.}\VEXTEND\K{\_high}\K{\_i8x16\_}\sx ~|~ + \K{i32x4.}\VEXTEND\K{\_high}\K{\_i16x8\_}\sx \\&&|& + \K{i64x2.}\VEXTEND\K{\_high}\K{\_i32x4\_}\sx \\&&|& \ishape\K{.}\vshiftop \\&&|& \ishape\K{.}\vibinop \\&&|& \K{i8x16.}\viminmaxop ~|~ @@ -371,7 +371,7 @@ For the other SIMD instructions, the use of two's complement for the signed inte .. _syntax-vunop: .. _syntax-vbinop: -.. _syntax-vwiden: +.. _syntax-vextend: .. _syntax-vextmul: Conventions @@ -396,9 +396,9 @@ Occasionally, it is convenient to group operators together according to the foll \production{conversion operator} & \vcvtop &::=& \VTRUNC\K{\_sat} ~|~ \VCONVERT \\ - \production{widen operator} & \vwiden &::=& - \WIDEN\K{\_low\_}\shape\K{\_}\sx ~|~ - \WIDEN\K{\_high\_}\shape\K{\_}\sx \\ + \production{extend operator} & \vextend &::=& + \VEXTEND\K{\_low\_}\shape\K{\_}\sx ~|~ + \VEXTEND\K{\_high\_}\shape\K{\_}\sx \\ \production{extmul operator} & \vextmul &::=& \EXTMUL\K{\_low\_}\ishape\K{\_}\sx ~|~ \EXTMUL\K{\_high\_}\ishape\K{\_}\sx \\ diff --git a/document/core/text/instructions.rst b/document/core/text/instructions.rst index 2df52d3f8..5158d4970 100644 --- a/document/core/text/instructions.rst +++ b/document/core/text/instructions.rst @@ -673,10 +673,10 @@ SIMD const instructions have a mandatory :ref:`shape ` descri \text{i16x8.bitmask} &\Rightarrow& \I16X8.\BITMASK\\ &&|& \text{i16x8.narrow\_i32x4\_s} &\Rightarrow& \I16X8.\NARROW\K{\_i32x4\_s}\\ &&|& \text{i16x8.narrow\_i32x4\_u} &\Rightarrow& \I16X8.\NARROW\K{\_i32x4\_u}\\ &&|& - \text{i16x8.widen\_low\_i8x16\_s} &\Rightarrow& \I16X8.\WIDEN\K{\_low\_i8x16\_s}\\ &&|& - \text{i16x8.widen\_high\_i8x16\_s} &\Rightarrow& \I16X8.\WIDEN\K{\_high\_i8x16\_s}\\ &&|& - \text{i16x8.widen\_low\_i8x16\_u} &\Rightarrow& \I16X8.\WIDEN\K{\_low\_i8x16\_u}\\ &&|& - \text{i16x8.widen\_high\_i8x16\_u} &\Rightarrow& \I16X8.\WIDEN\K{\_high\_i8x16\_u}\\ &&|& + \text{i16x8.extend\_low\_i8x16\_s} &\Rightarrow& \I16X8.\VEXTEND\K{\_low\_i8x16\_s}\\ &&|& + \text{i16x8.extend\_high\_i8x16\_s} &\Rightarrow& \I16X8.\VEXTEND\K{\_high\_i8x16\_s}\\ &&|& + \text{i16x8.extend\_low\_i8x16\_u} &\Rightarrow& \I16X8.\VEXTEND\K{\_low\_i8x16\_u}\\ &&|& + \text{i16x8.extend\_high\_i8x16\_u} &\Rightarrow& \I16X8.\VEXTEND\K{\_high\_i8x16\_u}\\ 
&&|& \text{i16x8.shl} &\Rightarrow& \I16X8.\VSHL\\ &&|& \text{i16x8.shr\_s} &\Rightarrow& \I16X8.\VSHR\K{\_s}\\ &&|& \text{i16x8.shr\_u} &\Rightarrow& \I16X8.\VSHR\K{\_u}\\ &&|& @@ -706,10 +706,10 @@ SIMD const instructions have a mandatory :ref:`shape ` descri \text{i32x4.neg} &\Rightarrow& \I32X4.\VNEG\\ &&|& \text{i32x4.all\_true} &\Rightarrow& \I32X4.\ALLTRUE\\ &&|& \text{i32x4.bitmask} &\Rightarrow& \I32X4.\BITMASK\\ &&|& - \text{i32x4.widen\_low\_i16x8\_s} &\Rightarrow& \I32X4.\WIDEN\K{\_low\_i16x8\_s}\\ &&|& - \text{i32x4.widen\_high\_i16x8\_s} &\Rightarrow& \I32X4.\WIDEN\K{\_high\_i16x8\_s}\\ &&|& - \text{i32x4.widen\_low\_i16x8\_u} &\Rightarrow& \I32X4.\WIDEN\K{\_low\_i16x8\_u}\\ &&|& - \text{i32x4.widen\_high\_i16x8\_u} &\Rightarrow& \I32X4.\WIDEN\K{\_high\_i16x8\_u}\\ &&|& + \text{i32x4.extend\_low\_i16x8\_s} &\Rightarrow& \I32X4.\VEXTEND\K{\_low\_i16x8\_s}\\ &&|& + \text{i32x4.extend\_high\_i16x8\_s} &\Rightarrow& \I32X4.\VEXTEND\K{\_high\_i16x8\_s}\\ &&|& + \text{i32x4.extend\_low\_i16x8\_u} &\Rightarrow& \I32X4.\VEXTEND\K{\_low\_i16x8\_u}\\ &&|& + \text{i32x4.extend\_high\_i16x8\_u} &\Rightarrow& \I32X4.\VEXTEND\K{\_high\_i16x8\_u}\\ &&|& \text{i32x4.shl} &\Rightarrow& \I32X4.\VSHL\\ &&|& \text{i32x4.shr\_s} &\Rightarrow& \I32X4.\VSHR\K{\_s}\\ &&|& \text{i32x4.shr\_u} &\Rightarrow& \I32X4.\VSHR\K{\_u}\\ &&|& @@ -733,10 +733,10 @@ SIMD const instructions have a mandatory :ref:`shape ` descri \text{i64x2.neg} &\Rightarrow& \I64X2.\VNEG\\ &&|& \text{i64x2.all\_true} &\Rightarrow& \I64X2.\ALLTRUE\\ &&|& \text{i64x2.bitmask} &\Rightarrow& \I64X2.\BITMASK\\ &&|& - \text{i64x2.widen\_low\_i32x4\_s} &\Rightarrow& \I64X2.\WIDEN\K{\_low\_i32x4\_s} \\ &&|& - \text{i64x2.widen\_high\_i32x4\_s} &\Rightarrow& \I64X2.\WIDEN\K{\_high\_i32x4\_s} \\ &&|& - \text{i64x2.widen\_low\_i32x4\_u} &\Rightarrow& \I64X2.\WIDEN\K{\_low\_i32x4\_u} \\ &&|& - \text{i64x2.widen\_high\_i32x4\_u} &\Rightarrow& \I64X2.\WIDEN\K{\_high\_i32x4\_u} \\ &&|& + \text{i64x2.extend\_low\_i32x4\_s} &\Rightarrow& \I64X2.\VEXTEND\K{\_low\_i32x4\_s} \\ &&|& + \text{i64x2.extend\_high\_i32x4\_s} &\Rightarrow& \I64X2.\VEXTEND\K{\_high\_i32x4\_s} \\ &&|& + \text{i64x2.extend\_low\_i32x4\_u} &\Rightarrow& \I64X2.\VEXTEND\K{\_low\_i32x4\_u} \\ &&|& + \text{i64x2.extend\_high\_i32x4\_u} &\Rightarrow& \I64X2.\VEXTEND\K{\_high\_i32x4\_u} \\ &&|& \text{i64x2.shl} &\Rightarrow& \I64X2.\VSHL\\ &&|& \text{i64x2.shr\_s} &\Rightarrow& \I64X2.\VSHR\K{\_s}\\ &&|& \text{i64x2.shr\_u} &\Rightarrow& \I64X2.\VSHR\K{\_u}\\ &&|& diff --git a/document/core/util/macros.def b/document/core/util/macros.def index 2822712b6..6f61458f8 100644 --- a/document/core/util/macros.def +++ b/document/core/util/macros.def @@ -423,7 +423,7 @@ .. |VPMIN| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{pmin}} .. |VPMAX| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{pmax}} .. |NARROW| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{narrow}} -.. |WIDEN| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{widen}} +.. |VEXTEND| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{extend}} .. |AVGR| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{avgr}} .. |EXTMUL| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{extmul}} .. |VTRUNC| mathdef:: \xref{syntax/instructions}{syntax-instr-simd}{\K{trunc}} @@ -456,7 +456,7 @@ .. |vunop| mathdef:: \xref{syntax/instructions}{syntax-vunop}{\X{vunop}} .. |vbinop| mathdef:: \xref{syntax/instructions}{syntax-vbinop}{\X{vbinop}} .. 
|vternop| mathdef:: \xref{syntax/instructions}{syntax-vternop}{\X{vternop}} -.. |vwiden| mathdef:: \xref{syntax/instructions}{syntax-vwiden}{\X{vwiden}} +.. |vextend| mathdef:: \xref{syntax/instructions}{syntax-vextend}{\X{vextend}} .. |vcvtop| mathdef:: \xref{syntax/instructions}{syntax-vcvtop}{\X{vcvtop}} .. |vextmul| mathdef:: \xref{syntax/instructions}{syntax-vextmul}{\X{vextmul}} diff --git a/document/core/valid/instructions.rst b/document/core/valid/instructions.rst index 5dd0a652e..5a8602d64 100644 --- a/document/core/valid/instructions.rst +++ b/document/core/valid/instructions.rst @@ -407,17 +407,17 @@ We also define an auxiliary function to get number of packed numeric types in a } -.. _valid-vwiden: +.. _valid-vextend: -:math:`\shape\K{.}\vwiden\K{\_}\shape\K{\_}\sx` -............................................... +:math:`\shape\K{.}\vextend\K{\_}\shape\K{\_}\sx` +................................................ * The instruction is valid with type :math:`[\V128] \to [\V128]`. .. math:: \frac{ }{ - C \vdashinstr \shape\K{.}\vwiden\K{\_}\shape\K{\_}\sx : [\V128] \to [\V128] + C \vdashinstr \shape\K{.}\vextend\K{\_}\shape\K{\_}\sx : [\V128] \to [\V128] } diff --git a/proposals/simd/BinarySIMD.md b/proposals/simd/BinarySIMD.md index 196f462f3..ae63b844f 100644 --- a/proposals/simd/BinarySIMD.md +++ b/proposals/simd/BinarySIMD.md @@ -141,10 +141,10 @@ For example, `ImmLaneIdx16` is a byte with values in the range 0-15 (inclusive). | `i16x8.bitmask` | `0x84`| - | | `i16x8.narrow_i32x4_s` | `0x85`| - | | `i16x8.narrow_i32x4_u` | `0x86`| - | -| `i16x8.widen_low_i8x16_s` | `0x87`| - | -| `i16x8.widen_high_i8x16_s` | `0x88`| - | -| `i16x8.widen_low_i8x16_u` | `0x89`| - | -| `i16x8.widen_high_i8x16_u` | `0x8a`| - | +| `i16x8.extend_low_i8x16_s` | `0x87`| - | +| `i16x8.extend_high_i8x16_s` | `0x88`| - | +| `i16x8.extend_low_i8x16_u` | `0x89`| - | +| `i16x8.extend_high_i8x16_u` | `0x8a`| - | | `i16x8.shl` | `0x8b`| - | | `i16x8.shr_s` | `0x8c`| - | | `i16x8.shr_u` | `0x8d`| - | @@ -164,10 +164,10 @@ For example, `ImmLaneIdx16` is a byte with values in the range 0-15 (inclusive). | `i32x4.neg` | `0xa1`| - | | `i32x4.all_true` | `0xa3`| - | | `i32x4.bitmask` | `0xa4`| - | -| `i32x4.widen_low_i16x8_s` | `0xa7`| - | -| `i32x4.widen_high_i16x8_s` | `0xa8`| - | -| `i32x4.widen_low_i16x8_u` | `0xa9`| - | -| `i32x4.widen_high_i16x8_u` | `0xaa`| - | +| `i32x4.extend_low_i16x8_s` | `0xa7`| - | +| `i32x4.extend_high_i16x8_s` | `0xa8`| - | +| `i32x4.extend_low_i16x8_u` | `0xa9`| - | +| `i32x4.extend_high_i16x8_u` | `0xaa`| - | | `i32x4.shl` | `0xab`| - | | `i32x4.shr_s` | `0xac`| - | | `i32x4.shr_u` | `0xad`| - | @@ -182,10 +182,10 @@ For example, `ImmLaneIdx16` is a byte with values in the range 0-15 (inclusive). 
| `i64x2.abs` | `0xc0`| - | | `i64x2.neg` | `0xc1`| - | | `i64x2.bitmask` | `0xc4`| - | -| `i64x2.widen_low_i32x4_s` | `0xc7`| - | -| `i64x2.widen_high_i32x4_s` | `0xc8`| - | -| `i64x2.widen_low_i32x4_u` | `0xc9`| - | -| `i64x2.widen_high_i32x4_u` | `0xca`| - | +| `i64x2.extend_low_i32x4_s` | `0xc7`| - | +| `i64x2.extend_high_i32x4_s` | `0xc8`| - | +| `i64x2.extend_low_i32x4_u` | `0xc9`| - | +| `i64x2.extend_high_i32x4_u` | `0xca`| - | | `i64x2.shl` | `0xcb`| - | | `i64x2.shr_s` | `0xcc`| - | | `i64x2.shr_u` | `0xcd`| - | diff --git a/proposals/simd/ImplementationStatus.md b/proposals/simd/ImplementationStatus.md index 32513d1e1..2f24cf628 100644 --- a/proposals/simd/ImplementationStatus.md +++ b/proposals/simd/ImplementationStatus.md @@ -109,10 +109,10 @@ | `i16x8.bitmask` | `-munimplemented-simd128` | :heavy_check_mark: [6] | | | :heavy_check_mark: | | `i16x8.narrow_i32x4_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | | `i16x8.narrow_i32x4_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i16x8.widen_low_i8x16_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i16x8.widen_high_i8x16_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i16x8.widen_low_i8x16_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i16x8.widen_high_i8x16_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i16x8.extend_low_i8x16_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i16x8.extend_high_i8x16_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i16x8.extend_low_i8x16_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i16x8.extend_high_i8x16_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | | `i16x8.shl` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i16x8.shr_s` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i16x8.shr_u` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | @@ -133,10 +133,10 @@ | `i32x4.neg` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i32x4.all_true` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i32x4.bitmask` | `-munimplemented-simd128` | :heavy_check_mark: [6] | | | :heavy_check_mark: | -| `i32x4.widen_low_i16x8_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i32x4.widen_high_i16x8_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i32x4.widen_low_i16x8_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i32x4.widen_high_i16x8_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i32x4.extend_low_i16x8_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i32x4.extend_high_i16x8_s` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i32x4.extend_low_i16x8_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | +| `i32x4.extend_high_i16x8_u` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | | `i32x4.shl` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i32x4.shr_s` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i32x4.shr_u` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | @@ -159,10 +159,10 @@ | `i64x2.add` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i64x2.sub` | `-msimd128` | 
:heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `i64x2.mul` | `-msimd128` | :heavy_check_mark: | | | :heavy_check_mark: | -| `i64x2.widen_low_i32x4_s` | | | | | | -| `i64x2.widen_high_i32x4_s` | | | | | | -| `i64x2.widen_low_i32x4_u` | | | | | | -| `i64x2.widen_high_i32x4_u` | | | | | | +| `i64x2.extend_low_i32x4_s` | | | | | | +| `i64x2.extend_high_i32x4_s` | | | | | | +| `i64x2.extend_low_i32x4_u` | | | | | | +| `i64x2.extend_high_i32x4_u` | | | | | | | `f32x4.abs` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `f32x4.neg` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | `f32x4.sqrt` | `-msimd128` | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | diff --git a/proposals/simd/NewOpcodes.md b/proposals/simd/NewOpcodes.md index a423374e9..49b1232a4 100644 --- a/proposals/simd/NewOpcodes.md +++ b/proposals/simd/NewOpcodes.md @@ -104,10 +104,10 @@ | i8x16.bitmask | 0x64 | i16x8.bitmask | 0x84 | i32x4.bitmask | 0xa4 | i64x2.bitmask | 0xc4 | | i8x16.narrow_i16x8_s | 0x65 | i16x8.narrow_i32x4_s | 0x85 | ---- narrow ---- | 0xa5 | ------------- | 0xc5 | | i8x16.narrow_i16x8_u | 0x66 | i16x8.narrow_i32x4_u | 0x86 | ---- narrow ---- | 0xa6 | ------------- | 0xc6 | -| f32x4.ceil | 0x67 | i16x8.widen_low_i8x16_s | 0x87 | i32x4.widen_low_i16x8_s | 0xa7 | i64x2.widen_low_i32x4_s | 0xc7 | -| f32x4.floor | 0x68 | i16x8.widen_high_i8x16_s | 0x88 | i32x4.widen_high_i16x8_s | 0xa8 | i64x2.widen_high_i32x4_s | 0xc8 | -| f32x4.trunc | 0x69 | i16x8.widen_low_i8x16_u | 0x89 | i32x4.widen_low_i16x8_u | 0xa9 | i64x2.widen_low_i32x4_u | 0xc9 | -| f32x4.nearest | 0x6a | i16x8.widen_high_i8x16_u | 0x8a | i32x4.widen_high_i16x8_u | 0xaa | i64x2.widen_high_i32x4_u | 0xca | +| f32x4.ceil | 0x67 | i16x8.extend_low_i8x16_s | 0x87 | i32x4.extend_low_i16x8_s | 0xa7 | i64x2.extend_low_i32x4_s | 0xc7 | +| f32x4.floor | 0x68 | i16x8.extend_high_i8x16_s | 0x88 | i32x4.extend_high_i16x8_s | 0xa8 | i64x2.extend_high_i32x4_s | 0xc8 | +| f32x4.trunc | 0x69 | i16x8.extend_low_i8x16_u | 0x89 | i32x4.extend_low_i16x8_u | 0xa9 | i64x2.extend_low_i32x4_u | 0xc9 | +| f32x4.nearest | 0x6a | i16x8.extend_high_i8x16_u | 0x8a | i32x4.extend_high_i16x8_u | 0xaa | i64x2.extend_high_i32x4_u | 0xca | | i8x16.shl | 0x6b | i16x8.shl | 0x8b | i32x4.shl | 0xab | i64x2.shl | 0xcb | | i8x16.shr_s | 0x6c | i16x8.shr_s | 0x8c | i32x4.shr_s | 0xac | i64x2.shr_s | 0xcc | | i8x16.shr_u | 0x6d | i16x8.shr_u | 0x8d | i32x4.shr_u | 0xad | i64x2.shr_u | 0xcd | diff --git a/proposals/simd/SIMD.md b/proposals/simd/SIMD.md index f27ac5309..a463d101f 100644 --- a/proposals/simd/SIMD.md +++ b/proposals/simd/SIMD.md @@ -435,18 +435,18 @@ def S.neg(a): Lane-wise integer extended multiplication producing twice wider result than the inputs. These instructions provide a more performant equivalent to the following composite operations: -- `i16x8.extmul_low_i8x16_s(a, b)` is equivalent to `i16x8.mul(i16x8.widen_low_i8x16_s(a), i16x8.widen_low_i8x16_s(b))`. -- `i16x8.extmul_high_i8x16_s(a, b)` is equivalent to `i16x8.mul(i16x8.widen_high_i8x16_s(a), i16x8.widen_high_i8x16_s(b))`. -- `i16x8.extmul_low_i8x16_u(a, b)` is equivalent to `i16x8.mul(i16x8.widen_low_i8x16_u(a), i16x8.widen_low_i8x16_u(b))`. -- `i16x8.extmul_high_i8x16_u(a, b)` is equivalent to `i16x8.mul(i16x8.widen_high_i8x16_u(a), i16x8.widen_high_i8x16_u(b))`. -- `i32x4.extmul_low_i16x8_s(a, b)` is equivalent to `i32x4.mul(i32x4.widen_low_i16x8_s(a), i32x4.widen_low_i16x8_s(b))`. 
-- `i32x4.extmul_high_i16x8_s(a, b)` is equivalent to `i32x4.mul(i32x4.widen_high_i16x8_s(a), i32x4.widen_high_i16x8_s(b))`. -- `i32x4.extmul_low_i16x8_u(a, b)` is equivalent to `i32x4.mul(i32x4.widen_low_i16x8_u(a), i32x4.widen_low_i16x8_u(b))`. -- `i32x4.extmul_high_i16x8_u(a, b)` is equivalent to `i32x4.mul(i32x4.widen_high_i16x8_u(a), i32x4.widen_high_i16x8_u(b))`. -- `i64x2.extmul_low_i32x4_s(a, b)` is equivalent to `i64x2.mul(i64x2.widen_low_i32x4_s(a), i64x2.widen_low_i32x4_s(b))`. -- `i64x2.extmul_high_i32x4_s(a, b)` is equivalent to `i64x2.mul(i64x2.widen_high_i32x4_s(a), i64x2.widen_high_i32x4_s(b))`. -- `i64x2.extmul_low_i32x4_u(a, b)` is equivalent to `i64x2.mul(i64x2.widen_low_i32x4_u(a), i64x2.widen_low_i32x4_u(b))`. -- `i64x2.extmul_high_i32x4_u(a, b)` is equivalent to `i64x2.mul(i64x2.widen_high_i32x4_u(a), i64x2.widen_high_i32x4_u(b))`. +- `i16x8.extmul_low_i8x16_s(a, b)` is equivalent to `i16x8.mul(i16x8.extend_low_i8x16_s(a), i16x8.extend_low_i8x16_s(b))`. +- `i16x8.extmul_high_i8x16_s(a, b)` is equivalent to `i16x8.mul(i16x8.extend_high_i8x16_s(a), i16x8.extend_high_i8x16_s(b))`. +- `i16x8.extmul_low_i8x16_u(a, b)` is equivalent to `i16x8.mul(i16x8.extend_low_i8x16_u(a), i16x8.extend_low_i8x16_u(b))`. +- `i16x8.extmul_high_i8x16_u(a, b)` is equivalent to `i16x8.mul(i16x8.extend_high_i8x16_u(a), i16x8.extend_high_i8x16_u(b))`. +- `i32x4.extmul_low_i16x8_s(a, b)` is equivalent to `i32x4.mul(i32x4.extend_low_i16x8_s(a), i32x4.extend_low_i16x8_s(b))`. +- `i32x4.extmul_high_i16x8_s(a, b)` is equivalent to `i32x4.mul(i32x4.extend_high_i16x8_s(a), i32x4.extend_high_i16x8_s(b))`. +- `i32x4.extmul_low_i16x8_u(a, b)` is equivalent to `i32x4.mul(i32x4.extend_low_i16x8_u(a), i32x4.extend_low_i16x8_u(b))`. +- `i32x4.extmul_high_i16x8_u(a, b)` is equivalent to `i32x4.mul(i32x4.extend_high_i16x8_u(a), i32x4.extend_high_i16x8_u(b))`. +- `i64x2.extmul_low_i32x4_s(a, b)` is equivalent to `i64x2.mul(i64x2.extend_low_i32x4_s(a), i64x2.extend_low_i32x4_s(b))`. +- `i64x2.extmul_high_i32x4_s(a, b)` is equivalent to `i64x2.mul(i64x2.extend_high_i32x4_s(a), i64x2.extend_high_i32x4_s(b))`. +- `i64x2.extmul_low_i32x4_u(a, b)` is equivalent to `i64x2.mul(i64x2.extend_low_i32x4_u(a), i64x2.extend_low_i32x4_u(b))`. +- `i64x2.extmul_high_i32x4_u(a, b)` is equivalent to `i64x2.mul(i64x2.extend_high_i32x4_u(a), i64x2.extend_high_i32x4_u(b))`. 
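For illustration, the equivalences listed above can be modeled directly on plain Python lists. This is a minimal sketch, not the proposal's normative pseudocode; the helper names (`sext8`, `i16x8_extend_low_i8x16_s`, `i16x8_extmul_low_i8x16_s`) and the list-of-ints lane encoding are assumptions made only for this example.

```python
# Minimal model of one extmul/extend equivalence (i16x8.extmul_low_i8x16_s).
# Lanes are plain Python ints in their unsigned 8-bit encoding; helper names are illustrative.

def sext8(x):
    # Reinterpret an 8-bit lane value (0..255) as a signed integer (-128..127).
    return x - 256 if x >= 128 else x

def i16x8_extend_low_i8x16_s(a):
    # Sign-extend the low 8 lanes of a 16-lane i8 vector into 8 i16 lanes.
    return [sext8(a[i]) for i in range(8)]

def i16x8_extmul_low_i8x16_s(a, b):
    # Extended multiply: extend both low halves, then multiply lane-wise.
    # The product of two 8-bit values always fits in 16 bits, so no wrapping occurs.
    lo_a = i16x8_extend_low_i8x16_s(a)
    lo_b = i16x8_extend_low_i8x16_s(b)
    return [lo_a[i] * lo_b[i] for i in range(8)]

a = [0x80, 2, 3, 0xFA, 0, 1, 127, 64] + [0] * 8   # 16 i8 lanes
b = [2] * 16
print(i16x8_extmul_low_i8x16_s(a, b))  # lane 0: (-128) * 2 = -256
```

The same pattern carries over to the i32x4 and i64x2 shapes; only the lane count and the extension width change.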
### Extended pairwise integer addition * `i16x8.extadd_pairwise_i8x16_s(a: v128) -> v128` @@ -1136,43 +1136,43 @@ def S.narrow_T_u(a, b): return result ``` -### Integer to integer widening -* `i16x8.widen_low_i8x16_s(a: v128) -> v128` -* `i16x8.widen_high_i8x16_s(a: v128) -> v128` -* `i16x8.widen_low_i8x16_u(a: v128) -> v128` -* `i16x8.widen_high_i8x16_u(a: v128) -> v128` -* `i32x4.widen_low_i16x8_s(a: v128) -> v128` -* `i32x4.widen_high_i16x8_s(a: v128) -> v128` -* `i32x4.widen_low_i16x8_u(a: v128) -> v128` -* `i32x4.widen_high_i16x8_u(a: v128) -> v128` -* `i64x2.widen_low_i32x4_s(a: v128) -> v128` -* `i64x2.widen_high_i32x4_s(a: v128) -> v128` -* `i64x2.widen_low_i32x4_u(a: v128) -> v128` -* `i64x2.widen_high_i32x4_u(a: v128) -> v128` +### Integer to integer extension +* `i16x8.extend_low_i8x16_s(a: v128) -> v128` +* `i16x8.extend_high_i8x16_s(a: v128) -> v128` +* `i16x8.extend_low_i8x16_u(a: v128) -> v128` +* `i16x8.extend_high_i8x16_u(a: v128) -> v128` +* `i32x4.extend_low_i16x8_s(a: v128) -> v128` +* `i32x4.extend_high_i16x8_s(a: v128) -> v128` +* `i32x4.extend_low_i16x8_u(a: v128) -> v128` +* `i32x4.extend_high_i16x8_u(a: v128) -> v128` +* `i64x2.extend_low_i32x4_s(a: v128) -> v128` +* `i64x2.extend_high_i32x4_s(a: v128) -> v128` +* `i64x2.extend_low_i32x4_u(a: v128) -> v128` +* `i64x2.extend_high_i32x4_u(a: v128) -> v128` Converts low or high half of the smaller lane vector to a larger lane vector, sign extended or zero (unsigned) extended. ```python -def S.widen_low_T(ext, a): +def S.extend_low_T(ext, a): result = S.New() for i in range(S.Lanes): result[i] = ext(a[i]) -def S.widen_high_T(ext, a): +def S.extend_high_T(ext, a): result = S.New() for i in range(S.Lanes): result[i] = ext(a[S.Lanes + i]) -def S.widen_low_T_s(a): - return S.widen_low_T(Sext, a) +def S.extend_low_T_s(a): + return S.extend_low_T(Sext, a) -def S.widen_high_T_s(a): - return S.widen_high_T(Sext, a) +def S.extend_high_T_s(a): + return S.extend_high_T(Sext, a) -def S.widen_low_T_u(a): - return S.widen_low_T(Zext, a) +def S.extend_low_T_u(a): + return S.extend_low_T(Zext, a) -def S.widen_high_T_u(a): - return S.widen_high_T(Zext, a) +def S.extend_high_T_u(a): + return S.extend_high_T(Zext, a) ```
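As a complement to the pseudocode above, here is a small runnable sketch of the low/high split, assuming lanes are modeled as plain Python ints in their unsigned byte encoding; the names `sext8`, `zext8`, `extend_low`, and `extend_high` are illustrative stand-ins for the `S.extend_*` helpers, not spec-defined identifiers.

```python
# Runnable model of i16x8.extend_{low,high}_i8x16_{s,u} on a 16-lane byte vector.

def sext8(x):
    return x - 256 if x >= 128 else x   # sign-extend an 8-bit lane

def zext8(x):
    return x                            # zero extension is the identity on 0..255

def extend_low(ext, a):
    return [ext(a[i]) for i in range(8)]        # lanes 0..7 of the input

def extend_high(ext, a):
    return [ext(a[8 + i]) for i in range(8)]    # lanes 8..15 of the input

v = list(range(120, 136))      # lane values 120..135 in unsigned encoding
print(extend_low(sext8, v))    # [120, 121, ..., 127]
print(extend_high(sext8, v))   # [-128, -127, ..., -121]
print(extend_high(zext8, v))   # [128, 129, ..., 135]
```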