Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/coreclr/jit/hwintrinsic.h
Original file line number Diff line number Diff line change
Expand Up @@ -1159,12 +1159,16 @@ struct HWIntrinsicInfo
{
case NI_Sve_And:
return NI_Sve_And_Predicates;
case NI_Sve_AndNot:
return NI_Sve_AndNot_Predicates;
case NI_Sve_BitwiseClear:
return NI_Sve_BitwiseClear_Predicates;
case NI_Sve_Xor:
return NI_Sve_Xor_Predicates;
case NI_Sve_Or:
return NI_Sve_Or_Predicates;
case NI_Sve_OrNot:
return NI_Sve_OrNot_Predicates;
case NI_Sve_ZipHigh:
return NI_Sve_ZipHigh_Predicates;
case NI_Sve_ZipLow:
Expand Down
16 changes: 16 additions & 0 deletions src/coreclr/jit/hwintrinsiccodegenarm64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -702,9 +702,25 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
break;
}

case NI_Sve_AndNot:
// Emit an unpredicated AND (to avoid the RMW constraints), then a predicated NOT
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is that correct?

What happens for the following code where the inputs to AndNot are not masks, but vectors?

vector1 = Sve.Add() 
vector2 = Sve.Add() 
Sve.ConditionalSelect(mask, Sve.AndNot(vector1, vector2), op2)

I am hoping to see something like this:

and z4, p3/M, vector1, vector2
not result, p3/M, z4

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What happens for following code where inputs to AndNot are not masks, but vectors?

    static Vector<short> AndNot(Vector<short> v1, Vector<short> v2, Vector<short> v3, Vector<short> v4)
    {
        return Sve.ConditionalSelect(v1, Sve.AndNot(v2, v3), v4);
    }
IN0001: 000008      ptrue   p0.h
IN0002: 00000C      cmpne   p0.h, p0/z, z0.h, #0
IN0003: 000010      ptrue   p1.h
IN0004: 000014      and     z0.h, z1.h, z2.h
IN0005: 000018      not     z0.h, p1/m, z0.h
IN0006: 00001C      sel     z0.h, p0, z0.h, z3.h
    static Vector<short> AndNot(Vector<short> v1, Vector<short> v2, Vector<short> v3)
    {
        return Sve.ConditionalSelect(v1, Sve.AndNot(v2, v3), Vector<short>.Zero);
    }
IN0001: 000008      ptrue   p0.h
IN0002: 00000C      cmpne   p0.h, p0/z, z0.h, #0
IN0003: 000010      movi    v0.4s, #0
IN0004: 000014      and     z0.h, z1.h, z2.h
IN0005: 000018      not     z0.h, p0/m, z0.h

And I think that the second one is incorrect — the false lanes will contain the result of and(vec1, vec2).

It would be correct if it used the predicated and; I just need to handle the RMW constraint.

However - depending on the result of #115566 (comment) we may want to pause this PR.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think even the first version is correct. Ideally, z0 (the destination of AndNot) should have all of its inactive lanes left untouched, but with what we are currently generating, the inactive lanes of the destination will contain the result of the And operation.

GetEmitter()->emitIns_R_R_R(INS_sve_and, emitSize, targetReg, embMaskOp1Reg,
embMaskOp2Reg, opt);
GetEmitter()->emitIns_R_R_R(INS_sve_not, emitSize, targetReg, maskReg, targetReg, opt);
break;

case NI_Sve_OrNot:
// Emit an unpredicated OR (to avoid the RMW constraints), then a predicated NOT
GetEmitter()->emitIns_R_R_R(INS_sve_orr, emitSize, targetReg, embMaskOp1Reg,
embMaskOp2Reg, opt);
GetEmitter()->emitIns_R_R_R(INS_sve_not, emitSize, targetReg, maskReg, targetReg, opt);
break;

case NI_Sve_And_Predicates:
case NI_Sve_AndNot_Predicates:
case NI_Sve_BitwiseClear_Predicates:
case NI_Sve_Or_Predicates:
case NI_Sve_OrNot_Predicates:
case NI_Sve_Xor_Predicates:
GetEmitter()->emitIns_R_R_R_R(insEmbMask, emitSize, targetReg, maskReg, embMaskOp1Reg,
embMaskOp2Reg, INS_OPTS_SCALABLE_B);
Expand Down
4 changes: 4 additions & 0 deletions src/coreclr/jit/hwintrinsiclistarm64sve.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ HARDWARE_INTRINSIC(Sve, AddSaturate,
HARDWARE_INTRINSIC(Sve, AddSequentialAcross, -1, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fadda, INS_sve_fadda}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_ReduceOperation)
HARDWARE_INTRINSIC(Sve, And, -1, -1, {INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_OptionalEmbeddedMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_LowMaskedOperation|HW_Flag_HasAllMaskVariant)
HARDWARE_INTRINSIC(Sve, AndAcross, -1, -1, {INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ReduceOperation)
HARDWARE_INTRINSIC(Sve, AndNot, -1, -1, {INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_HasAllMaskVariant)
HARDWARE_INTRINSIC(Sve, BitwiseClear, -1, -1, {INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_OptionalEmbeddedMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_LowMaskedOperation|HW_Flag_HasAllMaskVariant)
HARDWARE_INTRINSIC(Sve, BooleanNot, -1, -1, {INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, Compact, -1, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_compact, INS_sve_compact, INS_sve_compact, INS_sve_compact, INS_sve_compact, INS_sve_compact}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
Expand Down Expand Up @@ -230,6 +231,7 @@ HARDWARE_INTRINSIC(Sve, Negate,
HARDWARE_INTRINSIC(Sve, Not, -1, -1, {INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation)
HARDWARE_INTRINSIC(Sve, Or, -1, -1, {INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_OptionalEmbeddedMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_LowMaskedOperation|HW_Flag_HasAllMaskVariant)
HARDWARE_INTRINSIC(Sve, OrAcross, -1, -1, {INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_ReduceOperation)
HARDWARE_INTRINSIC(Sve, OrNot, -1, -1, {INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_ZeroingMaskedOperation|HW_Flag_HasAllMaskVariant)
HARDWARE_INTRINSIC(Sve, PopCount, -1, -1, {INS_sve_cnt, INS_sve_cnt, INS_sve_cnt, INS_sve_cnt, INS_sve_cnt, INS_sve_cnt, INS_sve_cnt, INS_sve_cnt, INS_sve_cnt, INS_sve_cnt}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, Prefetch16Bit, -1, 3, {INS_invalid, INS_invalid, INS_invalid, INS_sve_prfh, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_Special, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_BaseTypeFromFirstArg|HW_Flag_HasImmediateOperand|HW_Flag_SpecialSideEffect_Other)
HARDWARE_INTRINSIC(Sve, Prefetch32Bit, -1, 3, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_prfw, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_Special, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_BaseTypeFromFirstArg|HW_Flag_HasImmediateOperand|HW_Flag_SpecialSideEffect_Other)
Expand Down Expand Up @@ -353,8 +355,10 @@ HARDWARE_INTRINSIC(Sve, StoreAndZipx3,
HARDWARE_INTRINSIC(Sve, StoreAndZipx4, -1, 3, {INS_sve_st4b, INS_sve_st4b, INS_sve_st4h, INS_sve_st4h, INS_sve_st4w, INS_sve_st4w, INS_sve_st4d, INS_sve_st4d, INS_sve_st4w, INS_sve_st4d}, HW_Category_MemoryStore, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_NeedsConsecutiveRegisters)
// Predicate variants of intrinsics, these are specialized for operating on TYP_MASK type values.
HARDWARE_INTRINSIC(Sve, And_Predicates, -1, 2, {INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask|HW_Flag_EmbeddedMaskedOperation|HW_Flag_SpecialCodeGen)
HARDWARE_INTRINSIC(Sve, AndNot_Predicates, -1, -1, {INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask|HW_Flag_EmbeddedMaskedOperation|HW_Flag_SpecialCodeGen|HW_Flag_ZeroingMaskedOperation)
HARDWARE_INTRINSIC(Sve, BitwiseClear_Predicates, -1, 2, {INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask|HW_Flag_EmbeddedMaskedOperation|HW_Flag_SpecialCodeGen)
HARDWARE_INTRINSIC(Sve, Or_Predicates, -1, 2, {INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask|HW_Flag_EmbeddedMaskedOperation|HW_Flag_SpecialCodeGen)
HARDWARE_INTRINSIC(Sve, OrNot_Predicates, -1, -1, {INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask|HW_Flag_EmbeddedMaskedOperation|HW_Flag_SpecialCodeGen|HW_Flag_ZeroingMaskedOperation)
HARDWARE_INTRINSIC(Sve, Xor_Predicates, -1, 2, {INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask|HW_Flag_EmbeddedMaskedOperation|HW_Flag_SpecialCodeGen)
HARDWARE_INTRINSIC(Sve, ConditionalSelect_Predicates, -1, 3, {INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask|HW_Flag_ExplicitMaskedOperation|HW_Flag_SpecialCodeGen)
HARDWARE_INTRINSIC(Sve, ZipHigh_Predicates, -1, 2, {INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ReturnsPerElementMask)
Expand Down
18 changes: 11 additions & 7 deletions src/coreclr/jit/morph.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9805,16 +9805,20 @@ GenTree* Compiler::doMorphVectorOperandToMask(GenTree* node, GenTreeHWIntrinsic*
//
GenTreeHWIntrinsic* Compiler::fgMorphTryUseAllMaskVariant(GenTreeHWIntrinsic* node)
{
if (HWIntrinsicInfo::HasAllMaskVariant(node->GetHWIntrinsicId()))
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();

if (HWIntrinsicInfo::HasAllMaskVariant(intrinsicId))
{
NamedIntrinsic maskVariant = HWIntrinsicInfo::GetMaskVariant(node->GetHWIntrinsicId());
NamedIntrinsic maskVariant = HWIntrinsicInfo::GetMaskVariant(intrinsicId);

// As some intrinsics have many variants, check that the count of operands on the node
// matches the number of operands required for the mask variant of the intrinsic. The mask
// variant of the intrinsic must have a fixed number of operands.
int numArgs = HWIntrinsicInfo::lookupNumArgs(maskVariant);
assert(numArgs >= 0);
if (node->GetOperandCount() == (size_t)numArgs)
// matches the number of operands required for the mask variant of the intrinsic. For
// embedded masks, the number of args are unknown.
int numArgs = HWIntrinsicInfo::lookupNumArgs(maskVariant);
bool embedded = HWIntrinsicInfo::IsEmbeddedMaskedOperation(maskVariant);
assert(numArgs >= 0 || embedded);

if (node->GetOperandCount() == (size_t)numArgs || embedded)
{
// We're sure it will work at this point, so perform the pattern match on operands.
if (canMorphAllVectorOperandsToMasks(node))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -581,6 +581,57 @@ internal Arm64() { }
public static Vector<ulong> AndAcross(Vector<ulong> value) { throw new PlatformNotSupportedException(); }


// Bitwise NAND

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<byte> AndNot(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<short> AndNot(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<int> AndNot(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<long> AndNot(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<sbyte> AndNot(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<ushort> AndNot(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<uint> AndNot(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<ulong> AndNot(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }


// Bitwise clear

/// <summary>
Expand Down Expand Up @@ -7584,6 +7635,57 @@ internal Arm64() { }
public static Vector<ulong> OrAcross(Vector<ulong> value) { throw new PlatformNotSupportedException(); }


// Bitwise NOR

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<byte> OrNot(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<short> OrNot(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<int> OrNot(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<long> OrNot(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<sbyte> OrNot(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<ushort> OrNot(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<uint> OrNot(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
/// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B
/// </summary>
public static Vector<ulong> OrNot(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }


// Count nonzero bits

/// <summary>
Expand Down
Loading
Loading