Avx10v1 Class

Definition

Important

This API is not CLS-compliant.

Provides access to X86 AVX10.1 hardware instructions via intrinsics.

public ref class Avx10v1 abstract : System::Runtime::Intrinsics::X86::Avx2
[System.CLSCompliant(false)]
public abstract class Avx10v1 : System.Runtime.Intrinsics.X86.Avx2
[<System.CLSCompliant(false)>]
type Avx10v1 = class
    inherit Avx2
Public MustInherit Class Avx10v1
Inherits Avx2
Inheritance
Object → X86Base → Sse → Sse2 → Sse3 → Ssse3 → Sse41 → Sse42 → Avx → Avx2 → Avx10v1
Derived
System.Runtime.Intrinsics.X86.Avx10v1.V512
System.Runtime.Intrinsics.X86.Avx10v1.X64
Attributes
CLSCompliantAttribute

Properties

IsSupported

Gets a value that indicates whether the APIs in this class are supported.
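
The APIs in this class are usable only after checking IsSupported; the JIT treats the property as a constant, so the guard itself has no runtime cost. A minimal sketch of the guard pattern (the messages are illustrative):

using System;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    // Safe to call any Avx10v1 member on this code path.
    Console.WriteLine("Using the AVX10.1/256 code path.");
}
else
{
    // Fall back to Vector128/Vector256 helpers or scalar code.
    Console.WriteLine("AVX10.1 is not available on this CPU.");
}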

Methods

Abs(Vector128<Int64>)

__m128i _mm_abs_epi64 (__m128i a)

VPABSQ xmm1 {k1}{z}, xmm2/m128/m64bcst

Abs(Vector256<Int64>)

__m256i _mm256_abs_epi64 (__m256i a)

VPABSQ ymm1 {k1}{z}, ymm2/m256/m64bcst
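
A short sketch of the 64-bit absolute-value overloads (assuming hardware support has already been checked; the results in the comments are illustrative):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<long> v128 = Vector128.Create(-3L, long.MinValue + 1);
Vector128<ulong> abs128 = Avx10v1.Abs(v128);    // <3, 9223372036854775807>

Vector256<long> v256 = Vector256.Create(-1L, 2L, -3L, 4L);
Vector256<ulong> abs256 = Avx10v1.Abs(v256);    // <1, 2, 3, 4>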

AddScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_add_round_sd (__m128d a, __m128d b, int rounding)

VADDSD xmm1, xmm2, xmm3 {er}

AddScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_add_round_ss (__m128 a, __m128 b, int rounding)

VADDSS xmm1, xmm2, xmm3 {er}
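
The AddScalar overloads that take a FloatRoundingMode add only the lowest elements and apply the requested embedded rounding ({er}) instead of the current MXCSR rounding mode; the upper elements are copied from the first operand. A minimal sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<double> a = Vector128.CreateScalar(0.1);
Vector128<double> b = Vector128.CreateScalar(0.2);

// Lowest element is 0.1 + 0.2 rounded toward zero; the other element comes from 'a'.
Vector128<double> sum = Avx10v1.AddScalar(a, b, FloatRoundingMode.ToZero);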

AlignRight32(Vector128<Int32>, Vector128<Int32>, Byte)

__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)

VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

AlignRight32(Vector128<UInt32>, Vector128<UInt32>, Byte)

__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)

VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

AlignRight32(Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)

VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

AlignRight32(Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)

VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

AlignRight64(Vector128<Int64>, Vector128<Int64>, Byte)

__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)

VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

AlignRight64(Vector128<UInt64>, Vector128<UInt64>, Byte)

__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)

VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

AlignRight64(Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)

VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

AlignRight64(Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)

VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
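
AlignRight32 and AlignRight64 concatenate the two sources (first operand in the upper half), shift the concatenation right by the given number of 32-bit or 64-bit elements, and return the low half. A sketch, with the expected value noted under that assumption:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<int> left  = Vector128.Create(10, 11, 12, 13);
Vector128<int> right = Vector128.Create(0, 1, 2, 3);

// Shift the concatenation left:right by one 32-bit element: expected <1, 2, 3, 10>.
Vector128<int> rotated = Avx10v1.AlignRight32(left, right, 1);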

BlendVariable(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>)

__m128i _mm_mask_blendv_epu8 (__m128i a, __m128i b, __mmask16 mask)

VPBLENDMB xmm1 {k1}, xmm2, xmm3/m128

BlendVariable(Vector128<Double>, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_blendv_pd (__m128d a, __m128d b, __mmask8 mask)

VBLENDMPD xmm1 {k1}, xmm2, xmm3/m128/m64bcst

BlendVariable(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mask_blendv_epi16 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMW xmm1 {k1}, xmm2, xmm3/m128

BlendVariable(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_blendv_epi32 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMD xmm1 {k1}, xmm2, xmm3/m128/m32bcst

BlendVariable(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_blendv_epi64 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMQ xmm1 {k1}, xmm2, xmm3/m128/m64bcst

BlendVariable(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>)

__m128i _mm_mask_blendv_epi8 (__m128i a, __m128i b, __mmask16 mask)

VPBLENDMB xmm1 {k1}, xmm2, xmm3/m128

BlendVariable(Vector128<Single>, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_blendv_ps (__m128 a, __m128 b, __mmask8 mask)

VBLENDMPS xmm1 {k1}, xmm2, xmm3/m128/m32bcst

BlendVariable(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mask_blendv_epu16 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMW xmm1 {k1}, xmm2, xmm3/m128

BlendVariable(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_blendv_epu32 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMD xmm1 {k1}, xmm2, xmm3/m128/m32bcst

BlendVariable(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_blendv_epu64 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMQ xmm1 {k1}, xmm2, xmm3/m128/m64bcst

BlendVariable(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_mask_blendv_epu8 (__m256i a, __m256i b, __mmask32 mask)

VPBLENDMB ymm1 {k1}, ymm2, ymm3/m256

BlendVariable(Vector256<Double>, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_blendv_pd (__m256d a, __m256d b, __mmask8 mask)

VBLENDMPD ymm1 {k1}, ymm2, ymm3/m256/m64bcst

BlendVariable(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_mask_blendv_epi16 (__m256i a, __m256i b, __mmask16 mask)

VPBLENDMW ymm1 {k1}, ymm2, ymm3/m256

BlendVariable(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_blendv_epi32 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMD ymm1 {k1}, ymm2, ymm3/m256/m32bcst

BlendVariable(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_blendv_epi64 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMQ ymm1 {k1}, ymm2, ymm3/m256/m64bcst

BlendVariable(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_mask_blendv_epi8 (__m256i a, __m256i b, __mmask32 mask)

VPBLENDMB ymm1 {k1}, ymm2, ymm3/m256

BlendVariable(Vector256<Single>, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_blendv_ps (__m256 a, __m256 b, __mmask8 mask)

VBLENDMPS ymm1 {k1}, ymm2, ymm3/m256/m32bcst

BlendVariable(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_mask_blendv_epu16 (__m256i a, __m256i b, __mmask16 mask)

VPBLENDMW ymm1 {k1}, ymm2, ymm3/m256

BlendVariable(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_blendv_epu32 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMD ymm1 {k1}, ymm2, ymm3/m256/m32bcst

BlendVariable(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_blendv_epu64 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMQ ymm1 {k1}, ymm2, ymm3/m256/m64bcst
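
BlendVariable selects each element from the second operand where the corresponding mask element has all bits set and from the first operand otherwise, so the mask is typically the result of one of the Compare* methods below. A sketch computing an element-wise maximum:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<int> left  = Vector128.Create(1, 25, 3, 45);
Vector128<int> right = Vector128.Create(10, 20, 30, 40);

// Take 'right' wherever it is greater than 'left': expected <10, 25, 30, 45>.
Vector128<int> mask = Avx10v1.CompareGreaterThan(right, left);
Vector128<int> max  = Avx10v1.BlendVariable(left, right, mask);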

BroadcastPairScalarToVector128(Vector128<Int32>)

__m128i _mm_broadcast_i32x2 (__m128i a)

VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64

BroadcastPairScalarToVector128(Vector128<UInt32>)

__m128i _mm_broadcast_i32x2 (__m128i a)

VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64

BroadcastPairScalarToVector256(Vector128<Int32>)

__m256i _mm256_broadcast_i32x2 (__m128i a)

VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64

BroadcastPairScalarToVector256(Vector128<Single>)

__m256 _mm256_broadcast_f32x2 (__m128 a)

VBROADCASTF32x2 ymm1 {k1}{z}, xmm2/m64

BroadcastPairScalarToVector256(Vector128<UInt32>)

__m256i _mm256_broadcast_i32x2 (__m128i a)

VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64
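
BroadcastPairScalarToVector128 and BroadcastPairScalarToVector256 replicate the lowest two 32-bit elements of the source across the whole destination. A sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<int> pair = Vector128.Create(7, 9, 0, 0);

// Expected <7, 9, 7, 9, 7, 9, 7, 9>.
Vector256<int> repeated = Avx10v1.BroadcastPairScalarToVector256(pair);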

Classify(Vector128<Double>, Byte)

__mmask8 _mm_fpclass_pd_mask (__m128d a, int c)

VFPCLASSPD k2 {k1}, xmm2/m128/m64bcst, imm8

Classify(Vector128<Single>, Byte)

__mmask8 _mm_fpclass_ps_mask (__m128 a, int c)

VFPCLASSPS k2 {k1}, xmm2/m128/m32bcst, imm8

Classify(Vector256<Double>, Byte)

__mmask8 _mm256_fpclass_pd_mask (__m256d a, int c)

VFPCLASSPD k2 {k1}, ymm2/m256/m64bcst, imm8

Classify(Vector256<Single>, Byte)

__mmask8 _mm256_fpclass_ps_mask (__m256 a, int c)

VFPCLASSPS k2 {k1}, ymm2/m256/m32bcst, imm8

ClassifyScalar(Vector128<Double>, Byte)

__mmask8 _mm_fpclass_sd_mask (__m128d a, int c)

VFPCLASSSD k2 {k1}, xmm2/m64, imm8

ClassifyScalar(Vector128<Single>, Byte)

__mmask8 _mm_fpclass_ss_mask (__m128 a, int c)

VFPCLASSSS k2 {k1}, xmm2/m32, imm8

Compare(Vector128<Double>, Vector128<Double>, FloatComparisonMode)

__mmask8 _mm_cmp_pd_mask (__m128d a, __m128d b, const int imm8)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8

Compare(Vector128<Single>, Vector128<Single>, FloatComparisonMode)

__mmask8 _mm_cmp_ps_mask (__m128 a, __m128 b, const int imm8)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8

Compare(Vector256<Double>, Vector256<Double>, FloatComparisonMode)

__mmask8 _mm256_cmp_pd_mask (__m256d a, __m256d b, const int imm8)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8

Compare(Vector256<Single>, Vector256<Single>, FloatComparisonMode)

__mmask8 _mm256_cmp_ps_mask (__m256 a, __m256 b, const int imm8)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8
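
The Compare overloads take an explicit FloatComparisonMode that maps to the imm8 predicate of VCMPPD/VCMPPS; each result element is all ones when the predicate holds and zero otherwise. A sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<double> a = Vector128.Create(1.0, double.NaN);
Vector128<double> b = Vector128.Create(2.0, 2.0);

// Ordered less-than: true for element 0, false for the NaN element.
Vector128<double> lessThan = Avx10v1.Compare(a, b, FloatComparisonMode.OrderedLessThanSignaling);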

CompareEqual(Vector128<Byte>, Vector128<Byte>)

__mmask16 _mm_cmpeq_epu8_mask (__m128i a, __m128i b)

VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(0)

CompareEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpeq_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector128<Int16>, Vector128<Int16>)

__mmask8 _mm_cmpeq_epi16_mask (__m128i a, __m128i b)

VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(0)

CompareEqual(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmpeq_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(0)

CompareEqual(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmpeq_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(0)

CompareEqual(Vector128<SByte>, Vector128<SByte>)

__mmask16 _mm_cmpeq_epi8_mask (__m128i a, __m128i b)

VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(0)

CompareEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpeq_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector128<UInt16>, Vector128<UInt16>)

__mmask8 _mm_cmpeq_epu16_mask (__m128i a, __m128i b)

VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(0)

CompareEqual(Vector128<UInt32>, Vector128<UInt32>)

__mmask8 _mm_cmpeq_epu32_mask (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(0)

CompareEqual(Vector128<UInt64>, Vector128<UInt64>)

__mmask8 _mm_cmpeq_epu64_mask (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(0)

CompareEqual(Vector256<Byte>, Vector256<Byte>)

__mmask32 _mm256_cmpeq_epu8_mask (__m256i a, __m256i b)

VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(0)

CompareEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpeq_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector256<Int16>, Vector256<Int16>)

__mmask16 _mm256_cmpeq_epi16_mask (__m256i a, __m256i b)

VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(0)

CompareEqual(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmpeq_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(0)

CompareEqual(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmpeq_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(0)

CompareEqual(Vector256<SByte>, Vector256<SByte>)

__mmask32 _mm256_cmpeq_epi8_mask (__m256i a, __m256i b)

VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(0)

CompareEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpeq_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector256<UInt16>, Vector256<UInt16>)

__mmask16 _mm256_cmpeq_epu16_mask (__m256i a, __m256i b)

VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(0)

CompareEqual(Vector256<UInt32>, Vector256<UInt32>)

__mmask8 _mm256_cmpeq_epu32_mask (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(0)

CompareEqual(Vector256<UInt64>, Vector256<UInt64>)

__mmask8 _mm256_cmpeq_epu64_mask (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(0)
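
The CompareEqual overloads (and the CompareGreaterThan, CompareLessThan, and related families that follow) all share the same shape: each element of the result is all ones when the relation holds and zero otherwise, which makes the result directly usable as a BlendVariable mask. A sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<ulong> a = Vector128.Create(5UL, 8UL);
Vector128<ulong> b = Vector128.Create(5UL, 9UL);

// Expected <0xFFFFFFFFFFFFFFFF, 0x0000000000000000>.
Vector128<ulong> equal = Avx10v1.CompareEqual(a, b);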

CompareGreaterThan(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_cmpgt_epu8 (__m128i a, __m128i b)

VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(6)

CompareGreaterThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpgt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector128<Int16>, Vector128<Int16>)

__mmask8 _mm_cmpgt_epi16_mask (__m128i a, __m128i b)

VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(6)

CompareGreaterThan(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmpgt_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)

CompareGreaterThan(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmpgt_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)

CompareGreaterThan(Vector128<SByte>, Vector128<SByte>)

__mmask16 _mm_cmpgt_epi8_mask (__m128i a, __m128i b)

VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(6)

CompareGreaterThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpgt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_cmpgt_epu16 (__m128i a, __m128i b)

VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(6)

CompareGreaterThan(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmpgt_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)

CompareGreaterThan(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmpgt_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)

CompareGreaterThan(Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_cmpgt_epu8 (__m256i a, __m256i b)

VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(6)

CompareGreaterThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpgt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector256<Int16>, Vector256<Int16>)

__mmask16 _mm256_cmpgt_epi16_mask (__m256i a, __m256i b)

VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(6)

CompareGreaterThan(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmpgt_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)

CompareGreaterThan(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmpgt_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)

CompareGreaterThan(Vector256<SByte>, Vector256<SByte>)

__mmask32 _mm256_cmpgt_epi8_mask (__m256i a, __m256i b)

VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(6)

CompareGreaterThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpgt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_cmpgt_epu16 (__m256i a, __m256i b)

VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(6)

CompareGreaterThan(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmpgt_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)

CompareGreaterThan(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmpgt_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)

CompareGreaterThanOrEqual(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_cmpge_epu8 (__m128i a, __m128i b)

VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(5)

CompareGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpge_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_cmpge_epi16 (__m128i a, __m128i b)

VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(5)

CompareGreaterThanOrEqual(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmpge_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmpge_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_cmpge_epi8 (__m128i a, __m128i b)

VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(5)

CompareGreaterThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpge_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_cmpge_epu16 (__m128i a, __m128i b)

VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(5)

CompareGreaterThanOrEqual(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmpge_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmpge_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_cmpge_epu8 (__m256i a, __m256i b)

VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(5)

CompareGreaterThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpge_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_cmpge_epi16 (__m256i a, __m256i b)

VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(5)

CompareGreaterThanOrEqual(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmpge_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmpge_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_cmpge_epi8 (__m256i a, __m256i b)

VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(5)

CompareGreaterThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpge_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_cmpge_epu16 (__m256i a, __m256i b)

VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(5)

CompareGreaterThanOrEqual(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmpge_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmpge_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)

CompareLessThan(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_cmplt_epu8 (__m128i a, __m128i b)

VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(1)

CompareLessThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmplt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_cmplt_epi16 (__m128i a, __m128i b)

VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(1)

CompareLessThan(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmplt_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)

CompareLessThan(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmplt_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)

CompareLessThan(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_cmplt_epi8 (__m128i a, __m128i b)

VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(1)

CompareLessThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmplt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_cmplt_epu16 (__m128i a, __m128i b)

VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(1)

CompareLessThan(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmplt_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)

CompareLessThan(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmplt_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)

CompareLessThan(Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_cmplt_epu8 (__m256i a, __m256i b)

VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(1)

CompareLessThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmplt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_cmplt_epi16 (__m256i a, __m256i b)

VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(1)

CompareLessThan(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmplt_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)

CompareLessThan(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmplt_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)

CompareLessThan(Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_cmplt_epi8 (__m256i a, __m256i b)

VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(1)

CompareLessThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmplt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_cmplt_epu16 (__m256i a, __m256i b)

VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(1)

CompareLessThan(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmplt_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)

CompareLessThan(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmplt_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)

CompareLessThanOrEqual(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_cmple_epu8 (__m128i a, __m128i b)

VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(2)

CompareLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmple_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_cmple_epi16 (__m128i a, __m128i b)

VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(2)

CompareLessThanOrEqual(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmple_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmple_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_cmple_epi8 (__m128i a, __m128i b)

VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(2)

CompareLessThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmple_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_cmple_epu16 (__m128i a, __m128i b)

VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(2)

CompareLessThanOrEqual(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmple_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmple_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_cmple_epu8 (__m256i a, __m256i b)

VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(2)

CompareLessThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmple_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_cmple_epi16 (__m256i a, __m256i b)

VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(2)

CompareLessThanOrEqual(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmple_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmple_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_cmple_epi8 (__m256i a, __m256i b)

VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(2)

CompareLessThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmple_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_cmple_epu16 (__m256i a, __m256i b)

VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(2)

CompareLessThanOrEqual(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmple_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmple_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)

CompareNotEqual(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_cmpne_epu8 (__m128i a, __m128i b)

VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(4)

CompareNotEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpneq_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_cmpne_epi16 (__m128i a, __m128i b)

VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(4)

CompareNotEqual(Vector128<Int32>, Vector128<Int32>)

__m128i _mm_cmpne_epi32 (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)

CompareNotEqual(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_cmpne_epi64 (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)

CompareNotEqual(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_cmpne_epi8 (__m128i a, __m128i b)

VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(4)

CompareNotEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpneq_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_cmpne_epu16 (__m128i a, __m128i b)

VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(4)

CompareNotEqual(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_cmpne_epu32 (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)

CompareNotEqual(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_cmpne_epu64 (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)

CompareNotEqual(Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_cmpne_epu8 (__m256i a, __m256i b)

VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(4)

CompareNotEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpneq_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_cmpne_epi16 (__m256i a, __m256i b)

VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(4)

CompareNotEqual(Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_cmpne_epi32 (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)

CompareNotEqual(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_cmpne_epi64 (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)

CompareNotEqual(Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_cmpne_epi8 (__m256i a, __m256i b)

VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(4)

CompareNotEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpneq_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_cmpne_epu16 (__m256i a, __m256i b)

VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(4)

CompareNotEqual(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_cmpne_epu32 (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)

CompareNotEqual(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_cmpne_epu64 (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)

CompareNotGreaterThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpngt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpngt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpngt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpngt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpnge_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpnge_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpnge_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpnge_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpnlt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpnlt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpnlt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpnlt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpnle_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpnle_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpnle_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpnle_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpord_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpord_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpord_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpord_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpunord_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpunord_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpunord_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpunord_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

Compress(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>)

__m128i _mm_mask_compress_epi8 (__m128i s, __mmask16 k, __m128i a)

VPCOMPRESSB xmm1 {k1}{z}, xmm2

Compress(Vector128<Double>, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_compress_pd (__m128d s, __mmask8 k, __m128d a)

VCOMPRESSPD xmm1 {k1}{z}, xmm2

Compress(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mask_compress_epi16 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSW xmm1 {k1}{z}, xmm2

Compress(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_compress_epi32 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSD xmm1 {k1}{z}, xmm2

Compress(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_compress_epi64 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSQ xmm1 {k1}{z}, xmm2

Compress(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>)

__m128i _mm_mask_compress_epi8 (__m128i s, __mmask16 k, __m128i a)

VPCOMPRESSB xmm1 {k1}{z}, xmm2

Compress(Vector128<Single>, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_compress_ps (__m128 s, __mmask8 k, __m128 a)

VCOMPRESSPS xmm1 {k1}{z}, xmm2

Compress(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mask_compress_epi16 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSW xmm1 {k1}{z}, xmm2

Compress(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_compress_epi32 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSD xmm1 {k1}{z}, xmm2

Compress(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_compress_epi64 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSQ xmm1 {k1}{z}, xmm2

Compress(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_mask_compress_epi8 (__m256i s, __mmask32 k, __m256i a)

VPCOMPRESSB ymm1 {k1}{z}, ymm2

Compress(Vector256<Double>, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_compress_pd (__m256d s, __mmask8 k, __m256d a)

VCOMPRESSPD ymm1 {k1}{z}, ymm2

Compress(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_mask_compress_epi16 (__m256i s, __mmask16 k, __m256i a)

VPCOMPRESSW ymm1 {k1}{z}, ymm2

Compress(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_compress_epi32 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSD ymm1 {k1}{z}, ymm2

Compress(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_compress_epi64 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSQ ymm1 {k1}{z}, ymm2

Compress(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_mask_compress_epi8 (__m256i s, __mmask32 k, __m256i a)

VPCOMPRESSB ymm1 {k1}{z}, ymm2

Compress(Vector256<Single>, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_compress_ps (__m256 s, __mmask8 k, __m256 a)

VCOMPRESSPS ymm1 {k1}{z}, ymm2

Compress(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_mask_compress_epi16 (__m256i s, __mmask16 k, __m256i a)

VPCOMPRESSW ymm1 {k1}{z}, ymm2

Compress(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_compress_epi32 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSD ymm1 {k1}{z}, ymm2

Compress(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_compress_epi64 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSQ ymm1 {k1}{z}, ymm2
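
Compress packs the elements selected by the mask toward the least significant positions of the result; unselected result positions are taken from the merge operand. A hedged sketch, assuming the parameter order (merge, mask, value) implied by the native (s, k, a) signatures above:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<int> value = Vector128.Create(1, 2, 3, 4);
Vector128<int> mask  = Vector128.Create(-1, 0, -1, 0);   // select elements 0 and 2
Vector128<int> merge = Vector128<int>.Zero;

// Expected <1, 3, 0, 0>: the selected elements packed low, the rest from 'merge'.
Vector128<int> packed = Avx10v1.Compress(merge, mask, value);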

CompressStore(Byte*, Vector128<Byte>, Vector128<Byte>)

void _mm_mask_compressstoreu_epi8 (void * s, __mmask16 k, __m128i a)

VPCOMPRESSB m128 {k1}{z}, xmm2

CompressStore(Byte*, Vector256<Byte>, Vector256<Byte>)

void _mm256_mask_compressstoreu_epi8 (void * s, __mmask32 k, __m256i a)

VPCOMPRESSB m256 {k1}{z}, ymm2

CompressStore(Double*, Vector128<Double>, Vector128<Double>)

void _mm_mask_compressstoreu_pd (void * s, __mmask8 k, __m128d a)

VCOMPRESSPD m128 {k1}{z}, xmm2

CompressStore(Double*, Vector256<Double>, Vector256<Double>)

void _mm256_mask_compressstoreu_pd (void * s, __mmask8 k, __m256d a)

VCOMPRESSPD m256 {k1}{z}, ymm2

CompressStore(Int16*, Vector128<Int16>, Vector128<Int16>)

void _mm_mask_compressstoreu_epi16 (void * s, __mmask8 k, __m128i a)

VPCOMPRESSW m128 {k1}{z}, xmm2

CompressStore(Int16*, Vector256<Int16>, Vector256<Int16>)

void _mm256_mask_compressstoreu_epi16 (void * s, __mmask16 k, __m256i a)

VPCOMPRESSW m256 {k1}{z}, ymm2

CompressStore(Int32*, Vector128<Int32>, Vector128<Int32>)

void _mm_mask_compressstoreu_epi32 (void * s, __mmask8 k, __m128i a)

VPCOMPRESSD m128 {k1}{z}, xmm2

CompressStore(Int32*, Vector256<Int32>, Vector256<Int32>)

void _mm256_mask_compressstoreu_epi32 (void * s, __mmask8 k, __m256i a)

VPCOMPRESSD m256 {k1}{z}, ymm2

CompressStore(Int64*, Vector128<Int64>, Vector128<Int64>)

void _mm_mask_compressstoreu_epi64 (void * s, __mmask8 k, __m128i a)

VPCOMPRESSQ m128 {k1}{z}, xmm2

CompressStore(Int64*, Vector256<Int64>, Vector256<Int64>)

void _mm256_mask_compressstoreu_epi64 (void * s, __mmask8 k, __m256i a)

VPCOMPRESSQ m256 {k1}{z}, ymm2

CompressStore(SByte*, Vector128<SByte>, Vector128<SByte>)

void _mm_mask_compressstoreu_epi8 (void * s, __mmask16 k, __m128i a)

VPCOMPRESSB m128 {k1}{z}, xmm2

CompressStore(SByte*, Vector256<SByte>, Vector256<SByte>)

void _mm256_mask_compressstoreu_epi8 (void * s, __mmask32 k, __m256i a)

VPCOMPRESSB m256 {k1}{z}, ymm2

CompressStore(Single*, Vector128<Single>, Vector128<Single>)

void _mm_mask_compressstoreu_ps (void * s, __mmask8 k, __m128 a)

VCOMPRESSPS m128 {k1}{z}, xmm2

CompressStore(Single*, Vector256<Single>, Vector256<Single>)

void _mm256_mask_compressstoreu_ps (void * s, __mmask8 k, __m256 a)

VCOMPRESSPS m256 {k1}{z}, ymm2

CompressStore(UInt16*, Vector128<UInt16>, Vector128<UInt16>)

void _mm_mask_compressstoreu_epi16 (void * s, __mmask8 k, __m128i a)

VPCOMPRESSW m128 {k1}{z}, xmm2

CompressStore(UInt16*, Vector256<UInt16>, Vector256<UInt16>)

void _mm256_mask_compressstoreu_epi16 (void * s, __mmask16 k, __m256i a)

VPCOMPRESSW m256 {k1}{z}, ymm2

CompressStore(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

void _mm_mask_compressstoreu_epi32 (void * s, __mmask8 k, __m128i a)

VPCOMPRESSD m128 {k1}{z}, xmm2

CompressStore(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

void _mm256_mask_compressstoreu_epi32 (void * s, __mmask8 k, __m256i a)

VPCOMPRESSD m256 {k1}{z}, ymm2

CompressStore(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

void _mm_mask_compressstoreu_epi64 (void * s, __mmask8 k, __m128i a)

VPCOMPRESSQ m128 {k1}{z}, xmm2

CompressStore(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

void _mm256_mask_compressstoreu_epi64 (void * s, __mmask8 k, __m256i a)

VPCOMPRESSQ m256 {k1}{z}, ymm2
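
CompressStore writes only the selected elements, contiguously, starting at the given address; memory past the last selected element is left untouched. A hedged sketch (unsafe, since the destination is a raw pointer):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

unsafe
{
    int* destination = stackalloc int[4];
    Vector128<int> value = Vector128.Create(1, 2, 3, 4);
    Vector128<int> mask  = Vector128.Create(0, -1, 0, -1);   // keep elements 1 and 3

    // Writes 2 to destination[0] and 4 to destination[1]; destination[2..3] are untouched.
    Avx10v1.CompressStore(destination, mask, value);
}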

ConvertScalarToVector128Double(Vector128<Double>, UInt32)

__m128d _mm_cvtu32_sd (__m128d a, unsigned int b)

VCVTUSI2SD xmm1, xmm2, r/m32

ConvertScalarToVector128Single(Vector128<Single>, Int32, FloatRoundingMode)

__m128 _mm_cvt_roundi32_ss (__m128 a, int b, int rounding)

VCVTSI2SS xmm1, xmm2, r32 {er}

ConvertScalarToVector128Single(Vector128<Single>, UInt32, FloatRoundingMode)

__m128 _mm_cvt_roundu32_ss (__m128 a, unsigned int b, int rounding)

VCVTUSI2SS xmm1, xmm2, r32 {er}

ConvertScalarToVector128Single(Vector128<Single>, UInt32)

__m128 _mm_cvtu32_ss (__m128 a, unsigned int b)

VCVTUSI2SS xmm1, xmm2, r/m32

ConvertScalarToVector128Single(Vector128<Single>, Vector128<Double>, FloatRoundingMode)

__m128 _mm_cvt_roundsd_ss (__m128 a, __m128d b, int rounding)

VCVTSD2SS xmm1, xmm2, xmm3 {er}

ConvertToInt32(Vector128<Double>, FloatRoundingMode)

int _mm_cvt_roundsd_i32 (__m128d a, int rounding)

VCVTSD2SI r32, xmm1 {er}

ConvertToInt32(Vector128<Single>, FloatRoundingMode)

int _mm_cvt_roundss_i32 (__m128 a, int rounding)

VCVTSS2SI r32, xmm1 {er}

ConvertToUInt32(Vector128<Double>, FloatRoundingMode)

unsigned int _mm_cvt_roundsd_u32 (__m128d a, int rounding)

VCVTSD2USI r32, xmm1 {er}

ConvertToUInt32(Vector128<Double>)

unsigned int _mm_cvtsd_u32 (__m128d a)

VCVTSD2USI r32, xmm1/m64{er}

ConvertToUInt32(Vector128<Single>, FloatRoundingMode)

unsigned int _mm_cvt_roundss_u32 (__m128 a, int rounding)

VCVTSS2USI r32, xmm1 {er}

ConvertToUInt32(Vector128<Single>)

unsigned int _mm_cvtss_u32 (__m128 a)

VCVTSS2USI r32, xmm1/m32{er}

ConvertToUInt32WithTruncation(Vector128<Double>)

unsigned int _mm_cvttsd_u32 (__m128d a)

VCVTTSD2USI r32, xmm1/m64{er}

ConvertToUInt32WithTruncation(Vector128<Single>)

unsigned int _mm_cvttss_u32 (__m128 a)

VCVTTSS2USI r32, xmm1/m32{er}
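
The scalar conversion helpers above operate on the lowest element only; the unsigned variants map to VCVTSD2USI/VCVTSS2USI, and the FloatRoundingMode overloads use embedded rounding instead of the MXCSR mode. A sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<double> value = Vector128.CreateScalar(3.7);

uint rounded   = Avx10v1.ConvertToUInt32(value);                                        // 4
uint truncated = Avx10v1.ConvertToUInt32WithTruncation(value);                          // 3
uint floored   = Avx10v1.ConvertToUInt32(value, FloatRoundingMode.ToNegativeInfinity);  // 3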

ConvertToVector128Byte(Vector128<Int16>)

__m128i _mm_cvtepi16_epi8 (__m128i a)

VPMOVWB xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<Int32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<Int64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<UInt16>)

__m128i _mm_cvtepi16_epi8 (__m128i a)

VPMOVWB xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128Byte(Vector256<Int16>)

__m128i _mm256_cvtepi16_epi8 (__m256i a)

VPMOVWB xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<UInt16>)

__m128i _mm256_cvtepi16_epi8 (__m256i a)

VPMOVWB xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128ByteWithSaturation(Vector128<UInt16>)

__m128i _mm_cvtusepi16_epi8 (__m128i a)

VPMOVUSWB xmm1/m64 {k1}{z}, xmm2

ConvertToVector128ByteWithSaturation(Vector128<UInt32>)

__m128i _mm_cvtusepi32_epi8 (__m128i a)

VPMOVUSDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128ByteWithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi8 (__m128i a)

VPMOVUSQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128ByteWithSaturation(Vector256<UInt16>)

__m128i _mm256_cvtusepi16_epi8 (__m256i a)

VPMOVUSWB xmm1/m128 {k1}{z}, ymm2

ConvertToVector128ByteWithSaturation(Vector256<UInt32>)

__m128i _mm256_cvtusepi32_epi8 (__m256i a)

VPMOVUSDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128ByteWithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi8 (__m256i a)

VPMOVUSQB xmm1/m32 {k1}{z}, ymm2
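
ConvertToVector128Byte truncates each source element to 8 bits, while the WithSaturation overloads clamp to the destination range; result elements beyond the narrowed data are zero. A sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

Vector128<ushort> wide = Vector128.Create((ushort)1, 2, 300, 65535, 0, 0, 0, 0);

// Truncation keeps the low 8 bits: 300 -> 44, 65535 -> 255.
Vector128<byte> truncated = Avx10v1.ConvertToVector128Byte(wide);

// Saturation clamps instead: 300 -> 255, 65535 -> 255.
Vector128<byte> saturated = Avx10v1.ConvertToVector128ByteWithSaturation(wide);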

ConvertToVector128Double(Vector128<Int64>)

__m128d _mm_cvtepi64_pd (__m128i a)

VCVTQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128Double(Vector128<UInt32>)

__m128d _mm_cvtepu32_pd (__m128i a)

VCVTUDQ2PD xmm1 {k1}{z}, xmm2/m64/m32bcst

ConvertToVector128Double(Vector128<UInt64>)

__m128d _mm_cvtepu64_pd (__m128i a)

VCVTUQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128Int16(Vector128<Int32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<Int64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int16WithSaturation(Vector128<Int32>)

__m128i _mm_cvtsepi32_epi16 (__m128i a)

VPMOVSDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16WithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi16 (__m128i a)

VPMOVSQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16WithSaturation(Vector256<Int32>)

__m128i _mm256_cvtsepi32_epi16 (__m256i a)

VPMOVSDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16WithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi16 (__m256i a)

VPMOVSQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int32(Vector128<Int64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int32(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int32WithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi32 (__m128i a)

VPMOVSQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32WithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi32 (__m256i a)

VPMOVSQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int64(Vector128<Double>)

__m128i _mm_cvtpd_epi64 (__m128d a)

VCVTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128Int64(Vector128<Single>)

__m128i _mm_cvtps_epi64 (__m128 a)

VCVTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst

ConvertToVector128Int64WithTruncation(Vector128<Double>)

__m128i _mm_cvttpd_epi64 (__m128d a)

VCVTTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128Int64WithTruncation(Vector128<Single>)

__m128i _mm_cvttps_epi64 (__m128 a)

VCVTTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst

ConvertToVector128SByte(Vector128<Int16>)

__m128i _mm_cvtepi16_epi8 (__m128i a)

VPMOVWB xmm1/m64 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<Int32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<Int64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<UInt16>)

__m128i _mm_cvtepi16_epi8 (__m128i a)

VPMOVWB xmm1/m64 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByte(Vector256<Int16>)

__m128i _mm256_cvtepi16_epi8 (__m256i a)

VPMOVWB xmm1/m128 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<UInt16>)

__m128i _mm256_cvtepi16_epi8 (__m256i a)

VPMOVWB xmm1/m128 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128SByteWithSaturation(Vector128<Int16>)

__m128i _mm_cvtsepi16_epi8 (__m128i a)

VPMOVSWB xmm1/m64 {k1}{z}, xmm2

ConvertToVector128SByteWithSaturation(Vector128<Int32>)

__m128i _mm_cvtsepi32_epi8 (__m128i a)

VPMOVSDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByteWithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi8 (__m128i a)

VPMOVSQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByteWithSaturation(Vector256<Int16>)

__m128i _mm256_cvtsepi16_epi8 (__m256i a)

VPMOVSWB xmm1/m128 {k1}{z}, ymm2

ConvertToVector128SByteWithSaturation(Vector256<Int32>)

__m128i _mm256_cvtsepi32_epi8 (__m256i a)

VPMOVSDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByteWithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi8 (__m256i a)

VPMOVSQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128Single(Vector128<Int64>)

__m128 _mm_cvtepi64_ps (__m128i a)

VCVTQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128Single(Vector128<UInt32>)

__m128 _mm_cvtepu32_ps (__m128i a)

VCVTUDQ2PS xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128Single(Vector128<UInt64>)

__m128 _mm_cvtepu64_ps (__m128i a)

VCVTUQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128Single(Vector256<Int64>)

__m128 _mm256_cvtepi64_ps (__m256i a)

VCVTQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector128Single(Vector256<UInt64>)

__m128 _mm256_cvtepu64_ps (__m256i a)

VCVTUQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector128UInt16(Vector128<Int32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<Int64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt16WithSaturation(Vector128<UInt32>)

__m128i _mm_cvtusepi32_epi16 (__m128i a)

VPMOVUSDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16WithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi16 (__m128i a)

VPMOVUSQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16WithSaturation(Vector256<UInt32>)

__m128i _mm256_cvtusepi32_epi16 (__m256i a)

VPMOVUSDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16WithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi16 (__m256i a)

VPMOVUSQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt32(Vector128<Double>)

__m128i _mm_cvtpd_epu32 (__m128d a)

VCVTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt32(Vector128<Int64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32(Vector128<Single>)

__m128i _mm_cvtps_epu32 (__m128 a)

VCVTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt32(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32(Vector256<Double>)

__m128i _mm256_cvtpd_epu32 (__m256d a)

VCVTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector128UInt32(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32WithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi32 (__m128i a)

VPMOVUSQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32WithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi32 (__m256i a)

VPMOVUSQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32WithTruncation(Vector128<Double>)

__m128i _mm_cvttpd_epu32 (__m128d a)

VCVTTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt32WithTruncation(Vector128<Single>)

__m128i _mm_cvttps_epu32 (__m128 a)

VCVTTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt32WithTruncation(Vector256<Double>)

__m128i _mm256_cvttpd_epu32 (__m256d a)

VCVTTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector128UInt64(Vector128<Double>)

__m128i _mm_cvtpd_epu64 (__m128d a)

VCVTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt64(Vector128<Single>)

__m128i _mm_cvtps_epu64 (__m128 a)

VCVTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst

ConvertToVector128UInt64WithTruncation(Vector128<Double>)

__m128i _mm_cvttpd_epu64 (__m128d a)

VCVTTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt64WithTruncation(Vector128<Single>)

__m128i _mm_cvttps_epu64 (__m128 a)

VCVTTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst

ConvertToVector256Double(Vector128<UInt32>)

__m256d _mm256_cvtepu32_pd (__m128i a)

VCVTUDQ2PD ymm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector256Double(Vector256<Int64>)

__m256d _mm256_cvtepi64_pd (__m256i a)

VCVTQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector256Double(Vector256<UInt64>)

__m256d _mm256_cvtepu64_pd (__m256i a)

VCVTUQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector256Int64(Vector128<Single>)

__m256i _mm256_cvtps_epi64 (__m128 a)

VCVTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector256Int64(Vector256<Double>)

__m256i _mm256_cvtpd_epi64 (__m256d a)

VCVTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector256Int64WithTruncation(Vector128<Single>)

__m256i _mm256_cvttps_epi64 (__m128 a)

VCVTTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector256Int64WithTruncation(Vector256<Double>)

__m256i _mm256_cvttpd_epi64 (__m256d a)

VCVTTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector256Single(Vector256<UInt32>)

__m256 _mm256_cvtepu32_ps (__m256i a)

VCVTUDQ2PS ymm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector256UInt32(Vector256<Single>)

__m256i _mm256_cvtps_epu32 (__m256 a)

VCVTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector256UInt32WithTruncation(Vector256<Single>)

__m256i _mm256_cvttps_epu32 (__m256 a)

VCVTTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector256UInt64(Vector128<Single>)

__m256i _mm256_cvtps_epu64 (__m128 a)

VCVTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector256UInt64(Vector256<Double>)

__m256i _mm256_cvtpd_epu64 (__m256d a)

VCVTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector256UInt64WithTruncation(Vector128<Single>)

__m256i _mm256_cvttps_epu64 (__m128 a)

VCVTTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector256UInt64WithTruncation(Vector256<Double>)

__m256i _mm256_cvttpd_epu64 (__m256d a)

VCVTTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst
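
The conversion overloads above are element-wise casts between floating-point and integer lanes. As a rough illustration (not taken from the reference itself), the sketch below converts four non-negative doubles to unsigned 64-bit integers with truncation toward zero, guarding on IsSupported.

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector256<double> values = Vector256.Create(3.9, 0.2, 255.5, 1024.0);
    // VCVTTPD2UQQ: truncates each double toward zero and converts to a UInt64 lane.
    Vector256<ulong> truncated = Avx10v1.ConvertToVector256UInt64WithTruncation(values);
    // truncated now holds <3, 0, 255, 1024>.
}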

DetectConflicts(Vector128<Int32>)

__m128i _mm_conflict_epi32 (__m128i a)

VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst

DetectConflicts(Vector128<Int64>)

__m128i _mm_conflict_epi64 (__m128i a)

VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst

DetectConflicts(Vector128<UInt32>)

__m128i _mm_conflict_epi32 (__m128i a)

VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst

DetectConflicts(Vector128<UInt64>)

__m128i _mm_conflict_epi64 (__m128i a)

VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst

DetectConflicts(Vector256<Int32>)

__m256i _mm256_conflict_epi32 (__m256i a)

VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst

DetectConflicts(Vector256<Int64>)

__m256i _mm256_conflict_epi64 (__m256i a)

VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst

DetectConflicts(Vector256<UInt32>)

__m256i _mm256_conflict_epi32 (__m256i a)

VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst

DetectConflicts(Vector256<UInt64>)

__m256i _mm256_conflict_epi64 (__m256i a)

VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
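
DetectConflicts reports, for each element, a bit mask of the lower-indexed elements that hold the same value, which is the building block for vectorizing scatter-style loops with duplicate indices. A minimal sketch, assuming AVX10.1 support:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<uint> indices = Vector128.Create(7u, 3u, 7u, 3u);
    // VPCONFLICTD: element i gets a bit set for every lower element equal to it.
    Vector128<uint> conflicts = Avx10v1.DetectConflicts(indices);
    // conflicts is <0, 0, 0b0001, 0b0010>: element 2 matches element 0, element 3 matches element 1.
}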

DivideScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_div_round_sd (__m128d a, __m128d b, int rounding)

VDIVSD xmm1, xmm2, xmm3 {er}

DivideScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_div_round_ss (__m128 a, __m128 b, int rounding)

VDIVSS xmm1, xmm2, xmm3 {er}
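
The FloatRoundingMode overloads apply an embedded rounding mode to the operation instead of reading MXCSR. A hedged sketch (the enum member ToZero is assumed from System.Runtime.Intrinsics.X86.FloatRoundingMode):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<double> a = Vector128.CreateScalar(1.0);
    Vector128<double> b = Vector128.CreateScalar(3.0);
    // VDIVSD with {er}: divides the lower elements using the requested rounding
    // (here, toward zero) without touching MXCSR; the upper element comes from a.
    Vector128<double> q = Avx10v1.DivideScalar(a, b, FloatRoundingMode.ToZero);
}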

Equals(Object)

Determines whether the specified object is equal to the current object.

(Inherited from Object)
Expand(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>)

__m128i _mm_mask_expand_epi8 (__m128i s, __mmask16 k, __m128i a)

VPEXPANDB xmm1 {k1}{z}, xmm2

Expand(Vector128<Double>, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_expand_pd (__m128d s, __mmask8 k, __m128d a)

VEXPANDPD xmm1 {k1}{z}, xmm2

Expand(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mask_expand_epi16 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDW xmm1 {k1}{z}, xmm2

Expand(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_expand_epi32 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDD xmm1 {k1}{z}, xmm2

Expand(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_expand_epi64 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDQ xmm1 {k1}{z}, xmm2

Expand(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>)

__m128i _mm_mask_expand_epi8 (__m128i s, __mmask16 k, __m128i a)

VPEXPANDB xmm1 {k1}{z}, xmm2

Expand(Vector128<Single>, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_expand_ps (__m128 s, __mmask8 k, __m128 a)

VEXPANDPS xmm1 {k1}{z}, xmm2

Expand(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mask_expand_epi16 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDW xmm1 {k1}{z}, xmm2

Expand(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_expand_epi32 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDD xmm1 {k1}{z}, xmm2

Expand(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_expand_epi64 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDQ xmm1 {k1}{z}, xmm2

Expand(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_mask_expand_epi8 (__m256i s, __mmask32 k, __m256i a)

VPEXPANDB ymm1 {k1}{z}, ymm2

Expand(Vector256<Double>, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_expand_pd (__m256d s, __mmask8 k, __m256d a)

VEXPANDPD ymm1 {k1}{z}, ymm2

Expand(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_mask_expand_epi16 (__m256i s, __mmask16 k, __m256i a)

VPEXPANDW ymm1 {k1}{z}, ymm2

Expand(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_expand_epi32 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDD ymm1 {k1}{z}, ymm2

Expand(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_expand_epi64 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDQ ymm1 {k1}{z}, ymm2

Expand(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_mask_expand_epi8 (__m256i s, __mmask32 k, __m256i a)

VPEXPANDB ymm1 {k1}{z}, ymm2

Expand(Vector256<Single>, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_expand_ps (__m256 s, __mmask8 k, __m256 a)

VEXPANDPS ymm1 {k1}{z}, ymm2

Expand(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_mask_expand_epi16 (__m256i s, __mmask16 k, __m256i a)

VPEXPANDW ymm1 {k1}{z}, ymm2

Expand(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_expand_epi32 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDD ymm1 {k1}{z}, ymm2

Expand(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_expand_epi64 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDQ ymm1 {k1}{z}, ymm2
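
Expand scatters the low contiguous elements of the source into the destination lanes selected by the mask vector, filling unselected lanes from the merge operand. The sketch below is an assumption-heavy illustration: it presumes the managed parameters follow the native (s, k, a) order, that is (merge, mask, value), and that a mask element selects its lane when its most significant bit is set.

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<int> merge = Vector128.Create(-1, -1, -1, -1);
    // Hypothetical mask: lanes 1 and 3 selected (most significant bit set).
    Vector128<int> mask  = Vector128.Create(0, unchecked((int)0x80000000), 0, unchecked((int)0x80000000));
    Vector128<int> value = Vector128.Create(10, 20, 30, 40);
    // VPEXPANDD: the first two source elements (10, 20) expand into the selected lanes.
    Vector128<int> result = Avx10v1.Expand(merge, mask, value);
    // Expected (under the assumptions above): <-1, 10, -1, 20>.
}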

ExpandLoad(Byte*, Vector128<Byte>, Vector128<Byte>)

__m128i _mm_mask_expandloadu_epi8 (__m128i s, __mmask16 k, void const * a)

VPEXPANDB xmm1 {k1}{z}, m128

ExpandLoad(Byte*, Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_mask_expandloadu_epi8 (__m256i s, __mmask32 k, void const * a)

VPEXPANDB ymm1 {k1}{z}, m256

ExpandLoad(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_expandloadu_pd (__m128d s, __mmask8 k, void const * a)

VEXPANDPD xmm1 {k1}{z}, m128

ExpandLoad(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_expandloadu_pd (__m256d s, __mmask8 k, void const * a)

VEXPANDPD ymm1 {k1}{z}, m256

ExpandLoad(Int16*, Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mask_expandloadu_epi16 (__m128i s, __mmask8 k, void const * a)

VPEXPANDW xmm1 {k1}{z}, m128

ExpandLoad(Int16*, Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_mask_expandloadu_epi16 (__m256i s, __mmask16 k, void const * a)

VPEXPANDW ymm1 {k1}{z}, m256

ExpandLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

ExpandLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

ExpandLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

ExpandLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

ExpandLoad(SByte*, Vector128<SByte>, Vector128<SByte>)

__m128i _mm_mask_expandloadu_epi8 (__m128i s, __mmask16 k, void const * a)

VPEXPANDB xmm1 {k1}{z}, m128

ExpandLoad(SByte*, Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_mask_expandloadu_epi8 (__m256i s, __mmask32 k, void const * a)

VPEXPANDB ymm1 {k1}{z}, m256

ExpandLoad(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_expandloadu_ps (__m128 s, __mmask8 k, void const * a)

VEXPANDPS xmm1 {k1}{z}, m128

ExpandLoad(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_expandloadu_ps (__m256 s, __mmask8 k, void const * a)

VEXPANDPS ymm1 {k1}{z}, m256

ExpandLoad(UInt16*, Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mask_expandloadu_epi16 (__m128i s, __mmask8 k, void const * a)

VPEXPANDW xmm1 {k1}{z}, m128

ExpandLoad(UInt16*, Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_mask_expandloadu_epi16 (__m256i s, __mmask16 k, void const * a)

VPEXPANDW ymm1 {k1}{z}, m256

ExpandLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

ExpandLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

ExpandLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

ExpandLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

Fixup(Vector128<Double>, Vector128<Double>, Vector128<Int64>, Byte)

__m128d _mm_fixupimm_pd(__m128d a, __m128d b, __m128i tbl, int imm);

VFIXUPIMMPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

Fixup(Vector128<Single>, Vector128<Single>, Vector128<Int32>, Byte)

__m128 _mm_fixupimm_ps(__m128 a, __m128 b, __m128i tbl, int imm);

VFIXUPIMMPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

Fixup(Vector256<Double>, Vector256<Double>, Vector256<Int64>, Byte)

__m256d _mm256_fixupimm_pd(__m256d a, __m256d b, __m256i tbl, int imm);

VFIXUPIMMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Fixup(Vector256<Single>, Vector256<Single>, Vector256<Int32>, Byte)

__m256 _mm256_fixupimm_ps(__m256 a, __m256 b, __m256i tbl, int imm);

VFIXUPIMMPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

FixupScalar(Vector128<Double>, Vector128<Double>, Vector128<Int64>, Byte)

__m128d _mm_fixupimm_sd(__m128d a, __m128d b, __m128i tbl, int imm);

VFIXUPIMMSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

FixupScalar(Vector128<Single>, Vector128<Single>, Vector128<Int32>, Byte)

__m128 _mm_fixupimm_ss(__m128 a, __m128 b, __m128i tbl, int imm);

VFIXUPIMMSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

FusedMultiplyAddNegatedScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_fnmadd_round_sd (__m128d a, __m128d b, __m128d c, int r)

VFNMADDSD xmm1, xmm2, xmm3 {er}

FusedMultiplyAddNegatedScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_fnmadd_round_ss (__m128 a, __m128 b, __m128 c, int r)

VFNMADDSS xmm1, xmm2, xmm3 {er}

FusedMultiplyAddScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_fmadd_round_sd (__m128d a, __m128d b, __m128d c, int r)

VFMADDSD xmm1, xmm2, xmm3 {er}

FusedMultiplyAddScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_fmadd_round_ss (__m128 a, __m128 b, __m128 c, int r)

VFMADDSS xmm1, xmm2, xmm3 {er}

FusedMultiplySubtractNegatedScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_fnmsub_round_sd (__m128d a, __m128d b, __m128d c, int r)

VFNMSUBSD xmm1, xmm2, xmm3 {er}

FusedMultiplySubtractNegatedScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_fnmsub_round_ss (__m128 a, __m128 b, __m128 c, int r)

VFNMSUBSS xmm1, xmm2, xmm3 {er}

FusedMultiplySubtractScalar(Vector128<Double>, Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_fmsub_round_sd (__m128d a, __m128d b, __m128d c, int r)

VFMSUBSD xmm1, xmm2, xmm3 {er}

FusedMultiplySubtractScalar(Vector128<Single>, Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_fmsub_round_ss (__m128 a, __m128 b, __m128 c, int r)

VFMSUBSS xmm1, xmm2, xmm3 {er}

GetExponent(Vector128<Double>)

__m128d _mm_getexp_pd (__m128d a)

VGETEXPPD xmm1 {k1}{z}, xmm2/m128/m64bcst

GetExponent(Vector128<Single>)

__m128 _mm_getexp_ps (__m128 a)

VGETEXPPS xmm1 {k1}{z}, xmm2/m128/m32bcst

GetExponent(Vector256<Double>)

__m256d _mm256_getexp_pd (__m256d a)

VGETEXPPD ymm1 {k1}{z}, ymm2/m256/m64bcst

GetExponent(Vector256<Single>)

__m256 _mm256_getexp_ps (__m256 a)

VGETEXPPS ymm1 {k1}{z}, ymm2/m256/m32bcst

GetExponentScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_getexp_sd (__m128d a, __m128d b)

VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetExponentScalar(Vector128<Double>)

__m128d _mm_getexp_sd (__m128d a)

VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}

GetExponentScalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_getexp_ss (__m128 a, __m128 b)

VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetExponentScalar(Vector128<Single>)

__m128 _mm_getexp_ss (__m128 a)

VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
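
GetExponent extracts the unbiased binary exponent of each element as a floating-point value (for example, the exponent of 8.0 is 3.0). A short sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<float> v = Vector128.Create(8.0f, 0.25f, 1.0f, 96.0f);
    // VGETEXPPS: returns floor(log2(|x|)) for each element.
    Vector128<float> e = Avx10v1.GetExponent(v);
    // e is <3, -2, 0, 6>.
}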

GetHashCode()

Serves as the default hash function.

(Inherited from Object)
GetMantissa(Vector128<Double>, Byte)

__m128d _mm_getmant_pd (__m128d a)

VGETMANTPD xmm1 {k1}{z}, xmm2/m128/m64bcst

GetMantissa(Vector128<Single>, Byte)

__m128 _mm_getmant_ps (__m128 a)

VGETMANTPS xmm1 {k1}{z}, xmm2/m128/m32bcst

GetMantissa(Vector256<Double>, Byte)

__m256d _mm256_getmant_pd (__m256d a)

VGETMANTPD ymm1 {k1}{z}, ymm2/m256/m64bcst

GetMantissa(Vector256<Single>, Byte)

__m256 _mm256_getmant_ps (__m256 a)

VGETMANTPS ymm1 {k1}{z}, ymm2/m256/m32bcst

GetMantissaScalar(Vector128<Double>, Byte)

__m128d _mm_getmant_sd (__m128d a)

VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}

GetMantissaScalar(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_getmant_sd (__m128d a, __m128d b)

VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetMantissaScalar(Vector128<Single>, Byte)

__m128 _mm_getmant_ss (__m128 a)

VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}

GetMantissaScalar(Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_getmant_ss (__m128 a, __m128 b)

VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

GetType()

Gets the Type of the current instance.

(Inherited from Object)
LeadingZeroCount(Vector128<Int32>)

__m128i _mm_lzcnt_epi32 (__m128i a)

VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst

LeadingZeroCount(Vector128<Int64>)

__m128i _mm_lzcnt_epi64 (__m128i a)

VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst

LeadingZeroCount(Vector128<UInt32>)

__m128i _mm_lzcnt_epi32 (__m128i a)

VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst

LeadingZeroCount(Vector128<UInt64>)

__m128i _mm_lzcnt_epi64 (__m128i a)

VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst

LeadingZeroCount(Vector256<Int32>)

__m256i _mm256_lzcnt_epi32 (__m256i a)

VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst

LeadingZeroCount(Vector256<Int64>)

__m256i _mm256_lzcnt_epi64 (__m256i a)

VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst

LeadingZeroCount(Vector256<UInt32>)

__m256i _mm256_lzcnt_epi32 (__m256i a)

VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst

LeadingZeroCount(Vector256<UInt64>)

__m256i _mm256_lzcnt_epi64 (__m256i a)

VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
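
LeadingZeroCount counts the leading zero bits of each element independently. For example:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<uint> v = Vector128.Create(1u, 0x80000000u, 0u, 0x0000FFFFu);
    // VPLZCNTD: per-element leading-zero count; a zero element yields 32.
    Vector128<uint> lz = Avx10v1.LeadingZeroCount(v);
    // lz is <31, 0, 32, 16>.
}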

MaskLoad(Byte*, Vector128<Byte>, Vector128<Byte>)

__m128i _mm_mask_loadu_epi8 (__m128i s, __mmask16 k, void const * mem_addr)

VMOVDQU8 xmm1 {k1}{z}, m128

MaskLoad(Byte*, Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_mask_loadu_epi8 (__m256i s, __mmask32 k, void const * mem_addr)

VMOVDQU8 ymm1 {k1}{z}, m256

MaskLoad(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_loadu_pd (__m128d s, __mmask8 k, void const * mem_addr)

VMOVUPD xmm1 {k1}{z}, m128

MaskLoad(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_loadu_pd (__m256d s, __mmask8 k, void const * mem_addr)

VMOVUPD ymm1 {k1}{z}, m256

MaskLoad(Int16*, Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mask_loadu_epi16 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU16 xmm1 {k1}{z}, m128

MaskLoad(Int16*, Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_mask_loadu_epi16 (__m256i s, __mmask16 k, void const * mem_addr)

VMOVDQU16 ymm1 {k1}{z}, m256

MaskLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

MaskLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

MaskLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

MaskLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256

MaskLoad(SByte*, Vector128<SByte>, Vector128<SByte>)

__m128i _mm_mask_loadu_epi8 (__m128i s, __mmask16 k, void const * mem_addr)

VMOVDQU8 xmm1 {k1}{z}, m128

MaskLoad(SByte*, Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_mask_loadu_epi8 (__m256i s, __mmask32 k, void const * mem_addr)

VMOVDQU8 ymm1 {k1}{z}, m256

MaskLoad(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_loadu_ps (__m128 s, __mmask8 k, void const * mem_addr)

VMOVUPS xmm1 {k1}{z}, m128

MaskLoad(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_loadu_ps (__m256 s, __mmask8 k, void const * mem_addr)

VMOVUPS ymm1 {k1}{z}, m256

MaskLoad(UInt16*, Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mask_loadu_epi16 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU16 xmm1 {k1}{z}, m128

MaskLoad(UInt16*, Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_mask_loadu_epi16 (__m256i s, __mmask16 k, void const * mem_addr)

VMOVDQU16 ymm1 {k1}{z}, m256

MaskLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

MaskLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

MaskLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

MaskLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256

MaskLoadAligned(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_load_pd (__m128d s, __mmask8 k, void const * mem_addr)

VMOVAPD xmm1 {k1}{z}, m128

MaskLoadAligned(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_load_pd (__m256d s, __mmask8 k, void const * mem_addr)

VMOVAPD ymm1 {k1}{z}, m256

MaskLoadAligned(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_load_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 xmm1 {k1}{z}, m128

MaskLoadAligned(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_load_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 ymm1 {k1}{z}, m256

MaskLoadAligned(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_load_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 xmm1 {k1}{z}, m128

MaskLoadAligned(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_load_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 ymm1 {k1}{z}, m256

MaskLoadAligned(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_load_ps (__m128 s, __mmask8 k, void const * mem_addr)

VMOVAPS xmm1 {k1}{z}, m128

MaskLoadAligned(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_load_ps (__m256 s, __mmask8 k, void const * mem_addr)

VMOVAPS ymm1 {k1}{z}, m256

MaskLoadAligned(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_load_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 xmm1 {k1}{z}, m128

MaskLoadAligned(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_load_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 ymm1 {k1}{z}, m256

MaskLoadAligned(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_load_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 xmm1 {k1}{z}, m128

MaskLoadAligned(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_load_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 ymm1 {k1}{z}, m256

MaskStore(Byte*, Vector128<Byte>, Vector128<Byte>)

void _mm_mask_storeu_epi8 (void * mem_addr, __mmask16 k, __m128i a)

VMOVDQU8 m128 {k1}{z}, xmm1

MaskStore(Byte*, Vector256<Byte>, Vector256<Byte>)

void _mm256_mask_storeu_epi8 (void * mem_addr, __mmask32 k, __m256i a)

VMOVDQU8 m256 {k1}{z}, ymm1

MaskStore(Double*, Vector128<Double>, Vector128<Double>)

void _mm_mask_storeu_pd (void * mem_addr, __mmask8 k, __m128d a)

VMOVUPD m128 {k1}{z}, xmm1

MaskStore(Double*, Vector256<Double>, Vector256<Double>)

void _mm256_mask_storeu_pd (void * mem_addr, __mmask8 k, __m256d a)

VMOVUPD m256 {k1}{z}, ymm1

MaskStore(Int16*, Vector128<Int16>, Vector128<Int16>)

void _mm_mask_storeu_epi16 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU16 m128 {k1}{z}, xmm1

MaskStore(Int16*, Vector256<Int16>, Vector256<Int16>)

void _mm256_mask_storeu_epi16 (void * mem_addr, __mmask16 k, __m256i a)

VMOVDQU16 m256 {k1}{z}, ymm1

MaskStore(Int32*, Vector128<Int32>, Vector128<Int32>)

void _mm_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU32 m128 {k1}{z}, xmm1

MaskStore(Int32*, Vector256<Int32>, Vector256<Int32>)

void _mm256_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU32 m256 {k1}{z}, ymm1

MaskStore(Int64*, Vector128<Int64>, Vector128<Int64>)

void _mm_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU64 m128 {k1}{z}, xmm1

MaskStore(Int64*, Vector256<Int64>, Vector256<Int64>)

void _mm256_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU64 m256 {k1}{z}, ymm1

MaskStore(SByte*, Vector128<SByte>, Vector128<SByte>)

void _mm_mask_storeu_epi8 (void * mem_addr, __mmask16 k, __m128i a)

VMOVDQU8 m128 {k1}{z}, xmm1

MaskStore(SByte*, Vector256<SByte>, Vector256<SByte>)

void _mm256_mask_storeu_epi8 (void * mem_addr, __mmask32 k, __m256i a)

VMOVDQU8 m256 {k1}{z}, ymm1

MaskStore(Single*, Vector128<Single>, Vector128<Single>)

void _mm_mask_storeu_ps (void * mem_addr, __mmask8 k, __m128 a)

VMOVUPS m128 {k1}{z}, xmm1

MaskStore(Single*, Vector256<Single>, Vector256<Single>)

void _mm256_mask_storeu_ps (void * mem_addr, __mmask8 k, __m256 a)

VMOVUPS m256 {k1}{z}, ymm1

MaskStore(UInt16*, Vector128<UInt16>, Vector128<UInt16>)

void _mm_mask_storeu_epi16 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU16 m128 {k1}{z}, xmm1

MaskStore(UInt16*, Vector256<UInt16>, Vector256<UInt16>)

void _mm256_mask_storeu_epi16 (void * mem_addr, __mmask16 k, __m256i a)

VMOVDQU16 m256 {k1}{z}, ymm1

MaskStore(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

void _mm_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU32 m128 {k1}{z}, xmm1

MaskStore(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

void _mm256_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU32 m256 {k1}{z}, ymm1

MaskStore(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

void _mm_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU64 m128 {k1}{z}, xmm1

MaskStore(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

void _mm256_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU64 m256 {k1}{z}, ymm1

MaskStoreAligned(Double*, Vector128<Double>, Vector128<Double>)

void _mm_mask_store_pd (void * mem_addr, __mmask8 k, __m128d a)

VMOVAPD m128 {k1}{z}, xmm1

MaskStoreAligned(Double*, Vector256<Double>, Vector256<Double>)

void _mm256_mask_store_pd (void * mem_addr, __mmask8 k, __m256d a)

VMOVAPD m256 {k1}{z}, ymm1

MaskStoreAligned(Int32*, Vector128<Int32>, Vector128<Int32>)

void _mm_mask_store_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA32 m128 {k1}{z}, xmm1

MaskStoreAligned(Int32*, Vector256<Int32>, Vector256<Int32>)

void _mm256_mask_store_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA32 m256 {k1}{z}, ymm1

MaskStoreAligned(Int64*, Vector128<Int64>, Vector128<Int64>)

void _mm_mask_store_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA64 m128 {k1}{z}, xmm1

MaskStoreAligned(Int64*, Vector256<Int64>, Vector256<Int64>)

void _mm256_mask_store_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA64 m256 {k1}{z}, ymm1

MaskStoreAligned(Single*, Vector128<Single>, Vector128<Single>)

void _mm_mask_store_ps (void * mem_addr, __mmask8 k, __m128 a)

VMOVAPS m128 {k1}{z}, xmm1

MaskStoreAligned(Single*, Vector256<Single>, Vector256<Single>)

void _mm256_mask_store_ps (void * mem_addr, __mmask8 k, __m256 a)

VMOVAPS m256 {k1}{z}, ymm1

MaskStoreAligned(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

void _mm_mask_store_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA32 m128 {k1}{z}, xmm1

MaskStoreAligned(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

void _mm256_mask_store_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA32 m256 {k1}{z}, ymm1

MaskStoreAligned(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

void _mm_mask_store_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA64 m128 {k1}{z}, xmm1

MaskStoreAligned(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

void _mm256_mask_store_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA64 m256 {k1}{z}, ymm1

Max(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_max_epi64 (__m128i a, __m128i b)

VPMAXSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Max(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_max_epu64 (__m128i a, __m128i b)

VPMAXUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Max(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_max_epi64 (__m256i a, __m256i b)

VPMAXSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Max(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_max_epu64 (__m256i a, __m256i b)

VPMAXUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

MemberwiseClone()

Creates a shallow copy of the current Object.

(Inherited from Object)
Min(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_min_epi64 (__m128i a, __m128i b)

VPMINSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Min(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_min_epu64 (__m128i a, __m128i b)

VPMINUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Min(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_min_epi64 (__m256i a, __m256i b)

VPMINSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Min(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_min_epu64 (__m256i a, __m256i b)

VPMINUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
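
These overloads add 64-bit element support that plain AVX2 Max/Min lacks. For example:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector256<long> a = Vector256.Create(-5L, 7L, long.MaxValue, 0L);
    Vector256<long> b = Vector256.Create( 2L, 3L, -1L,           0L);
    Vector256<long> max = Avx10v1.Max(a, b);  // VPMAXSQ: <2, 7, long.MaxValue, 0>
    Vector256<long> min = Avx10v1.Min(a, b);  // VPMINSQ: <-5, 3, -1, 0>
}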

MoveMask(Vector128<Byte>)

unsigned int _cvtmask16_u32 (__mmask16 a)

KMOVW r32, k1

MoveMask(Vector128<Double>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector128<Int16>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector128<Int32>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector128<Int64>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector128<SByte>)

unsigned int _cvtmask16_u32 (__mmask16 a)

KMOVW r32, k1

MoveMask(Vector128<Single>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector128<UInt16>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector128<UInt32>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector128<UInt64>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector256<Byte>)

unsigned int _cvtmask32_u32 (__mmask32 a)

KMOVD r32, k1

MoveMask(Vector256<Double>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector256<Int16>)

unsigned int _cvtmask16_u32 (__mmask16 a)

KMOVW r32, k1

MoveMask(Vector256<Int32>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector256<Int64>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector256<SByte>)

unsigned int _cvtmask32_u32 (__mmask32 a)

KMOVD r32, k1

MoveMask(Vector256<Single>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector256<UInt16>)

unsigned int _cvtmask16_u32 (__mmask16 a)

KMOVW r32, k1

MoveMask(Vector256<UInt32>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1

MoveMask(Vector256<UInt64>)

unsigned int _cvtmask8_u32 (__mmask8 a)

KMOVB r32, k1
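
MoveMask packs one bit per element into an integer result. The sketch below assumes the per-element semantics follow the existing MoveMask helpers, taking the most significant (sign) bit of each element; that assumption is not stated by the listing above.

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<int> v = Vector128.Create(-1, 2, -3, 4);
    // Assumed to gather the sign bit of each element: 0b0101 here.
    var bits = Avx10v1.MoveMask(v);
}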

MultiplyLow(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mullo_epi64 (__m128i a, __m128i b)

VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

MultiplyLow(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mullo_epi64 (__m128i a, __m128i b)

VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

MultiplyLow(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mullo_epi64 (__m256i a, __m256i b)

VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

MultiplyLow(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mullo_epi64 (__m256i a, __m256i b)

VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
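
MultiplyLow performs a full 64-bit by 64-bit multiply per element and keeps the low 64 bits of each product, an operation plain AVX2 does not provide. For example:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector256<ulong> a = Vector256.Create(3UL, 1UL << 40, 5UL, 0UL);
    Vector256<ulong> b = Vector256.Create(7UL, 1UL << 30, 5UL, 9UL);
    // VPMULLQ: low 64 bits of each product; 2^40 * 2^30 = 2^70 wraps to 2^6 = 64.
    Vector256<ulong> p = Avx10v1.MultiplyLow(a, b);
    // p is <21, 64, 25, 0>.
}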

MultiplyScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_mul_round_sd (__m128d a, __m128d b, int rounding)

VMULSD xmm1, xmm2, xmm3 {er}

MultiplyScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_mul_round_ss (__m128 a, __m128 b, int rounding)

VMULSS xmm1, xmm2, xmm3 {er}

MultiShift(Vector128<Byte>, Vector128<UInt64>)

__m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b)

VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

MultiShift(Vector128<SByte>, Vector128<Int64>)

__m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b)

VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

MultiShift(Vector256<Byte>, Vector256<UInt64>)

__m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b)

VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

MultiShift(Vector256<SByte>, Vector256<Int64>)

__m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b)

VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar16x16(Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b)

VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar16x16(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b)

VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar16x16x2(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b)

VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256

VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar16x16x2(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b)

VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256

VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar16x8(Vector128<Byte>, Vector128<Byte>)

__m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b)

VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar16x8(Vector128<SByte>, Vector128<SByte>)

__m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b)

VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar16x8x2(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>)

__m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b)

VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128

VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar16x8x2(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>)

__m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b)

VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128

VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar2x64x2(Vector128<Double>, Vector128<Int64>, Vector128<Double>)

__m128d _mm_permutex2var_pd (__m128d a, __m128i idx, __m128d b)

VPERMI2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

VPERMT2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar2x64x2(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)

VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar2x64x2(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)

VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar32x8(Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b)

VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar32x8(Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b)

VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar32x8x2(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b)

VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256

VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar32x8x2(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b)

VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256

VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256

PermuteVar4x32x2(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)

VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x32x2(Vector128<Single>, Vector128<Int32>, Vector128<Single>)

__m128 _mm_permutex2var_ps (__m128 a, __m128i idx, __m128 b)

VPERMI2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

VPERMT2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x32x2(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)

VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x64(Vector256<Double>, Vector256<Int64>)

__m256d _mm256_permute4x64_pd (__m256d a, __m256i b)

VPERMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)

VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)

VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<Double>, Vector256<Int64>, Vector256<Double>)

__m256d _mm256_permutex2var_pd (__m256d a, __m256i idx, __m256d b)

VPERMI2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

VPERMT2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)

VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)

VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar8x16(Vector128<Int16>, Vector128<Int16>)

__m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b)

VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar8x16(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b)

VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar8x16x2(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>)

__m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b)

VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128

VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar8x16x2(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b)

VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128

VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128

PermuteVar8x32x2(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)

VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

PermuteVar8x32x2(Vector256<Single>, Vector256<Int32>, Vector256<Single>)

__m256 _mm256_permutex2var_ps (__m256 a, __m256i idx, __m256 b)

VPERMI2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

VPERMT2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

PermuteVar8x32x2(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)

VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

Range(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_range_pd(__m128d a, __m128d b, int imm);

VRANGEPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

Range(Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_range_ps(__m128 a, __m128 b, int imm);

VRANGEPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

Range(Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_range_pd(__m256d a, __m256d b, int imm);

VRANGEPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Range(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_range_ps(__m256 a, __m256 b, int imm);

VRANGEPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

RangeScalar(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_range_sd(__m128d a, __m128d b, int imm);

VRANGESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

RangeScalar(Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_range_ss(__m128 a, __m128 b, int imm);

VRANGESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

Reciprocal14(Vector128<Double>)

__m128d _mm_rcp14_pd (__m128d a)

VRCP14PD xmm1 {k1}{z}, xmm2/m128/m64bcst

Reciprocal14(Vector128<Single>)

__m128 _mm_rcp14_ps (__m128 a)

VRCP14PS xmm1 {k1}{z}, xmm2/m128/m32bcst

Reciprocal14(Vector256<Double>)

__m256d _mm256_rcp14_pd (__m256d a)

VRCP14PD ymm1 {k1}{z}, ymm2/m256/m64bcst

Reciprocal14(Vector256<Single>)

__m256 _mm256_rcp14_ps (__m256 a)

VRCP14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
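
Reciprocal14 returns an approximate reciprocal with a maximum relative error of about 2^-14, which is useful when a later Newton-Raphson refinement step (or no exact result at all) is acceptable. A brief sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<float> v = Vector128.Create(2.0f, 4.0f, 0.5f, 8.0f);
    // VRCP14PS: each element is approximately 1/x, accurate to roughly 2^-14 relative error.
    Vector128<float> r = Avx10v1.Reciprocal14(v);
    // r is approximately <0.5, 0.25, 2.0, 0.125>.
}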

Reciprocal14Scalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_rcp14_sd (__m128d a, __m128d b)

VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

Reciprocal14Scalar(Vector128<Double>)

__m128d _mm_rcp14_sd (__m128d a)

VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64

Reciprocal14Scalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_rcp14_ss (__m128 a, __m128 b)

VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

Reciprocal14Scalar(Vector128<Single>)

__m128 _mm_rcp14_ss (__m128 a)

VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32

ReciprocalSqrt14(Vector128<Double>)

__m128d _mm_rsqrt14_pd (__m128d a)

VRSQRT14PD xmm1 {k1}{z}, xmm2/m128/m64bcst

ReciprocalSqrt14(Vector128<Single>)

__m128 _mm_rsqrt14_ps (__m128 a)

VRSQRT14PS xmm1 {k1}{z}, xmm2/m128/m32bcst

ReciprocalSqrt14(Vector256<Double>)

__m256d _mm256_rsqrt14_pd (__m256d a)

VRSQRT14PD ymm1 {k1}{z}, ymm2/m256/m64bcst

ReciprocalSqrt14(Vector256<Single>)

__m256 _mm256_rsqrt14_ps (__m256 a)

VRSQRT14PS ymm1 {k1}{z}, ymm2/m256/m32bcst

ReciprocalSqrt14Scalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_rsqrt14_sd (__m128d a, __m128d b)

VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

ReciprocalSqrt14Scalar(Vector128<Double>)

__m128d _mm_rsqrt14_sd (__m128d a)

VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64

ReciprocalSqrt14Scalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_rsqrt14_ss (__m128 a, __m128 b)

VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

ReciprocalSqrt14Scalar(Vector128<Single>)

__m128 _mm_rsqrt14_ss (__m128 a)

VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32

Reduce(Vector128<Double>, Byte)

__m128d _mm_reduce_pd(__m128d a, int imm);

VREDUCEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

Reduce(Vector128<Single>, Byte)

__m128 _mm_reduce_ps(__m128 a, int imm);

VREDUCEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

Reduce(Vector256<Double>, Byte)

__m256d _mm256_reduce_pd(__m256d a, int imm);

VREDUCEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

Reduce(Vector256<Single>, Byte)

__m256 _mm256_reduce_ps(__m256 a, int imm);

VREDUCEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

ReduceScalar(Vector128<Double>, Byte)

__m128d _mm_reduce_sd(__m128d a, int imm);

VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

ReduceScalar(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_reduce_sd(__m128d a, __m128d b, int imm);

VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

ReduceScalar(Vector128<Single>, Byte)

__m128 _mm_reduce_ss(__m128 a, int imm);

VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

ReduceScalar(Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_reduce_ss(__m128 a, __m128 b, int imm);

VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

RotateLeft(Vector128<Int32>, Byte)

__m128i _mm_rol_epi32 (__m128i a, int imm8)

VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateLeft(Vector128<Int64>, Byte)

__m128i _mm_rol_epi64 (__m128i a, int imm8)

VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateLeft(Vector128<UInt32>, Byte)

__m128i _mm_rol_epi32 (__m128i a, int imm8)

VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateLeft(Vector128<UInt64>, Byte)

__m128i _mm_rol_epi64 (__m128i a, int imm8)

VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateLeft(Vector256<Int32>, Byte)

__m256i _mm256_rol_epi32 (__m256i a, int imm8)

VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateLeft(Vector256<Int64>, Byte)

__m256i _mm256_rol_epi64 (__m256i a, int imm8)

VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateLeft(Vector256<UInt32>, Byte)

__m256i _mm256_rol_epi32 (__m256i a, int imm8)

VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateLeft(Vector256<UInt64>, Byte)

__m256i _mm256_rol_epi64 (__m256i a, int imm8)

VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
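
RotateLeft rotates each element by an immediate count, replacing the usual shift-and-or idiom. For example:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<uint> v = Vector128.Create(0x80000001u, 1u, 0xF0000000u, 0u);
    // VPROLD: rotate each 32-bit element left by 4 bits.
    Vector128<uint> r = Avx10v1.RotateLeft(v, 4);
    // r is <0x00000018, 0x00000010, 0x0000000F, 0>.
}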

RotateLeftVariable(Vector128<Int32>, Vector128<UInt32>)

__m128i _mm_rolv_epi32 (__m128i a, __m128i b)

VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateLeftVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_rolv_epi64 (__m128i a, __m128i b)

VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateLeftVariable(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_rolv_epi32 (__m128i a, __m128i b)

VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateLeftVariable(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_rolv_epi64 (__m128i a, __m128i b)

VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateLeftVariable(Vector256<Int32>, Vector256<UInt32>)

__m256i _mm256_rolv_epi32 (__m256i a, __m256i b)

VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateLeftVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_rolv_epi64 (__m256i a, __m256i b)

VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RotateLeftVariable(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_rolv_epi32 (__m256i a, __m256i b)

VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateLeftVariable(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_rolv_epi64 (__m256i a, __m256i b)

VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RotateRight(Vector128<Int32>, Byte)

__m128i _mm_ror_epi32 (__m128i a, int imm8)

VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateRight(Vector128<Int64>, Byte)

__m128i _mm_ror_epi64 (__m128i a, int imm8)

VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateRight(Vector128<UInt32>, Byte)

__m128i _mm_ror_epi32 (__m128i a, int imm8)

VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateRight(Vector128<UInt64>, Byte)

__m128i _mm_ror_epi64 (__m128i a, int imm8)

VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateRight(Vector256<Int32>, Byte)

__m256i _mm256_ror_epi32 (__m256i a, int imm8)

VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateRight(Vector256<Int64>, Byte)

__m256i _mm256_ror_epi64 (__m256i a, int imm8)

VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateRight(Vector256<UInt32>, Byte)

__m256i _mm256_ror_epi32 (__m256i a, int imm8)

VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateRight(Vector256<UInt64>, Byte)

__m256i _mm256_ror_epi64 (__m256i a, int imm8)

VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateRightVariable(Vector128<Int32>, Vector128<UInt32>)

__m128i _mm_rorv_epi32 (__m128i a, __m128i b)

VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateRightVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_rorv_epi64 (__m128i a, __m128i b)

VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateRightVariable(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_rorv_epi32 (__m128i a, __m128i b)

VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateRightVariable(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_rorv_epi64 (__m128i a, __m128i b)

VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateRightVariable(Vector256<Int32>, Vector256<UInt32>)

__m256i _mm256_rorv_epi32 (__m256i a, __m256i b)

VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateRightVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_rorv_epi64 (__m256i a, __m256i b)

VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RotateRightVariable(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_rorv_epi32 (__m256i a, __m256i b)

VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateRightVariable(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_rorv_epi64 (__m256i a, __m256i b)

VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RoundScale(Vector128<Double>, Byte)

__m128d _mm_roundscale_pd (__m128d a, int imm)

VRNDSCALEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RoundScale(Vector128<Single>, Byte)

__m128 _mm_roundscale_ps (__m128 a, int imm)

VRNDSCALEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RoundScale(Vector256<Double>, Byte)

__m256d _mm256_roundscale_pd (__m256d a, int imm)

VRNDSCALEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RoundScale(Vector256<Single>, Byte)

__m256 _mm256_roundscale_ps (__m256 a, int imm)

VRNDSCALEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
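
RoundScale rounds each element to a precision of 2^-M fractional bits, where M is the upper four bits of the control byte and the lower bits select the rounding behavior; a control byte of 0 therefore rounds to the nearest integral value. A hedged sketch of that simplest case:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<float> v = Vector128.Create(1.25f, -2.5f, 3.75f, 0.5f);
    // VRNDSCALEPS with imm8 = 0: round to the nearest integral value (ties to even).
    Vector128<float> r = Avx10v1.RoundScale(v, 0);
    // r is <1, -2, 4, 0>.
}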

RoundScaleScalar(Vector128<Double>, Byte)

__m128d _mm_roundscale_sd (__m128d a, int imm)

VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

RoundScaleScalar(Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_roundscale_sd (__m128d a, __m128d b, int imm)

VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

RoundScaleScalar(Vector128<Single>, Byte)

__m128 _mm_roundscale_ss (__m128 a, int imm)

VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

RoundScaleScalar(Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_roundscale_ss (__m128 a, __m128 b, int imm)

VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.

Scale(Vector128<Double>, Vector128<Double>)

__m128d _mm_scalef_pd (__m128d a, __m128d b)

VSCALEFPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Scale(Vector128<Single>, Vector128<Single>)

__m128 _mm_scalef_ps (__m128 a, __m128 b)

VSCALEFPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

Scale(Vector256<Double>, Vector256<Double>)

__m256d _mm256_scalef_pd (__m256d a, __m256d b)

VSCALEFPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Scale(Vector256<Single>, Vector256<Single>)

__m256 _mm256_scalef_ps (__m256 a, __m256 b)

VSCALEFPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

ScaleScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_scalef_round_sd (__m128d a, __m128d b, int rounding)

VSCALEFSD xmm1, xmm2, xmm3 {er}

ScaleScalar(Vector128<Double>, Vector128<Double>)

__m128d _mm_scalef_sd (__m128d a, __m128d b)

VSCALEFSD xmm1 {k1}{z}, xmm2, xmm3/m64{er}

ScaleScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_scalef_round_ss (__m128 a, __m128 b, int rounding)

VSCALEFSS xmm1, xmm2, xmm3 {er}

ScaleScalar(Vector128<Single>, Vector128<Single>)

__m128 _mm_scalef_ss (__m128 a, __m128 b)

VSCALEFSS xmm1 {k1}{z}, xmm2, xmm3/m32{er}

ShiftLeftLogicalVariable(Vector128<Int16>, Vector128<UInt16>)

__m128i _mm_sllv_epi16 (__m128i a, __m128i count)

VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftLeftLogicalVariable(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_sllv_epi16 (__m128i a, __m128i count)

VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftLeftLogicalVariable(Vector256<Int16>, Vector256<UInt16>)

__m256i _mm256_sllv_epi16 (__m256i a, __m256i count)

VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256

ShiftLeftLogicalVariable(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_sllv_epi16 (__m256i a, __m256i count)

VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256

ShiftRightArithmetic(Vector128<Int64>, Byte)

__m128i _mm_srai_epi64 (__m128i a, int imm8)

VPSRAQ xmm1 {k1}{z}, xmm2, imm8

ShiftRightArithmetic(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_sra_epi64 (__m128i a, __m128i count)

VPSRAQ xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftRightArithmetic(Vector256<Int64>, Byte)

__m256i _mm256_srai_epi64 (__m256i a, int imm8)

VPSRAQ ymm1 {k1}{z}, ymm2, imm8

ShiftRightArithmetic(Vector256<Int64>, Vector128<Int64>)

__m256i _mm256_sra_epi64 (__m256i a, __m128i count)

VPSRAQ ymm1 {k1}{z}, ymm2, xmm3/m128
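
ShiftRightArithmetic on 64-bit elements propagates the sign bit, an operation that SSE and AVX2 do not offer. A short sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<long> v = Vector128.Create(-64L, 64L);
    // VPSRAQ: arithmetic right shift keeps the sign bit.
    Vector128<long> r = Avx10v1.ShiftRightArithmetic(v, 3);
    // r is <-8, 8>.
}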

ShiftRightArithmeticVariable(Vector128<Int16>, Vector128<UInt16>)

__m128i _mm_srav_epi16 (__m128i a, __m128i count)

VPSRAVW xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftRightArithmeticVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_srav_epi64 (__m128i a, __m128i count)

VPSRAVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

ShiftRightArithmeticVariable(Vector256<Int16>, Vector256<UInt16>)

__m256i _mm256_srav_epi16 (__m256i a, __m256i count)

VPSRAVW ymm1 {k1}{z}, ymm2, ymm3/m256

ShiftRightArithmeticVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_srav_epi64 (__m256i a, __m256i count)

VPSRAVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

ShiftRightLogicalVariable(Vector128<Int16>, Vector128<UInt16>)

__m128i _mm_srlv_epi16 (__m128i a, __m128i count)

VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftRightLogicalVariable(Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_srlv_epi16 (__m128i a, __m128i count)

VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftRightLogicalVariable(Vector256<Int16>, Vector256<UInt16>)

__m256i _mm256_srlv_epi16 (__m256i a, __m256i count)

VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256

ShiftRightLogicalVariable(Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_srlv_epi16 (__m256i a, __m256i count)

VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256

Shuffle2x128(Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_shuffle_f64x2 (__m256d a, __m256d b, const int imm8)

VSHUFF64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Shuffle2x128(Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)

VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)

VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Shuffle2x128(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_shuffle_f32x4 (__m256 a, __m256 b, const int imm8)

VSHUFF32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)

VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)

VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

SqrtScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_sqrt_round_sd (__m128d a, __m128d b, int rounding)

VSQRTSD xmm1, xmm2, xmm3 {er}

SqrtScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_sqrt_round_ss (__m128 a, __m128 b, int rounding)

VSQRTSS xmm1, xmm2, xmm3 {er}

SubtractScalar(Vector128<Double>, Vector128<Double>, FloatRoundingMode)

__m128d _mm_sub_round_sd (__m128d a, __m128d b, int rounding)

VSUBSD xmm1, xmm2, xmm3 {er}

SubtractScalar(Vector128<Single>, Vector128<Single>, FloatRoundingMode)

__m128 _mm_sub_round_ss (__m128 a, __m128 b, int rounding)

VSUBSS xmm1, xmm2, xmm3 {er}

SumAbsoluteDifferencesInBlock32(Vector128<Byte>, Vector128<Byte>, Byte)

__m128i _mm_dbsad_epu8 (__m128i a, __m128i b, int imm8)

VDBPSADBW xmm1 {k1}{z}, xmm2, xmm3/m128

SumAbsoluteDifferencesInBlock32(Vector256<Byte>, Vector256<Byte>, Byte)

__m256i _mm256_dbsad_epu8 (__m256i a, __m256i b, int imm8)

VDBPSADBW ymm1 {k1}{z}, ymm2, ymm3/m256

TernaryLogic(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Double>, Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_ternarylogic_pd (__m128d a, __m128d b, __m128d c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>, Byte)

__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

TernaryLogic(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>, Byte)

__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

TernaryLogic(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Single>, Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_ternarylogic_ps (__m128 a, __m128 b, __m128 c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>, Byte)

__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

TernaryLogic(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>, Byte)

__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

TernaryLogic(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Double>, Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_ternarylogic_pd (__m256d a, __m256d b, __m256d c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

TernaryLogic(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

TernaryLogic(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Single>, Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_ternarylogic_ps (__m256 a, __m256 b, __m256 c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

TernaryLogic(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
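
TernaryLogic evaluates an arbitrary three-input boolean function per bit: the control byte is the eight-entry truth table indexed by the bits of (a, b, c). For instance, 0x96 encodes a ^ b ^ c, so three XORs collapse into one instruction. A sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

if (Avx10v1.IsSupported)
{
    Vector128<uint> a = Vector128.Create(0xFF00FF00u);
    Vector128<uint> b = Vector128.Create(0x0F0F0F0Fu);
    Vector128<uint> c = Vector128.Create(0x12345678u);
    // VPTERNLOGD with imm8 = 0x96: bitwise a XOR b XOR c in a single instruction.
    Vector128<uint> x = Avx10v1.TernaryLogic(a, b, c, 0x96);
    // x equals a ^ b ^ c.
}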

ToString()

Returns a string that represents the current object.

(Inherited from Object)

Applies to