Avx512F.VL Class

Definition

Provides access to the x86 AVX512F+VL hardware instructions via intrinsics.

public: ref class Avx512F::VL abstract
public abstract class Avx512F.VL
type Avx512F.VL = class
Public MustInherit Class Avx512F.VL
Inheritance
Object → Avx512F.VL
Derived

Properties

IsSupported

Gets a value that indicates whether the APIs in this class are supported.
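
Callers typically guard these intrinsics behind the IsSupported check; the JIT treats the property as a constant, so the unsupported branch is eliminated entirely. A minimal sketch (the scalar fallback is illustrative, not part of this API):

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static Vector128<ulong> Abs64(Vector128<long> v)
{
    if (Avx512F.VL.IsSupported)
    {
        // Compiles down to a single VPABSQ on AVX512F+VL hardware.
        return Avx512F.VL.Abs(v);
    }

    // Portable fallback for other hardware.
    return Vector128.Create((ulong)Math.Abs(v.GetElement(0)),
                            (ulong)Math.Abs(v.GetElement(1)));
}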

Methods

Abs(Vector128<Int64>)

__m128i _mm_abs_epi64 (__m128i a)

VPABSQ xmm1 {k1}{z}, xmm2/m128/m64bcst

Abs(Vector256<Int64>)

__m256i _mm256_abs_epi64 (__m256i a)

VPABSQ ymm1 {k1}{z}, ymm2/m256/m64bcst

AlignRight32(Vector128<Int32>, Vector128<Int32>, Byte)

__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)

VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

AlignRight32(Vector128<UInt32>, Vector128<UInt32>, Byte)

__m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)

VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

AlignRight32(Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)

VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

AlignRight32(Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)

VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

AlignRight64(Vector128<Int64>, Vector128<Int64>, Byte)

__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)

VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

AlignRight64(Vector128<UInt64>, Vector128<UInt64>, Byte)

__m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)

VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

AlignRight64(Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)

VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

AlignRight64(Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)

VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
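
The VALIGND/VALIGNQ semantics: the two sources are treated as one concatenated vector, with the first operand supplying the upper elements, shifted right by count elements; the low elements are returned. A minimal sketch assuming that convention:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var upper = Vector128.Create(4, 5, 6, 7);  // first operand (high half)
var lower = Vector128.Create(0, 1, 2, 3);  // second operand (low half)

// The concatenation is {0,1,2,3,4,5,6,7}; shifting right by one
// 32-bit element should yield {1, 2, 3, 4}.
var shifted = Avx512F.VL.AlignRight32(upper, lower, 1);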

BlendVariable(Vector128<Double>, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_blendv_pd (__m128d a, __m128d b, __mmask8 mask)

VBLENDMPD xmm1 {k1}, xmm2, xmm3/m128/m64bcst

BlendVariable(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_blendv_epi32 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMD xmm1 {k1}, xmm2, xmm3/m128/m32bcst

BlendVariable(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_blendv_epi64 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMQ xmm1 {k1}, xmm2, xmm3/m128/m64bcst

BlendVariable(Vector128<Single>, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_blendv_ps (__m128 a, __m128 b, __mmask8 mask)

VBLENDMPS xmm1 {k1}, xmm2, xmm3/m128/m32bcst

BlendVariable(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_blendv_epu32 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMD xmm1 {k1}, xmm2, xmm3/m128/m32bcst

BlendVariable(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_blendv_epu64 (__m128i a, __m128i b, __mmask8 mask)

VPBLENDMQ xmm1 {k1}, xmm2, xmm3/m128/m64bcst

BlendVariable(Vector256<Double>, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_blendv_pd (__m256d a, __m256d b, __mmask8 mask)

VBLENDMPD ymm1 {k1}, ymm2, ymm3/m256/m64bcst

BlendVariable(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_blendv_epi32 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMD ymm1 {k1}, ymm2, ymm3/m256/m32bcst

BlendVariable(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_blendv_epi64 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMQ ymm1 {k1}, ymm2, ymm3/m256/m64bcst

BlendVariable(Vector256<Single>, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_blendv_ps (__m256 a, __m256 b, __mmask8 mask)

VBLENDMPS ymm1 {k1}, ymm2, ymm3/m256/m32bcst

BlendVariable(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_blendv_epu32 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMD ymm1 {k1}, ymm2, ymm3/m256/m32bcst

BlendVariable(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_blendv_epu64 (__m256i a, __m256i b, __mmask8 mask)

VPBLENDMQ ymm1 {k1}, ymm2, ymm3/m256/m64bcst
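
BlendVariable selects per element: where the most significant bit of the corresponding mask element is set, the result element comes from the second operand, otherwise from the first. A minimal sketch of that convention:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var left  = Vector128.Create(10, 20, 30, 40);
var right = Vector128.Create(-1, -2, -3, -4);

// -1 has its MSB set (select from right); 0 selects from left.
var mask = Vector128.Create(-1, 0, -1, 0);

// Expected result: {-1, 20, -3, 40}.
var blended = Avx512F.VL.BlendVariable(left, right, mask);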

Compare(Vector128<Double>, Vector128<Double>, FloatComparisonMode)

__mmask8 _mm_cmp_pd_mask (__m128d a, __m128d b, const int imm8)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8

Compare(Vector128<Single>, Vector128<Single>, FloatComparisonMode)

__mmask8 _mm_cmp_ps_mask (__m128 a, __m128 b, const int imm8)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8

Compare(Vector256<Double>, Vector256<Double>, FloatComparisonMode)

__mmask8 _mm256_cmp_pd_mask (__m256d a, __m256d b, const int imm8)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8

Compare(Vector256<Single>, Vector256<Single>, FloatComparisonMode)

__mmask8 _mm256_cmp_ps_mask (__m256 a, __m256 b, const int imm8)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8
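
The Compare overloads take the predicate as a FloatComparisonMode value, which becomes the imm8 of VCMPPD/VCMPPS; the managed API then materializes the mask as a vector whose matching elements are all ones. A minimal sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var a = Vector128.Create(1.0, 4.0);
var b = Vector128.Create(2.0, 3.0);

// Element 0 satisfies a < b, element 1 does not, so the result is
// {all-ones, zero} when viewed as raw bits.
var lt = Avx512F.VL.Compare(a, b, FloatComparisonMode.OrderedLessThanSignaling);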

CompareEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpeq_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmpeq_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(0)

CompareEqual(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmpeq_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(0)

CompareEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpeq_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector128<UInt32>, Vector128<UInt32>)

__mmask8 _mm_cmpeq_epu32_mask (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(0)

CompareEqual(Vector128<UInt64>, Vector128<UInt64>)

__mmask8 _mm_cmpeq_epu64_mask (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(0)

CompareEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpeq_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmpeq_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(0)

CompareEqual(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmpeq_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(0)

CompareEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpeq_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(0)

The above native signature does not exist. We provide this additional overload for completeness.

CompareEqual(Vector256<UInt32>, Vector256<UInt32>)

__mmask8 _mm256_cmpeq_epu32_mask (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(0)

CompareEqual(Vector256<UInt64>, Vector256<UInt64>)

__mmask8 _mm256_cmpeq_epu64_mask (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(0)
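
Although the underlying VPCMP forms write a k register, the managed CompareEqual overloads return an ordinary vector in which each matching element is all ones and each non-matching element is zero, mirroring the older SSE/AVX comparison style. A minimal sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var x = Vector128.Create(1, 2, 3, 4);
var y = Vector128.Create(1, 0, 3, 0);

// Expected: {-1, 0, -1, 0} when the elements are read as Int32.
var eq = Avx512F.VL.CompareEqual(x, y);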

CompareGreaterThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpgt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmpgt_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)

CompareGreaterThan(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmpgt_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)

CompareGreaterThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpgt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector128<UInt32>, Vector128<UInt32>)

__mmask8 _mm_cmpgt_epu32_mask (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)

CompareGreaterThan(Vector128<UInt64>, Vector128<UInt64>)

__mmask8 _mm_cmpgt_epu64_mask (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)

CompareGreaterThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpgt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmpgt_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)

CompareGreaterThan(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmpgt_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)

CompareGreaterThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpgt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(14)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThan(Vector256<UInt32>, Vector256<UInt32>)

__mmask8 _mm256_cmpgt_epu32_mask (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)

CompareGreaterThan(Vector256<UInt64>, Vector256<UInt64>)

__mmask8 _mm256_cmpgt_epu64_mask (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)

CompareGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpge_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmpge_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmpge_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpge_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector128<UInt32>, Vector128<UInt32>)

__mmask8 _mm_cmpge_epu32_mask (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)

__mmask8 _mm_cmpge_epu64_mask (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpge_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmpge_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmpge_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpge_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(13)

The above native signature does not exist. We provide this additional overload for completeness.

CompareGreaterThanOrEqual(Vector256<UInt32>, Vector256<UInt32>)

__mmask8 _mm256_cmpge_epu32_mask (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)

CompareGreaterThanOrEqual(Vector256<UInt64>, Vector256<UInt64>)

__mmask8 _mm256_cmpge_epu64_mask (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)

CompareLessThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmplt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmplt_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)

CompareLessThan(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmplt_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)

CompareLessThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmplt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector128<UInt32>, Vector128<UInt32>)

__mmask8 _mm_cmplt_epu32_mask (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)

CompareLessThan(Vector128<UInt64>, Vector128<UInt64>)

__mmask8 _mm_cmplt_epu64_mask (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)

CompareLessThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmplt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmplt_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)

CompareLessThan(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmplt_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)

CompareLessThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmplt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(1)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThan(Vector256<UInt32>, Vector256<UInt32>)

__mmask8 _mm256_cmplt_epu32_mask (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)

CompareLessThan(Vector256<UInt64>, Vector256<UInt64>)

__mmask8 _mm256_cmplt_epu64_mask (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)

CompareLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmple_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmple_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmple_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmple_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector128<UInt32>, Vector128<UInt32>)

__mmask8 _mm_cmple_epu32_mask (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector128<UInt64>, Vector128<UInt64>)

__mmask8 _mm_cmple_epu64_mask (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmple_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmple_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmple_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)

CompareLessThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmple_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(2)

The above native signature does not exist. We provide this additional overload for completeness.

CompareLessThanOrEqual(Vector256<UInt32>, Vector256<UInt32>)

__mmask8 _mm256_cmple_epu32_mask (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)

CompareLessThanOrEqual(Vector256<UInt64>, Vector256<UInt64>)

__mmask8 _mm256_cmple_epu64_mask (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)

CompareNotEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpneq_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector128<Int32>, Vector128<Int32>)

__mmask8 _mm_cmpneq_epi32_mask (__m128i a, __m128i b)

VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)

CompareNotEqual(Vector128<Int64>, Vector128<Int64>)

__mmask8 _mm_cmpneq_epi64_mask (__m128i a, __m128i b)

VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)

CompareNotEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpneq_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector128<UInt32>, Vector128<UInt32>)

__mmask8 _mm_cmpneq_epu32_mask (__m128i a, __m128i b)

VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)

CompareNotEqual(Vector128<UInt64>, Vector128<UInt64>)

__mmask8 _mm_cmpneq_epu64_mask (__m128i a, __m128i b)

VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)

CompareNotEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpneq_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector256<Int32>, Vector256<Int32>)

__mmask8 _mm256_cmpneq_epi32_mask (__m256i a, __m256i b)

VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)

CompareNotEqual(Vector256<Int64>, Vector256<Int64>)

__mmask8 _mm256_cmpneq_epi64_mask (__m256i a, __m256i b)

VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)

CompareNotEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpneq_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(4)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotEqual(Vector256<UInt32>, Vector256<UInt32>)

__mmask8 _mm256_cmpneq_epu32_mask (__m256i a, __m256i b)

VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)

CompareNotEqual(Vector256<UInt64>, Vector256<UInt64>)

__mmask8 _mm256_cmpneq_epu64_mask (__m256i a, __m256i b)

VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)

CompareNotGreaterThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpngt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpngt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpngt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpngt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(10)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpnge_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpnge_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpnge_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotGreaterThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpnge_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(9)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpnlt_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpnlt_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpnlt_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThan(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpnlt_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(5)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpnle_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpnle_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpnle_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareNotLessThanOrEqual(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpnle_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(6)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpord_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpord_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpord_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareOrdered(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpord_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(7)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector128<Double>, Vector128<Double>)

__mmask8 _mm_cmpunord_pd_mask (__m128d a, __m128d b)

VCMPPD k1 {k2}, xmm2, xmm3/m128/m64bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector128<Single>, Vector128<Single>)

__mmask8 _mm_cmpunord_ps_mask (__m128 a, __m128 b)

VCMPPS k1 {k2}, xmm2, xmm3/m128/m32bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector256<Double>, Vector256<Double>)

__mmask8 _mm256_cmpunord_pd_mask (__m256d a, __m256d b)

VCMPPD k1 {k2}, ymm2, ymm3/m256/m64bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

CompareUnordered(Vector256<Single>, Vector256<Single>)

__mmask8 _mm256_cmpunord_ps_mask (__m256 a, __m256 b)

VCMPPS k1 {k2}, ymm2, ymm3/m256/m32bcst{sae}, imm8(3)

The above native signature does not exist. We provide this additional overload for completeness.

Compress(Vector128<Double>, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_compress_pd (__m128d s, __mmask8 k, __m128d a)

VCOMPRESSPD xmm1 {k1}{z}, xmm2

Compress(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_compress_epi32 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSD xmm1 {k1}{z}, xmm2

Compress(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_compress_epi64 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSQ xmm1 {k1}{z}, xmm2

Compress(Vector128<Single>, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_compress_ps (__m128 s, __mmask8 k, __m128 a)

VCOMPRESSPS xmm1 {k1}{z}, xmm2

Compress(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_compress_epi32 (__m128i s, __mmask8 k, __m128i a)

VPCOMPRESSD xmm1 {k1}{z}, xmm2

Compress(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_compress_epi64 (__m128i s, __mask8 k, __m128i a)

VPCOMPRESSQ xmm1 {k1}{z}, xmm2

Compress(Vector256<Double>, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_compress_pd (__m256d s, __mmask8 k, __m256d a)

VCOMPRESSPD ymm1 {k1}{z}, ymm2

Compress(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_compress_epi32 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSD ymm1 {k1}{z}, ymm2

Compress(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_compress_epi64 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSQ ymm1 {k1}{z}, ymm2

Compress(Vector256<Single>, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_compress_ps (__m256 s, __mmask8 k, __m256 a)

VCOMPRESSPS ymm1 {k1}{z}, ymm2

Compress(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_compress_epi32 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSD ymm1 {k1}{z}, ymm2

Compress(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_compress_epi64 (__m256i s, __mmask8 k, __m256i a)

VPCOMPRESSQ ymm1 {k1}{z}, ymm2
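
Compress packs the selected elements of the last operand toward the low end of the result, filling the remaining positions from the first (merge) operand. A minimal sketch, assuming the usual convention that a mask element participates when its most significant bit is set:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var merge = Vector128.Create(9, 9, 9, 9);
var mask  = Vector128.Create(-1, 0, -1, 0);  // select elements 0 and 2
var value = Vector128.Create(1, 2, 3, 4);

// The survivors {1, 3} are packed low and the upper positions come
// from merge, giving {1, 3, 9, 9}.
var packed = Avx512F.VL.Compress(merge, mask, value);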

CompressStore(Double*, Vector128<Double>, Vector128<Double>)

void _mm_mask_compressstoreu_pd (void * base_addr, __mmask8 k, __m128d a)

VCOMPRESSPD m128 {k1}{z}, xmm2

CompressStore(Double*, Vector256<Double>, Vector256<Double>)

void _mm256_mask_compressstoreu_pd (void * base_addr, __mmask8 k, __m256d a)

VCOMPRESSPD m256 {k1}{z}, ymm2

CompressStore(Int32*, Vector128<Int32>, Vector128<Int32>)

void _mm_mask_compressstoreu_epi32 (void * base_addr, __mmask8 k, __m128i a)

VPCOMPRESSD m128 {k1}{z}, xmm2

CompressStore(Int32*, Vector256<Int32>, Vector256<Int32>)

void _mm256_mask_compressstoreu_epi32 (void * base_addr, __mmask8 k, __m256i a)

VPCOMPRESSD m256 {k1}{z}, ymm2

CompressStore(Int64*, Vector128<Int64>, Vector128<Int64>)

void _mm_mask_compressstoreu_epi64 (void * base_addr, __mmask8 k, __m128i a)

VPCOMPRESSQ m128 {k1}{z}, xmm2

CompressStore(Int64*, Vector256<Int64>, Vector256<Int64>)

void _mm256_mask_compressstoreu_epi64 (void * base_addr, __mmask8 k, __m256i a)

VPCOMPRESSQ m256 {k1}{z}, ymm2

CompressStore(Single*, Vector128<Single>, Vector128<Single>)

void _mm_mask_compressstoreu_ps (void * base_addr, __mmask8 k, __m128 a)

VCOMPRESSPS m128 {k1}{z}, xmm2

CompressStore(Single*, Vector256<Single>, Vector256<Single>)

void _mm256_mask_compressstoreu_ps (void * base_addr, __mmask8 k, __m256 a)

VCOMPRESSPS m256 {k1}{z}, ymm2

CompressStore(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

void _mm_mask_compressstoreu_epi32 (void * base_addr, __mmask8 k, __m128i a)

VPCOMPRESSD m128 {k1}{z}, xmm2

CompressStore(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

void _mm256_mask_compressstoreu_epi32 (void * base_addr, __mmask8 k, __m256i a)

VPCOMPRESSD m256 {k1}{z}, ymm2

CompressStore(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

void _mm_mask_compressstoreu_epi64 (void * base_addr, __mmask8 k, __m128i a)

VPCOMPRESSQ m128 {k1}{z}, xmm2

CompressStore(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

void _mm256_mask_compressstoreu_epi64 (void * base_addr, __mmask8 k, __m256i a)

VPCOMPRESSQ m256 {k1}{z}, ymm2
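
CompressStore performs the same packing but writes only the selected elements to consecutive memory locations, the standard building block for vectorized filtering. A minimal sketch (unsafe, since the destination is a raw pointer):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static unsafe int StoreSelected(int* dst, Vector128<int> mask, Vector128<int> src)
{
    // Only the selected elements of src are written, contiguously
    // starting at dst.
    Avx512F.VL.CompressStore(dst, mask, src);

    // Count the selected elements so the caller can advance dst.
    int count = 0;
    for (int i = 0; i < Vector128<int>.Count; i++)
    {
        if (mask.GetElement(i) != 0) count++;
    }
    return count;
}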

ConvertToVector128Byte(Vector128<Int32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<Int64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Byte(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128Byte(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Byte(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128ByteWithSaturation(Vector128<UInt32>)

__m128i _mm_cvtusepi32_epi8 (__m128i a)

VPMOVUSDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128ByteWithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi8 (__m128i a)

VPMOVUSQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128ByteWithSaturation(Vector256<UInt32>)

__m128i _mm256_cvtusepi32_epi8 (__m256i a)

VPMOVUSDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128ByteWithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi8 (__m256i a)

VPMOVUSQB xmm1/m32 {k1}{z}, ymm2
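
The WithSaturation variants clamp values that do not fit the narrower element type, while the plain conversions keep only the low bits. A minimal sketch contrasting the two:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var wide = Vector128.Create(300u, 5u, 70000u, 255u);

// Truncating narrow keeps the low 8 bits: 300 -> 44, 70000 -> 112.
var truncated = Avx512F.VL.ConvertToVector128Byte(wide);

// Saturating narrow clamps to the byte range: 300 -> 255, 70000 -> 255.
var saturated = Avx512F.VL.ConvertToVector128ByteWithSaturation(wide);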

ConvertToVector128Double(Vector128<UInt32>)

__m128d _mm_cvtepu32_pd (__m128i a)

VCVTUDQ2PD xmm1 {k1}{z}, xmm2/m64/m32bcst

ConvertToVector128Int16(Vector128<Int32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<Int64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int16WithSaturation(Vector128<Int32>)

__m128i _mm_cvtsepi32_epi16 (__m128i a)

VPMOVSDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int16WithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi16 (__m128i a)

VPMOVSQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128Int16WithSaturation(Vector256<Int32>)

__m128i _mm256_cvtsepi32_epi16 (__m256i a)

VPMOVSDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int16WithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi16 (__m256i a)

VPMOVSQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128Int32(Vector128<Int64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int32(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128Int32WithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi32 (__m128i a)

VPMOVSQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128Int32WithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi32 (__m256i a)

VPMOVSQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128SByte(Vector128<Int32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<Int64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi8 (__m128i a)

VPMOVDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByte(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi8 (__m128i a)

VPMOVQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByte(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi8 (__m256i a)

VPMOVDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByte(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi8 (__m256i a)

VPMOVQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128SByteWithSaturation(Vector128<Int32>)

__m128i _mm_cvtsepi32_epi8 (__m128i a)

VPMOVSDB xmm1/m32 {k1}{z}, xmm2

ConvertToVector128SByteWithSaturation(Vector128<Int64>)

__m128i _mm_cvtsepi64_epi8 (__m128i a)

VPMOVSQB xmm1/m16 {k1}{z}, xmm2

ConvertToVector128SByteWithSaturation(Vector256<Int32>)

__m128i _mm256_cvtsepi32_epi8 (__m256i a)

VPMOVSDB xmm1/m64 {k1}{z}, ymm2

ConvertToVector128SByteWithSaturation(Vector256<Int64>)

__m128i _mm256_cvtsepi64_epi8 (__m256i a)

VPMOVSQB xmm1/m32 {k1}{z}, ymm2

ConvertToVector128Single(Vector128<UInt32>)

__m128 _mm_cvtepu32_ps (__m128i a)

VCVTUDQ2PS xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt16(Vector128<Int32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<Int64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<UInt32>)

__m128i _mm_cvtepi32_epi16 (__m128i a)

VPMOVDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi16 (__m128i a)

VPMOVQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16(Vector256<Int32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<UInt32>)

__m128i _mm256_cvtepi32_epi16 (__m256i a)

VPMOVDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi16 (__m256i a)

VPMOVQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt16WithSaturation(Vector128<UInt32>)

__m128i _mm_cvtusepi32_epi16 (__m128i a)

VPMOVUSDW xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt16WithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi16 (__m128i a)

VPMOVUSQW xmm1/m32 {k1}{z}, xmm2

ConvertToVector128UInt16WithSaturation(Vector256<UInt32>)

__m128i _mm256_cvtusepi32_epi16 (__m256i a)

VPMOVUSDW xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt16WithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi16 (__m256i a)

VPMOVUSQW xmm1/m64 {k1}{z}, ymm2

ConvertToVector128UInt32(Vector128<Double>)

__m128i _mm_cvtpd_epu32 (__m128d a)

VCVTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt32(Vector128<Int64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32(Vector128<Single>)

__m128i _mm_cvtps_epu32 (__m128 a)

VCVTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt32(Vector128<UInt64>)

__m128i _mm_cvtepi64_epi32 (__m128i a)

VPMOVQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32(Vector256<Double>)

__m128i _mm256_cvtpd_epu32 (__m256d a)

VCVTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector128UInt32(Vector256<Int64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32(Vector256<UInt64>)

__m128i _mm256_cvtepi64_epi32 (__m256i a)

VPMOVQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32WithSaturation(Vector128<UInt64>)

__m128i _mm_cvtusepi64_epi32 (__m128i a)

VPMOVUSQD xmm1/m64 {k1}{z}, xmm2

ConvertToVector128UInt32WithSaturation(Vector256<UInt64>)

__m128i _mm256_cvtusepi64_epi32 (__m256i a)

VPMOVUSQD xmm1/m128 {k1}{z}, ymm2

ConvertToVector128UInt32WithTruncation(Vector128<Double>)

__m128i _mm_cvttpd_epu32 (__m128d a)

VCVTTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst

ConvertToVector128UInt32WithTruncation(Vector128<Single>)

__m128i _mm_cvttps_epu32 (__m128 a)

VCVTTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector128UInt32WithTruncation(Vector256<Double>)

__m128i _mm256_cvttpd_epu32 (__m256d a)

VCVTTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst

ConvertToVector256Double(Vector128<UInt32>)

__m256d _mm256_cvtepu32_pd (__m128i a)

VCVTUDQ2PD ymm1 {k1}{z}, xmm2/m128/m32bcst

ConvertToVector256Single(Vector256<UInt32>)

__m256 _mm256_cvtepu32_ps (__m256i a)

VCVTUDQ2PS ymm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector256UInt32(Vector256<Single>)

__m256i _mm256_cvtps_epu32 (__m256 a)

VCVTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst

ConvertToVector256UInt32WithTruncation(Vector256<Single>)

__m256i _mm256_cvttps_epu32 (__m256 a)

VCVTTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst

Equals(Object)

Determines whether the specified object is equal to the current object.

(Inherited from Object)
Expand(Vector128<Double>, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_expand_pd (__m128d s, __mmask8 k, __m128d a)

VEXPANDPD xmm1 {k1}{z}, xmm2

Expand(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_expand_epi32 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDD xmm1 {k1}{z}, xmm2

Expand(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_expand_epi64 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDQ xmm1 {k1}{z}, xmm2

Expand(Vector128<Single>, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_expand_ps (__m128 s, __mmask8 k, __m128 a)

VEXPANDPS xmm1 {k1}{z}, xmm2

Expand(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_expand_epi32 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDD xmm1 {k1}{z}, xmm2

Expand(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_expand_epi64 (__m128i s, __mmask8 k, __m128i a)

VPEXPANDQ xmm1 {k1}{z}, xmm2

Expand(Vector256<Double>, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_expand_pd (__m256d s, __mmask8 k, __m256d a)

VEXPANDPD ymm1 {k1}{z}, ymm2

Expand(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_expand_epi32 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDD ymm1 {k1}{z}, ymm2

Expand(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_expand_epi64 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDQ ymm1 {k1}{z}, ymm2

Expand(Vector256<Single>, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_expand_ps (__m256 s, __mmask8 k, __m256 a)

VEXPANDPS ymm1 {k1}{z}, ymm2

Expand(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_expand_epi32 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDD ymm1 {k1}{z}, ymm2

Expand(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_expand_epi64 (__m256i s, __mmask8 k, __m256i a)

VPEXPANDQ ymm1 {k1}{z}, ymm2

ExpandLoad(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_expandloadu_pd (__m128d s, __mmask8 k, void const * a)

VEXPANDPD xmm1 {k1}{z}, m128

ExpandLoad(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_expandloadu_pd (__m256d s, __mmask8 k, void const * a)

VEXPANDPD ymm1 {k1}{z}, m256

ExpandLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

ExpandLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

ExpandLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

ExpandLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

ExpandLoad(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_expandloadu_ps (__m128 s, __mmask8 k, void const * a)

VEXPANDPS xmm1 {k1}{z}, m128

ExpandLoad(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_expandloadu_ps (__m256 s, __mmask8 k, void const * a)

VEXPANDPS ymm1 {k1}{z}, m256

ExpandLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

ExpandLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

ExpandLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

ExpandLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

Fixup(Vector128<Double>, Vector128<Double>, Vector128<Int64>, Byte)

__m128d _mm_fixupimm_pd (__m128d a, __m128d b, __m128i tbl, int imm)

VFIXUPIMMPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

Fixup(Vector128<Single>, Vector128<Single>, Vector128<Int32>, Byte)

__m128 _mm_fixupimm_ps (__m128 a, __m128 b, __m128i tbl, int imm)

VFIXUPIMMPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

Fixup(Vector256<Double>, Vector256<Double>, Vector256<Int64>, Byte)

__m256d _mm256_fixupimm_pd (__m256d a, __m256d b, __m256i tbl, int imm)

VFIXUPIMMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Fixup(Vector256<Single>, Vector256<Single>, Vector256<Int32>, Byte)

__m256 _mm256_fixupimm_ps (__m256 a, __m256 b, __m256i tbl, int imm)

VFIXUPIMMPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

GetExponent(Vector128<Double>)

__m128d _mm_getexp_pd (__m128d a)

VGETEXPPD xmm1 {k1}{z}, xmm2/m128/m64bcst

GetExponent(Vector128<Single>)

__m128 _mm_getexp_ps (__m128 a)

VGETEXPPS xmm1 {k1}{z}, xmm2/m128/m32bcst

GetExponent(Vector256<Double>)

__m256d _mm256_getexp_pd (__m256d a)

VGETEXPPD ymm1 {k1}{z}, ymm2/m256/m64bcst

GetExponent(Vector256<Single>)

__m256 _mm256_getexp_ps (__m256 a)

VGETEXPPS ymm1 {k1}{z}, ymm2/m256/m32bcst
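
GetExponent extracts each element's unbiased binary exponent, i.e. floor(log2(|x|)), returned as a floating-point value. A minimal sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var v = Vector128.Create(8.0, 0.75);

// 8.0 = 2^3 * 1.0 and 0.75 = 2^-1 * 1.5, so the result is {3.0, -1.0}.
var exponents = Avx512F.VL.GetExponent(v);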

GetHashCode()

Serves as the default hash function.

(Inherited from Object)
GetMantissa(Vector128<Double>, Byte)

__m128d _mm_getmant_pd (__m128d a, _MM_MANTISSA_NORM_ENUM norm, _MM_MANTISSA_SIGN_ENUM sign)

VGETMANTPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

GetMantissa(Vector128<Single>, Byte)

__m128 _mm_getmant_ps (__m128 a, _MM_MANTISSA_NORM_ENUM norm, _MM_MANTISSA_SIGN_ENUM sign)

VGETMANTPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

GetMantissa(Vector256<Double>, Byte)

__m256d _mm256_getmant_pd (__m256d a, _MM_MANTISSA_NORM_ENUM norm, _MM_MANTISSA_SIGN_ENUM sign)

VGETMANTPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

GetMantissa(Vector256<Single>, Byte)

__m256 _mm256_getmant_ps (__m256 a, _MM_MANTISSA_NORM_ENUM norm, _MM_MANTISSA_SIGN_ENUM sign)

VGETMANTPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

GetType()

Gets the Type of the current instance.

(Inherited from Object)
MaskLoad(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_loadu_pd (__m128d s, __mmask8 k, void const * mem_addr)

VMOVUPD xmm1 {k1}{z}, m128

MaskLoad(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_loadu_pd (__m256d s, __mmask8 k, void const * mem_addr)

VMOVUPD ymm1 {k1}{z}, m256

MaskLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

MaskLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

MaskLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

MaskLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256

MaskLoad(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_loadu_ps (__m128 s, __mmask8 k, void const * mem_addr)

VMOVUPS xmm1 {k1}{z}, m128

MaskLoad(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_loadu_ps (__m256 s, __mmask8 k, void const * mem_addr)

VMOVUPS ymm1 {k1}{z}, m256

MaskLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

MaskLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

MaskLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

MaskLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256
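
MaskLoad reads only the selected elements from memory and takes the rest from the merge operand, so memory beyond the selected elements is never touched; this is the usual way to vectorize a remainder loop without scalar code. A minimal sketch, assuming an (address, mask, merge) parameter ordering, which the type list above leaves unnamed:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static unsafe Vector128<int> LoadFirstN(int* src, int n)
{
    // All-ones in the first n elements selects them for loading.
    var mask = Vector128.Create(n > 0 ? -1 : 0, n > 1 ? -1 : 0,
                                n > 2 ? -1 : 0, n > 3 ? -1 : 0);

    // Unselected elements come from the merge vector (zero here) and
    // their memory locations are not read.
    return Avx512F.VL.MaskLoad(src, mask, Vector128<int>.Zero);
}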

MaskLoadAligned(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_load_pd (__m128d s, __mmask8 k, void const * mem_addr)

VMOVAPD xmm1 {k1}{z}, m128

MaskLoadAligned(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_load_pd (__m256d s, __mmask8 k, void const * mem_addr)

VMOVAPD ymm1 {k1}{z}, m256

MaskLoadAligned(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_load_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 xmm1 {k1}{z}, m128

MaskLoadAligned(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_load_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 ymm1 {k1}{z}, m256

MaskLoadAligned(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_load_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 xmm1 {k1}{z}, m128

MaskLoadAligned(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_load_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 ymm1 {k1}{z}, m256

MaskLoadAligned(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_load_ps (__m128 s, __mmask8 k, void const * mem_addr)

VMOVAPS xmm1 {k1}{z}, m128

MaskLoadAligned(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_load_ps (__m256 s, __mmask8 k, void const * mem_addr)

VMOVAPS ymm1 {k1}{z}, m256

MaskLoadAligned(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_load_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 xmm1 {k1}{z}, m128

MaskLoadAligned(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_load_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA32 ymm1 {k1}{z}, m256

MaskLoadAligned(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_load_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 xmm1 {k1}{z}, m128

MaskLoadAligned(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_load_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQA64 ymm1 {k1}{z}, m256

MaskStore(Double*, Vector128<Double>, Vector128<Double>)

void _mm_mask_storeu_pd (void * mem_addr, __mmask8 k, __m128d a)

VMOVUPD m128 {k1}{z}, xmm1

MaskStore(Double*, Vector256<Double>, Vector256<Double>)

void _mm256_mask_storeu_pd (void * mem_addr, __mmask8 k, __m256d a)

VMOVUPD m256 {k1}{z}, ymm1

MaskStore(Int32*, Vector128<Int32>, Vector128<Int32>)

void _mm_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU32 m128 {k1}{z}, xmm1

MaskStore(Int32*, Vector256<Int32>, Vector256<Int32>)

void _mm256_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU32 m256 {k1}{z}, ymm1

MaskStore(Int64*, Vector128<Int64>, Vector128<Int64>)

void _mm_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU64 m128 {k1}{z}, xmm1

MaskStore(Int64*, Vector256<Int64>, Vector256<Int64>)

void _mm256_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU64 m256 {k1}{z}, ymm1

MaskStore(Single*, Vector128<Single>, Vector128<Single>)

void _mm_mask_storeu_ps (void * mem_addr, __mmask8 k, __m128 a)

VMOVUPS m128 {k1}{z}, xmm1

MaskStore(Single*, Vector256<Single>, Vector256<Single>)

void _mm256_mask_storeu_ps (void * mem_addr, __mmask8 k, __m256 a)

VMOVUPS m256 {k1}{z}, ymm1

MaskStore(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

void _mm_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU32 m128 {k1}{z}, xmm1

MaskStore(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

void _mm256_mask_storeu_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU32 m256 {k1}{z}, ymm1

MaskStore(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

void _mm_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQU64 m128 {k1}{z}, xmm1

MaskStore(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

void _mm256_mask_storeu_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQU64 m256 {k1}{z}, ymm1

MaskStoreAligned(Double*, Vector128<Double>, Vector128<Double>)

void _mm_mask_store_pd (void * mem_addr, __mmask8 k, __m128d a)

VMOVAPD m128 {k1}{z}, xmm1

MaskStoreAligned(Double*, Vector256<Double>, Vector256<Double>)

void _mm256_mask_store_pd (void * mem_addr, __mmask8 k, __m256d a)

VMOVAPD m256 {k1}{z}, ymm1

MaskStoreAligned(Int32*, Vector128<Int32>, Vector128<Int32>)

void _mm_mask_store_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA32 m128 {k1}{z}, xmm1

MaskStoreAligned(Int32*, Vector256<Int32>, Vector256<Int32>)

void _mm256_mask_store_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA32 m256 {k1}{z}, ymm1

MaskStoreAligned(Int64*, Vector128<Int64>, Vector128<Int64>)

void _mm_mask_store_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA64 m128 {k1}{z}, xmm1

MaskStoreAligned(Int64*, Vector256<Int64>, Vector256<Int64>)

void _mm256_mask_store_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA64 m256 {k1}{z}, ymm1

MaskStoreAligned(Single*, Vector128<Single>, Vector128<Single>)

void _mm_mask_store_ps (void * mem_addr, __mmask8 k, __m128 a)

VMOVAPS m128 {k1}{z}, xmm1

MaskStoreAligned(Single*, Vector256<Single>, Vector256<Single>)

void _mm256_mask_store_ps (void * mem_addr, __mmask8 k, __m256 a)

VMOVAPS m256 {k1}{z}, ymm1

MaskStoreAligned(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

void _mm_mask_store_epi32 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA32 m128 {k1}{z}, xmm1

MaskStoreAligned(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

void _mm256_mask_store_epi32 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA32 m256 {k1}{z}, ymm1

MaskStoreAligned(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

void _mm_mask_store_epi64 (void * mem_addr, __mmask8 k, __m128i a)

VMOVDQA64 m128 {k1}{z}, xmm1

MaskStoreAligned(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

void _mm256_mask_store_epi64 (void * mem_addr, __mmask8 k, __m256i a)

VMOVDQA64 m256 {k1}{z}, ymm1

Max(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_max_epi64 (__m128i a, __m128i b)

VPMAXSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Max(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_max_epu64 (__m128i a, __m128i b)

VPMAXUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Max(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_max_epi64 (__m256i a, __m256i b)

VPMAXSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Max(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_max_epu64 (__m256i a, __m256i b)

VPMAXUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
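
These overloads fill a long-standing gap: before VPMAXSQ/VPMAXUQ there was no single instruction for 64-bit integer min/max, so the operation required a compare-and-blend sequence. A minimal sketch:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var a = Vector128.Create(-5L, 100L);
var b = Vector128.Create(3L, 7L);

// Per-element signed maximum, a single VPMAXSQ: {3, 100}.
var max = Avx512F.VL.Max(a, b);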

MemberwiseClone()

Creates a shallow copy of the current Object.

(Inherited from Object)
Min(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_min_epi64 (__m128i a, __m128i b)

VPMINSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Min(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_min_epu64 (__m128i a, __m128i b)

VPMINUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Min(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_min_epi64 (__m256i a, __m256i b)

VPMINSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Min(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_min_epu64 (__m256i a, __m256i b)

VPMINUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar2x64x2(Vector128<Double>, Vector128<Int64>, Vector128<Double>)

__m128d _mm_permutex2var_pd (__m128d a, __m128i idx, __m128d b)

VPERMI2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
VPERMT2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar2x64x2(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)

VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar2x64x2(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)

VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

PermuteVar4x32x2(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)

VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x32x2(Vector128<Single>, Vector128<Int32>, Vector128<Single>)

__m128 _mm_permutex2var_ps (__m128 a, __m128i idx, __m128 b)

VPERMI2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
VPERMT2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x32x2(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)

VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

PermuteVar4x64(Vector256<Double>, Vector256<Int64>)

__m256d _mm256_permutexvar_pd (__m256i idx, __m256d a)

VPERMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64(Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_permutexvar_epi64 (__m256i idx, __m256i a)

VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_permutexvar_epi64 (__m256i idx, __m256i a)

VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<Double>, Vector256<Int64>, Vector256<Double>)

__m256d _mm256_permutex2var_pd (__m256d a, __m256i idx, __m256d b)

VPERMI2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
VPERMT2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)

VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar4x64x2(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)

VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

PermuteVar8x32x2(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)

VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

PermuteVar8x32x2(Vector256<Single>, Vector256<Int32>, Vector256<Single>)

__m256 _mm256_permutex2var_ps (__m256 a, __m256i idx, __m256 b)

VPERMI2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
VPERMT2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

PermuteVar8x32x2(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)

VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
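
The two-source permutes select each result element from the concatenation of both data operands: the low index bits pick an element and the next bit picks the source, so a pair of Vector128<Int64> values acts as a four-entry table. A minimal sketch of that convention:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

var lower = Vector128.Create(10L, 11L);  // table entries 0 and 1
var upper = Vector128.Create(20L, 21L);  // table entries 2 and 3

// Index 3 selects upper element 1; index 0 selects lower element 0,
// so the expected result is {21, 10}.
var idx = Vector128.Create(3L, 0L);
var shuffled = Avx512F.VL.PermuteVar2x64x2(lower, idx, upper);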

Reciprocal14(Vector128<Double>)

__m128d _mm_rcp14_pd (__m128d a)

VRCP14PD xmm1 {k1}{z}, xmm2/m128/m64bcst

Reciprocal14(Vector128<Single>)

__m128 _mm_rcp14_ps (__m128 a)

VRCP14PS xmm1 {k1}{z}, xmm2/m128/m32bcst

Reciprocal14(Vector256<Double>)

__m256d _mm256_rcp14_pd (__m256d a)

VRCP14PD ymm1 {k1}{z}, ymm2/m256/m64bcst

Reciprocal14(Vector256<Single>)

__m256 _mm256_rcp14_ps (__m256 a)

VRCP14PS ymm1 {k1}{z}, ymm2/m256/m32bcst

ReciprocalSqrt14(Vector128<Double>)

__m128d _mm_rsqrt14_pd (__m128d a)

VRSQRT14PD xmm1 {k1}{z}, xmm2/m128/m64bcst

ReciprocalSqrt14(Vector128<Single>)

__m128 _mm_rsqrt14_ps (__m128 a)

VRSQRT14PS xmm1 {k1}{z}, xmm2/m128/m32bcst

ReciprocalSqrt14(Vector256<Double>)

__m256d _mm256_rsqrt14_pd (__m256d a)

VRSQRT14PD ymm1 {k1}{z}, ymm2/m256/m64bcst

ReciprocalSqrt14(Vector256<Single>)

__m256 _mm256_rsqrt14_ps (__m256 a)

VRSQRT14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
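
ReciprocalSqrt14 likewise estimates 1/sqrt(x) to 14 bits. A common pattern, sketched below, refines the estimate with one Newton-Raphson step, y' = y * (1.5 - 0.5 * x * y * y), which roughly doubles the number of accurate bits; the refinement constants are standard numerics, not part of this API:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class Rsqrt14Demo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector128<float> x = Vector128.Create(4.0f, 2.0f, 9.0f, 0.25f);

        // 14-bit hardware estimate of 1/sqrt(x).
        Vector128<float> y = Avx512F.VL.ReciprocalSqrt14(x);

        // One Newton-Raphson step: y' = y * (1.5 - 0.5 * x * y * y).
        Vector128<float> half  = Vector128.Create(0.5f);
        Vector128<float> three = Vector128.Create(1.5f);
        Vector128<float> yy    = Sse.Multiply(y, y);
        Vector128<float> t     = Sse.Subtract(three,
                                     Sse.Multiply(Sse.Multiply(half, x), yy));
        Vector128<float> refined = Sse.Multiply(y, t);

        Console.WriteLine(refined); // ~ <0.5, 0.7071, 0.3333, 2>
    }
}
```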

RotateLeft(Vector128<Int32>, Byte)

__m128i _mm_rol_epi32 (__m128i a, int imm8)

VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateLeft(Vector128<Int64>, Byte)

__m128i _mm_rol_epi64 (__m128i a, int imm8)

VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateLeft(Vector128<UInt32>, Byte)

__m128i _mm_rol_epi32 (__m128i a, int imm8)

VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateLeft(Vector128<UInt64>, Byte)

__m128i _mm_rol_epi64 (__m128i a, int imm8)

VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateLeft(Vector256<Int32>, Byte)

__m256i _mm256_rol_epi32 (__m256i a, int imm8)

VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateLeft(Vector256<Int64>, Byte)

__m256i _mm256_rol_epi64 (__m256i a, int imm8)

VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateLeft(Vector256<UInt32>, Byte)

__m256i _mm256_rol_epi32 (__m256i a, int imm8)

VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateLeft(Vector256<UInt64>, Byte)

__m256i _mm256_rol_epi64 (__m256i a, int imm8)

VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
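
The immediate rotates replace the usual shift-left, shift-right, OR idiom with a single instruction. A minimal sketch for 32-bit lanes:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class RotateDemo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector128<uint> v = Vector128.Create(0x80000001u, 0x12345678u,
                                             0xDEADBEEFu, 0x00000001u);

        // Rotate every 32-bit lane left by 8 bits in one instruction.
        Vector128<uint> rotated = Avx512F.VL.RotateLeft(v, 8);

        Console.WriteLine($"{rotated.GetElement(0):X8}"); // 00000180
    }
}
```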

RotateLeftVariable(Vector128<Int32>, Vector128<UInt32>)

__m128i _mm_rolv_epi32 (__m128i a, __m128i b)

VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateLeftVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_rolv_epi64 (__m128i a, __m128i b)

VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateLeftVariable(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_rolv_epi32 (__m128i a, __m128i b)

VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateLeftVariable(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_rolv_epi64 (__m128i a, __m128i b)

VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateLeftVariable(Vector256<Int32>, Vector256<UInt32>)

__m256i _mm256_rolv_epi32 (__m256i a, __m256i b)

VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateLeftVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_rolv_epi64 (__m256i a, __m256i b)

VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RotateLeftVariable(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_rolv_epi32 (__m256i a, __m256i b)

VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateLeftVariable(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_rolv_epi64 (__m256i a, __m256i b)

VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
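
The variable forms take a per-lane rotation count, which suits hash and cipher kernels where each element rotates by a different amount. A short sketch:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class RotateVariableDemo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector128<uint> v      = Vector128.Create(1u, 1u, 1u, 1u);
        Vector128<uint> counts = Vector128.Create(0u, 4u, 8u, 31u);

        // Each lane rotates left by its own count.
        Vector128<uint> r = Avx512F.VL.RotateLeftVariable(v, counts);

        Console.WriteLine(r); // <1, 16, 256, 2147483648>
    }
}
```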

RotateRight(Vector128<Int32>, Byte)

__m128i _mm_ror_epi32 (__m128i a, int imm8)

VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateRight(Vector128<Int64>, Byte)

__m128i _mm_ror_epi64 (__m128i a, int imm8)

VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateRight(Vector128<UInt32>, Byte)

__m128i _mm_ror_epi32 (__m128i a, int imm8)

VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RotateRight(Vector128<UInt64>, Byte)

__m128i _mm_ror_epi64 (__m128i a, int imm8)

VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RotateRight(Vector256<Int32>, Byte)

__m256i _mm256_ror_epi32 (__m256i a, int imm8)

VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateRight(Vector256<Int64>, Byte)

__m256i _mm256_ror_epi64 (__m256i a, int imm8)

VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateRight(Vector256<UInt32>, Byte)

__m256i _mm256_ror_epi32 (__m256i a, int imm8)

VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8

RotateRight(Vector256<UInt64>, Byte)

__m256i _mm256_ror_epi64 (__m256i a, int imm8)

VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RotateRightVariable(Vector128<Int32>, Vector128<UInt32>)

__m128i _mm_rorv_epi32 (__m128i a, __m128i b)

VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateRightVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_rorv_epi64 (__m128i a, __m128i b)

VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateRightVariable(Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_rorv_epi32 (__m128i a, __m128i b)

VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

RotateRightVariable(Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_rorv_epi64 (__m128i a, __m128i b)

VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

RotateRightVariable(Vector256<Int32>, Vector256<UInt32>)

__m256i _mm256_rorv_epi32 (__m256i a, __m256i b)

VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateRightVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_rorv_epi64 (__m256i a, __m256i b)

VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RotateRightVariable(Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_rorv_epi32 (__m256i a, __m256i b)

VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst

RotateRightVariable(Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_rorv_epi64 (__m256i a, __m256i b)

VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

RoundScale(Vector128<Double>, Byte)

__m128d _mm_roundscale_pd (__m128d a, int imm)

VRNDSCALEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8

RoundScale(Vector128<Single>, Byte)

__m128 _mm_roundscale_ps (__m128 a, int imm)

VRNDSCALEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8

RoundScale(Vector256<Double>, Byte)

__m256d _mm256_roundscale_pd (__m256d a, int imm)

VRNDSCALEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8

RoundScale(Vector256<Single>, Byte)

__m256 _mm256_roundscale_ps (__m256 a, int imm)

VRNDSCALEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
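
RoundScale rounds each element to a fixed number of fraction bits: per the instruction's imm8 encoding, bits [7:4] hold M, the result is round(x * 2^M) / 2^M, and the low bits select the rounding mode (00 with bit 2 clear gives round-to-nearest-even). A minimal sketch:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class RoundScaleDemo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector128<double> x = Vector128.Create(1.30, -2.71);

        // M = 0: plain round-to-nearest-even integer.
        // M = 2 (imm8 = 0x20): round to the nearest multiple of 0.25.
        Vector128<double> toInt     = Avx512F.VL.RoundScale(x, 0x00);
        Vector128<double> toQuarter = Avx512F.VL.RoundScale(x, 0x20);

        Console.WriteLine(toInt);     // <1, -3>
        Console.WriteLine(toQuarter); // <1.25, -2.75>
    }
}
```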

Scale(Vector128<Double>, Vector128<Double>)

__m128d _mm_scalef_pd (__m128d a, __m128d b)

VSCALEFPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

Scale(Vector128<Single>, Vector128<Single>)

__m128 _mm_scalef_ps (__m128 a, __m128 b)

VSCALEFPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst

Scale(Vector256<Double>, Vector256<Double>)

__m256d _mm256_scalef_pd (__m256d a, __m256d b)

VSCALEFPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst

Scale(Vector256<Single>, Vector256<Single>)

__m256 _mm256_scalef_ps (__m256 a, __m256 b)

VSCALEFPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
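
Scale computes x * 2^floor(y) per lane, in effect a vectorized ldexp, which is handy when recombining exponent/mantissa pairs. A short sketch:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ScaleDemo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector128<double> x = Vector128.Create(3.0, 1.5);
        Vector128<double> y = Vector128.Create(4.0, -1.0);

        // Per lane: x * 2^floor(y).
        Vector128<double> scaled = Avx512F.VL.Scale(x, y);

        Console.WriteLine(scaled); // <48, 0.75>
    }
}
```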

ShiftRightArithmetic(Vector128<Int64>, Byte)

__m128i _mm_srai_epi64 (__m128i a, int imm8)

VPSRAQ xmm1 {k1}{z}, xmm2, imm8

ShiftRightArithmetic(Vector128<Int64>, Vector128<Int64>)

__m128i _mm_sra_epi64 (__m128i a, __m128i count)

VPSRAQ xmm1 {k1}{z}, xmm2, xmm3/m128

ShiftRightArithmetic(Vector256<Int64>, Byte)

__m256i _mm256_srai_epi64 (__m256i a, int imm8)

VPSRAQ ymm1 {k1}{z}, ymm2, imm8

ShiftRightArithmetic(Vector256<Int64>, Vector128<Int64>)

__m256i _mm256_sra_epi64 (__m256i a, __m128i count)

VPSRAQ ymm1 {k1}{z}, ymm2, xmm3/m128

ShiftRightArithmeticVariable(Vector128<Int64>, Vector128<UInt64>)

__m128i _mm_srav_epi64 (__m128i a, __m128i count)

VPSRAVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst

ShiftRightArithmeticVariable(Vector256<Int64>, Vector256<UInt64>)

__m256i _mm256_srav_epi64 (__m256i a, __m256i count)

VPSRAVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
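
The 64-bit arithmetic shifts above close a gap in AVX2, which only provides sign-extending right shifts for elements up to 32 bits. A minimal sketch:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class Sra64Demo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector128<long> v = Vector128.Create(-256L, 1024L);

        // Arithmetic (sign-extending) right shift of 64-bit lanes.
        Vector128<long> shifted = Avx512F.VL.ShiftRightArithmetic(v, 4);

        Console.WriteLine(shifted); // <-16, 64>
    }
}
```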

Shuffle2x128(Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_shuffle_f64x2 (__m256d a, __m256d b, const int imm8)

VSHUFF64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Shuffle2x128(Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)

VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)

VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

Shuffle2x128(Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_shuffle_f32x4 (__m256 a, __m256 b, const int imm8)

VSHUFF32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)

VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

Shuffle2x128(Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)

VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
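
Shuffle2x128 assembles a result from 128-bit halves of two sources: imm8 bit 0 picks which half of the first operand becomes the result's low half, and bit 1 picks the half of the second operand that becomes the high half. A short sketch:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class Shuffle2x128Demo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector256<long> a = Vector256.Create(0L, 1L, 2L, 3L);
        Vector256<long> b = Vector256.Create(4L, 5L, 6L, 7L);

        // 0b01 takes a's upper 128 bits and b's lower 128 bits.
        Vector256<long> mixed = Avx512F.VL.Shuffle2x128(a, b, 0b01);

        Console.WriteLine(mixed); // <2, 3, 4, 5>
    }
}
```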

TernaryLogic(Vector128<Byte>, Vector128<Byte>, Vector128<Byte>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Double>, Vector128<Double>, Vector128<Double>, Byte)

__m128d _mm_ternarylogic_pd (__m128d a, __m128d b, __m128d c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Int16>, Vector128<Int16>, Vector128<Int16>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>, Byte)

__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

TernaryLogic(Vector128<Int64>, Vector128<Int64>, Vector128<Int64>, Byte)

__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

TernaryLogic(Vector128<SByte>, Vector128<SByte>, Vector128<SByte>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<Single>, Vector128<Single>, Vector128<Single>, Byte)

__m128 _mm_ternarylogic_ps (__m128 a, __m128 b, __m128 c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<UInt16>, Vector128<UInt16>, Vector128<UInt16>, Byte)

__m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector128<UInt32>, Vector128<UInt32>, Vector128<UInt32>, Byte)

__m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8

TernaryLogic(Vector128<UInt64>, Vector128<UInt64>, Vector128<UInt64>, Byte)

__m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)

VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8

TernaryLogic(Vector256<Byte>, Vector256<Byte>, Vector256<Byte>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Double>, Vector256<Double>, Vector256<Double>, Byte)

__m256d _mm256_ternarylogic_pd (__m256d a, __m256d b, __m256d c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Int16>, Vector256<Int16>, Vector256<Int16>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Int32>, Vector256<Int32>, Vector256<Int32>, Byte)

__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

TernaryLogic(Vector256<Int64>, Vector256<Int64>, Vector256<Int64>, Byte)

__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8

TernaryLogic(Vector256<SByte>, Vector256<SByte>, Vector256<SByte>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<Single>, Vector256<Single>, Vector256<Single>, Byte)

__m256 _mm256_ternarylogic_ps (__m256 a, __m256 b, __m256 c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<UInt16>, Vector256<UInt16>, Vector256<UInt16>, Byte)

__m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8

The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.

TernaryLogic(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>, Byte)

__m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8

TernaryLogic(Vector256<UInt64>, Vector256<UInt64>, Vector256<UInt64>, Byte)

__m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)

VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
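
TernaryLogic evaluates an arbitrary three-input boolean function per bit: the imm8 is a truth table indexed by (a << 2) | (b << 1) | c. The well-known control value 0xCA encodes the bitwise select (a & b) | (~a & c), folding three logic operations into one instruction. A minimal sketch:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class TernaryLogicDemo
{
    static void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        Vector128<uint> mask = Vector128.Create(0xFFFF0000u, 0x0u,
                                                0xFFFFFFFFu, 0xF0F0F0F0u);
        Vector128<uint> b    = Vector128.Create(0x11111111u);
        Vector128<uint> c    = Vector128.Create(0x22222222u);

        // 0xCA: for each bit, take b where mask is 1, c where mask is 0.
        Vector128<uint> selected = Avx512F.VL.TernaryLogic(mask, b, c, 0xCA);

        Console.WriteLine($"{selected.GetElement(0):X8}"); // 11112222
    }
}
```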

ToString()

Returns a string that represents the current object.

(Inherited from Object)

Applies to