@@ -463,41 +463,6 @@ private static unsafe nuint ChangeCase<TFrom, TTo, TCasing>(TFrom* pSrc, TTo* pD
         return i;
     }
 
-    [MethodImpl(MethodImplOptions.AggressiveInlining)]
-    private static unsafe void Widen8To16AndAndWriteTo(Vector128<byte> narrowVector, char* pDest, nuint destOffset)
-    {
-        if (Vector256.IsHardwareAccelerated)
-        {
-            Vector256<ushort> wide = Vector256.WidenLower(narrowVector.ToVector256Unsafe());
-            wide.StoreUnsafe(ref *(ushort*)pDest, destOffset);
-        }
-        else
-        {
-            Vector128.WidenLower(narrowVector).StoreUnsafe(ref *(ushort*)pDest, destOffset);
-            Vector128.WidenUpper(narrowVector).StoreUnsafe(ref *(ushort*)pDest, destOffset + 8);
-        }
-    }
-
-    [MethodImpl(MethodImplOptions.AggressiveInlining)]
-    private static unsafe void Narrow16To8AndAndWriteTo(Vector128<ushort> wideVector, byte* pDest, nuint destOffset)
-    {
-        Vector128<byte> narrow = Vector128.Narrow(wideVector, wideVector);
-
-        if (Sse2.IsSupported)
-        {
-            // MOVQ is supported even on x86, unaligned accesses allowed
-            Sse2.StoreScalar((ulong*)(pDest + destOffset), narrow.AsUInt64());
-        }
-        else if (Vector64.IsHardwareAccelerated)
-        {
-            narrow.GetLower().StoreUnsafe(ref *pDest, destOffset);
-        }
-        else
-        {
-            Unsafe.WriteUnaligned<ulong>(pDest + destOffset, narrow.AsUInt64().ToScalar());
-        }
-    }
-
     [MethodImpl(MethodImplOptions.AggressiveInlining)]
     private static unsafe void ChangeWidthAndWriteTo<TFrom, TTo>(Vector128<TFrom> vector, TTo* pDest, nuint elementOffset)
         where TFrom : unmanaged
@@ -524,12 +489,9 @@ private static unsafe void ChangeWidthAndWriteTo<TFrom, TTo>(Vector128<TFrom> ve
         }
         else if (sizeof(TFrom) == 2 && sizeof(TTo) == 1)
         {
-            // narrowing operation required
-            // since we know data is all-ASCII, special-case SSE2 to avoid unneeded PAND in Narrow call
-            Vector128<byte> narrow = (Sse2.IsSupported)
-                ? Sse2.PackUnsignedSaturate(vector.AsInt16(), vector.AsInt16())
-                : Vector128.Narrow(vector.AsUInt16(), vector.AsUInt16());
-            narrow.GetLower().StoreUnsafe(ref *(byte*)pDest, elementOffset);
+            // narrowing operation required, we know data is all-ASCII so use extract helper
+            Vector128<byte> narrow = ExtractAsciiVector(vector.AsUInt16(), vector.AsUInt16());
+            narrow.StoreLowerUnsafe(ref *(byte*)pDest, elementOffset);
         }
         else
         {
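The '+' lines above call ExtractAsciiVector and StoreLowerUnsafe, neither of which is defined in this diff. Below is a minimal sketch of what helpers with those names could look like, assuming they centralize the SSE2 PackUnsignedSaturate fast path from the removed inline code and store only the lower 64 bits of the packed result; the AdvSimd branch, the signatures, and the AsciiVectorSketch wrapper class are illustrative assumptions, not code from this PR.

// Sketch only: assumed shapes for the helpers referenced by the '+' lines above.
using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
using System.Runtime.Intrinsics.X86;

internal static class AsciiVectorSketch
{
    // Narrow two all-ASCII ushort vectors into one byte vector, preferring intrinsics
    // that skip the masking work Vector128.Narrow performs. (Assumed implementation.)
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public static Vector128<byte> ExtractAsciiVector(Vector128<ushort> vectorFirst, Vector128<ushort> vectorSecond)
    {
        if (Sse2.IsSupported)
        {
            // all-ASCII lanes (<= 0x7F) never saturate, so unsigned packing is a plain narrow
            return Sse2.PackUnsignedSaturate(vectorFirst.AsInt16(), vectorSecond.AsInt16());
        }
        else if (AdvSimd.Arm64.IsSupported)
        {
            // keep the even-indexed (low) byte of every ushort lane
            return AdvSimd.Arm64.UnzipEven(vectorFirst.AsByte(), vectorSecond.AsByte());
        }
        else
        {
            return Vector128.Narrow(vectorFirst, vectorSecond);
        }
    }

    // Store only the lower 64 bits of the vector at destination + elementOffset,
    // replacing the old GetLower().StoreUnsafe(...) pattern. (Assumed implementation.)
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public static void StoreLowerUnsafe<T>(this Vector128<T> source, ref T destination, nuint elementOffset = 0)
        where T : struct
    {
        Unsafe.WriteUnaligned(
            ref Unsafe.As<T, byte>(ref Unsafe.Add(ref destination, (nint)elementOffset)),
            source.GetLower());
    }
}

Centralizing the per-architecture special case in one place is presumably what lets the narrowing branch here shrink to two statements.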
@@ -556,25 +518,6 @@ private static unsafe Vector128<T> SignedLessThan<T>(Vector128<T> left, Vector12
         }
     }
 
-    [MethodImpl(MethodImplOptions.AggressiveInlining)]
-    private static unsafe Vector128<TTo> NarrowOrWidenLowerVectorUnsigned<TFrom, TTo>(Vector128<TFrom> vector)
-        where TFrom : unmanaged
-        where TTo : unmanaged
-    {
-        if (sizeof(TFrom) == 1 && sizeof(TTo) == 2)
-        {
-            return Vector128.WidenLower(vector.AsByte()).As<ushort, TTo>();
-        }
-        else if (sizeof(TFrom) == 2 && sizeof(TTo) == 1)
-        {
-            return Vector128.Narrow(vector.AsUInt16(), vector.AsUInt16()).As<byte, TTo>();
-        }
-        else
-        {
-            throw new NotSupportedException();
-        }
-    }
-
     private struct ToUpperConversion { }
     private struct ToLowerConversion { }
 }
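The first hunk also removes Widen8To16AndAndWriteTo, which handled the byte-to-char direction, but nothing in this diff shows where that widening now happens; presumably the sizeof(TFrom) == 1 && sizeof(TTo) == 2 branch of the retained ChangeWidthAndWriteTo covers it. A compilable sketch of that direction, modeled on the removed helper's portable Vector128 path (the WidenSketch class and WidenAndWriteTo name are hypothetical):

// Sketch only: assumed location of the byte -> char widening after this change.
using System.Runtime.CompilerServices;
using System.Runtime.Intrinsics;

internal static unsafe class WidenSketch
{
    // Zero-extend 16 ASCII bytes into 16 UTF-16 code units and write them to
    // pDest + elementOffset, as the removed helper's portable path did.
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public static void WidenAndWriteTo(Vector128<byte> asciiBytes, char* pDest, nuint elementOffset)
    {
        Vector128.WidenLower(asciiBytes).StoreUnsafe(ref *(ushort*)pDest, elementOffset);
        Vector128.WidenUpper(asciiBytes).StoreUnsafe(ref *(ushort*)pDest, elementOffset + 8);
    }
}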