#include "textflag.h"
#include "go_asm.h"
#include "funcdata.h"
DATA MASK1<>+0x00(SB)/8, $2
DATA MASK1<>+0x08(SB)/8, $6
DATA MASK1<>+0x10(SB)/8, $10
DATA MASK1<>+0x18(SB)/8, $14
DATA MASK1<>+0x20(SB)/8, $0x80
DATA MASK1<>+0x28(SB)/8, $0x80
DATA MASK1<>+0x30(SB)/8, $0x80
DATA MASK1<>+0x38(SB)/8, $0x80
DATA MASK1<>+0x40(SB)/8, $0x80
DATA MASK1<>+0x48(SB)/8, $0x80
DATA MASK1<>+0x50(SB)/8, $0x80
DATA MASK1<>+0x58(SB)/8, $0x80
DATA MASK1<>+0x60(SB)/8, $0x80
DATA MASK1<>+0x68(SB)/8, $0x80
DATA MASK1<>+0x70(SB)/8, $0x80
DATA MASK1<>+0x78(SB)/8, $0x80
DATA MASK1<>+0x80(SB)/8, $2
DATA MASK1<>+0x88(SB)/8, $6
DATA MASK1<>+0x90(SB)/8, $10
DATA MASK1<>+0x98(SB)/8, $14
DATA MASK1<>+0xA0(SB)/8, $0x80
DATA MASK1<>+0xA8(SB)/8, $0x80
DATA MASK1<>+0xB0(SB)/8, $0x80
DATA MASK1<>+0xB8(SB)/8, $0x80
DATA MASK1<>+0xC0(SB)/8, $0x80
DATA MASK1<>+0xC8(SB)/8, $0x80
DATA MASK1<>+0xD0(SB)/8, $0x80
DATA MASK1<>+0xD8(SB)/8, $0x80
DATA MASK1<>+0xE0(SB)/8, $0x80
DATA MASK1<>+0xE8(SB)/8, $0x80
DATA MASK1<>+0xF0(SB)/8, $0x80
DATA MASK1<>+0xF8(SB)/8, $0x80
GLOBL MASK1<>(SB), RODATA|NOPTR, $256
DATA MASK2<>+0x00(SB)/8, $0x80
DATA MASK2<>+0x08(SB)/8, $0x80
DATA MASK2<>+0x10(SB)/8, $0x80
DATA MASK2<>+0x18(SB)/8, $0x80
DATA MASK2<>+0x20(SB)/8, $2
DATA MASK2<>+0x28(SB)/8, $6
DATA MASK2<>+0x30(SB)/8, $0xA
DATA MASK2<>+0x38(SB)/8, $0xE
DATA MASK2<>+0x40(SB)/8, $0x80
DATA MASK2<>+0x48(SB)/8, $0x80
DATA MASK2<>+0x50(SB)/8, $0x80
DATA MASK2<>+0x58(SB)/8, $0x80
DATA MASK2<>+0x60(SB)/8, $0x80
DATA MASK2<>+0x68(SB)/8, $0x80
DATA MASK2<>+0x70(SB)/8, $0x80
DATA MASK2<>+0x78(SB)/8, $0x80
DATA MASK2<>+0x80(SB)/8, $0x80
DATA MASK2<>+0x88(SB)/8, $0x80
DATA MASK2<>+0x90(SB)/8, $0x80
DATA MASK2<>+0x98(SB)/8, $0x80
DATA MASK2<>+0xA0(SB)/8, $2
DATA MASK2<>+0xA8(SB)/8, $6
DATA MASK2<>+0xB0(SB)/8, $10
DATA MASK2<>+0xB8(SB)/8, $14
DATA MASK2<>+0xC0(SB)/8, $0x80
DATA MASK2<>+0xC8(SB)/8, $0x80
DATA MASK2<>+0xD0(SB)/8, $0x80
DATA MASK2<>+0xD8(SB)/8, $0x80
DATA MASK2<>+0xE0(SB)/8, $0x80
DATA MASK2<>+0xE8(SB)/8, $0x80
DATA MASK2<>+0xF0(SB)/8, $0x80
DATA MASK2<>+0xF8(SB)/8, $0x80
GLOBL MASK2<>(SB), RODATA|NOPTR, $256
DATA MASK3<>+0x00(SB)/8, $0x80
DATA MASK3<>+0x08(SB)/8, $0x80
DATA MASK3<>+0x10(SB)/8, $0x80
DATA MASK3<>+0x18(SB)/8, $0x80
DATA MASK3<>+0x20(SB)/8, $0x80
DATA MASK3<>+0x28(SB)/8, $0x80
DATA MASK3<>+0x30(SB)/8, $0x80
DATA MASK3<>+0x38(SB)/8, $0x80
DATA MASK3<>+0x40(SB)/8, $2
DATA MASK3<>+0x48(SB)/8, $6
DATA MASK3<>+0x50(SB)/8, $0xA
DATA MASK3<>+0x58(SB)/8, $0xE
DATA MASK3<>+0x60(SB)/8, $0x80
DATA MASK3<>+0x68(SB)/8, $0x80
DATA MASK3<>+0x70(SB)/8, $0x80
DATA MASK3<>+0x78(SB)/8, $0x80
DATA MASK3<>+0x80(SB)/8, $0x80
DATA MASK3<>+0x88(SB)/8, $0x80
DATA MASK3<>+0x90(SB)/8, $0x80
DATA MASK3<>+0x98(SB)/8, $0x80
DATA MASK3<>+0xA0(SB)/8, $0x80
DATA MASK3<>+0xA8(SB)/8, $0x80
DATA MASK3<>+0xB0(SB)/8, $0x80
DATA MASK3<>+0xB8(SB)/8, $0x80
DATA MASK3<>+0xC0(SB)/8, $2
DATA MASK3<>+0xC8(SB)/8, $6
DATA MASK3<>+0xD0(SB)/8, $0xA
DATA MASK3<>+0xD8(SB)/8, $0xE
DATA MASK3<>+0xE0(SB)/8, $0x80
DATA MASK3<>+0xE8(SB)/8, $0x80
DATA MASK3<>+0xF0(SB)/8, $0x80
DATA MASK3<>+0xF8(SB)/8, $0x80
GLOBL MASK3<>(SB), RODATA|NOPTR, $256
DATA MASK4<>+0x00(SB)/8, $0x80
DATA MASK4<>+0x08(SB)/8, $0x80
DATA MASK4<>+0x10(SB)/8, $0x80
DATA MASK4<>+0x18(SB)/8, $0x80
DATA MASK4<>+0x20(SB)/8, $0x80
DATA MASK4<>+0x28(SB)/8, $0x80
DATA MASK4<>+0x30(SB)/8, $0x80
DATA MASK4<>+0x38(SB)/8, $0x80
DATA MASK4<>+0x40(SB)/8, $0x80
DATA MASK4<>+0x48(SB)/8, $0x80
DATA MASK4<>+0x50(SB)/8, $0x80
DATA MASK4<>+0x58(SB)/8, $0x80
DATA MASK4<>+0x60(SB)/8, $2
DATA MASK4<>+0x68(SB)/8, $6
DATA MASK4<>+0x70(SB)/8, $0xA
DATA MASK4<>+0x78(SB)/8, $0xE
DATA MASK4<>+0x80(SB)/8, $0x80
DATA MASK4<>+0x88(SB)/8, $0x80
DATA MASK4<>+0x90(SB)/8, $0x80
DATA MASK4<>+0x98(SB)/8, $0x80
DATA MASK4<>+0xA0(SB)/8, $0x80
DATA MASK4<>+0xA8(SB)/8, $0x80
DATA MASK4<>+0xB0(SB)/8, $0x80
DATA MASK4<>+0xB8(SB)/8, $0x80
DATA MASK4<>+0xC0(SB)/8, $0x80
DATA MASK4<>+0xC8(SB)/8, $0x80
DATA MASK4<>+0xD0(SB)/8, $0x80
DATA MASK4<>+0xD8(SB)/8, $0x80
DATA MASK4<>+0xE0(SB)/8, $2
DATA MASK4<>+0xE8(SB)/8, $6
DATA MASK4<>+0xF0(SB)/8, $0xA
DATA MASK4<>+0xF8(SB)/8, $0xE
GLOBL MASK4<>(SB), RODATA|NOPTR, $256
DATA P_MASK<>+0x00(SB)/8, $0
DATA P_MASK<>+0x08(SB)/8, $4
DATA P_MASK<>+0x10(SB)/8, $1
DATA P_MASK<>+0x18(SB)/8, $5
DATA P_MASK<>+0x20(SB)/8, $2
DATA P_MASK<>+0x28(SB)/8, $6
DATA P_MASK<>+0x30(SB)/8, $3
DATA P_MASK<>+0x38(SB)/8, $7
// 8 quadword entries (64 bytes)
GLOBL P_MASK<>(SB), RODATA|NOPTR, $64
//TEXT ·hasAVX2(SB), NOSPLIT, $0-1
// MOVQ ·cpuid(SB), AX
// BTL $0x08, AX
// SETCC AL
// MOVBQZX AL, AX
// RET
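// A minimal Go-side sketch of the runtime AVX2 check this file assumes.
// The variable name useAVX2 is hypothetical; golang.org/x/sys/cpu is one
// way to obtain the feature bit instead of reading a package-level ·cpuid
// word as the commented-out routine above does:
//
//	import "golang.org/x/sys/cpu"
//
//	// useAVX2 gates the AVX2 copy routines at runtime.
//	var useAVX2 = cpu.X86.HasAVX2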
// If len < 32, don't use AVX2.
TEXT ·copy_AVX2_32(SB), NOSPLIT, $0
MOVQ dst_data+0(FP), DI
MOVQ src_data+24(FP), SI
MOVQ src_len+32(FP), BX
XORQ AX, AX
PCALIGN $32
LOOP:
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU Y0, 0(DI)(AX*1)
ADDQ $32, AX
CMPQ AX, BX
JL LOOP
VZEROUPPER
//PCALIGN $32
RET
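// Hedged sketch of the Go declaration this routine appears to assume,
// inferred from the FP offsets above (dst slice at +0, src slice at +24,
// src_len at +32); the exact signature and file layout are assumptions:
//
//	//go:noescape
//	func copy_AVX2_32(dst, src []byte)
//
//	// Example use: the loop advances 32 bytes per iteration, so the length
//	// should be a multiple of 32 and at least 32.
//	// copy_AVX2_32(dst[:n], src[:n])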
TEXT ·memcopy_avx2_32(SB), $0-16
MOVQ addr+0(FP), DI
MOVQ addr1+8(FP), SI
XORQ AX,AX
PCALIGN $32
LOOP:
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU Y0, 0(DI)(AX*1)
ADDQ $32, AX
CMPQ AX, $32
JL LOOP
VZEROUPPER
//PCALIGN $32
RET
TEXT ·memcopy_avx2_64(SB), $0-16
MOVQ addr+0(FP), DI
MOVQ addr1+8(FP), SI
XORQ AX,AX
PCALIGN $32
LOOP:
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU 32(SI)(AX*1), Y1
VMOVDQU Y0, 0(DI)(AX*1)
VMOVDQU Y1, 32(DI)(AX*1)
ADDQ $64, AX
CMPQ AX, $64
JL LOOP
VZEROUPPER
//PCALIGN $32
RET
// This function is much slower on Zen 1 processors.
TEXT ·copy_AVX2_64(SB), NOSPLIT, $0
MOVQ dst_data+0(FP), DI
MOVQ src_data+24(FP), SI
MOVQ src_len+32(FP), BX
XORQ AX, AX
PCALIGN $32
LOOP:
//VMOVDQU 0(SI)(AX*1), Y0
//VMOVDQU Y0, 0(DI)(AX*1)
//ADDQ $32, AX
//CMPQ AX, BX
//JGE END
//VMOVDQU 0(SI)(AX*1), Y1
//VMOVDQU Y1, 0(DI)(AX*1)
//ADDQ $32, AX
//CMPQ AX, BX
//JL LOOP
//RET
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU 32(SI)(AX*1),Y1
VMOVDQU Y0, 0(DI)(AX*1)
VMOVDQU Y1, 32(DI)(AX*1)
ADDQ $64, AX
CMPQ AX, BX
JL LOOP
VZEROUPPER
RET
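// A hedged benchmark sketch for checking the Zen 1 observation above.
// It belongs in a _test.go file (import "testing"); copy_AVX2_64 is assumed
// to be declared as in the sketch after copy_AVX2_32, and the 1 MiB size is
// illustrative:
//
//	func BenchmarkCopyAVX2_64(b *testing.B) {
//		src := make([]byte, 1<<20)
//		dst := make([]byte, 1<<20)
//		b.SetBytes(int64(len(src)))
//		for i := 0; i < b.N; i++ {
//			copy_AVX2_64(dst, src)
//		}
//	}
//
//	func BenchmarkBuiltinCopy(b *testing.B) {
//		src := make([]byte, 1<<20)
//		dst := make([]byte, 1<<20)
//		b.SetBytes(int64(len(src)))
//		for i := 0; i < b.N; i++ {
//			copy(dst, src)
//		}
//	}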
TEXT ·copy_AMD_AVX2_32(SB), NOSPLIT, $0
MOVQ dst_data+0(FP), DI
MOVQ src_data+24(FP), SI
MOVQ src_len+32(FP), BX
XORQ AX, AX
ALIGN_CHECK:
// Use the aligned loop only when both pointers are 16-byte aligned.
TESTQ $15, DI
JNZ USE_UNALIGNED
TESTQ $15, SI
JNZ USE_UNALIGNED
JMP ALIGNED_LOOP
ALIGNED_LOOP:
// copy 32 bytes (2 * 16 bytes) per iteration
VMOVDQA 0(SI)(AX*1), X0
VMOVDQA 16(SI)(AX*1), X1
VMOVDQA X0, 0(DI)(AX*1)
VMOVDQA X1, 16(DI)(AX*1)
ADDQ $32, AX
CMPQ AX, BX
JL ALIGNED_LOOP
RET
USE_UNALIGNED:
VMOVDQU 0(SI)(AX*1), X0
VMOVDQU 16(SI)(AX*1), X1
VMOVDQU X0, 0(DI)(AX*1)
VMOVDQU X1, 16(DI)(AX*1)
ADDQ $32, AX
CMPQ AX, BX
JL USE_UNALIGNED
RET
TEXT ·copy_AVX2_128(SB), NOSPLIT, $0
MOVQ dst_data+0(FP), DI
MOVQ src_data+24(FP), SI
MOVQ src_len+32(FP), BX
XORQ AX, AX
ALIGN_CHECK:
// Use the aligned loop only when both pointers are 32-byte aligned.
TESTQ $31, DI
JNZ USE_UNALIGNED
TESTQ $31, SI
JNZ USE_UNALIGNED
JMP ALIGNED_LOOP
ALIGNED_LOOP:
VMOVDQA 0(SI)(AX*1), Y0
VMOVDQA 32(SI)(AX*1), Y1
VMOVDQA 64(SI)(AX*1), Y2
VMOVDQA 96(SI)(AX*1), Y3
// save 128 bytes
VMOVDQA Y0, 0(DI)(AX*1)
VMOVDQA Y1, 32(DI)(AX*1)
VMOVDQA Y2, 64(DI)(AX*1)
VMOVDQA Y3, 96(DI)(AX*1)
// advance the index by 128
ADDQ $128, AX
// check
CMPQ AX, BX
JL ALIGNED_LOOP
RET
USE_UNALIGNED:
// load 128 bytes (4 * 32 bytes) at once
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU 32(SI)(AX*1), Y1
VMOVDQU 64(SI)(AX*1), Y2
VMOVDQU 96(SI)(AX*1), Y3
// save 128 bytes
VMOVDQU Y0, 0(DI)(AX*1)
VMOVDQU Y1, 32(DI)(AX*1)
VMOVDQU Y2, 64(DI)(AX*1)
VMOVDQU Y3, 96(DI)(AX*1)
// advance the index by 128
ADDQ $128, AX
// check
CMPQ AX, BX
JL USE_UNALIGNED
RET
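// The ALIGN_CHECK blocks pick VMOVDQA only when both pointers sit on a
// 32-byte boundary. A hedged Go-side sketch of the same test (the helper
// name is hypothetical, and it assumes non-empty slices):
//
//	import "unsafe"
//
//	// bothAligned32 reports whether both slices start on a 32-byte boundary.
//	func bothAligned32(dst, src []byte) bool {
//		return uintptr(unsafe.Pointer(&dst[0]))&31 == 0 &&
//			uintptr(unsafe.Pointer(&src[0]))&31 == 0
//	}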
TEXT ·copy_AVX2_256(SB), NOSPLIT, $0
MOVQ dst_data+0(FP), DI
MOVQ src_data+24(FP), SI
MOVQ src_len+32(FP), BX
XORQ AX, AX
ALIGN_CHECK:
CMPQ BX, AX // Check if all data has been processed
JLE DONE // If AX >= BX, exit
// Use the aligned loop only when both pointers are 32-byte aligned.
TESTQ $31, DI
JNZ USE_UNALIGNED
TESTQ $31, SI
JNZ USE_UNALIGNED
JMP ALIGNED_LOOP
ALIGNED_LOOP:
// load 256 bytes (8 * 32 bytes) at once
VMOVDQA 0(SI)(AX*1), Y0
VMOVDQA 32(SI)(AX*1), Y1
VMOVDQA 64(SI)(AX*1), Y2
VMOVDQA 96(SI)(AX*1), Y3
VMOVDQA 128(SI)(AX*1), Y4
VMOVDQA 160(SI)(AX*1), Y5
VMOVDQA 192(SI)(AX*1), Y6
VMOVDQA 224(SI)(AX*1), Y7
// store 256 bytes
VMOVDQA Y0, 0(DI)(AX*1)
VMOVDQA Y1, 32(DI)(AX*1)
VMOVDQA Y2, 64(DI)(AX*1)
VMOVDQA Y3, 96(DI)(AX*1)
VMOVDQA Y4, 128(DI)(AX*1)
VMOVDQA Y5, 160(DI)(AX*1)
VMOVDQA Y6, 192(DI)(AX*1)
VMOVDQA Y7, 224(DI)(AX*1)
// advance the index by 256
ADDQ $256, AX
// check
CMPQ AX, BX
JL ALIGNED_LOOP
RET
USE_UNALIGNED:
// load 256 bytes (8 * 32 bytes) at once
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU 32(SI)(AX*1), Y1
VMOVDQU 64(SI)(AX*1), Y2
VMOVDQU 96(SI)(AX*1), Y3
VMOVDQU 128(SI)(AX*1), Y4
VMOVDQU 160(SI)(AX*1), Y5
VMOVDQU 192(SI)(AX*1), Y6
VMOVDQU 224(SI)(AX*1), Y7
// store 256 bytes
VMOVDQU Y0, 0(DI)(AX*1)
VMOVDQU Y1, 32(DI)(AX*1)
VMOVDQU Y2, 64(DI)(AX*1)
VMOVDQU Y3, 96(DI)(AX*1)
VMOVDQU Y4, 128(DI)(AX*1)
VMOVDQU Y5, 160(DI)(AX*1)
VMOVDQU Y6, 192(DI)(AX*1)
VMOVDQU Y7, 224(DI)(AX*1)
// advance the index by 256
ADDQ $256, AX
// check
CMPQ AX, BX
JL USE_UNALIGNED
RET
DONE:
RET
TEXT ·copy_AVX2_512(SB), NOSPLIT, $0
MOVQ dst_data+0(FP), DI
MOVQ src_data+24(FP), SI
MOVQ src_len+32(FP), BX
XORQ AX, AX
ALIGN_CHECK:
CMPQ BX, AX // Check if all data has been processed
JLE DONE // If AX >= BX, exit the loop
// Use the aligned loop only when both pointers are 32-byte aligned.
TESTQ $31, DI
JNZ USE_UNALIGNED
TESTQ $31, SI
JNZ USE_UNALIGNED
JMP ALIGNED_LOOP
ALIGNED_LOOP:
// Load 512 bytes (16 * 32 bytes) per iteration, using all 16 YMM registers
VMOVDQA 0(SI)(AX*1), Y0
VMOVDQA 32(SI)(AX*1), Y1
VMOVDQA 64(SI)(AX*1), Y2
VMOVDQA 96(SI)(AX*1), Y3
VMOVDQA 128(SI)(AX*1), Y4
VMOVDQA 160(SI)(AX*1), Y5
VMOVDQA 192(SI)(AX*1), Y6
VMOVDQA 224(SI)(AX*1), Y7
VMOVDQA 256(SI)(AX*1), Y8
VMOVDQA 288(SI)(AX*1), Y9
VMOVDQA 320(SI)(AX*1), Y10
VMOVDQA 352(SI)(AX*1), Y11
VMOVDQA 384(SI)(AX*1), Y12
VMOVDQA 416(SI)(AX*1), Y13
VMOVDQA 448(SI)(AX*1), Y14
VMOVDQA 480(SI)(AX*1), Y15
// Store 512 bytes
VMOVDQA Y0, 0(DI)(AX*1)
VMOVDQA Y1, 32(DI)(AX*1)
VMOVDQA Y2, 64(DI)(AX*1)
VMOVDQA Y3, 96(DI)(AX*1)
VMOVDQA Y4, 128(DI)(AX*1)
VMOVDQA Y5, 160(DI)(AX*1)
VMOVDQA Y6, 192(DI)(AX*1)
VMOVDQA Y7, 224(DI)(AX*1)
VMOVDQA Y8, 256(DI)(AX*1)
VMOVDQA Y9, 288(DI)(AX*1)
VMOVDQA Y10, 320(DI)(AX*1)
VMOVDQA Y11, 352(DI)(AX*1)
VMOVDQA Y12, 384(DI)(AX*1)
VMOVDQA Y13, 416(DI)(AX*1)
VMOVDQA Y14, 448(DI)(AX*1)
VMOVDQA Y15, 480(DI)(AX*1)
ADDQ $512, AX
CMPQ AX, BX
JL ALIGNED_LOOP
RET
USE_UNALIGNED:
// Load 512 bytes in chunks for unaligned memory
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU 32(SI)(AX*1), Y1
VMOVDQU 64(SI)(AX*1), Y2
VMOVDQU 96(SI)(AX*1), Y3
VMOVDQU 128(SI)(AX*1), Y4
VMOVDQU 160(SI)(AX*1), Y5
VMOVDQU 192(SI)(AX*1), Y6
VMOVDQU 224(SI)(AX*1), Y7
VMOVDQU 256(SI)(AX*1), Y8
VMOVDQU 288(SI)(AX*1), Y9
VMOVDQU 320(SI)(AX*1), Y10
VMOVDQU 352(SI)(AX*1), Y11
VMOVDQU 384(SI)(AX*1), Y12
VMOVDQU 416(SI)(AX*1), Y13
VMOVDQU 448(SI)(AX*1), Y14
VMOVDQU 480(SI)(AX*1), Y15
// Store 512 bytes for unaligned memory
VMOVDQU Y0, 0(DI)(AX*1)
VMOVDQU Y1, 32(DI)(AX*1)
VMOVDQU Y2, 64(DI)(AX*1)
VMOVDQU Y3, 96(DI)(AX*1)
VMOVDQU Y4, 128(DI)(AX*1)
VMOVDQU Y5, 160(DI)(AX*1)
VMOVDQU Y6, 192(DI)(AX*1)
VMOVDQU Y7, 224(DI)(AX*1)
VMOVDQU Y8, 256(DI)(AX*1)
VMOVDQU Y9, 288(DI)(AX*1)
VMOVDQU Y10, 320(DI)(AX*1)
VMOVDQU Y11, 352(DI)(AX*1)
VMOVDQU Y12, 384(DI)(AX*1)
VMOVDQU Y13, 416(DI)(AX*1)
VMOVDQU Y14, 448(DI)(AX*1)
VMOVDQU Y15, 480(DI)(AX*1)
ADDQ $512, AX
CMPQ AX, BX
JL USE_UNALIGNED
RET
DONE:
RET
// Still tentative: intended for 1024-byte copies (the loop moves 512 bytes per iteration).
TEXT ·copy_AVX2_1024(SB), NOSPLIT, $0
MOVQ dst_data+0(FP), DI
MOVQ src_data+24(FP), SI
MOVQ src_len+32(FP), BX
XORQ AX, AX
LOOP_START:
CMPQ BX, AX // Check if all data has been processed
JLE DONE // If AX >= BX, exit the loop
// Check for alignment: use the aligned loop only when both pointers are
// 32-byte aligned.
TESTQ $31, DI
JNZ USE_UNALIGNED
TESTQ $31, SI
JNZ USE_UNALIGNED
JMP ALIGNED_LOOP
ALIGNED_LOOP:
VMOVDQA 0(SI)(AX*1), Y0
VMOVDQA 32(SI)(AX*1), Y1
VMOVDQA 64(SI)(AX*1), Y2
VMOVDQA 96(SI)(AX*1), Y3
VMOVDQA 128(SI)(AX*1), Y4
VMOVDQA 160(SI)(AX*1), Y5
VMOVDQA 192(SI)(AX*1), Y6
VMOVDQA 224(SI)(AX*1), Y7
VMOVDQA 256(SI)(AX*1), Y8
VMOVDQA 288(SI)(AX*1), Y9
VMOVDQA 320(SI)(AX*1), Y10
VMOVDQA 352(SI)(AX*1), Y11
VMOVDQA 384(SI)(AX*1), Y12
VMOVDQA 416(SI)(AX*1), Y13
VMOVDQA 448(SI)(AX*1), Y14
VMOVDQA 480(SI)(AX*1), Y15
VMOVDQA Y0, 0(DI)(AX*1)
VMOVDQA Y1, 32(DI)(AX*1)
VMOVDQA Y2, 64(DI)(AX*1)
VMOVDQA Y3, 96(DI)(AX*1)
VMOVDQA Y4, 128(DI)(AX*1)
VMOVDQA Y5, 160(DI)(AX*1)
VMOVDQA Y6, 192(DI)(AX*1)
VMOVDQA Y7, 224(DI)(AX*1)
VMOVDQA Y8, 256(DI)(AX*1)
VMOVDQA Y9, 288(DI)(AX*1)
VMOVDQA Y10, 320(DI)(AX*1)
VMOVDQA Y11, 352(DI)(AX*1)
VMOVDQA Y12, 384(DI)(AX*1)
VMOVDQA Y13, 416(DI)(AX*1)
VMOVDQA Y14, 448(DI)(AX*1)
VMOVDQA Y15, 480(DI)(AX*1)
ADDQ $512, AX
CMPQ AX, BX
JL ALIGNED_LOOP
RET
USE_UNALIGNED:
VMOVDQU 0(SI)(AX*1), Y0
VMOVDQU 32(SI)(AX*1), Y1
VMOVDQU 64(SI)(AX*1), Y2
VMOVDQU 96(SI)(AX*1), Y3
VMOVDQU 128(SI)(AX*1), Y4
VMOVDQU 160(SI)(AX*1), Y5
VMOVDQU 192(SI)(AX*1), Y6
VMOVDQU 224(SI)(AX*1), Y7
VMOVDQU 256(SI)(AX*1), Y8
VMOVDQU 288(SI)(AX*1), Y9
VMOVDQU 320(SI)(AX*1), Y10
VMOVDQU 352(SI)(AX*1), Y11
VMOVDQU 384(SI)(AX*1), Y12
VMOVDQU 416(SI)(AX*1), Y13
VMOVDQU 448(SI)(AX*1), Y14
VMOVDQU 480(SI)(AX*1), Y15
VMOVDQU Y0, 0(DI)(AX*1)
VMOVDQU Y1, 32(DI)(AX*1)
VMOVDQU Y2, 64(DI)(AX*1)
VMOVDQU Y3, 96(DI)(AX*1)
VMOVDQU Y4, 128(DI)(AX*1)
VMOVDQU Y5, 160(DI)(AX*1)
VMOVDQU Y6, 192(DI)(AX*1)
VMOVDQU Y7, 224(DI)(AX*1)
VMOVDQU Y8, 256(DI)(AX*1)
VMOVDQU Y9, 288(DI)(AX*1)
VMOVDQU Y10, 320(DI)(AX*1)
VMOVDQU Y11, 352(DI)(AX*1)
VMOVDQU Y12, 384(DI)(AX*1)
VMOVDQU Y13, 416(DI)(AX*1)
VMOVDQU Y14, 448(DI)(AX*1)
VMOVDQU Y15, 480(DI)(AX*1)
ADDQ $512, AX
CMPQ AX,BX
JL USE_UNALIGNED
RET
DONE:
RET
// Use for copies of 512 bytes and larger.
TEXT ·_copy_(SB), NOSPLIT, $0
MOVQ dst_base+0x00(FP), AX
MOVQ src_base+0x18(FP), CX
MOVQ dst_len+0x08(FP), DX
MOVQ src_len+0x20(FP), BX
CMPQ BX, DX
CMOVQLT BX, DX
MOVQ DX, ret+0x30(FP)
VMOVDQU MASK1<>+0(SB), Y10 // Load MASK1 into Y10 register
VMOVDQU MASK2<>+0(SB), Y11 // Load MASK2 into Y11 register
VMOVDQU MASK3<>+0(SB), Y12 // Load MASK3 into Y12 register
VMOVDQU MASK4<>+0(SB), Y13 // Load MASK4 into Y13 register
VMOVDQU P_MASK<>+0(SB), Y14 // Load P_MASK into Y14 register
tail:
CMPQ DX,$0x00
JEQ done
CMPQ DX, $0x02
JBE handle1to2
CMPQ DX, $0x03
JBE handle2to3
CMPQ DX, $0x04
JE handle4
CMPQ DX, $0x08
JE handle8
JB handle5to7
CMPQ DX, $0x10
JBE handle9to16
CMPQ DX, $32
BTL $0x08, ·X86+0(SB)
JCC handle17to32
JBE avx2_tail_1to32
CALL ·copy_AVX2_32+0(SB)
CMPQ DX, $0x40 // 64
BTL $0x08, ·X86+8(SB)
JCC handle33to64
JB avx2_tail
CALL ·copy_AVX2_64+0(SB)
//CALL ·copy_AVX2(SB)
CMPQ DX, $0x00000080
JB avx2_tail
CALL ·copy_AVX2_128+0(SB)
//JMP avx2
CMPQ DX, $0x00000100
JB avx2_tail
JMP avx2
// runtime·memmove(SB)
done:
RET
handle1to2:
MOVB (CX), BX // Load the first byte from the source (CX)
MOVB -1(CX)(DX*1), CX // Load the last byte from the source (CX + DX - 1)
MOVB BX, (AX) // Store the first byte at the destination (AX)
MOVB CX, -1(AX)(DX*1) // Store the last byte at the destination (AX + DX - 1)
RET
handle1:
MOVB (CX), CL
MOVB CL, (AX)
RET
handle2to3:
CMPQ DX, $2
JE handle2
MOVW (CX), BX
MOVB 2(CX), CL
MOVW BX, (AX)
MOVB CL, 2(AX)
RET
handle2:
MOVW (CX), BX
MOVW BX, (AX)
RET
generic:
MOVOU (CX), X0 // Load 128 bits from CX into X0
MOVOU 16(CX), X1 // Load next 128 bits from CX+16 into X1
MOVOU 32(CX), X2 // Load next 128 bits from CX+32 into X2
MOVOU 48(CX), X3 // Load next 128 bits from CX+48 into X3
MOVOU X0, (AX) // Store 128 bits from X0 into AX
MOVOU X1, 16(AX) // Store 128 bits from X1 into AX+16
MOVOU X2, 32(AX) // Store 128 bits from X2 into AX+32
MOVOU X3, 48(AX) // Store 128 bits from X3 into AX+48
ADDQ $0x40, CX // Move source pointer forward by 64 bytes
ADDQ $0x40, AX // Move destination pointer forward by 64 bytes
SUBQ $0x40, DX // Decrease the remaining length (DX)
CMPQ DX, $0x40 // If remaining length <= 64, jump to tail
JBE tail
JMP generic // Otherwise, continue processing more data
handle4:
MOVL (CX), CX
MOVL CX, (AX)
RET
handle5to7:
MOVL (CX), BX
MOVL -4(CX)(DX*1), CX
MOVL BX, (AX)
MOVL CX, -4(AX)(DX*1)
RET
handle8:
MOVQ (CX), CX
MOVQ CX, (AX)
RET
handle9to16:
MOVQ (CX), BX
MOVQ -8(CX)(DX*1), CX
MOVQ BX, (AX)
MOVQ CX, -8(AX)(DX*1)
RET
handle17to32:
MOVOU (CX), X0
MOVOU -16(CX)(DX*1), X1
MOVOU X0, (AX)
MOVOU X1, -16(AX)(DX*1)
RET
handle33to64:
MOVOU (CX), X0
MOVOU 16(CX), X1
MOVOU -32(CX)(DX*1), X2
MOVOU -16(CX)(DX*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
MOVOU X2, -32(AX)(DX*1)
MOVOU X3, -16(AX)(DX*1)
RET
avx:
VMOVDQU 0(CX)(SI*1), Y0
VMOVDQU Y0, 0(AX)(SI*1)
ADDQ $32, SI
CMPQ SI, DX
JB avx // keep copying while SI < DX
JMP avx2_done
avx2:
VMOVDQU (CX), Y0
VMOVDQU 32(CX), Y1
VMOVDQU 64(CX), Y2
VMOVDQU 96(CX), Y3
VMOVDQU Y0, (AX)
VMOVDQU Y1, 32(AX)
VMOVDQU Y2, 64(AX)
VMOVDQU Y3, 96(AX)
VPSHUFB Y10, Y0, Y0
VPSHUFB Y11, Y1, Y1
VPSHUFB Y12, Y2, Y2
VPSHUFB Y13, Y3, Y3
VPOR Y0, Y1, Y0
VPOR Y2, Y3, Y2
VPOR Y0, Y2, Y0
VPERMD Y0,Y14,Y0
VMOVDQU Y0, (CX)
VMOVDQU Y0, (AX)
ADDQ $0x20, CX
ADDQ $0x20, AX
SUBQ $0x20, DX
JZ avx2_done
CMPQ DX, CX
JAE avx2
avx2_tail:
CMPQ DX, $0x40
JBE avx2_tail_1to64
VMOVDQU (CX), Y0
VMOVDQU 32(CX), Y1
VMOVDQU -64(CX)(DX*1), Y2
VMOVDQU -32(CX)(DX*1), Y3
VMOVDQU Y0, (AX)
VMOVDQU Y1, 32(AX)
VMOVDQU Y2, -64(AX)(DX*1)
VMOVDQU Y3, -32(AX)(DX*1)
JMP avx2_done
avx2_tail_1to64:
VMOVDQU -64(CX)(DX*1), Y0
VMOVDQU -32(CX)(DX*1), Y1
VMOVDQU Y0, -64(AX)(DX*1)
VMOVDQU Y1, -32(AX)(DX*1)
avx2_tail_1to32:
VMOVDQU (CX), Y0
VMOVDQU -32(CX)(DX*1), Y1
VMOVDQU Y0, (AX)
VMOVDQU Y1, -32(AX)(DX*1)
avx2_done:
VZEROUPPER
RET
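// Hedged sketch of the Go declaration and a dispatching wrapper for ·_copy_,
// inferred from the FP offsets above (dst slice at +0x00, src slice at
// +0x18, result at +0x30). The wrapper name Copy and the 512-byte threshold
// follow the "use for copies of 512 bytes and larger" note and are
// assumptions, not part of this file:
//
//	//go:noescape
//	func _copy_(dst, src []byte) int
//
//	// Copy copies min(len(dst), len(src)) bytes and returns that count,
//	// using the assembly path only for larger buffers.
//	func Copy(dst, src []byte) int {
//		if len(src) < 512 || len(dst) < 512 {
//			return copy(dst, src)
//		}
//		return _copy_(dst, src)
//	}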