// hash_arm64.s (forked from prysmaticlabs/gohashtree)
/*
MIT License

Copyright (c) 2021 Prysmatic Labs

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

This code is based on Intel's implementation found in
https://github.com/intel/intel-ipsec-mb

Copied parts are
Copyright (c) 2012-2021, Intel Corporation
*/
#include "textflag.h"
#define OUTPUT_PTR R0
#define DATA_PTR R1
#define NUM_BLKS R2
#define last R2
#define digest R19
#define k256 R20
#define padding R21
#define VR0 V0
#define VR1 V1
#define VR2 V2
#define VR3 V3
#define VTMP0 V4
#define VTMP1 V5
#define VTMP2 V6
#define VTMP3 V7
#define VTMP4 V17
#define VTMP5 V18
#define VTMP6 V19
#define KV0 V20
#define KV1 V21
#define KV2 V22
#define KV3 V23
#define KQ0 F20
#define KQ1 F21
#define KQ2 F22
#define KQ3 F23
#define VZ V16
#define A_ R3
#define B_ R4
#define C_ R5
#define D_ R6
#define E_ R7
#define F_ R9
#define G_ R10
#define H_ R11
#define T1 R12
#define T2 R13
#define T3 R14
#define T4 R15
#define T5 R22
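// round1_sched .. round4_sched each perform one scalar SHA-256 round while
// interleaving ASIMD instructions that compute the message schedule for a
// later group of W words. The scalar round reads its W[t]+K[t] value from
// 0/4/8/12(RSP), where it was spilled with FMOVQ before the macro runs.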
#define round1_sched(A, B, C, D, E, F, G, H, VV0, VV1, VV2, VV3) \
VEXT $4, VV3.B16, VV2.B16, VTMP0.B16; \
RORW $6, E, T1; \
MOVWU (RSP), T3; \
RORW $2, A, T2; \
RORW $13, A, T4; \
VEXT $4, VV1.B16, VV0.B16, VTMP1.B16; \
EORW T4, T2, T2; \
ADDW T3, H, H; \
RORW $11, E, T3; \
VADD VV0.S4, VTMP0.S4, VTMP0.S4; \
EORW T3, T1, T1; \
RORW $25, E, T3; \
RORW $22, A, T4; \
VUSHR $7, VTMP1.S4, VTMP2.S4; \
EORW T3, T1, T1; \
EORW T4, T2, T2; \
EORW G, F, T3; \
VSHL $(32-7), VTMP1.S4, VTMP3.S4; \
EORW C, A, T4; \
ANDW E, T3, T3; \
ANDW B, T4, T4; \
EORW G, T3, T3; \
VUSHR $18, VTMP1.S4, VTMP4.S4; \
ADDW T3, T1, T1; \
ANDW C, A, T3; \
ADDW T1, H, H; \
VORR VTMP2.B16, VTMP3.B16, VTMP3.B16; \
EORW T3, T4, T4; \
ADDW H, D, D; \
ADDW T4, T2, T2; \
VUSHR $3, VTMP1.S4, VTMP2.S4; \
ADDW T2, H, H
#define round2_sched(A, B, C, D, E, F, G, H, VV3) \
MOVWU 4(RSP), T3; \
RORW $6, E, T1; \
VSHL $(32-18), VTMP1.S4, VTMP1.S4; \
RORW $2, A, T2; \
RORW $13, A, T4; \
ADDW T3, H, H; \
VEOR VTMP2.B16, VTMP3.B16, VTMP3.B16; \
RORW $11, E, T3; \
EORW T4, T2, T2; \
EORW T3, T1, T1; \
VEOR VTMP1.B16, VTMP4.B16, VTMP1.B16; \
RORW $25, E, T3; \
RORW $22, A, T4; \
EORW T3, T1, T1; \
VZIP2 VV3.S4, VV3.S4, VTMP5.S4; \
EORW T4, T2, T2; \
EORW G, F, T3; \
EORW C, A, T4; \
VEOR VTMP1.B16, VTMP3.B16, VTMP1.B16; \
ANDW E, T3, T3; \
ANDW B, T4, T4; \
EORW G, T3, T3; \
VUSHR $10, VTMP5.S4, VTMP6.S4; \
ADDW T3, T1, T1; \
ANDW C, A, T3; \
ADDW T1, H, H; \
VUSHR $19, VTMP5.D2, VTMP3.D2; \
EORW T3, T4, T4; \
ADDW H, D, D; \
ADDW T4, T2, T2; \
VUSHR $17, VTMP5.D2, VTMP2.D2; \
ADDW T2, H, H
#define round3_sched(A, B, C, D, E, F, G, H) \
MOVWU 8(RSP), T3; \
RORW $6, E, T1; \
VEOR VTMP6.B16, VTMP3.B16, VTMP3.B16; \
RORW $2, A, T2; \
RORW $13, A, T4; \
ADDW T3, H, H; \
VADD VTMP1.S4, VTMP0.S4, VTMP0.S4; \
RORW $11, E, T3; \
EORW T4, T2, T2; \
EORW T3, T1, T1; \
VEOR VTMP2.B16, VTMP3.B16, VTMP1.B16; \
RORW $25, E, T3; \
RORW $22, A, T4; \
EORW T3, T1, T1; \
WORD $0xea128a5; \
EORW T4, T2, T2; \
EORW G, F, T3; \
EORW C, A, T4; \
VADD VTMP1.S4, VTMP0.S4, VTMP0.S4; \
ANDW E, T3, T3; \
ANDW B, T4, T4; \
EORW G, T3, T3; \
VZIP1 VTMP0.S4, VTMP0.S4, VTMP2.S4; \
ADDW T3, T1, T1; \
ANDW C, A, T3; \
ADDW T1, H, H; \
EORW T3, T4, T4; \
ADDW H, D, D; \
ADDW T4, T2, T2; \
VUSHR $10, VTMP2.S4, VTMP1.S4; \
ADDW T2, H, H
#define round4_sched(A, B, C, D, E, F, G, H, VV0) \
MOVWU 12(RSP), T3; \
RORW $6, E, T1; \
RORW $2, A, T2; \
VUSHR $19, VTMP2.D2, VTMP3.D2; \
RORW $13, A, T4; \
ADDW T3, H, H; \
RORW $11, E, T3; \
EORW T4, T2, T2; \
VUSHR $17, VTMP2.D2, VTMP2.D2; \
EORW T3, T1, T1; \
RORW $25, E, T3; \
RORW $22, A, T4; \
EORW T3, T1, T1; \
VEOR VTMP3.B16, VTMP1.B16, VTMP1.B16; \
EORW T4, T2, T2; \
EORW G, F, T3; \
EORW C, A, T4; \
VEOR VTMP2.B16, VTMP1.B16, VTMP1.B16; \
ANDW E, T3, T3; \
ANDW B, T4, T4; \
EORW G, T3, T3; \
VUZP1 VTMP1.S4, VZ.S4, VTMP1.S4; \
ADDW T3, T1, T1; \
ANDW C, A, T3; \
ADDW T1, H, H; \
EORW T3, T4, T4; \
ADDW H, D, D; \
ADDW T4, T2, T2; \
VADD VTMP0.S4, VTMP1.S4, VV0.S4; \
ADDW T2, H, H
#define four_rounds_sched(A, B, C, D, E, F, G, H, VV0, VV1, VV2, VV3) \
round1_sched(A, B, C, D, E, F, G, H, VV0, VV1, VV2, VV3); \
round2_sched(H, A, B, C, D, E, F, G, VV3); \
round3_sched(G, H, A, B, C, D, E, F); \
round4_sched(F, G, H, A, B, C, D, E, VV0)
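// one_round is a plain scalar SHA-256 round; the W[t]+K[t] value is loaded
// directly from offset(ptr) and no message scheduling is interleaved.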
#define one_round(A, B, C, D, E, F, G, H, ptr, offset) \
MOVWU offset(ptr), T3; \
RORW $6, E, T1; \
RORW $2, A, T2; \
RORW $13, A, T4; \
ADDW T3, H, H; \
RORW $11, E, T3; \
EORW T4, T2, T2; \
EORW T3, T1, T1; \
RORW $25, E, T3; \
RORW $22, A, T4; \
EORW T3, T1, T1; \
EORW T4, T2, T2; \
EORW G, F, T3; \
EORW C, A, T4; \
ANDW E, T3, T3; \
ANDW B, T4, T4; \
EORW G, T3, T3; \
ADDW T3, T1, T1; \
ANDW C, A, T3; \
ADDW T1, H, H; \
EORW T3, T4, T4; \
ADDW H, D, D; \
ADDW T4, T2, T2; \
ADDW T2, H, H
#define four_rounds(A, B, C, D, E, F, G, H, ptr, offset) \
one_round(A, B, C, D, E, F, G, H, ptr, offset); \
one_round(H, A, B, C, D, E, F, G, ptr, offset + 4); \
one_round(G, H, A, B, C, D, E, F, ptr, offset + 8); \
one_round(F, G, H, A, B, C, D, E, ptr, offset + 12)
// Definitions for ASIMD version
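// In the 4-way ASIMD path each working variable A..H lives in a vector
// register, with one independent message block per 32-bit lane, so four
// chunks are hashed in lockstep.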
#define digest2 R6
#define post64 R7
#define postminus176 R9
#define post32 R10
#define postminus80 R11
#define M1 V16
#define M2 V17
#define M3 V18
#define M4 V19
#define MQ1 F16
#define MQ2 F17
#define MQ3 F18
#define MQ4 F19
#define NVR1 V24
#define NVR2 V25
#define NVR3 V26
#define NVR4 V27
#define QR2 F25
#define QR4 F27
#define TV1 V28
#define TV2 V29
#define TV3 V30
#define TV4 V31
#define TV5 V20
#define TV6 V21
#define TV7 V22
#define TV8 V23
#define TQ4 F31
#define TQ5 F20
#define TQ6 F21
#define TQ7 F22
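// round_4 performs one SHA-256 round on all four lanes at once. ASIMD has no
// vector rotate, so every rotation is emulated with a USHR/SHL/ORR pair. The
// bicword argument appears to be the raw encoding of a vector BIC
// (TV4 = G AND NOT E, part of the Ch function) hardwired for the rotating
// register assignment. The round constant, presumably replicated across the
// four lanes in _K256_4, is loaded from offset(k256), and the raw message
// vector MV is spilled to offset(RSP) for the scheduling rounds later on.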
#define round_4(A, B, C, D, E, F, G, H, MV, MQ, bicword, offset) \
VUSHR $6, E.S4, TV1.S4; \
VSHL $(32-6), E.S4, TV2.S4; \
VUSHR $11, E.S4, NVR2.S4; \
VSHL $(32-11), E.S4, NVR1.S4; \
VAND F.B16, E.B16, TV3.B16; \
WORD bicword; \
VORR TV2.B16, TV1.B16, TV1.B16; \
VUSHR $25, E.S4, TV2.S4; \
FMOVQ offset(k256), QR4; \
VSHL $(32-25), E.S4, NVR3.S4; \
VORR NVR1.B16, NVR2.B16, NVR1.B16; \
VEOR TV4.B16, TV3.B16, TV3.B16; \
VORR NVR3.B16, TV2.B16, TV2.B16; \
VEOR C.B16, A.B16, NVR3.B16; \
VEOR NVR1.B16, TV1.B16, TV1.B16; \
VADD NVR4.S4, MV.S4, TV4.S4; \
VADD TV3.S4, H.S4, H.S4; \
VUSHR $2, A.S4, TV3.S4; \
VAND B.B16, NVR3.B16, NVR3.B16; \
VSHL $(32-2), A.S4, NVR4.S4; \
VEOR TV2.B16, TV1.B16, TV1.B16; \
VUSHR $13, A.S4, TV2.S4; \
VSHL $(32-13), A.S4, NVR1.S4; \
VADD TV4.S4, H.S4, H.S4; \
VORR NVR4.B16, TV3.B16, TV3.B16; \
VAND C.B16, A.B16, NVR4.B16; \
VUSHR $22, A.S4, TV4.S4; \
	VSHL $(32-22), A.S4, NVR2.S4; \
VORR NVR1.B16, TV2.B16, TV2.B16; \
VADD TV1.S4, H.S4, H.S4; \
VEOR NVR4.B16, NVR3.B16, NVR3.B16; \
VORR NVR2.B16, TV4.B16, TV4.B16; \
VEOR TV3.B16, TV2.B16, TV2.B16; \
VADD H.S4, D.S4, D.S4; \
VADD NVR3.S4, H.S4, H.S4; \
VEOR TV4.B16, TV2.B16, TV2.B16; \
FMOVQ MQ, offset(RSP); \
VADD TV2.S4, H.S4, H.S4
#define eight_4_roundsA(A, B, C, D, E, F, G, H, MV1, MV2, MV3, MV4, MQ1, MQ2, MQ3, MQ4, offset) \
round_4(A, B, C, D, E, F, G, H, MV1, MQ1, $0x4e641cdf, offset); \
round_4(H, A, B, C, D, E, F, G, MV2, MQ2, $0x4e631cbf, offset + 16); \
round_4(G, H, A, B, C, D, E, F, MV3, MQ3, $0x4e621c9f, offset + 32); \
round_4(F, G, H, A, B, C, D, E, MV4, MQ4, $0x4e611c7f, offset + 48)
#define eight_4_roundsB(A, B, C, D, E, F, G, H, MV1, MV2, MV3, MV4, MQ1, MQ2, MQ3, MQ4, offset) \
round_4(A, B, C, D, E, F, G, H, MV1, MQ1, $0x4e601c5f, offset); \
round_4(H, A, B, C, D, E, F, G, MV2, MQ2, $0x4e671c3f, offset + 16); \
round_4(G, H, A, B, C, D, E, F, MV3, MQ3, $0x4e661c1f, offset + 32); \
round_4(F, G, H, A, B, C, D, E, MV4, MQ4, $0x4e651cff, offset + 48)
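// round_4_and_sched is round_4 plus the four-lane message schedule: the
// stack holds one 16-byte vector per W word, so W[t-16]/W[t-15] come from
// (offset-256)(RSP), W[t-7] from (offset-112)(RSP) and W[t-2] from
// (offset-32)(RSP); sigma0/sigma1 are built from shift pairs, the new W[t]
// is written back to offset(RSP), and W[t]+K[t] is folded into H.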
#define round_4_and_sched(A, B, C, D, E, F, G, H, bicword, offset) \
FLDPQ (offset-256)(RSP), (TQ6, TQ5); \
VUSHR $6, E.S4, TV1.S4; \
VSHL $(32-6), E.S4, TV2.S4; \
VUSHR $11, E.S4, NVR2.S4; \
VSHL $(32-11), E.S4, NVR1.S4; \
VAND F.B16, E.B16, TV3.B16; \
WORD bicword; \
VUSHR $7, TV5.S4, M1.S4; \
FMOVQ (offset-32)(RSP), TQ7; \
VSHL $(32-7), TV5.S4, M2.S4; \
VORR TV2.B16, TV1.B16, TV1.B16; \
VUSHR $25, E.S4, TV2.S4; \
VSHL $(32-25), E.S4, NVR3.S4; \
VORR NVR1.B16, NVR2.B16, NVR1.B16; \
VEOR TV4.B16, TV3.B16, TV3.B16; \
FMOVQ offset(k256), QR4; \
VORR M2.B16, M1.B16, M1.B16; \
VUSHR $17, TV7.S4, M3.S4; \
VSHL $(32-17), TV7.S4, M4.S4; \
VUSHR $18, TV5.S4, M2.S4; \
VSHL $(32-18), TV5.S4, TV8.S4; \
VORR NVR3.B16, TV2.B16, TV2.B16; \
VEOR C.B16, A.B16, NVR3.B16; \
VORR M4.B16, M3.B16, M3.B16; \
FMOVQ (offset-112)(RSP), TQ4; \
VUSHR $19, TV7.S4, M4.S4; \
VSHL $(32-19), TV7.S4, NVR2.S4; \
VORR TV8.B16, M2.B16, M2.B16; \
VUSHR $3, TV5.S4, TV8.S4; \
VORR NVR2.B16, M4.B16, M4.B16; \
VEOR NVR1.B16, TV1.B16, TV1.B16; \
VEOR M2.B16, M1.B16, M1.B16; \
VUSHR $10, TV7.S4, M2.S4; \
VEOR M4.B16, M3.B16, M3.B16; \
VADD TV3.S4, H.S4, H.S4; \
VEOR TV8.B16, M1.B16, M1.B16; \
VADD TV4.S4, TV6.S4, TV6.S4; \
VEOR M2.B16, M3.B16, M3.B16; \
VUSHR $2, A.S4, TV3.S4; \
VAND B.B16, NVR3.B16, NVR3.B16; \
VADD TV6.S4, M1.S4, M1.S4; \
VSHL $(32-2), A.S4, TV6.S4; \
VEOR TV2.B16, TV1.B16, TV1.B16; \
VUSHR $13, A.S4, TV2.S4; \
VADD M3.S4, M1.S4, M1.S4; \
VADD TV1.S4, H.S4, H.S4; \
VSHL $(32-13), A.S4, NVR1.S4; \
VORR TV6.B16, TV3.B16, TV3.B16; \
VADD NVR4.S4, M1.S4, TV5.S4; \
FMOVQ MQ1, offset(RSP); \
VAND C.B16, A.B16, NVR4.B16; \
VUSHR $22, A.S4, TV4.S4; \
VSHL $(32-22), A.S4, NVR2.S4; \
VADD TV5.S4, H.S4, H.S4; \
VORR NVR1.B16, TV2.B16, TV2.B16; \
VEOR NVR4.B16, NVR3.B16, NVR3.B16; \
VORR NVR2.B16, TV4.B16, TV4.B16; \
VEOR TV3.B16, TV2.B16, TV2.B16; \
VADD H.S4, D.S4, D.S4; \
VADD NVR3.S4, H.S4, H.S4; \
VEOR TV4.B16, TV2.B16, TV2.B16; \
VADD TV2.S4, H.S4, H.S4
#define eight_4_rounds_and_sched(A, B, C, D, E, F, G, H, offset) \
round_4_and_sched(A, B, C, D, E, F, G, H, $0x4e641cdf, offset + 0*16); \
round_4_and_sched(H, A, B, C, D, E, F, G, $0x4e631cbf, offset + 1*16); \
round_4_and_sched(G, H, A, B, C, D, E, F, $0x4e621c9f, offset + 2*16); \
round_4_and_sched(F, G, H, A, B, C, D, E, $0x4e611c7f, offset + 3*16); \
round_4_and_sched(E, F, G, H, A, B, C, D, $0x4e601c5f, offset + 4*16); \
round_4_and_sched(D, E, F, G, H, A, B, C, $0x4e671c3f, offset + 5*16); \
round_4_and_sched(C, D, E, F, G, H, A, B, $0x4e661c1f, offset + 6*16); \
round_4_and_sched(B, C, D, E, F, G, H, A, $0x4e651cff, offset + 7*16)
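// round_4_padding is the same round without any scheduling: the second block
// of a 64-byte message is constant, so W[t]+K[t] for all 64 rounds is read
// precomputed from offset(padding) (_PADDING_4).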
#define round_4_padding(A, B, C, D, E, F, G, H, bicword, offset) \
VUSHR $6, E.S4, TV1.S4; \
VSHL $(32-6), E.S4, TV2.S4; \
VUSHR $11, E.S4, NVR2.S4; \
VSHL $(32-11), E.S4, NVR1.S4; \
VAND F.B16, E.B16, TV3.B16; \
WORD bicword; \
VORR TV2.B16, TV1.B16, TV1.B16; \
VUSHR $25, E.S4, TV2.S4; \
VSHL $(32-25), E.S4, NVR3.S4; \
VORR NVR1.B16, NVR2.B16, NVR1.B16; \
VEOR TV4.B16, TV3.B16, TV3.B16; \
VORR NVR3.B16, TV2.B16, TV2.B16; \
VEOR C.B16, A.B16, NVR3.B16; \
VEOR NVR1.B16, TV1.B16, TV1.B16; \
VADD TV3.S4, H.S4, H.S4; \
VUSHR $2, A.S4, TV3.S4; \
FMOVQ offset(padding), QR2; \
VAND B.B16, NVR3.B16, NVR3.B16; \
VSHL $(32-2), A.S4, NVR4.S4; \
VEOR TV2.B16, TV1.B16, TV1.B16; \
VUSHR $13, A.S4, TV2.S4; \
VSHL $(32-13), A.S4, NVR1.S4; \
VADD NVR2.S4, H.S4, H.S4; \
VORR NVR4.B16, TV3.B16, TV3.B16; \
VAND C.B16, A.B16, NVR4.B16; \
VUSHR $22, A.S4, TV4.S4; \
VSHL $(32-22), A.S4, NVR2.S4; \
VORR NVR1.B16, TV2.B16, TV2.B16; \
VADD TV1.S4, H.S4, H.S4; \
VEOR NVR4.B16, NVR3.B16, NVR3.B16; \
VORR NVR2.B16, TV4.B16, TV4.B16; \
VEOR TV3.B16, TV2.B16, TV2.B16; \
VADD H.S4, D.S4, D.S4; \
VADD NVR3.S4, H.S4, H.S4; \
VEOR TV4.B16, TV2.B16, TV2.B16; \
VADD TV2.S4, H.S4, H.S4
#define eight_4_rounds_padding(A, B, C, D, E, F, G, H, offset) \
round_4_padding(A, B, C, D, E, F, G, H, $0x4e641cdf, offset + 0*16); \
round_4_padding(H, A, B, C, D, E, F, G, $0x4e631cbf, offset + 1*16); \
round_4_padding(G, H, A, B, C, D, E, F, $0x4e621c9f, offset + 2*16); \
round_4_padding(F, G, H, A, B, C, D, E, $0x4e611c7f, offset + 3*16); \
round_4_padding(E, F, G, H, A, B, C, D, $0x4e601c5f, offset + 4*16); \
round_4_padding(D, E, F, G, H, A, B, C, $0x4e671c3f, offset + 5*16); \
round_4_padding(C, D, E, F, G, H, A, B, $0x4e661c1f, offset + 6*16); \
round_4_padding(B, C, D, E, F, G, H, A, $0x4e651cff, offset + 7*16)
// Definitions for the ARMv8 SHA-2 crypto-extensions path
#define check_shani R19
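// HASHUPDATE performs one quad-round step with the SHA-2 crypto extensions:
// SHA256H advances the ABCD half of the state (V2) and SHA256H2 the EFGH
// half (V3); V8 carries the copy of ABCD that SHA256H2 needs, refreshed from
// the new V2 for the next step.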
#define HASHUPDATE(word) \
SHA256H word, V3, V2; \
SHA256H2 word, V8, V3; \
VMOV V2.B16, V8.B16
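// The argument layout (digests+0, p_base+8, count+32; 36 bytes in total)
// suggests a Go prototype along the lines of
//   func _hash(digests *byte, p []byte, count uint32)
// where p holds count 64-byte chunks and digests receives count 32-byte
// hashes. The 1024-byte frame is scratch space for the message schedule.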
TEXT ·_hash(SB), 0, $1024-36
MOVD digests+0(FP), OUTPUT_PTR
MOVD p_base+8(FP), DATA_PTR
MOVWU count+32(FP), NUM_BLKS
MOVBU ·hasShani(SB), check_shani
CBNZ check_shani, shani
arm_x4:
CMPW $4, NUM_BLKS
BLO arm_x1
MOVD $_PADDING_4<>(SB), padding
MOVD $_K256_4<>(SB), k256
MOVD $_DIGEST_4<>(SB), digest
ADD $64, digest, digest2
MOVD $64, post64
MOVD $32, post32
MOVD $-80, postminus80
MOVD $-176, postminus176
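// post64/post32/postminus80/postminus176 are stride registers consumed by
// the WORD-encoded loads and stores below to walk the four interleaved
// input chunks and the four output digests.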
arm_x4_loop:
CMPW $4, NUM_BLKS
BLO arm_x1
VLD1 (digest), [V0.S4, V1.S4, V2.S4, V3.S4]
VLD1 (digest2), [V4.S4, V5.S4, V6.S4, V7.S4]
// First 16 rounds
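// The WORD directives below appear to encode LD4 single-structure loads that
// the assembler cannot express: each one pulls 16 bytes of one chunk into a
// single lane of M1..M4, and the group of four transposes the same 16-byte
// slice of all four chunks, advancing DATA_PTR via post64/postminus176.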
WORD $0xde7a030
WORD $0xde7b030
WORD $0x4de7a030
WORD $0x4de9b030
VREV32 M1.B16, M1.B16
VREV32 M2.B16, M2.B16
VREV32 M3.B16, M3.B16
VREV32 M4.B16, M4.B16
eight_4_roundsA(V0, V1, V2, V3, V4, V5, V6, V7, M1, M2, M3, M4, MQ1, MQ2, MQ3, MQ4, 0x00)
WORD $0xde7a030
WORD $0xde7b030
WORD $0x4de7a030
WORD $0x4de9b030
VREV32 M1.B16, M1.B16
VREV32 M2.B16, M2.B16
VREV32 M3.B16, M3.B16
VREV32 M4.B16, M4.B16
eight_4_roundsB(V4, V5, V6, V7, V0, V1, V2, V3, M1, M2, M3, M4, MQ1, MQ2, MQ3, MQ4, 0x40)
WORD $0xde7a030
WORD $0xde7b030
WORD $0x4de7a030
WORD $0x4de9b030
VREV32 M1.B16, M1.B16
VREV32 M2.B16, M2.B16
VREV32 M3.B16, M3.B16
VREV32 M4.B16, M4.B16
eight_4_roundsA(V0, V1, V2, V3, V4, V5, V6, V7, M1, M2, M3, M4, MQ1, MQ2, MQ3, MQ4, 0x80)
WORD $0xde7a030
WORD $0xde7b030
WORD $0x4de7a030
WORD $0x4de9b030
VREV32 M1.B16, M1.B16
VREV32 M2.B16, M2.B16
VREV32 M3.B16, M3.B16
VREV32 M4.B16, M4.B16
eight_4_roundsB(V4, V5, V6, V7, V0, V1, V2, V3, M1, M2, M3, M4, MQ1, MQ2, MQ3, MQ4, 0xc0)
eight_4_rounds_and_sched(V0, V1, V2, V3, V4, V5, V6, V7, 0x100)
eight_4_rounds_and_sched(V0, V1, V2, V3, V4, V5, V6, V7, 0x180)
eight_4_rounds_and_sched(V0, V1, V2, V3, V4, V5, V6, V7, 0x200)
eight_4_rounds_and_sched(V0, V1, V2, V3, V4, V5, V6, V7, 0x280)
eight_4_rounds_and_sched(V0, V1, V2, V3, V4, V5, V6, V7, 0x300)
eight_4_rounds_and_sched(V0, V1, V2, V3, V4, V5, V6, V7, 0x380)
// add previous digest
VLD1 (digest), [M1.S4, M2.S4, M3.S4, M4.S4]
VLD1 (digest2), [TV5.S4, TV6.S4, TV7.S4, TV8.S4]
VADD M1.S4, V0.S4, V0.S4
VADD M2.S4, V1.S4, V1.S4
VADD M3.S4, V2.S4, V2.S4
VADD M4.S4, V3.S4, V3.S4
VADD TV5.S4, V4.S4, V4.S4
VADD TV6.S4, V5.S4, V5.S4
VADD TV7.S4, V6.S4, V6.S4
VADD TV8.S4, V7.S4, V7.S4
// save state
VMOV V0.B16, M1.B16
VMOV V1.B16, M2.B16
VMOV V2.B16, M3.B16
VMOV V3.B16, M4.B16
VMOV V4.B16, TV5.B16
VMOV V5.B16, TV6.B16
VMOV V6.B16, TV7.B16
VMOV V7.B16, TV8.B16
// rounds with padding
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x000)
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x080)
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x100)
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x180)
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x200)
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x280)
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x300)
eight_4_rounds_padding(V0, V1, V2, V3, V4, V5, V6, V7, 0x380)
// add previous digest
VADD M1.S4, V0.S4, V0.S4
VADD M2.S4, V1.S4, V1.S4
VADD M3.S4, V2.S4, V2.S4
VADD M4.S4, V3.S4, V3.S4
VADD TV5.S4, V4.S4, V4.S4
VADD TV6.S4, V5.S4, V5.S4
VADD TV7.S4, V6.S4, V6.S4
VADD TV8.S4, V7.S4, V7.S4
	// change endianness, transpose and store
VREV32 V0.B16, V0.B16
VREV32 V1.B16, V1.B16
VREV32 V2.B16, V2.B16
VREV32 V3.B16, V3.B16
VREV32 V4.B16, V4.B16
VREV32 V5.B16, V5.B16
VREV32 V6.B16, V6.B16
VREV32 V7.B16, V7.B16
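// The WORD directives below appear to encode ST4 single-structure stores,
// scattering one lane of V0..V7 (one finished digest) at a time to
// OUTPUT_PTR via the post32/postminus80 strides.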
WORD $0xdaaa000
WORD $0xdaab000
WORD $0x4daaa000
WORD $0x4dabb000
WORD $0xdaaa004
WORD $0xdaab004
WORD $0x4daaa004
WORD $0x4dbfb004
ADD $192, DATA_PTR, DATA_PTR
SUBW $4, NUM_BLKS, NUM_BLKS
JMP arm_x4_loop
arm_x1:
	VMOV ZR, VZ.S4 // clear VZ (ZR is the architectural zero register)
MOVD $_DIGEST_1<>(SB), digest
MOVD $_PADDING_1<>(SB), padding
ADD NUM_BLKS<<5, OUTPUT_PTR, last
arm_x1_loop:
CMP OUTPUT_PTR, last
BEQ epilog
// Load one block
VLD1.P 64(DATA_PTR), [VR0.S4, VR1.S4, VR2.S4, VR3.S4]
MOVD $_K256_1<>(SB), k256
	// change endianness
VREV32 VR0.B16, VR0.B16
VREV32 VR1.B16, VR1.B16
VREV32 VR2.B16, VR2.B16
VREV32 VR3.B16, VR3.B16
// load initial digest
LDPW (digest), (A_, B_)
LDPW 8(digest), (C_, D_)
LDPW 16(digest), (E_, F_)
LDPW 24(digest), (G_, H_)
// First 48 rounds
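	// Each group below adds the round constants to the current message words
	// (VADD into KV*), spills the 16-byte result to the stack, and runs
	// four_rounds_sched, which consumes W+K from 0..12(RSP) while scheduling
	// the next message words in the vector registers.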
VLD1.P 64(k256), [KV0.S4, KV1.S4, KV2.S4, KV3.S4]
VADD VR0.S4, KV0.S4, KV0.S4
FMOVQ KQ0, (RSP)
four_rounds_sched(A_, B_, C_, D_, E_, F_, G_, H_, VR0, VR1, VR2, VR3)
VADD VR1.S4, KV1.S4, KV1.S4
FMOVQ KQ1, (RSP)
four_rounds_sched(E_, F_, G_, H_, A_, B_, C_, D_, VR1, VR2, VR3, VR0)
VADD VR2.S4, KV2.S4, KV2.S4
FMOVQ KQ2, (RSP)
four_rounds_sched(A_, B_, C_, D_, E_, F_, G_, H_, VR2, VR3, VR0, VR1)
VADD VR3.S4, KV3.S4, KV3.S4
FMOVQ KQ3, (RSP)
four_rounds_sched(E_, F_, G_, H_, A_, B_, C_, D_, VR3, VR0, VR1, VR2)
VLD1.P 64(k256), [KV0.S4, KV1.S4, KV2.S4, KV3.S4]
VADD VR0.S4, KV0.S4, KV0.S4
FMOVQ KQ0, (RSP)
four_rounds_sched(A_, B_, C_, D_, E_, F_, G_, H_, VR0, VR1, VR2, VR3)
VADD VR1.S4, KV1.S4, KV1.S4
FMOVQ KQ1, (RSP)
four_rounds_sched(E_, F_, G_, H_, A_, B_, C_, D_, VR1, VR2, VR3, VR0)
VADD VR2.S4, KV2.S4, KV2.S4
FMOVQ KQ2, (RSP)
four_rounds_sched(A_, B_, C_, D_, E_, F_, G_, H_, VR2, VR3, VR0, VR1)
VADD VR3.S4, KV3.S4, KV3.S4
FMOVQ KQ3, (RSP)
four_rounds_sched(E_, F_, G_, H_, A_, B_, C_, D_, VR3, VR0, VR1, VR2)
VLD1.P 64(k256), [KV0.S4, KV1.S4, KV2.S4, KV3.S4]
VADD VR0.S4, KV0.S4, KV0.S4
FMOVQ KQ0, (RSP)
four_rounds_sched(A_, B_, C_, D_, E_, F_, G_, H_, VR0, VR1, VR2, VR3)
VADD VR1.S4, KV1.S4, KV1.S4
FMOVQ KQ1, (RSP)
four_rounds_sched(E_, F_, G_, H_, A_, B_, C_, D_, VR1, VR2, VR3, VR0)
VADD VR2.S4, KV2.S4, KV2.S4
FMOVQ KQ2, (RSP)
four_rounds_sched(A_, B_, C_, D_, E_, F_, G_, H_, VR2, VR3, VR0, VR1)
VADD VR3.S4, KV3.S4, KV3.S4
FMOVQ KQ3, (RSP)
four_rounds_sched(E_, F_, G_, H_, A_, B_, C_, D_, VR3, VR0, VR1, VR2)
	// Last 16 rounds
VLD1.P 64(k256), [KV0.S4, KV1.S4, KV2.S4, KV3.S4]
VADD VR0.S4, KV0.S4, KV0.S4
FMOVQ KQ0, (RSP)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, RSP, 0)
VADD VR1.S4, KV1.S4, KV1.S4
FMOVQ KQ1, (RSP)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, RSP, 0)
VADD VR2.S4, KV2.S4, KV2.S4
FMOVQ KQ2, (RSP)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, RSP, 0)
VADD VR3.S4, KV3.S4, KV3.S4
FMOVQ KQ3, (RSP)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, RSP, 0)
// rounds with padding
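	// Add the initial digest to form the intermediate hash, park it on the
	// stack, then run the 64 padding-block rounds with W+K read precomputed
	// from _PADDING_1.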
LDPW (digest), (T1, T2)
LDPW 8(digest), (T3, T4)
ADDW T1, A_, A_
ADDW T2, B_, B_
ADDW T3, C_, C_
ADDW T4, D_, D_
LDPW 16(digest), (T1, T2)
STPW (A_, B_), (RSP)
STPW (C_, D_), 8(RSP)
LDPW 24(digest), (T3, T4)
ADDW T1, E_, E_
ADDW T2, F_, F_
ADDW T3, G_, G_
STPW (E_, F_), 16(RSP)
ADDW T4, H_, H_
STPW (G_, H_), 24(RSP)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0x00)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0x10)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0x20)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0x30)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0x40)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0x50)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0x60)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0x70)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0x80)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0x90)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0xa0)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0xb0)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0xc0)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0xd0)
four_rounds(A_, B_, C_, D_, E_, F_, G_, H_, padding, 0xe0)
four_rounds(E_, F_, G_, H_, A_, B_, C_, D_, padding, 0xf0)
LDPW (RSP), (T1, T2)
LDPW 8(RSP), (T3, T4)
ADDW T1, A_, A_
ADDW T2, B_, B_
REV32 A_, A_
REV32 B_, B_
ADDW T3, C_, C_
ADDW T4, D_, D_
STPW.P (A_, B_), 8(OUTPUT_PTR)
LDPW 16(RSP), (T1, T2)
REV32 C_, C_
REV32 D_, D_
STPW.P (C_, D_), 8(OUTPUT_PTR)
LDPW 24(RSP), (T3, T4)
ADDW T1, E_, E_
ADDW T2, F_, F_
REV32 E_, E_
REV32 F_, F_
ADDW T3, G_, G_
ADDW T4, H_, H_
REV32 G_, G_
REV32 H_, H_
STPW.P (E_, F_), 8(OUTPUT_PTR)
STPW.P (G_, H_), 8(OUTPUT_PTR)
JMP arm_x1_loop
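// SHA-2 crypto-extensions path: one chunk per iteration, using
// SHA256SU0/SHA256SU1 for the message schedule and SHA256H/SHA256H2 (via
// HASHUPDATE) for the rounds; the padding block uses pre-scheduled W+K
// constants from _PADDING_1, so no scheduling is needed there.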
shani:
MOVD $_DIGEST_1<>(SB), digest
MOVD $_PADDING_1<>(SB), padding
MOVD $_K256_1<>(SB), k256
ADD NUM_BLKS<<5, OUTPUT_PTR, last
// load incoming digest
VLD1 (digest), [V0.S4, V1.S4]
shani_loop:
CMP OUTPUT_PTR, last
BEQ epilog
// load all K constants
VLD1.P 64(k256), [V16.S4, V17.S4, V18.S4, V19.S4]
VLD1.P 64(k256), [V20.S4, V21.S4, V22.S4, V23.S4]
VLD1.P 64(k256), [V24.S4, V25.S4, V26.S4, V27.S4]
VLD1 (k256), [V28.S4, V29.S4, V30.S4, V31.S4]
SUB $192, k256, k256
// load one block
VLD1.P 64(DATA_PTR), [V4.S4, V5.S4, V6.S4, V7.S4]
VMOV V0.B16, V2.B16
VMOV V1.B16, V3.B16
VMOV V2.B16, V8.B16
// reverse endianness
VREV32 V4.B16, V4.B16
VREV32 V5.B16, V5.B16
VREV32 V6.B16, V6.B16
VREV32 V7.B16, V7.B16
VADD V16.S4, V4.S4, V9.S4
SHA256SU0 V5.S4, V4.S4
HASHUPDATE(V9.S4)
VADD V17.S4, V5.S4, V9.S4
SHA256SU0 V6.S4, V5.S4
SHA256SU1 V7.S4, V6.S4, V4.S4
HASHUPDATE(V9.S4)
VADD V18.S4, V6.S4, V9.S4
SHA256SU0 V7.S4, V6.S4
SHA256SU1 V4.S4, V7.S4, V5.S4
HASHUPDATE(V9.S4)
VADD V19.S4, V7.S4, V9.S4
SHA256SU0 V4.S4, V7.S4
SHA256SU1 V5.S4, V4.S4, V6.S4
HASHUPDATE(V9.S4)
VADD V20.S4, V4.S4, V9.S4
SHA256SU0 V5.S4, V4.S4
SHA256SU1 V6.S4, V5.S4, V7.S4
HASHUPDATE(V9.S4)
VADD V21.S4, V5.S4, V9.S4
SHA256SU0 V6.S4, V5.S4
SHA256SU1 V7.S4, V6.S4, V4.S4
HASHUPDATE(V9.S4)
VADD V22.S4, V6.S4, V9.S4
SHA256SU0 V7.S4, V6.S4
SHA256SU1 V4.S4, V7.S4, V5.S4
HASHUPDATE(V9.S4)
VADD V23.S4, V7.S4, V9.S4
SHA256SU0 V4.S4, V7.S4
SHA256SU1 V5.S4, V4.S4, V6.S4
HASHUPDATE(V9.S4)
VADD V24.S4, V4.S4, V9.S4
SHA256SU0 V5.S4, V4.S4
SHA256SU1 V6.S4, V5.S4, V7.S4
HASHUPDATE(V9.S4)
VADD V25.S4, V5.S4, V9.S4
SHA256SU0 V6.S4, V5.S4
SHA256SU1 V7.S4, V6.S4, V4.S4
HASHUPDATE(V9.S4)
VADD V26.S4, V6.S4, V9.S4
SHA256SU0 V7.S4, V6.S4
SHA256SU1 V4.S4, V7.S4, V5.S4
HASHUPDATE(V9.S4)
VADD V27.S4, V7.S4, V9.S4
SHA256SU0 V4.S4, V7.S4
SHA256SU1 V5.S4, V4.S4, V6.S4
HASHUPDATE(V9.S4)
VADD V28.S4, V4.S4, V9.S4
HASHUPDATE(V9.S4)
SHA256SU1 V6.S4, V5.S4, V7.S4
VADD V29.S4, V5.S4, V9.S4
HASHUPDATE(V9.S4)
VADD V30.S4, V6.S4, V9.S4
HASHUPDATE(V9.S4)
VADD V31.S4, V7.S4, V9.S4
HASHUPDATE(V9.S4)
// Add initial digest
VADD V2.S4, V0.S4, V2.S4
VADD V3.S4, V1.S4, V3.S4
// Back it up
VMOV V2.B16, V10.B16
VMOV V3.B16, V11.B16
// Rounds with padding
// load prescheduled constants
VLD1.P 64(padding), [V16.S4, V17.S4, V18.S4, V19.S4]
VLD1.P 64(padding), [V20.S4, V21.S4, V22.S4, V23.S4]
VMOV V2.B16, V8.B16
VLD1.P 64(padding), [V24.S4, V25.S4, V26.S4, V27.S4]
VLD1 (padding), [V28.S4, V29.S4, V30.S4, V31.S4]
SUB $192, padding, padding
HASHUPDATE(V16.S4)
HASHUPDATE(V17.S4)
HASHUPDATE(V18.S4)
HASHUPDATE(V19.S4)
HASHUPDATE(V20.S4)
HASHUPDATE(V21.S4)
HASHUPDATE(V22.S4)
HASHUPDATE(V23.S4)
HASHUPDATE(V24.S4)
HASHUPDATE(V25.S4)
HASHUPDATE(V26.S4)
HASHUPDATE(V27.S4)
HASHUPDATE(V28.S4)
HASHUPDATE(V29.S4)
HASHUPDATE(V30.S4)
HASHUPDATE(V31.S4)
// add backed up digest
VADD V2.S4, V10.S4, V2.S4
VADD V3.S4, V11.S4, V3.S4
VREV32 V2.B16, V2.B16
VREV32 V3.B16, V3.B16
VST1.P [V2.S4, V3.S4], 32(OUTPUT_PTR)
JMP shani_loop
epilog:
RET
// Data section
DATA _K256_1<>+0x00(SB)/4, $0x428a2f98
DATA _K256_1<>+0x04(SB)/4, $0x71374491
DATA _K256_1<>+0x08(SB)/4, $0xb5c0fbcf
DATA _K256_1<>+0x0c(SB)/4, $0xe9b5dba5
DATA _K256_1<>+0x10(SB)/4, $0x3956c25b
DATA _K256_1<>+0x14(SB)/4, $0x59f111f1
DATA _K256_1<>+0x18(SB)/4, $0x923f82a4
DATA _K256_1<>+0x1c(SB)/4, $0xab1c5ed5
DATA _K256_1<>+0x20(SB)/4, $0xd807aa98
DATA _K256_1<>+0x24(SB)/4, $0x12835b01
DATA _K256_1<>+0x28(SB)/4, $0x243185be
DATA _K256_1<>+0x2c(SB)/4, $0x550c7dc3
DATA _K256_1<>+0x30(SB)/4, $0x72be5d74
DATA _K256_1<>+0x34(SB)/4, $0x80deb1fe
DATA _K256_1<>+0x38(SB)/4, $0x9bdc06a7
DATA _K256_1<>+0x3c(SB)/4, $0xc19bf174
DATA _K256_1<>+0x40(SB)/4, $0xe49b69c1
DATA _K256_1<>+0x44(SB)/4, $0xefbe4786
DATA _K256_1<>+0x48(SB)/4, $0x0fc19dc6
DATA _K256_1<>+0x4c(SB)/4, $0x240ca1cc
DATA _K256_1<>+0x50(SB)/4, $0x2de92c6f
DATA _K256_1<>+0x54(SB)/4, $0x4a7484aa
DATA _K256_1<>+0x58(SB)/4, $0x5cb0a9dc
DATA _K256_1<>+0x5c(SB)/4, $0x76f988da
DATA _K256_1<>+0x60(SB)/4, $0x983e5152
DATA _K256_1<>+0x64(SB)/4, $0xa831c66d
DATA _K256_1<>+0x68(SB)/4, $0xb00327c8
DATA _K256_1<>+0x6c(SB)/4, $0xbf597fc7
DATA _K256_1<>+0x70(SB)/4, $0xc6e00bf3
DATA _K256_1<>+0x74(SB)/4, $0xd5a79147
DATA _K256_1<>+0x78(SB)/4, $0x06ca6351
DATA _K256_1<>+0x7c(SB)/4, $0x14292967
DATA _K256_1<>+0x80(SB)/4, $0x27b70a85
DATA _K256_1<>+0x84(SB)/4, $0x2e1b2138
DATA _K256_1<>+0x88(SB)/4, $0x4d2c6dfc
DATA _K256_1<>+0x8c(SB)/4, $0x53380d13
DATA _K256_1<>+0x90(SB)/4, $0x650a7354
DATA _K256_1<>+0x94(SB)/4, $0x766a0abb
DATA _K256_1<>+0x98(SB)/4, $0x81c2c92e
DATA _K256_1<>+0x9c(SB)/4, $0x92722c85
DATA _K256_1<>+0xa0(SB)/4, $0xa2bfe8a1
DATA _K256_1<>+0xa4(SB)/4, $0xa81a664b
DATA _K256_1<>+0xa8(SB)/4, $0xc24b8b70
DATA _K256_1<>+0xac(SB)/4, $0xc76c51a3
DATA _K256_1<>+0xb0(SB)/4, $0xd192e819
DATA _K256_1<>+0xb4(SB)/4, $0xd6990624
DATA _K256_1<>+0xb8(SB)/4, $0xf40e3585
DATA _K256_1<>+0xbc(SB)/4, $0x106aa070
DATA _K256_1<>+0xc0(SB)/4, $0x19a4c116
DATA _K256_1<>+0xc4(SB)/4, $0x1e376c08
DATA _K256_1<>+0xc8(SB)/4, $0x2748774c
DATA _K256_1<>+0xcc(SB)/4, $0x34b0bcb5
DATA _K256_1<>+0xd0(SB)/4, $0x391c0cb3
DATA _K256_1<>+0xd4(SB)/4, $0x4ed8aa4a
DATA _K256_1<>+0xd8(SB)/4, $0x5b9cca4f
DATA _K256_1<>+0xdc(SB)/4, $0x682e6ff3
DATA _K256_1<>+0xe0(SB)/4, $0x748f82ee
DATA _K256_1<>+0xe4(SB)/4, $0x78a5636f
DATA _K256_1<>+0xe8(SB)/4, $0x84c87814
DATA _K256_1<>+0xec(SB)/4, $0x8cc70208
DATA _K256_1<>+0xf0(SB)/4, $0x90befffa
DATA _K256_1<>+0xf4(SB)/4, $0xa4506ceb
DATA _K256_1<>+0xf8(SB)/4, $0xbef9a3f7
DATA _K256_1<>+0xfc(SB)/4, $0xc67178f2
GLOBL _K256_1<>(SB),(NOPTR+RODATA),$256
DATA _PADDING_1<>+0x00(SB)/4, $0xc28a2f98
DATA _PADDING_1<>+0x04(SB)/4, $0x71374491
DATA _PADDING_1<>+0x08(SB)/4, $0xb5c0fbcf
DATA _PADDING_1<>+0x0c(SB)/4, $0xe9b5dba5
DATA _PADDING_1<>+0x10(SB)/4, $0x3956c25b
DATA _PADDING_1<>+0x14(SB)/4, $0x59f111f1
DATA _PADDING_1<>+0x18(SB)/4, $0x923f82a4
DATA _PADDING_1<>+0x1c(SB)/4, $0xab1c5ed5
DATA _PADDING_1<>+0x20(SB)/4, $0xd807aa98
DATA _PADDING_1<>+0x24(SB)/4, $0x12835b01
DATA _PADDING_1<>+0x28(SB)/4, $0x243185be
DATA _PADDING_1<>+0x2c(SB)/4, $0x550c7dc3
DATA _PADDING_1<>+0x30(SB)/4, $0x72be5d74
DATA _PADDING_1<>+0x34(SB)/4, $0x80deb1fe
DATA _PADDING_1<>+0x38(SB)/4, $0x9bdc06a7
DATA _PADDING_1<>+0x3c(SB)/4, $0xc19bf374