// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <xnnpack/assembly.h>

# void xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_4x8__aarch64_neonfp16arith_ld64(
#     size_t mr,                 x0
#     size_t nc,                 x1
#     size_t kc,                 x2 / x0
#     const void*restrict a,     x3
#     size_t a_stride,           x4
#     const void*restrict w,     x5
#     void*restrict c,           x6
#     size_t cm_stride,          x7
#     size_t cn_stride,          [sp] -> x14
$if INC:
  #     const float*restrict acc,  [sp + 8] -> x15
  #     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 16] -> (x8)
$else:
  #     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 8] -> (x8)

# d8-d15, x19-x30 need to be preserved if used.  x18 is reserved by the OS.

# A pointers
#  x3 a0
# x11 a1
# x12 a2
#  x4 a3 / a_stride

# C pointers
#  x6 c0
#  x9 c1
# x10 c2
#  x7 c3 / cm_stride

# Vector register usage
# A0   v0
# A1   v1
# A2   v2
# A3   v3
# B    v20 v21 v22 v23
# C    v24
# C    v26
# C    v28
# C    v30
# Clamp v4, v5
# unused A   v6, v7 v8 v9 v10 v11
# unused B   v27

BEGIN_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_4x8__aarch64_neonfp16arith_ld64
        $if INC:
          # Load cn_stride, acc
          LDP x14, x15, [sp]
          # Load params pointer
          LDR x8, [sp, 16]
        $else:
          # Load cn_stride, params pointer
          LDP x14, x8, [sp]

        # Load params values
        LD2R {v4.8h, v5.8h}, [x8]

        # Clamp A and C pointers
        CMP x0, 2                // if mr < 2
        ADD x11, x3, x4          // a1 = a0 + a_stride
        ADD x9, x6, x7           // c1 = c0 + cm_stride
        CSEL x11, x3, x11, LO    // a1 = a0
        CSEL x9, x6, x9, LO      // c1 = c0

        ADD x12, x11, x4         // a2 = a1 + a_stride
        ADD x10, x9, x7          // c2 = c1 + cm_stride
                                 // if mr <= 2
        CSEL x12, x11, x12, LS   // a2 = a1
        CSEL x10, x9, x10, LS    // c2 = c1

        CMP x0, 4                // if mr < 4
        ADD x4, x12, x4          // a3 = a2 + a_stride
        ADD x7, x10, x7          // c3 = c2 + cm_stride
        CSEL x4, x12, x4, LO     // a3 = a2
        CSEL x7, x10, x7, LO     // c3 = c2

0:
        $if INC:
          # Load initial accumulators
          LDP q24, q26, [x15], 32
          LDP q28, q30, [x15], 32
        $else:
          # Load initial bias from w into accumulators
          LDR q24, [x5], 16
          MOV v26.16b, v24.16b
          MOV v28.16b, v24.16b
          MOV v30.16b, v24.16b

        # Is there at least 4 halffloats (8 bytes)?
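        # (Each main-loop iteration below consumes 4 fp16 values of A per row,
        # i.e. 8 bytes; bits 2 and 1 of the remaining byte count then select
        # the 2-element and 1-element remainder paths.)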
        SUBS x0, x2, 8           // k = kc - 8
        B.LO 3f

        # Main loop - 4 halffloats of A (8 bytes)
1:
        LDR d0,  [x3], 8
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR d1, [x11], 8
        LDR d2, [x12], 8
        LDR d3,  [x4], 8
        LDR q22, [x5], 16
        LDR q23, [x5], 16
        SUBS x0, x0, 8
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v24.8h, v21.8h, v0.h[1]
        FMLA v26.8h, v21.8h, v1.h[1]
        FMLA v28.8h, v21.8h, v2.h[1]
        FMLA v30.8h, v21.8h, v3.h[1]
        FMLA v24.8h, v22.8h, v0.h[2]
        FMLA v26.8h, v22.8h, v1.h[2]
        FMLA v28.8h, v22.8h, v2.h[2]
        FMLA v30.8h, v22.8h, v3.h[2]
        FMLA v24.8h, v23.8h, v0.h[3]
        FMLA v26.8h, v23.8h, v1.h[3]
        FMLA v28.8h, v23.8h, v2.h[3]
        FMLA v30.8h, v23.8h, v3.h[3]
        B.HS 1b

        # Is there a remainder? - 2 halffloats of A (4 bytes)
        TBNZ x0, 2, 4f
        # Is there a remainder? - 1 halffloat of A (2 bytes)
        TBNZ x0, 1, 5f

2:
        # Clamp
        FMAX v24.8h, v24.8h, v4.8h
        SUBS x1, x1, 8
        FMAX v26.8h, v26.8h, v4.8h
        FMAX v28.8h, v28.8h, v4.8h
        FMAX v30.8h, v30.8h, v4.8h
        FMIN v24.8h, v24.8h, v5.8h
        FMIN v26.8h, v26.8h, v5.8h
        FMIN v28.8h, v28.8h, v5.8h
        FMIN v30.8h, v30.8h, v5.8h

        # Store full 4 x 8
        B.LO 6f

        $if INC:
          ST1 {v30.16b},  [x7], x14
          SUB  x3,  x3, x2         // a0 -= kc
          ST1 {v28.16b}, [x10], x14
          SUB x11, x11, x2         // a1 -= kc
          ST1 {v26.16b},  [x9], x14
          SUB x12, x12, x2         // a2 -= kc
          ST1 {v24.16b},  [x6], x14
          SUB  x4,  x4, x2         // a3 -= kc
        $else:
          ST1 {v24.16b},  [x6], x14
          SUB  x3,  x3, x2         // a0 -= kc
          ST1 {v26.16b},  [x9], x14
          SUB x11, x11, x2         // a1 -= kc
          ST1 {v28.16b}, [x10], x14
          SUB x12, x12, x2         // a2 -= kc
          ST1 {v30.16b},  [x7], x14
          SUB  x4,  x4, x2         // a3 -= kc

        B.HI 0b
        RET

3:
        TBZ x0, 2, 5f
4:
        # Remainder - 2 halffloats of A (4 bytes)
        LDR s0,  [x3], 4
        LDR q20, [x5], 16
        LDR q21, [x5], 16
        LDR s1, [x11], 4
        LDR s2, [x12], 4
        LDR s3,  [x4], 4
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        FMLA v24.8h, v21.8h, v0.h[1]
        FMLA v26.8h, v21.8h, v1.h[1]
        FMLA v28.8h, v21.8h, v2.h[1]
        FMLA v30.8h, v21.8h, v3.h[1]

        TBZ x0, 1, 2b
5:
        # Remainder - 1 halffloat of A (2 bytes)
        LDR h0,  [x3], 2
        LDR q20, [x5], 16
        LDR h1, [x11], 2
        LDR h2, [x12], 2
        LDR h3,  [x4], 2
        FMLA v24.8h, v20.8h, v0.h[0]
        FMLA v26.8h, v20.8h, v1.h[0]
        FMLA v28.8h, v20.8h, v2.h[0]
        FMLA v30.8h, v20.8h, v3.h[0]
        B 2b

        # Store odd width
6:
        TBZ x1, 2, 7f
        $if INC:
          STR d30,  [x7], 8
          STR d28, [x10], 8
          DUP d30, v30.d[1]
          DUP d28, v28.d[1]
          STR d26,  [x9], 8
          STR d24,  [x6], 8
          DUP d26, v26.d[1]
          DUP d24, v24.d[1]
        $else:
          STR d24,  [x6], 8
          STR d26,  [x9], 8
          DUP d24, v24.d[1]
          DUP d26, v26.d[1]
          STR d28, [x10], 8
          STR d30,  [x7], 8
          DUP d28, v28.d[1]
          DUP d30, v30.d[1]

7:
        TBZ x1, 1, 8f
        $if INC:
          STR s30,  [x7], 4
          STR s28, [x10], 4
          DUP s30, v30.s[1]
          DUP s28, v28.s[1]
          STR s26,  [x9], 4
          STR s24,  [x6], 4
          DUP s26, v26.s[1]
          DUP s24, v24.s[1]
        $else:
          STR s24,  [x6], 4
          STR s26,  [x9], 4
          DUP s24, v24.s[1]
          DUP s26, v26.s[1]
          STR s28, [x10], 4
          STR s30,  [x7], 4
          DUP s28, v28.s[1]
          DUP s30, v30.s[1]

8:
        TBZ x1, 0, 9f
        $if INC:
          STR h30,  [x7]
          STR h28, [x10]
          STR h26,  [x9]
          STR h24,  [x6]
        $else:
          STR h24,  [x6]
          STR h26,  [x9]
          STR h28, [x10]
          STR h30,  [x7]
9:
        RET

END_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_4x8__aarch64_neonfp16arith_ld64

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif
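For orientation, below is a hedged C sketch (not part of XNNPACK's operator code) of how a caller might drive the non-INC instantiation of this micro-kernel over an M x N x K fp16 GEMM. The prototype mirrors the register-map comment at the top of the file: kc and the strides are byte counts, nc is an element count, and the kernel iterates over all nc columns internally (the B.HI 0b loop), so the driver only tiles over rows. The driver name gemm_f16_rows, the dense row-major stride arithmetic, and the assumption that packed_w already holds weights in XNNPACK's packed layout (8 bias values followed by the B panel for each 8-column group) are illustrative assumptions.

#include <stddef.h>
#include <stdint.h>

// Prototype as documented in the comment block at the top of this file; the
// real declaration in XNNPACK uses const union xnn_f16_minmax_params* for the
// final argument.
void xnn_f16_gemm_minmax_ukernel_4x8__aarch64_neonfp16arith_ld64(
    size_t mr, size_t nc, size_t kc,
    const void* restrict a, size_t a_stride,
    const void* restrict w,
    void* restrict c, size_t cm_stride, size_t cn_stride,
    const void* params);

// Hypothetical row-tiling driver: A is dense row-major M x K, C is dense
// row-major M x N, both IEEE fp16 stored as uint16_t.
static void gemm_f16_rows(size_t M, size_t N, size_t K,
                          const uint16_t* a, const uint16_t* packed_w,
                          uint16_t* c, const void* params) {
  const size_t a_stride  = K * sizeof(uint16_t);  // bytes between rows of A
  const size_t cm_stride = N * sizeof(uint16_t);  // bytes between rows of C
  const size_t cn_stride = 8 * sizeof(uint16_t);  // C advances one 8-column tile per store
  for (size_t m = 0; m < M; m += 4) {
    const size_t mr = M - m < 4 ? M - m : 4;      // kernel clamps its row pointers when mr < 4
    xnn_f16_gemm_minmax_ukernel_4x8__aarch64_neonfp16arith_ld64(
        mr, N, K * sizeof(uint16_t),
        a + m * K, a_stride,
        packed_w,
        c + m * N, cm_stride, cn_stride,
        params);
  }
}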