memset-archs.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/linkage.h>
#include <asm/cache.h>

/*
 * The memset implementation below is optimized to use the prefetchw and
 * prealloc instructions on a CPU with a 64B L1 data cache line
 * (L1_CACHE_SHIFT == 6). If you want to implement an optimized memset for
 * other possible L1 data cache line lengths (32B and 128B), you should
 * rewrite the code, carefully checking that no prefetchw/prealloc
 * instruction is issued for L1 cache lines that don't belong to the
 * memset area.
 */
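
/*
 * For orientation, a simplified C sketch of the strategy used below.
 * Illustrative only, not part of the build: the names are made up, the
 * sketch assumes a word-aligned destination (the real code aligns it
 * first), and the intermediate 32-byte pass of the real routine is
 * omitted.
 *
 *	#include <stddef.h>
 *
 *	static void *memset_sketch(void *dst, int c, size_t n)
 *	{
 *		unsigned char *p = dst;
 *		unsigned int v = (unsigned char)c;
 *
 *		v |= v << 8;		// 0x000000cc -> 0x0000cccc
 *		v |= v << 16;		// 0x0000cccc -> 0xcccccccc
 *
 *		while (n >= 64) {	// one 64B L1 line per pass; the real
 *					// code preallocates the *next* line
 *			for (int i = 0; i < 16; i++) {
 *				*(unsigned int *)p = v;
 *				p += 4;
 *			}
 *			n -= 64;
 *		}
 *		while (n--)		// byte tail
 *			*p++ = (unsigned char)c;
 *		return dst;
 *	}
 */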

#if L1_CACHE_SHIFT == 6

.macro PREALLOC_INSTR	reg, off
	prealloc	[\reg, \off]
.endm

.macro PREFETCHW_INSTR	reg, off
	prefetchw	[\reg, \off]
.endm

#else

.macro PREALLOC_INSTR	reg, off
.endm

.macro PREFETCHW_INSTR	reg, off
.endm

#endif
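
/*
 * With any other L1 line size the two macros above expand to nothing, so
 * the code below issues no cache-management hints at all.
 */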

ENTRY_CFI(memset)
	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
	mov.f	0, r2
;;; if size is zero
	jz.d	[blink]
	mov	r3, r0		; don't clobber ret val

;;; if length < 8
	brls.d.nt	r2, 8, .Lsmallchunk
	mov.f	lp_count, r2
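
	;; Store 1-3 bytes to word-align the destination (the lpnz below is
	;; skipped when r0 is already 32-bit aligned)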
	and.f	r4, r0, 0x03
	rsub	lp_count, r4, 4
	lpnz	@.Laligndestination
	;; LOOP BEGIN
	stb.ab	r1, [r3, 1]
	sub	r2, r2, 1
.Laligndestination:

;;; Destination is aligned
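;;; Replicate the fill byte into a full word, e.g. r1 = 0x41 yields
;;; r4 = r5 = 0x41414141 (with CONFIG_ARC_HAS_LL64 the r4:r5 pair also
;;; feeds the 64-bit std stores below)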
	and	r1, r1, 0xFF
	asl	r4, r1, 8
	or	r4, r4, r1
	asl	r5, r4, 16
	or	r5, r5, r4
	mov	r4, r5
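
	;; Split the work: lp_count gets the number of full 64-byte chunks,
	;; deliberately leaving the last 64+ bytes to the 32-byte pass and the
	;; byte tail so that PREALLOC_INSTR never touches a cache line beyond
	;; the memset area; r2 keeps the byte count of that remainder.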
	sub3	lp_count, r2, 8
	cmp	r2, 64
	bmsk.hi	r2, r2, 5
	mov.ls	lp_count, 0
	add3.hi	r2, r2, 8

;;; Convert len to Dwords, unfold x8
	lsr.f	lp_count, lp_count, 6

	lpnz	@.Lset64bytes
	;; LOOP START
	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching

#ifdef CONFIG_ARC_HAS_LL64
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
#else
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
#endif
.Lset64bytes:
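
	;; No prealloc for the remainder below: the remaining cache lines may
	;; be written only partially, and preallocating a line without fully
	;; overwriting it would clobber data outside the memset area.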
	lsr.f	lp_count, r2, 5	; Last remaining max 124 bytes
	lpnz	.Lset32bytes
	;; LOOP START
#ifdef CONFIG_ARC_HAS_LL64
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
	std.ab	r4, [r3, 8]
#else
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
	st.ab	r4, [r3, 4]
#endif
.Lset32bytes:

	and.f	lp_count, r2, 0x1F	; Last remaining 31 bytes
.Lsmallchunk:
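	;; Store the remaining bytes one at a time; this label is also the
	;; entry point taken when the requested length is small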
	lpnz	.Lcopy3bytes
	;; LOOP START
	stb.ab	r1, [r3, 1]
.Lcopy3bytes:

	j	[blink]

END_CFI(memset)
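
;;; memzero(ptr, n) is just a tail call to memset(ptr, 0, n): the arguments
;;; are shuffled into memset's registers and blink is left untouched, so
;;; memset's final "j [blink]" returns directly to memzero's caller.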
ENTRY_CFI(memzero)
	; adjust bzero args to memset args
	mov	r2, r1
	b.d	memset		; tail call, so no need to tinker with blink
	mov	r1, 0
END_CFI(memzero)