forked from bobbl/libaeabi-cortexm0
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmemset.S
152 lines (118 loc) · 2.94 KB
/
memset.S
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
/* Runtime ABI for the ARM Cortex-M0
* memset.S: set memory region
*
* Copyright (c) 2013 Jörg Mische <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
.syntax unified
.text
.thumb
.cpu cortex-m0
@ void __aeabi_memclr(void *r0, size_t r1)
@
@ Set the r1 bytes beginning with *r0 to 0.
@ Implemented by zeroing the fill value and falling through to memset.
@
	.thumb_func
	.global __aeabi_memclr
__aeabi_memclr:
	eors	r2, r2			@ fill value = 0; fall through to memset

@ void __aeabi_memset(void *r0, size_t r1, int r2)
@
@ Set the r1 bytes beginning with *r0 to (unsigned char)r2.
@ Handles arbitrary alignment: stores single bytes until r0 is
@ 4-byte aligned, then continues at __aeabi_memset4.
@ Clobbers: r0, r1, r3, flags.
@
	.thumb_func
	.global __aeabi_memset
__aeabi_memset:
	@ length 0: nothing to do
	cmp	r1, #0
	beq	L_return1
	movs	r3, #1			@ if r0 is odd, store one byte to reach
	tst	r0, r3			@ halfword alignment
	beq	L_align2
	strb	r2, [r0]		@ strb uses only the low byte of r2
	adds	r0, #1
	subs	r1, #1
	beq	L_return1		@ done if that single byte was the whole request
L_align2:
	movs	r3, #2			@ if r0 is halfword- but not word-aligned,
	tst	r0, r3			@ emit two more byte stores
	beq	__aeabi_memset4
	strb	r2, [r0]
	cmp	r1, #1			@ only 1 byte requested: done after first store
	beq	L_return1
	strb	r2, [r0, #1]		@ two strb (not strh): r2 is not replicated yet
	adds	r0, #2
	subs	r1, #2
	bne	__aeabi_memset4		@ Z from subs: return when length reached 0
L_return1:
	bx	lr
@ void __aeabi_memclr4(void *r0, size_t r1)
@
@ Set the r1 bytes beginning with *r0 to 0.
@ r0 must be 4-byte-aligned
@
	.thumb_func
	.global __aeabi_memclr4
__aeabi_memclr4:
@ void __aeabi_memclr8(void *r0, size_t r1)
@
@ Set the r1 bytes beginning with *r0 to 0.
@ r0 must be 8-byte-aligned
@
	.thumb_func
	.global __aeabi_memclr8
__aeabi_memclr8:
	eors	r2, r2			@ fill value = 0; fall through to memset4
@ void __aeabi_memset4(void *r0, size_t r1, int r2)
@
@ Set the r1 bytes beginning with *r0 to (unsigned char)r2.
@ r0 must be 4-byte-aligned
@ Clobbers: r0, r1, r2, r3, flags.
@
	.thumb_func
	.global __aeabi_memset4
__aeabi_memset4:
@ void __aeabi_memset8(void *r0, size_t r1, int r2)
@
@ Set the r1 bytes beginning with *r0 to (unsigned char)r2.
@ r0 must be 8-byte-aligned
@
	.thumb_func
	.global __aeabi_memset8
__aeabi_memset8:
	@ Replicate the low byte of r2 into all 4 bytes BEFORE the length
	@ check.  memset semantics use only (unsigned char)c, but the tail
	@ below stores a halfword; if replication happened only on the
	@ len>=4 path (as before), a 2- or 3-byte fill with a value whose
	@ bits 8..31 are nonzero would write those stale bits via strh.
	@ The sequence is idempotent w.r.t. the low byte, so the fall-in
	@ from __aeabi_memset stays correct.
	lsls	r2, r2, #24
	lsrs	r3, r2, #8
	orrs	r2, r3
	lsrs	r3, r2, #16
	orrs	r2, r3
	subs	r1, #4			@ r1 = length - 4
	blo	L_last_3bytes		@ fewer than 4 bytes: tail only
L_loop:
	str	r2, [r0]		@ word-at-a-time fill
	adds	r0, #4
	subs	r1, #4
	bhs	L_loop
L_last_3bytes:				@ r1 = remaining length - 4, in [-4,-1]
	adds	r1, #2
	blo	L_one_left		@ r1 was -4 or -3: 0 or 1 byte left
	strh	r2, [r0]		@ 2 or 3 left; r0 is halfword-aligned here
	beq	L_return2		@ r1 was -2: exactly 2 bytes, done
	strb	r2, [r0, #2]		@ third byte
L_return2:
	bx	lr
L_one_left:
	adds	r1, #1
	bne	L_return3		@ r1 was -4: nothing left
	strb	r2, [r0]		@ r1 was -3: single trailing byte
L_return3:
	bx	lr