-
Notifications
You must be signed in to change notification settings - Fork 744
Expand file tree
/
Copy pathstart.S
More file actions
195 lines (167 loc) · 5.69 KB
/
start.S
File metadata and controls
195 lines (167 loc) · 5.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
/*
* Copyright (c) 2021 Travis Geiselbrecht
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#include <lk/asm.h>
.section .text.boot

// Kernel entry point (m68k, GAS MIT syntax).
// The loader may have pushed up to 4 arguments on whatever stack is active
// at entry; they are staged in d4-d7 so they survive relocation, bss
// clearing, and MMU setup, then re-pushed for the call into lk_main.
FUNCTION(_start)
// load the first 4 args that were pushed on whatever stack we have
// NOTE: assumes stack is pointing at at least readable memory
movl %sp@(4),%d4
movl %sp@(8),%d5
movl %sp@(12),%d6
movl %sp@(16),%d7
#if ARCH_DO_RELOCATION
// If we were loaded somewhere other than our linked address, copy the whole
// image (through _end) to the linked address and continue execution there.
lea %pc@(_start),%a0 // load the current address using PC relative addressing mode
movl #_start,%a1 // load the same symbol absolutely (the linked address)
cmpal %a0,%a1
beqs bss_clear // already running at the linked address, no copy needed
// load the end address for loop termination
movl #_end,%a2
// copy forwards, 4 bytes at a time
// NOTE: assumes the source and target do not overlap
0:
movel %a0@+,%a1@+
cmpal %a1,%a2
bne 0b
// branch to the new location (absolute address, so we land in the copy)
movl #bss_clear,%a0
jmp %a0@
#endif
// clear bss
bss_clear:
#if M68K_CPU >= 68020
// 020 and above have a full 32bit PC relative addressing mode.
// Since we may be using a mmu in this case, we may be operating in physical address space,
// so we need to use PC relative addressing to get the right addresses.
lea %pc@(__bss_start),%a0
lea %pc@(__bss_end),%a1
#else
// We won't be using an MMU on 68000 and 68010, so we can use absolute addresses.
movl __bss_start,%a0
movl __bss_end,%a1
#endif
cmpl %a0,%a1
beqs 1f // empty bss, skip the clear loop
// zero 4 bytes at a time
// NOTE(review): assumes __bss_start/__bss_end are 4 byte aligned — enforced
// by the linker script, presumably; confirm.
0:
clrl %a0@+
cmpal %a1,%a0
bne 0b
1:
#if M68K_CPU == 68000
// Copy vector table to 0x0, no VBR here, so vectors must live at address 0.
// 256 vectors * 4 bytes each; dbra runs d0 from 255 down through 0,
// so the loop body executes 256 times (1KB copied).
lea %pc@(exc_vectors),%a0
move.l #0,%a1
move.w #255,%d0
copy_vectab:
move.l %a0@+,%a1@+
dbra %d0,copy_vectab
#endif
#if M68K_MMU == 68040
init_mmu_68040:
// Set up DTTR0 and ITTR0 to map 0x00000000 - 0x3FFFFFFF (1GB) to 0x00000000
// Logical address base: 0x00000000, mask 0x3f000000, enable, supervisor, cacheable, copyback
// This transparent-translation window keeps the low identity mapping
// usable while the page tables are built and the MMU is switched on.
movl #0x003fa020,%d0
movec %d0,%dtt0
movec %d0,%itt0
// Set up an mapping of [0, MEMSIZE) to [KERNEL_ASPACE_BASE, KERNEL_ASPACE_BASE + MEMSIZE)
// Load the kernel page table and save back its physical address for use later
// NOTE(review): kernel_pgtable / kernel_pgtable_phys are defined outside this
// file; PC-relative lea yields their current (physical) addresses here.
lea %pc@(kernel_pgtable),%a0
lea %pc@(kernel_pgtable_phys),%a1
movl %a0,%a1@
// Set up L0 (root) entries pointing at consecutive L1 tables.
// Page table geometry constants are defined at the bottom of this file.
addl #(KERNEL_ASPACE_BASE / L0_ENTRY_RANGE * 4),%a0 // offset into the middle of the L0 table for KERNEL_ASPACE_BASE
movl #L0_ENTRIES,%d0 // d0 = number of L0 entries to fill
lea %pc@(_l1_tables),%a1 // get pointer to L1 tables
addl #0x00000003,%a1 // mark it valid
.Ll0_loop:
movl %a1,%a0@ // store it in the L0 table
addl #4,%a0 // advance to next L0 entry
addl #L1_PGTABLE_ENTRIES * 4,%a1 // advance to next L1 table
subl #1,%d0
bne .Ll0_loop
// Set up L1 (pointer) entries pointing at consecutive L2 tables
lea %pc@(_l1_tables),%a0
movl #L1_ENTRIES,%d0 // d0 = number of L1 entries to fill
lea %pc@(_l2_tables),%a1 // get pointer to L2 table
addl #0x00000003,%a1 // mark it valid
.Ll1_loop:
movl %a1,%a0@ // store it in the L1 table
addl #4,%a0 // advance to next L1 entry
addl #L2_PGTABLE_ENTRIES * 4,%a1 // advance to next L2 table
subl #1,%d0
bne .Ll1_loop
// Set up L2 (page) entries: one descriptor per page, identity mapped
lea %pc@(_l2_tables),%a0
movl #L2_ENTRIES,%d0 // d0 = number of page entries to fill
movl #0x000000083,%d1 // address 0, supervisor, writable, present
.L2_loop:
movl %d1,%a0@ // store the current entry
addl #4,%a0 // advance to next L2 entry
addl #PAGE_SIZE,%d1 // advance to next page
subl #1,%d0
bne .L2_loop
// set the supervisor root pointer (a0 holds the table's physical address
// from the PC-relative lea); urp gets the same table for now
lea %pc@(kernel_pgtable),%a0
movec %a0,%srp
movec %a0,%urp
// enable the mmu (TC bit 15 = enable)
movl #(1<<15),%d0
movec %d0,%tc
// Branch to the high memory area: absolute address, so execution continues
// at the linked (virtual) location through the new page tables
movl #.Lhigh_target,%a0
jmp %a0@
.Lhigh_target:
// Turn off DTTR0 and ITTR0; the low transparent window is no longer needed
clrl %d0
movec %d0,%dtt0
movec %d0,%itt0
#endif
// load the initial stack pointer
lea _default_stack_top,%sp
// branch into C land with 4 args off the previous stack
movl %d7,%sp@-
movl %d6,%sp@-
movl %d5,%sp@-
movl %d4,%sp@-
jsr lk_main
// if we return from main just loop forever
bra .
END_FUNCTION(_start)
.bss
.balign 4
// Default boot stack: 4KB, zero-initialized in .bss.
// _default_stack_top labels the high end; _start loads it into %sp
// just before calling lk_main (the stack grows downward).
DATA(_default_stack_base)
.skip 4096
END_DATA(_default_stack_base)
_default_stack_top:
#if M68K_MMU == 68040
// Kernel identity map MEMSIZE rounded to the next 512MB boundary (for page table purposes)
.equ MAPSIZE_ALIGN, (512 * 1024 * 1024)
.equ MAPSIZE, (MEMSIZE + MAPSIZE_ALIGN - 1) & ~(MAPSIZE_ALIGN - 1)
// Define space for page tables to set up a mapping of MAPSIZE bytes of memory at KERNEL_ASPACE_BASE
// 68040 3-level table walk: 7-bit root (L0) index, 7-bit pointer (L1) index,
// 6-bit page (L2) index, 4KB pages. These constants are consumed by the
// .Ll0_loop/.Ll1_loop/.L2_loop code in _start above.
.equ PAGE_SIZE, 4096
.equ L0_PGTABLE_ENTRIES, 128 // 7 bits
.equ L0_ENTRY_RANGE, (1<<25) // each L0 entry covers 32MB
.equ L1_PGTABLE_ENTRIES, 128 // 7 bits
.equ L1_ENTRY_RANGE, (1<<18) // each L1 entry covers 256KB
.equ L2_PGTABLE_ENTRIES, 64 // 6 bits
// Number of entries at each level to fill in order to cover MAPSIZE,
// rounded up to the next L0 entry range so all of the L1 and L2 page tables are fully used.
.equ MAPSIZE_ROUNDED, (MAPSIZE + L0_ENTRY_RANGE - 1) & ~(L0_ENTRY_RANGE - 1)
.equ L0_ENTRIES, MAPSIZE_ROUNDED / L0_ENTRY_RANGE
.equ L1_ENTRIES, MAPSIZE_ROUNDED / L1_ENTRY_RANGE
.equ L2_ENTRIES, MAPSIZE_ROUNDED / PAGE_SIZE
// L1 (pointer-level) tables, page aligned, rounded up to whole pages.
.balign 4096
DATA(_l1_tables)
.skip (L1_ENTRIES * 4 + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1) // 4 bytes each, one per 256KB section of memory
END_DATA(_l1_tables)
// L2 (page-level) tables, page aligned, rounded up to whole pages.
// NOTE: the DATA() macro already emits the _l2_tables label (same pattern as
// _l1_tables above), so no separate "_l2_tables:" label is needed here; the
// previous duplicate label has been removed.
.balign 4096
DATA(_l2_tables)
.skip (L2_ENTRIES * 4 + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1) // 4 bytes each, one per page of memory
END_DATA(_l2_tables)
#endif // M68K_MMU == 68040