1 /*
2 * Copyright (c) 2006-2018, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 */
9
10 #include "mmu.h"
11
12 #ifdef __CC_ARM
/* Set the level-1 translation table base address (CP15 c2).
 * All TLB entries are invalidated first, and the domain access control
 * register (CP15 c3) is programmed to 0x55555555 ("client" for every
 * domain), so accesses are checked against page-table permission bits.
 */
void mmu_setttbase(rt_uint32_t i)
{
    register rt_uint32_t value;

    /* Invalidates all TLBs.Domain access is selected as
     * client by configuring domain access register,
     * in that case access controlled by permission value
     * set by page table entry
     */
    value = 0;
    /* invalidate entire unified TLB (CP15 c8, c7, 0) */
    __asm volatile
    {
        mcr p15, 0, value, c8, c7, 0
    }

    /* write domain access control (c3), then translation table base (c2) */
    value = 0x55555555;
    __asm volatile
    {
        mcr p15, 0, value, c3, c0, 0
        mcr p15, 0, i, c2, c0, 0
    }
}
35
/* Write the domain access control register (CP15 c3) with value i. */
void mmu_set_domain(rt_uint32_t i)
{
    __asm volatile
    {
        mcr p15,0, i, c3, c0, 0
    }
}
43
/* Enable the MMU: read-modify-write the CP15 control register (c1),
 * setting bit 0 (M). */
void mmu_enable()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x01
        mcr p15, 0, value, c1, c0, 0
    }
}
55
/* Disable the MMU: clear bit 0 (M) of the CP15 control register (c1). */
void mmu_disable()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x01
        mcr p15, 0, value, c1, c0, 0
    }
}
67
/* Enable the instruction cache: set bit 12 (0x1000) of the CP15
 * control register (c1). */
void mmu_enable_icache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x1000
        mcr p15, 0, value, c1, c0, 0
    }
}
79
/* Enable the data cache: set bit 2 (0x04) of the CP15 control
 * register (c1). */
void mmu_enable_dcache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x04
        mcr p15, 0, value, c1, c0, 0
    }
}
91
/* Disable the instruction cache: clear bit 12 (0x1000) of the CP15
 * control register (c1). */
void mmu_disable_icache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x1000
        mcr p15, 0, value, c1, c0, 0
    }
}
103
/* Disable the data cache: clear bit 2 (0x04) of the CP15 control
 * register (c1). */
void mmu_disable_dcache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x04
        mcr p15, 0, value, c1, c0, 0
    }
}
115
/* Enable alignment fault checking: set bit 1 (0x02) of the CP15
 * control register (c1). */
void mmu_enable_alignfault()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x02
        mcr p15, 0, value, c1, c0, 0
    }
}
127
/* Disable alignment fault checking: clear bit 1 (0x02) of the CP15
 * control register (c1). */
void mmu_disable_alignfault()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x02
        mcr p15, 0, value, c1, c0, 0
    }
}
139
/* Clean and invalidate one D-cache line selected by set/way index
 * (CP15 c7, c14, 2). */
void mmu_clean_invalidated_cache_index(int index)
{
    __asm volatile
    {
        mcr p15, 0, index, c7, c14, 2
    }
}
147
/* Clean and invalidate every D-cache line that overlaps the address
 * range [buffer, buffer + size), one cache line at a time
 * (CP15 c7, c14, 1 — by address). The start is rounded down to a
 * cache-line boundary. */
void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while(ptr < buffer + size)
    {
        __asm volatile
        {
            MCR p15, 0, ptr, c7, c14, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
163
/* Clean (write back, without invalidating) every D-cache line that
 * overlaps [buffer, buffer + size) (CP15 c7, c10, 1 — by address). */
void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while (ptr < buffer + size)
    {
        __asm volatile
        {
            MCR p15, 0, ptr, c7, c10, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
179
/* Invalidate (discard, without writing back) every D-cache line that
 * overlaps [buffer, buffer + size) (CP15 c7, c6, 1 — by address).
 * NOTE: dirty data in the range is lost; callers must have cleaned
 * first if the contents matter. */
void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while (ptr < buffer + size)
    {
        __asm volatile
        {
            MCR p15, 0, ptr, c7, c6, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
195
/* Invalidate the entire unified TLB (CP15 c8, c7, 0). */
void mmu_invalidate_tlb()
{
    register rt_uint32_t value;

    value = 0;
    __asm volatile
    {
        mcr p15, 0, value, c8, c7, 0
    }
}
206
/* Invalidate the entire instruction cache (CP15 c7, c5, 0). */
void mmu_invalidate_icache()
{
    register rt_uint32_t value;

    value = 0;

    __asm volatile
    {
        mcr p15, 0, value, c7, c5, 0
    }
}
218
219
/* Invalidate the entire data cache without cleaning (CP15 c7, c6, 0).
 * NOTE: any dirty lines are discarded. */
void mmu_invalidate_dcache_all()
{
    register rt_uint32_t value;

    value = 0;

    __asm volatile
    {
        mcr p15, 0, value, c7, c6, 0
    }
}
231 #elif defined(__GNUC__)
/* Set the level-1 translation table base address (CP15 c2).
 * All TLB entries are invalidated first, and the domain access control
 * register (CP15 c3) is programmed to 0x55555555 ("client" for every
 * domain), so accesses are checked against page-table permission bits.
 * NOTE(review): no barrier or "memory" clobber here; rt_hw_mmu_init()
 * calls this with the MMU disabled — confirm that ordering suffices
 * for any other caller. */
void mmu_setttbase(register rt_uint32_t i)
{
    register rt_uint32_t value;

    /* Invalidates all TLBs.Domain access is selected as
     * client by configuring domain access register,
     * in that case access controlled by permission value
     * set by page table entry
     */
    value = 0;
    asm volatile ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));

    value = 0x55555555;
    asm volatile ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));
    asm volatile ("mcr p15, 0, %0, c2, c0, 0"::"r"(i));
}
248
/* Write the domain access control register (CP15 c3) with value i. */
void mmu_set_domain(register rt_uint32_t i)
{
    asm volatile ("mcr p15,0, %0, c3, c0, 0": :"r" (i));
}
253
/* Enable the MMU via the CP15 control register (c1):
 *   bit 0  (M)  - MMU enable
 *   bit 23 (XP) - extended/ARMv6 page-table format, subpage AP disabled
 *   bit 13 (V)  - high exception vectors at 0xFFFF0000
 */
void mmu_enable()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i |= 0x1;
    /* Enables the extended page tables to be configured for
       the hardware page translation mechanism, Subpage AP bits disabled */
    i |= (1 << 23); /* support for ARMv6 MMU features */
    i |= (1 << 13); /* High exception vectors selected, address range = 0xFFFF0000-0xFFFF001C */

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
270
/* Disable the MMU: clear bit 0 (M) of the CP15 control register (c1). */
void mmu_disable()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i &= ~0x1;

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
283
/* Enable the instruction cache: set bit 12 (I) of the CP15 control
 * register (c1). */
void mmu_enable_icache()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i |= (1 << 12);

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
296
/* Enable the data cache: set bit 2 (C) of the CP15 control
 * register (c1). */
void mmu_enable_dcache()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i |= (1 << 2);

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
309
/* Disable the instruction cache: clear bit 12 (I) of the CP15 control
 * register (c1). */
void mmu_disable_icache()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i &= ~(1 << 12);

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
322
/* Disable the data cache: clear bit 2 (C) of the CP15 control
 * register (c1). */
void mmu_disable_dcache()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i &= ~(1 << 2);

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
335
/* Enable alignment fault checking: set bit 1 (A) of the CP15 control
 * register (c1). */
void mmu_enable_alignfault()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i |= (1 << 1);

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
348
/* Disable alignment fault checking: clear bit 1 (A) of the CP15
 * control register (c1). */
void mmu_disable_alignfault()
{
    register rt_uint32_t i;

    /* read control register */
    asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));

    i &= ~(1 << 1);

    /* write back to control register */
    asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
}
361
/* Clean and invalidate one D-cache line selected by set/way index
 * (CP15 c7, c14, 2). */
void mmu_clean_invalidated_cache_index(int index)
{
    asm volatile ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
}
366
/* Clean and invalidate every D-cache line that overlaps the address
 * range [buffer, buffer + size), one cache line at a time
 * (CP15 c7, c14, 1 — by address). The start address is rounded down
 * to a cache-line boundary first. */
void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int line;

    for (line = buffer & ~(CACHE_LINE_SIZE - 1);
         line < buffer + size;
         line += CACHE_LINE_SIZE)
    {
        asm volatile ("mcr p15, 0, %0, c7, c14, 1": :"r" (line));
    }
}
379
380
/* Clean (write back, without invalidating) every D-cache line that
 * overlaps [buffer, buffer + size) (CP15 c7, c10, 1 — by address). */
void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int line;

    for (line = buffer & ~(CACHE_LINE_SIZE - 1);
         line < buffer + size;
         line += CACHE_LINE_SIZE)
    {
        asm volatile ("mcr p15, 0, %0, c7, c10, 1": :"r" (line));
    }
}
393
/* Invalidate (discard, without writing back) every D-cache line that
 * overlaps [buffer, buffer + size) (CP15 c7, c6, 1 — by address).
 * NOTE: dirty data in the range is lost; clean first if it matters. */
void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int line;

    for (line = buffer & ~(CACHE_LINE_SIZE - 1);
         line < buffer + size;
         line += CACHE_LINE_SIZE)
    {
        asm volatile ("mcr p15, 0, %0, c7, c6, 1": :"r" (line));
    }
}
406
/* Invalidate the entire unified TLB (CP15 c8, c7, 0). */
void mmu_invalidate_tlb()
{
    asm volatile ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
}
411
/* Invalidate the entire instruction cache (CP15 c7, c5, 0). */
void mmu_invalidate_icache()
{
    asm volatile ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
}
416
/* Invalidate the entire data cache without cleaning (CP15 c7, c6, 0).
 * NOTE: any dirty lines are discarded. */
void mmu_invalidate_dcache_all()
{
    asm volatile ("mcr p15, 0, %0, c7, c6, 0": :"r" (0));
}
421 #endif
422
/* level1 page table */
/* 4096 section descriptors (one per 1MB of the 4GB space); the level-1
 * table must be 16KB-aligned so its base fits in the TTB register. */
static volatile unsigned int _pgd_table[4*1024] ALIGN(16*1024);
/*
 * level2 page table
 * RT_MMU_PTE_SIZE must be 1024*n
 */
#define RT_MMU_PTE_SIZE 4096
/* Pool of level-2 tables, handed out 1KB (256 entries) at a time by
 * build_pte_mem_desc(). NOTE(review): code that consumes this pool
 * mixes byte offsets with RT_MMU_PTE_SIZE — confirm whether the macro
 * is meant to count bytes or unsigned-int entries. */
static volatile unsigned int _pte_table[RT_MMU_PTE_SIZE] ALIGN(1*1024);
431
mmu_create_pgd(struct mem_desc * mdesc)432 void mmu_create_pgd(struct mem_desc *mdesc)
433 {
434 volatile rt_uint32_t *pTT;
435 volatile int i, nSec;
436 pTT = (rt_uint32_t *)_pgd_table + (mdesc->vaddr_start >> 20);
437 nSec = (mdesc->vaddr_end >> 20) - (mdesc->vaddr_start >> 20);
438 for(i = 0; i <= nSec; i++)
439 {
440 *pTT = mdesc->sect_attr | (((mdesc->paddr_start >> 20) + i) << 20);
441 pTT++;
442 }
443 }
444
/* Build level-2 (4KB small-page) mappings for one memory descriptor.
 * mdesc->sect_attr carries two things at once: bits 31:10 hold the
 * base address of the level-2 table chunk reserved for this descriptor
 * (installed by build_pte_mem_desc()), bits 9:0 hold the level-1
 * descriptor attribute bits. Each 4KB page of
 * [vaddr_start, vaddr_end] (inclusive) is mapped onto consecutive
 * physical pages starting at paddr_start with mdesc->page_attr. */
void mmu_create_pte(struct mem_desc *mdesc)
{
    volatile rt_uint32_t *pTT;        /* level-1 (pgd) entry for the current page */
    volatile rt_uint32_t *p_pteentry; /* level-2 (pte) entry for the current page */
    int i;
    rt_uint32_t vaddr;
    rt_uint32_t total_page = 0;
    rt_uint32_t pte_offset = 0;
    rt_uint32_t sect_attr = 0;

    /* number of 4KB pages; +1 because vaddr_end is inclusive */
    total_page = (mdesc->vaddr_end >> 12) - (mdesc->vaddr_start >> 12) + 1;
    pte_offset = mdesc->sect_attr & 0xfffffc00; /* level-2 table base address */
    sect_attr = mdesc->sect_attr & 0x3ff;       /* level-1 attribute bits */
    vaddr = mdesc->vaddr_start;

    for(i = 0; i < total_page; i++)
    {
        pTT = (rt_uint32_t *)_pgd_table + (vaddr >> 20);
        if (*pTT == 0) /* Level 1 page table item not used, now update pgd item */
        {
            /* claim the next 1KB level-2 table for this 1MB region */
            *pTT = pte_offset | sect_attr;
            p_pteentry = (rt_uint32_t *)pte_offset +
                ((vaddr & 0x000ff000) >> 12);
            pte_offset += 1024;
        }
        else /* using old Level 1 page table item */
        {
            /* level-2 table already installed: locate the entry inside it */
            p_pteentry = (rt_uint32_t *)(*pTT & 0xfffffc00) +
                ((vaddr & 0x000ff000) >> 12);
        }


        *p_pteentry = mdesc->page_attr | (((mdesc->paddr_start >> 12) + i) << 12);
        vaddr += 0x1000;
    }
}
481
/* Reserve a chunk of the static _pte_table for every PAGE_MAPPED
 * descriptor in the array: the chunk's base address is OR-ed into
 * mdesc->sect_attr so mmu_create_pte() can later install it into the
 * level-1 entries. 'size' is the number of descriptors in the array. */
static void build_pte_mem_desc(struct mem_desc *mdesc, rt_uint32_t size)
{
    rt_uint32_t pte_offset = 0; /* bytes of _pte_table handed out so far */
    rt_uint32_t nsec = 0;       /* 1MB sections covered by this descriptor */
    /* set page table */
    for (; size > 0; size--)
    {
        if (mdesc->mapped_mode == PAGE_MAPPED)
        {
            /* one 1KB level-2 table (256 entries * 4 bytes) per 1MB section */
            nsec = (RT_ALIGN(mdesc->vaddr_end, 0x100000) - RT_ALIGN_DOWN(mdesc->vaddr_start, 0x100000)) >> 20;
            mdesc->sect_attr |= (((rt_uint32_t)_pte_table)& 0xfffffc00) + pte_offset;
            pte_offset += nsec << 10;
        }
        /* NOTE(review): pte_offset counts bytes, but _pte_table holds
         * RT_MMU_PTE_SIZE unsigned ints (RT_MMU_PTE_SIZE*4 bytes), so
         * this check fires once a quarter of the array is used — confirm
         * the intended unit of RT_MMU_PTE_SIZE before relying on it. */
        if (pte_offset >= RT_MMU_PTE_SIZE)
        {
            rt_kprintf("PTE table size too little\n");
            RT_ASSERT(0);
        }

        mdesc++;
    }
}
504
505
/* Initialize the MMU from an array of 'size' memory descriptors.
 * Disables the MMU and both caches, zeroes the static page tables,
 * pre-assigns level-2 table chunks (build_pte_mem_desc), builds
 * section or page mappings per descriptor, then re-enables the MMU
 * and caches and invalidates TLB/caches. */
void rt_hw_mmu_init(struct mem_desc *mdesc, rt_uint32_t size)
{
    /* disable I/D cache */
    mmu_disable_dcache();
    mmu_disable_icache();
    mmu_disable();
    mmu_invalidate_tlb();

    /* Clear pgd and pte tables. Use sizeof so the FULL arrays are
     * zeroed: the old code cleared only RT_MMU_PTE_SIZE *bytes* of
     * _pte_table, which holds RT_MMU_PTE_SIZE unsigned ints (4x as
     * many bytes), leaving three quarters of the table uninitialized. */
    rt_memset((void *)_pgd_table, 0, sizeof(_pgd_table));
    rt_memset((void *)_pte_table, 0, sizeof(_pte_table));

    build_pte_mem_desc(mdesc, size);

    /* set page table: one entry per descriptor */
    for (; size > 0; size--)
    {
        if (mdesc->mapped_mode == SECT_MAPPED)
        {
            mmu_create_pgd(mdesc); /* 1MB section mappings */
        }
        else
        {
            mmu_create_pte(mdesc); /* 4KB page mappings */
        }

        mdesc++;
    }

    /* set MMU table address */
    mmu_setttbase((rt_uint32_t)_pgd_table);

    /* enables MMU */
    mmu_enable();

    /* enable Instruction Cache */
    mmu_enable_icache();

    /* enable Data Cache */
    mmu_enable_dcache();

    /* start from a clean cache state under the new mappings */
    mmu_invalidate_icache();
    mmu_invalidate_dcache_all();
}
548
549