/* xref: /nrf52832-nimble/rt-thread/libcpu/arm/dm36x/mmu.c (revision 167494296f0543431a51b6b1b83e957045294e05) */
1 /*
2  * Copyright (c) 2006-2018, RT-Thread Development Team
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Change Logs:
7  * Date           Author       Notes
8  */
9 
10 #include "mmu.h"
11 
12 #ifdef __CC_ARM
/* Program the translation table base register (CP15 c2) with the level-1
 * page-table address in `i`. Before switching tables, all TLB entries are
 * invalidated and every domain is set to "client" so access rights come
 * from the page-table entry permission bits. */
void mmu_setttbase(rt_uint32_t i)
{
    register rt_uint32_t value;

    /* Invalidate the entire unified TLB (CP15 c8,c7,0) so no stale
     * translations survive the table switch. */
    value = 0;
    __asm volatile
    {
        mcr p15, 0, value, c8, c7, 0
    }

    /* 0b01 in each 2-bit field: all 16 domains become "client". */
    value = 0x55555555;
    __asm volatile
    {
        mcr p15, 0, value, c3, c0, 0
        mcr p15, 0, i, c2, c0, 0
    }
}
35 
/* Write `i` to the domain access control register (CP15 c3). */
void mmu_set_domain(rt_uint32_t i)
{
    __asm volatile
    {
        mcr p15,0, i, c3, c0,  0
    }
}
43 
/* Turn the MMU on: set the M bit (bit 0) of the CP15 control register. */
void mmu_enable()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x01
        mcr p15, 0, value, c1, c0, 0
    }
}
55 
/* Turn the MMU off: clear the M bit (bit 0) of the CP15 control register. */
void mmu_disable()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x01
        mcr p15, 0, value, c1, c0, 0
    }
}
67 
/* Enable the instruction cache: set the I bit (bit 12, 0x1000) in the
 * CP15 control register. */
void mmu_enable_icache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x1000
        mcr p15, 0, value, c1, c0, 0
    }
}
79 
/* Enable the data cache: set the C bit (bit 2, 0x04) in the CP15 control
 * register. */
void mmu_enable_dcache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x04
        mcr p15, 0, value, c1, c0, 0
    }
}
91 
/* Disable the instruction cache: clear the I bit (bit 12, 0x1000). */
void mmu_disable_icache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x1000
        mcr p15, 0, value, c1, c0, 0
    }
}
103 
/* Disable the data cache: clear the C bit (bit 2, 0x04). */
void mmu_disable_dcache()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x04
        mcr p15, 0, value, c1, c0, 0
    }
}
115 
/* Enable alignment fault checking: set the A bit (bit 1, 0x02) in the
 * CP15 control register. */
void mmu_enable_alignfault()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x02
        mcr p15, 0, value, c1, c0, 0
    }
}
127 
/* Disable alignment fault checking: clear the A bit (bit 1, 0x02). */
void mmu_disable_alignfault()
{
    register rt_uint32_t value;

    __asm volatile
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x02
        mcr p15, 0, value, c1, c0, 0
    }
}
139 
/* Clean and invalidate one data-cache line selected by set/way `index`
 * (CP15 c7,c14,2). */
void mmu_clean_invalidated_cache_index(int index)
{
    __asm volatile
    {
        mcr p15, 0, index, c7, c14, 2
    }
}
147 
/* Clean and invalidate every data-cache line overlapping the virtual
 * range [buffer, buffer + size), one line at a time by MVA
 * (CP15 c7,c14,1). */
void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    /* round the start address down to a cache-line boundary */
    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while(ptr < buffer + size)
    {
        __asm volatile
        {
            MCR p15, 0, ptr, c7, c14, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
163 
/* Write back (clean, without invalidating) every data-cache line in the
 * virtual range [buffer, buffer + size), by MVA (CP15 c7,c10,1). */
void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    /* round the start address down to a cache-line boundary */
    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while (ptr < buffer + size)
    {
        __asm volatile
        {
            MCR p15, 0, ptr, c7, c10, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
179 
/* Invalidate (discard, without writing back) every data-cache line in the
 * virtual range [buffer, buffer + size), by MVA (CP15 c7,c6,1). Dirty
 * data in the range is lost. */
void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    /* round the start address down to a cache-line boundary */
    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while (ptr < buffer + size)
    {
        __asm volatile
        {
            MCR p15, 0, ptr, c7, c6, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
195 
/* Invalidate the entire unified TLB (CP15 c8,c7,0). */
void mmu_invalidate_tlb()
{
    register rt_uint32_t value;

    value = 0;
    __asm volatile
    {
        mcr p15, 0, value, c8, c7, 0
    }
}
206 
/* Invalidate the entire instruction cache (CP15 c7,c5,0). */
void mmu_invalidate_icache()
{
    register rt_uint32_t value;

    value = 0;

    __asm volatile
    {
        mcr p15, 0, value, c7, c5, 0
    }
}
218 
219 
/* Invalidate the entire data cache without cleaning (CP15 c7,c6,0).
 * Any dirty lines are discarded. */
void mmu_invalidate_dcache_all()
{
    register rt_uint32_t value;

    value = 0;

    __asm volatile
    {
        mcr p15, 0, value, c7, c6, 0
    }
}
231 #elif defined(__GNUC__)
232 void mmu_setttbase(register rt_uint32_t i)
233 {
234 	register rt_uint32_t value;
235 
236    /* Invalidates all TLBs.Domain access is selected as
237     * client by configuring domain access register,
238     * in that case access controlled by permission value
239     * set by page table entry
240     */
241 	value = 0;
242 	asm volatile ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));
243 
244 	value = 0x55555555;
245 	asm volatile ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));
246 	asm volatile ("mcr p15, 0, %0, c2, c0, 0"::"r"(i));
247 }
248 
249 void mmu_set_domain(register rt_uint32_t i)
250 {
251 	asm volatile ("mcr p15,0, %0, c3, c0,  0": :"r" (i));
252 }
253 
254 void mmu_enable()
255 {
256 	register rt_uint32_t i;
257 
258 	/* read control register */
259 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
260 
261 	i |= 0x1;
262 	i |= (1 << 13); /* High exception vectors selected, address range = 0xFFFF0000-0xFFFF001C */
263 	/* S R bit=1 0  for system protection */
264 	i |= (1 << 8);
265 	i &= ~(1 << 9);
266 
267 	/* write back to control register */
268 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
269 }
270 
271 void mmu_disable()
272 {
273 	register rt_uint32_t i;
274 
275 	/* read control register */
276 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
277 
278 	i &= ~0x1;
279 
280 	/* write back to control register */
281 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
282 }
283 
284 void mmu_enable_icache()
285 {
286 	register rt_uint32_t i;
287 
288 	/* read control register */
289 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
290 
291 	i |= (1 << 12);
292 
293 	/* write back to control register */
294 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
295 }
296 
297 void mmu_enable_dcache()
298 {
299 	register rt_uint32_t i;
300 
301 	/* read control register */
302 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
303 
304 	i |= (1 << 2);
305 
306 	/* write back to control register */
307 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
308 }
309 
310 void mmu_disable_icache()
311 {
312 	register rt_uint32_t i;
313 
314 	/* read control register */
315 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
316 
317 	i &= ~(1 << 12);
318 
319 	/* write back to control register */
320 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
321 }
322 
323 void mmu_disable_dcache()
324 {
325 	register rt_uint32_t i;
326 
327 	/* read control register */
328 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
329 
330 	i &= ~(1 << 2);
331 
332 	/* write back to control register */
333 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
334 }
335 
336 void mmu_enable_alignfault()
337 {
338 	register rt_uint32_t i;
339 
340 	/* read control register */
341 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
342 
343 	i |= (1 << 1);
344 
345 	/* write back to control register */
346 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
347 }
348 
349 void mmu_disable_alignfault()
350 {
351 	register rt_uint32_t i;
352 
353 	/* read control register */
354 	asm volatile ("mrc p15, 0, %0, c1, c0, 0":"=r" (i));
355 
356 	i &= ~(1 << 1);
357 
358 	/* write back to control register */
359 	asm volatile ("mcr p15, 0, %0, c1, c0, 0": :"r" (i));
360 }
361 
362 void mmu_clean_invalidated_cache_index(int index)
363 {
364 	asm volatile ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
365 }
366 
367 void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
368 {
369     unsigned int ptr;
370 
371     ptr = buffer & ~(CACHE_LINE_SIZE - 1);
372 
373     while(ptr < buffer + size)
374     {
375     	asm volatile ("mcr p15, 0, %0, c7, c14, 1": :"r" (ptr));
376         ptr += CACHE_LINE_SIZE;
377     }
378 }
379 
380 
381 void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
382 {
383 	unsigned int ptr;
384 
385 	ptr = buffer & ~(CACHE_LINE_SIZE - 1);
386 
387 	while (ptr < buffer + size)
388 	{
389 		asm volatile ("mcr p15, 0, %0, c7, c10, 1": :"r" (ptr));
390 		ptr += CACHE_LINE_SIZE;
391 	}
392 }
393 
394 void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
395 {
396 	unsigned int ptr;
397 
398 	ptr = buffer & ~(CACHE_LINE_SIZE - 1);
399 
400 	while (ptr < buffer + size)
401 	{
402 		asm volatile ("mcr p15, 0, %0, c7, c6, 1": :"r" (ptr));
403 		ptr += CACHE_LINE_SIZE;
404 	}
405 }
406 
407 void mmu_invalidate_tlb()
408 {
409 	asm volatile ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
410 }
411 
412 void mmu_invalidate_icache()
413 {
414 	asm volatile ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
415 }
416 
417 void mmu_invalidate_dcache_all()
418 {
419     asm volatile ("mcr p15, 0, %0, c7, c6, 0": :"r" (0));
420 }
421 #endif
422 
423 /* level1 page table */
424 static volatile unsigned int _pgd_table[4*1024] ALIGN(16*1024);
425 /*
426  * level2 page table
427  * RT_MMU_PTE_SIZE must be 1024*n
428  */
429 static volatile unsigned int _pte_table[RT_MMU_PTE_SIZE] ALIGN(1*1024);
430 
431 void mmu_create_pgd(struct mem_desc *mdesc)
432 {
433     volatile rt_uint32_t *pTT;
434     volatile int i, nSec;
435     pTT = (rt_uint32_t *)_pgd_table + (mdesc->vaddr_start >> 20);
436     nSec = (mdesc->vaddr_end >> 20) - (mdesc->vaddr_start >> 20);
437     for(i = 0; i <= nSec; i++)
438     {
439         *pTT = mdesc->sect_attr | (((mdesc->paddr_start >> 20) + i) << 20);
440         pTT++;
441     }
442 }
443 
/* Build level-2 (4 KB page) entries for one memory descriptor.
 * On entry, mdesc->sect_attr carries the level-2 table address in its
 * upper bits (patched in by build_pte_mem_desc) plus the level-1
 * coarse-table attribute bits in [9:0]. */
void mmu_create_pte(struct mem_desc *mdesc)
{
    volatile rt_uint32_t *pTT;          /* level-1 entry for current vaddr */
    volatile rt_uint32_t *p_pteentry;   /* level-2 entry for current vaddr */
    int i;
    rt_uint32_t vaddr;
    rt_uint32_t total_page = 0;
    rt_uint32_t pte_offset = 0;
    rt_uint32_t sect_attr = 0;

    /* vaddr_end is inclusive, hence the +1 */
    total_page = (mdesc->vaddr_end >> 12) - (mdesc->vaddr_start >> 12) + 1;
    /* split sect_attr into the L2 table address and the L1 attribute bits */
    pte_offset = mdesc->sect_attr & 0xfffffc00;
    sect_attr = mdesc->sect_attr & 0x3ff;
    vaddr = mdesc->vaddr_start;

    for(i = 0; i < total_page; i++)
    {
        pTT = (rt_uint32_t *)_pgd_table + (vaddr >> 20);
        if (*pTT == 0) /* Level 1 page table item not used, now update pgd item */
        {
            /* point the L1 entry at the next free 1 KB L2 table */
            *pTT = pte_offset | sect_attr;
            p_pteentry = (rt_uint32_t *)pte_offset +
            ((vaddr & 0x000ff000) >> 12);
            pte_offset += 1024; /* advance past the 1 KB table just used */
        }
        else /* using old Level 1 page table item */
        {
            /* reuse the L2 table already referenced by this L1 entry */
            p_pteentry = (rt_uint32_t *)(*pTT & 0xfffffc00) +
            ((vaddr & 0x000ff000) >> 12);
        }

        /* small-page entry: physical page number plus page attribute bits */
        *p_pteentry = mdesc->page_attr | (((mdesc->paddr_start >> 12) + i) << 12);
        vaddr += 0x1000; /* next 4 KB page */
    }
}
480 
/* Pre-assign a slice of the static _pte_table to every PAGE_MAPPED
 * descriptor in mdesc[0..size): the slice's address is OR-ed into
 * mdesc->sect_attr so mmu_create_pte() can split it back out later.
 * NOTE(review): pte_offset advances in bytes (nsec << 10) but is compared
 * against RT_MMU_PTE_SIZE, which sizes _pte_table in 32-bit entries — the
 * overflow check looks 4x too conservative; confirm the intended unit. */
static void build_pte_mem_desc(struct mem_desc *mdesc, rt_uint32_t size)
{
    rt_uint32_t pte_offset = 0;
    rt_uint32_t nsec = 0;
    /* set page table */
    for (; size > 0; size--)
    {
        if (mdesc->mapped_mode == PAGE_MAPPED)
        {
            /* 1 MB sections covered by this descriptor; each needs one
             * 1 KB level-2 table */
            nsec = (RT_ALIGN(mdesc->vaddr_end, 0x100000) - RT_ALIGN_DOWN(mdesc->vaddr_start, 0x100000)) >> 20;
            mdesc->sect_attr |= (((rt_uint32_t)_pte_table)& 0xfffffc00) + pte_offset;
            pte_offset += nsec << 10;
        }
        if (pte_offset >= RT_MMU_PTE_SIZE)
        {
            rt_kprintf("PTE table size too little\n");
            RT_ASSERT(0);
        }

        mdesc++;
    }
}
503 
/* Initialize the MMU from an array of `size` memory descriptors.
 * Sequence matters: caches and MMU are switched off and the TLB flushed
 * before the tables are (re)built, then TTBR is set and MMU/caches are
 * re-enabled. */
void rt_hw_mmu_init(struct mem_desc *mdesc, rt_uint32_t size)
{
    /* disable I/D cache */
    mmu_disable_dcache();
    mmu_disable_icache();
    mmu_disable();
    mmu_invalidate_tlb();

    /* clear pgd and pte table */
    rt_memset((void *)_pgd_table, 0, 16*1024);
    rt_memset((void *)_pte_table, 0, RT_MMU_PTE_SIZE);
    /* pre-assign level-2 table slices to PAGE_MAPPED descriptors */
    build_pte_mem_desc(mdesc, size);
    /* set page table */
    for (; size > 0; size--)
    {
        if (mdesc->mapped_mode == SECT_MAPPED)
        {
            mmu_create_pgd(mdesc);  /* 1 MB section mappings */
        }
        else
        {
            mmu_create_pte(mdesc);  /* 4 KB page mappings */
        }

        mdesc++;
    }

    /* set MMU table address */
    mmu_setttbase((rt_uint32_t)_pgd_table);

    /* enables MMU */
    mmu_enable();

    /* enable Instruction Cache */
    mmu_enable_icache();

    /* enable Data Cache */
    mmu_enable_dcache();

    /* start clean: drop anything cached before/while the MMU came up */
    mmu_invalidate_icache();
    mmu_invalidate_dcache_all();
}
546 
547