@@ -378,6 +378,152 @@ verify_kaslr_offset(ulong kaslr_offset)
378
378
return TRUE;
379
379
}
380
380
381
/*
 * Find virtual (VA) and physical (PA) addresses of kernel start
 *
 * va:
 *	Actual address of the kernel start (_stext) placed
 *	randomly by kaslr feature. To be more accurate,
 *	VA = _stext(from vmlinux) + kaslr_offset
 *
 * pa:
 *	Physical address where the kernel is placed.
 *
 * In nokaslr case, VA = _stext (from vmlinux)
 * In kaslr case, virtual address of the kernel placement goes
 * in this range: ffffffff80000000..ffffffff9fffffff, or
 * __START_KERNEL_map..+512MB
 *
 * https://www.kernel.org/doc/Documentation/x86/x86_64/mm.txt
 *
 * Randomized VA will be the first valid page starting from
 * ffffffff80000000 (__START_KERNEL_map). Page tree entry of
 * this page will contain the PA of the kernel start.
 *
 * Returns TRUE and fills *va/*pa on success; returns FALSE if no
 * present mapping is found at or above __START_KERNEL_map.
 */
static int
find_kernel_start(uint64_t pgd, ulong *va, ulong *pa)
{
	int pgd_idx, p4d_idx, pud_idx, pmd_idx, pte_idx;
	uint64_t pgd_pte = 0, pud_pte, pmd_pte, pte;

	/* Start the walk at the table indices of __START_KERNEL_map. */
	pgd_idx = pgd_index(__START_KERNEL_map);
	if (machdep->flags & VM_5LEVEL)
		p4d_idx = p4d_index(__START_KERNEL_map);
	pud_idx = pud_index(__START_KERNEL_map);
	pmd_idx = pmd_index(__START_KERNEL_map);
	pte_idx = pte_index(__START_KERNEL_map);

	/* If the VM is in 5-level page table */
	/* Pre-set the sign-extension (non-canonical) high bits of the VA;
	 * the table indices found below are OR-ed into the low bits. */
	if (machdep->flags & VM_5LEVEL)
		*va = ~((1UL << 57) - 1);
	else
		*va = ~__VIRTUAL_MASK;

	FILL_PGD(pgd & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE());
	for (; pgd_idx < PTRS_PER_PGD; pgd_idx++) {
		pgd_pte = ULONG(machdep->pgd + pgd_idx * sizeof(uint64_t));
		if (pgd_pte & _PAGE_PRESENT)
			break;
		/* Advanced past the starting PGD slot: the lower-level
		 * scans must restart from index 0. */
		p4d_idx = pud_idx = pmd_idx = pte_idx = 0;
	}
	if (pgd_idx == PTRS_PER_PGD)
		return FALSE;
	*va |= (ulong)pgd_idx << __PGDIR_SHIFT;

	if (machdep->flags & VM_5LEVEL) {
		FILL_P4D(pgd_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE());
		for (; p4d_idx < PTRS_PER_P4D; p4d_idx++) {
			/* reuse pgd_pte */
			pgd_pte = ULONG(machdep->machspec->p4d + p4d_idx * sizeof(uint64_t));
			if (pgd_pte & _PAGE_PRESENT)
				break;
			pud_idx = pmd_idx = pte_idx = 0;
		}
		if (p4d_idx == PTRS_PER_P4D)
			return FALSE;
		*va |= (ulong)p4d_idx << P4D_SHIFT;
	}

	FILL_PUD(pgd_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE());
	for (; pud_idx < PTRS_PER_PUD; pud_idx++) {
		pud_pte = ULONG(machdep->pud + pud_idx * sizeof(uint64_t));
		if (pud_pte & _PAGE_PRESENT)
			break;
		pmd_idx = pte_idx = 0;
	}
	if (pud_idx == PTRS_PER_PUD)
		return FALSE;
	*va |= (ulong)pud_idx << PUD_SHIFT;
	if (pud_pte & _PAGE_PSE) {
		/* 1GB page */
		*pa = pud_pte & PHYSICAL_PAGE_MASK;
		return TRUE;
	}

	FILL_PMD(pud_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE());
	for (; pmd_idx < PTRS_PER_PMD; pmd_idx++) {
		pmd_pte = ULONG(machdep->pmd + pmd_idx * sizeof(uint64_t));
		if (pmd_pte & _PAGE_PRESENT)
			break;
		pte_idx = 0;
	}
	if (pmd_idx == PTRS_PER_PMD)
		return FALSE;
	*va |= pmd_idx << PMD_SHIFT;
	if (pmd_pte & _PAGE_PSE) {
		/* 2MB page */
		*pa = pmd_pte & PHYSICAL_PAGE_MASK;
		return TRUE;
	}

	FILL_PTBL(pmd_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE());
	for (; pte_idx < PTRS_PER_PTE; pte_idx++) {
		pte = ULONG(machdep->ptbl + pte_idx * sizeof(uint64_t));
		if (pte & _PAGE_PRESENT)
			break;
	}
	if (pte_idx == PTRS_PER_PTE)
		return FALSE;

	/* 4KB page: the PMD entry's frame address covers the final level.
	 * NOTE(review): upstream takes *pa from pmd_pte here rather than
	 * from pte — confirm this matches the intended 4KB-page case. */
	*va |= pte_idx << PAGE_SHIFT;
	*pa = pmd_pte & PHYSICAL_PAGE_MASK;
	return TRUE;
}
492
+
493
+ /*
494
+ * Page Tables based method to calculate kaslr_offset and phys_base.
495
+ * It uses VA and PA of kernel start.
496
+ *
497
+ * kaslr offset and phys_base are calculated as follows:
498
+ *
499
+ * kaslr_offset = VA - st->_stext_vmlinux
500
+ * phys_base = PA - (VA - __START_KERNEL_map)
501
+ */
502
+ static int
503
+ calc_kaslr_offset_from_page_tables (uint64_t pgd , ulong * kaslr_offset ,
504
+ ulong * phys_base )
505
+ {
506
+ ulong va , pa ;
507
+
508
+ if (!st -> _stext_vmlinux || st -> _stext_vmlinux == UNINITIALIZED ) {
509
+ fprintf (fp , "%s: st->_stext_vmlinux must be initialized\n" ,
510
+ __FUNCTION__ );
511
+ return FALSE;
512
+ }
513
+ if (!find_kernel_start (pgd , & va , & pa ))
514
+ return FALSE;
515
+
516
+ if (CRASHDEBUG (1 )) {
517
+ fprintf (fp , "calc_kaslr_offset: _stext(vmlinux): %lx\n" , st -> _stext_vmlinux );
518
+ fprintf (fp , "calc_kaslr_offset: kernel start VA: %lx\n" , va );
519
+ fprintf (fp , "calc_kaslr_offset: kernel start PA: %lx\n" , pa );
520
+ }
521
+
522
+ * kaslr_offset = va - st -> _stext_vmlinux ;
523
+ * phys_base = pa - (va - __START_KERNEL_map );
524
+ return TRUE;
525
+ }
526
+
381
527
/*
382
528
* IDT based method to calculate kaslr_offset and phys_base
383
529
*
@@ -537,8 +683,13 @@ calc_kaslr_offset(ulong *ko, ulong *pb)
537
683
"pgd" , RETURN_ON_ERROR ))
538
684
continue ;
539
685
540
- if (!calc_kaslr_offset_from_idt (idtr , pgd , & kaslr_offset , & phys_base ))
541
- continue ;
686
+ if (!calc_kaslr_offset_from_page_tables (pgd , & kaslr_offset ,
687
+ & phys_base )) {
688
+ if (!calc_kaslr_offset_from_idt (idtr , pgd ,
689
+ & kaslr_offset ,
690
+ & phys_base ))
691
+ continue ;
692
+ }
542
693
543
694
if (verify_kaslr_offset (kaslr_offset ))
544
695
goto found ;
0 commit comments