=== modified file 'sys/i386/i386/machdep.c'
@@ -2022,6 +2022,54 @@
return (1);
}
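+
+/*
+ * Sanity-check the BIOS-reported base memory size and map the pages
+ * between the end of base memory and the start of the ISA hole
+ * read/write into both the kernel map and the vm86 page table.
+ */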
+static void
+map_bios(void)
+{
+ vm_paddr_t pa;
+ pt_entry_t *pte;
+ int i;
+
+ if (basemem > 640) {
+ printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
+ basemem);
+ basemem = 640;
+ }
+
+ /*
+ * XXX if basemem is now < 640, there is a `hole'
+ * between the end of base memory and the start of
+ * ISA memory. The hole may be empty or it may
+ * contain BIOS code or data. Map it read/write so
+ * that the BIOS can write to it. (Memory from 0 to
+ * the physical end of the kernel is mapped read-only
+ * to begin with and then parts of it are remapped.
+ * The parts that aren't remapped form holes that
+ * remain read-only and are unused by the kernel.
+ * The base memory area is below the physical end of
+ * the kernel and right now forms a read-only hole.
+ * The part of it from PAGE_SIZE to
+ * (trunc_page(basemem * 1024) - 1) will be
+ * remapped and used by the kernel later.)
+ *
+ * This code is similar to the code used in
+ * pmap_mapdev, but since no memory needs to be
+ * allocated we simply change the mapping.
+ */
+ for (pa = trunc_page(basemem * 1024);
+ pa < ISA_HOLE_START; pa += PAGE_SIZE)
+ pmap_kenter(KERNBASE + pa, pa);
+
+ /*
+ * Map pages between basemem and ISA_HOLE_START, if any, r/w into
+ * the vm86 page table so that vm86 can scribble on them using
+ * the vm86 map too. XXX: why 2 ways for this and only 1 way for
+ * page 0, at least as initialized here?
+ */
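+ /*
+ * basemem is in KB, so basemem / 4 is the first 4K page to remap;
+ * 160 pages reach ISA_HOLE_START (0xa0000).  PG_V | PG_RW | PG_U
+ * makes the pages valid, writable and accessible from vm86 mode.
+ */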
+ pte = (pt_entry_t *)vm86paddr;
+ for (i = basemem / 4; i < 160; i++)
+ pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
+}
+
/*
* Populate the (physmap) array with base/bound pairs describing the
* available physical memory in the system, then test this memory and
@@ -2039,7 +2087,7 @@
getmemsize(int first)
{
int i, off, physmap_idx, pa_indx, da_indx;
- int hasbrokenint12, has_smap;
+ int has_smap;
u_long physmem_tunable;
u_int extmem;
struct vm86frame vmf;
@@ -2074,68 +2122,11 @@
physmap_idx = 0;
goto physmap_done;
#endif
- hasbrokenint12 = 0;
- TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
bzero(&vmf, sizeof(vmf));
bzero(physmap, sizeof(physmap));
basemem = 0;
/*
- * Some newer BIOSes has broken INT 12H implementation which cause
- * kernel panic immediately. In this case, we need to scan SMAP
- * with INT 15:E820 first, then determine base memory size.
- */
- if (hasbrokenint12) {
- goto int15e820;
- }
-
- /*
- * Perform "base memory" related probes & setup
- */
- vm86_intcall(0x12, &vmf);
- basemem = vmf.vmf_ax;
- if (basemem > 640) {
- printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
- basemem);
- basemem = 640;
- }
-
- /*
- * XXX if biosbasemem is now < 640, there is a `hole'
- * between the end of base memory and the start of
- * ISA memory. The hole may be empty or it may
- * contain BIOS code or data. Map it read/write so
- * that the BIOS can write to it. (Memory from 0 to
- * the physical end of the kernel is mapped read-only
- * to begin with and then parts of it are remapped.
- * The parts that aren't remapped form holes that
- * remain read-only and are unused by the kernel.
- * The base memory area is below the physical end of
- * the kernel and right now forms a read-only hole.
- * The part of it from PAGE_SIZE to
- * (trunc_page(biosbasemem * 1024) - 1) will be
- * remapped and used by the kernel later.)
- *
- * This code is similar to the code used in
- * pmap_mapdev, but since no memory needs to be
- * allocated we simply change the mapping.
- */
- for (pa = trunc_page(basemem * 1024);
- pa < ISA_HOLE_START; pa += PAGE_SIZE)
- pmap_kenter(KERNBASE + pa, pa);
-
- /*
- * Map pages between basemem and ISA_HOLE_START, if any, r/w into
- * the vm86 page table so that vm86 can scribble on them using
- * the vm86 map too. XXX: why 2 ways for this and only 1 way for
- * page 0, at least as initialized here?
- */
- pte = (pt_entry_t *)vm86paddr;
- for (i = basemem / 4; i < 160; i++)
- pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
-
-int15e820:
- /*
* Fetch the memory map with INT 15:E820. First, check to see
* if the loader supplied it and use that if so. Otherwise,
* use vm86 to invoke the BIOS call directly.
@@ -2161,6 +2152,25 @@
if (!add_smap_entry(smap, physmap, &physmap_idx))
break;
} else {
+ int hasbrokenint12 = 0;
+
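+ /* The loader tunable hw.hasbrokenint12 disables the INT 12H probe. */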
+ TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
+
+ /*
+ * Some newer BIOSes have a broken INT 12H implementation that
+ * causes an immediate kernel panic.  In that case, skip the
+ * INT 12H call and determine the base memory size from the
+ * INT 15:E820 SMAP instead.
+ */
+ if (!hasbrokenint12) {
+ /*
+ * Perform "base memory" related probes & setup
+ */
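+ /* INT 12H returns the base memory size in KB in %ax. */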
+ vm86_intcall(0x12, &vmf);
+ basemem = vmf.vmf_ax;
+
+ map_bios();
+ }
/*
* map page 1 R/W into the kernel page table so we can use it
* as a buffer. The kernel will unmap this page later.
@@ -2196,29 +2206,7 @@
}
}
- /*
- * XXX this function is horribly organized and has to the same
- * things that it does above here.
- */
- if (basemem == 0)
- basemem = 640;
- if (basemem > 640) {
- printf(
- "Preposterous BIOS basemem of %uK, truncating to 640K\n",
- basemem);
- basemem = 640;
- }
-
- /*
- * Let vm86 scribble on pages between basemem and
- * ISA_HOLE_START, as above.
- */
- for (pa = trunc_page(basemem * 1024);
- pa < ISA_HOLE_START; pa += PAGE_SIZE)
- pmap_kenter(KERNBASE + pa, pa);
- pte = (pt_entry_t *)vm86paddr;
- for (i = basemem / 4; i < 160; i++)
- pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
+ /* Fall back to 640K of base memory if we could not determine it above. */
+ if (basemem == 0)
+ basemem = 640;
+ map_bios();
}
if (physmap[1] != 0)