Originally, mtd_part_of_parse() would just call of_get_child_by_name().
It was later changed to first try to get the OF node with
mtd_get_of_node() and only fall back to of_get_child_by_name().
An of_node_put() call a bit further down in the code balanced the
of_get_child_by_name(). However, despite its name, mtd_get_of_node()
does not take a reference on the OF node: it is a simple helper hiding
some pointer logic to retrieve the OF node related to an MTD device.
It has nevertheless often been used as if it did, in patterns like:
    of_node_put(mtd_get_of_node(<mtd>))
which is a put without any matching get.
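For reference, the helper is essentially a pointer lookup and leaves the
refcount untouched (paraphrased from include/linux/mtd/mtd.h, not a
verbatim quote):

    /* Sketch of mtd_get_of_node(): no of_node_get() is performed. */
    static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
    {
            return dev_of_node(&mtd->dev);  /* i.e. mtd->dev.of_node */
    }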
The direct effect of such unbalanced reference counting shows up when
rmmod'ing any module that has added MTD partitions:
    OF: ERROR: Bad of_node_put() on <of_path_to_partition>
As it seems normal to get a reference on the OF node during the
of_property_for_each_string() that follows, add a call to
of_node_get() when relevant.
Fixes: 76a832254ab0 ("mtd: partitions: use DT info for parsing partitions with "compatible" prop")
Cc: stable(a)vger.kernel.org
Signed-off-by: Miquel Raynal <miquel.raynal(a)bootlin.com>
---
drivers/mtd/mtdpart.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 52e2cb35fc79..99c460facd5e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
np = mtd_get_of_node(master);
- if (!mtd_is_partition(master))
+ if (mtd_is_partition(master))
+ of_node_get(np);
+ else
np = of_get_child_by_name(np, "partitions");
+
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
--
2.17.1
As documented in spi-mem.h, spi_mem_op->data.buf.{in,out} must be
DMA-able, and commit 4120f8d158ef ("mtd: spi-nor: Use the spi_mem_xx()
API") failed to follow this rule as buffers passed to
->{read,write}_reg() are usually placed on the stack.
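For illustration, a typical ->read_reg() caller passes a stack variable
(a hedged sketch in the spirit of spi-nor.c, not a verbatim quote):

    u8 sr;  /* lives on the caller's stack, so it is not DMA-able */
    ret = nor->read_reg(nor, SPINOR_OP_RDSR, &sr, 1);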
Fix that by allocating a scratch buffer and copying the data in there
before passing it to the spi-mem layer.
Fixes: 4120f8d158ef ("mtd: spi-nor: Use the spi_mem_xx() API")
Reported-by: Jarkko Nikula <jarkko.nikula(a)linux.intel.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Boris Brezillon <boris.brezillon(a)bootlin.com>
---
Note that the ->{read,write}() path is still buggy since nothing
guarantees that buffers passed by the MTD layer to the SPI NOR layer
are DMA-able, but this is a long-standing issue which we'll have to
address at the spi-nor level (this layer can choose the bounce buffer
size based on nor->page_size).
---
drivers/mtd/devices/m25p80.c | 24 +++++++++++++++++++++---
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cbfafc453274..3b7fafa4bbd6 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,14 +39,22 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(len, val, 1));
+ SPI_MEM_OP_DATA_IN(len, NULL, 1));
+ void *scratchbuf;
int ret;
+ scratchbuf = kmemdup(val, len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
+
+ op.data.buf.in = scratchbuf;
ret = spi_mem_exec_op(flash->spimem, &op);
if (ret < 0)
dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
code);
+ kfree(scratchbuf);
+
return ret;
}
@@ -56,9 +64,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+ void *scratchbuf;
+ int ret;
- return spi_mem_exec_op(flash->spimem, &op);
+ scratchbuf = kmemdup(buf, len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
+
+ op.data.buf.out = scratchbuf;
+ ret = spi_mem_exec_op(flash->spimem, &op);
+ kfree(scratchbuf);
+
+ return ret;
}
static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
--
2.14.1
A transparent huge page is represented by a single entry on an LRU list.
Therefore, we can only make unevictable an entire compound page, not
individual subpages.
If a user tries to mlock() part of a huge page, we want the rest of the
page to be reclaimable.
We handle this by keeping PTE-mapped huge pages on normal LRU lists: the
PMD on the border of a VM_LOCKED VMA will be split into a PTE table.
The introduction of THP migration breaks[1] the rules around mlocking THP
pages: if there is a single PMD mapping of the page in a mlocked VMA, the
page gets mlocked, regardless of PTE mappings of the page.
For tmpfs/shmem this is easy to fix by checking PageDoubleMap() in
remove_migration_pmd().
Anon THP pages can only be shared between processes via fork(). An
mlocked page can only be shared if the parent mlocked it before forking;
otherwise CoW will be triggered on mlock().
For anon THP, we can fix the issue by munlocking the page when removing
the PTE migration entry for the page. PTEs for the page will always come
after the mlocked PMD: rmap walks VMAs from oldest to newest.
Test-case:
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/mempolicy.h>
#include <numaif.h>
int main(void)
{
	unsigned long nodemask = 4;
	void *addr;

	/* Map a 2MB mlocked anonymous region, expected to be THP-backed. */
	addr = mmap((void *)0x20000000UL, 2UL << 20, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);

	if (fork()) {
		wait(NULL);
		return 0;
	}

	/* Child: mlock only the first 4KB of the huge page... */
	mlock(addr, 4UL << 10);
	/* ...then trigger migration of the page via mbind(). */
	mbind(addr, 2UL << 20, MPOL_PREFERRED | MPOL_F_RELATIVE_NODES,
	      &nodemask, 4, MPOL_MF_MOVE);
	return 0;
}
[1] https://lkml.kernel.org/r/CAOMGZ=G52R-30rZvhGxEbkTw7rLLwBGadVYeo--iizcD3upL…
Signed-off-by: Kirill A. Shutemov <kirill.shutemov(a)linux.intel.com>
Reported-by: Vegard Nossum <vegard.nossum(a)oracle.com>
Fixes: 616b8371539a ("mm: thp: enable thp migration in generic path")
Cc: <stable(a)vger.kernel.org> [v4.14+]
Cc: Zi Yan <zi.yan(a)cs.rutgers.edu>
Cc: Naoya Horiguchi <n-horiguchi(a)ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka(a)suse.cz>
Cc: Andrea Arcangeli <aarcange(a)redhat.com>
---
mm/huge_memory.c | 2 +-
mm/migrate.c | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 533f9b00147d..00704060b7f7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2931,7 +2931,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
else
page_add_file_rmap(new, true);
set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
- if (vma->vm_flags & VM_LOCKED)
+ if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
mlock_vma_page(new);
update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
diff --git a/mm/migrate.c b/mm/migrate.c
index d6a2e89b086a..9d374011c244 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);
+ if (PageTransHuge(page) && PageMlocked(page))
+ clear_page_mlock(page);
+
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, pvmw.address, pvmw.pte);
}
--
2.18.0
From: Suren Baghdasaryan <surenb(a)google.com>
According to the ETSI TS 102 622 specification, chapter 4.4, the pipe
identifier is 7 bits long, which allows for 128 unique pipe IDs. Because
NFC_HCI_MAX_PIPES is used as the number of pipes supported and not
as the maximum pipe ID, its value should be 128 instead of 127.
nfc_hci_recv_from_llc() extracts the pipe ID from the packet header
using the NFC_HCI_FRAGMENT (0x7F) mask, which allows for a pipe ID value
of 127. The same happens when NCI_HCP_MSG_GET_PIPE() is used. With the
pipes array having only 127 elements, a pipe ID of 127 results in an OOB
memory access.
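The off-by-one is easiest to see against the array declaration (sketch,
abridged from struct nfc_hci_dev):

    struct nfc_hci_pipe pipes[NFC_HCI_MAX_PIPES]; /* valid indexes 0..126 if the macro is 127 */
    ...
    hdev->pipes[pipe].gate = ...;  /* pipe == 127 -> one element past the end */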
Cc: Samuel Ortiz <sameo(a)linux.intel.com>
Cc: Allen Pais <allen.pais(a)oracle.com>
Cc: "David S. Miller" <davem(a)davemloft.net>
Suggested-by: Dan Carpenter <dan.carpenter(a)oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb(a)google.com>
Reviewed-by: Kees Cook <keescook(a)chromium.org>
Cc: stable <stable(a)vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
include/net/nfc/hci.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694dafa5b..008f466d1da7 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
--
2.19.0
From: Suren Baghdasaryan <surenb(a)google.com>
When handling SHDLC I-Frame commands, the "pipe" field used for indexing
into an array should be checked before use. If left unchecked, it might
access memory outside of the array of size NFC_HCI_MAX_PIPES (127).
Malformed NFC HCI frames could be injected by a malicious NFC device
communicating with the device being attacked (remote attack vector),
or even by an attacker with physical access to the I2C bus such that
they could influence the data transfers on that bus (local attack vector).
skb->data is controlled by the attacker and has only been sanitized in
the most trivial ways (CRC check), therefore we can consider the
create_info struct and all of its members to be tainted.
'create_info->pipe', with a maximum value of 255 (u8), is used as an
offset into the hdev->pipes array of 127 elements, which can lead to an
OOB write.
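For context, abridged from the CREATE_PIPE handler (not a verbatim
quote), the attacker-controlled value was used as an array index with no
prior bounds check:

    create_info = (struct hci_create_pipe_resp *)skb->data;
    /* create_info->pipe comes straight from the wire (0..255); pipes[]
     * only has NFC_HCI_MAX_PIPES entries, so this write can go OOB. */
    hdev->pipes[create_info->pipe].gate = create_info->dest_gate;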
Cc: Samuel Ortiz <sameo(a)linux.intel.com>
Cc: Allen Pais <allen.pais(a)oracle.com>
Cc: "David S. Miller" <davem(a)davemloft.net>
Suggested-by: Kevin Deus <kdeus(a)google.com>
Signed-off-by: Suren Baghdasaryan <surenb(a)google.com>
Acked-by: Kees Cook <keescook(a)chromium.org>
Cc: stable <stable(a)vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
net/nfc/hci/core.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ac8030c4bcf8..19cb2e473ea6 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
create_info = (struct hci_create_pipe_resp *)skb->data;
+ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
/* Save the new created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
delete_info = (struct hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
--
2.19.0
Since the addition of WARN_ON() checks in the nand_subop_get_data/addr_len()
helpers, this driver produces harmless warnings (mostly at probe)
just because it always calls the *_data_len() helper in the parsing
function, even on non-data instructions, where this value is
meaningless and unneeded.
Fix these warnings by deriving the length only when it is relevant.
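The guard in the helpers is roughly of this shape (paraphrased, not the
literal core code), and it is what fires when the data length of a
non-data instruction is queried:

    if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR &&
                instr->type != NAND_OP_DATA_OUT_INSTR))
            return 0;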
Fixes: 760c435e0f85 ("mtd: rawnand: make subop helpers return unsigned values")
Cc: stable(a)vger.kernel.org
Signed-off-by: Miquel Raynal <miquel.raynal(a)bootlin.com>
---
drivers/mtd/nand/raw/marvell_nand.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 7af4d6213ee5..bc2ef5209783 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int offset, naddrs;
const u8 *addrs;
- int len = nand_subop_get_data_len(subop, op_id);
+ int len;
instr = &subop->instrs[op_id];
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
--
2.17.1
From: Andi Kleen <ak(a)linux.intel.com>
Patch for stable only, to fix boot resets caused by the L1TF patches.
Stable trees reverted the following patch:
  Revert "x86/mm/pat: Ensure cpa->pfn only contains page frame numbers"
  This reverts commit 87e2bd898d3a79a8c609f183180adac47879a2a4 which is
  commit edc3b9129cecd0f0857112136f5b8b1bc1d45918 upstream.
but the L1TF patch backported here:
  x86/mm/pat: Make set_memory_np() L1TF safe
  commit 958f79b9ee55dfaf00c8106ed1c22a2919e0028b upstream
  set_memory_np() is used to mark kernel mappings not present, but it has
  its own open coded mechanism which does not have the L1TF protection of
  inverting the address bits.
assumed that cpa->pfn contains a PFN. With the above patch reverted
it does not, which causes the PMD to be set to an incorrect address
shifted by 12 bits, which can cause an early boot reset on some
systems, like an Apollo Lake embedded system.
Convert the address to a PFN before passing it to pfn_pmd().
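To make the failure mode concrete (hypothetical numbers, illustration
only): pfn_pmd() shifts its first argument left by PAGE_SHIFT when
building the PMD, so feeding it a physical address instead of a PFN maps
the wrong range:

    pfn_pmd(0x80000000, prot);                /* treated as a PFN -> maps ~2 GiB << 12, wrong */
    pfn_pmd(0x80000000 >> PAGE_SHIFT, prot);  /* maps 0x80000000 as intended */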
Thanks to Bernhard for bisecting and testing.
Cc: stable(a)vger.kernel.org # 4.4 and 4.9
Reported-by: Bernhard Kaindl <bernhard.kaindl(a)thalesgroup.com>
Tested-by: Bernhard Kaindl <bernhard.kaindl(a)thalesgroup.com>
Signed-off-by: Andi Kleen <ak(a)linux.intel.com>
---
arch/x86/mm/pageattr.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 27610c2d1821..1007fa80f5a6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1006,7 +1006,7 @@ static int populate_pmd(struct cpa_data *cpa,
pmd = pmd_offset(pud, start);
- set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
+ set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn >> PAGE_SHIFT,
canon_pgprot(pmd_pgprot))));
start += PMD_SIZE;
--
2.17.1
From: Andi Kleen <ak(a)linux.intel.com>
[upstream commit cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 for 4.4.
Note there might still be a trivial conflict with the backport of
b0a182f875689647b014bc01d36b340217792852, but it should be easy to
resolve]
On Nehalem and newer Core CPUs the CPU cache internally uses 44 bits of
physical address space. The L1TF workaround is limited by this internal
cache address width, and needs to have one bit free there for the
mitigation to work.
Older client systems report only a 36-bit physical address space, so the
range check decides that L1TF is not mitigated for a 36-bit phys/32GB
system with some memory holes.
But since these CPUs actually have the larger internal cache width, this
warning is bogus: it would only really be needed if the system had more
than 43 bits of memory.
Add a new internal x86_cache_bits field. Normally it is the same as the
physical bits field reported by CPUID, but for Nehalem and newer force it
to be at least 44 bits.
Change the L1TF memory size warning to use the new cache_bits field to
avoid bogus warnings and remove the bogus comment about memory size.
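Rough arithmetic for the reported case (36-bit phys, 32GB RAM), based on
the old and new l1tf_pfn_limit() formulas in the hunk below:

    old: BIT(36 - 1 - PAGE_SHIFT) - 1 = BIT(23) - 1 pfns
         -> half_pa just under 32GB, so RAM mapped at or above 32GB
            (due to memory holes) trips the warning
    new: BIT_ULL(44 - 1 - PAGE_SHIFT) = BIT_ULL(31) pfns
         -> half_pa = 8TB, far above the installed memory, no warning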
Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
Reported-by: George Anchev <studio(a)anchev.net>
Reported-by: Christopher Snowhill <kode54(a)gmail.com>
Signed-off-by: Andi Kleen <ak(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: x86(a)kernel.org
Cc: linux-kernel(a)vger.kernel.org
Cc: Michael Hocko <mhocko(a)suse.com>
Cc: vbabka(a)suse.cz
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org
---
arch/x86/include/asm/processor.h | 4 ++-
arch/x86/kernel/cpu/bugs.c | 47 ++++++++++++++++++++++++++++----
arch/x86/kernel/cpu/common.c | 2 ++
3 files changed, 47 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a3a53955f01c..cb07d3f618ca 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -104,6 +104,8 @@ struct cpuinfo_x86 {
__u8 x86_phys_bits;
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
+
+ __u8 x86_cache_bits;
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
/* Maximum supported CPUID level, -1=no CPUID: */
@@ -174,7 +176,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long l1tf_pfn_limit(void)
{
- return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
extern void early_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 34e4aaaf03d2..2a41a86aa5c2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -634,6 +634,46 @@ void x86_spec_ctrl_setup_ap(void)
#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt
+
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+ if (c->x86 != 6)
+ return;
+
+ switch (c->x86_model) {
+ case INTEL_FAM6_NEHALEM:
+ case INTEL_FAM6_WESTMERE:
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_HASWELL_CORE:
+ case INTEL_FAM6_HASWELL_ULT:
+ case INTEL_FAM6_HASWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_CORE:
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ if (c->x86_cache_bits < 44)
+ c->x86_cache_bits = 44;
+ break;
+ }
+}
+
static void __init l1tf_select_mitigation(void)
{
u64 half_pa;
@@ -641,16 +681,13 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
+ override_cache_bits(&boot_cpu_data);
+
#if CONFIG_PGTABLE_LEVELS == 2
pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
return;
#endif
- /*
- * This is extremely unlikely to happen because almost all
- * systems have far more MAX_PA/2 than RAM can be fit into
- * DIMM slots.
- */
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4d3fa79c0f09..b12c0287d6cf 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -798,6 +798,8 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_phys_bits = 36;
#endif
+ c->x86_cache_bits = c->x86_phys_bits;
+
if (c->extended_cpuid_level >= 0x8000000a)
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
--
2.17.1