The ABI is broken and we cannot support it properly. Turn it off.
If this causes a meaningful performance regression for someone, KVM
can introduce an improved ABI that is supportable.
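To make the window concrete, here is a simplified sketch of the two reads
every async-#PF entry must perform (names approximate the
do_async_page_fault() path of this era; illustrative only, not the actual
handler):

    unsigned long address = read_cr2();          /* hardware fault address */
    u32 reason = kvm_read_and_reset_pf_reason(); /* paravirt reason, consumed on read */

    /*
     * An NMI landing between the fault and these reads can touch user
     * memory and take a nested #PF: the nested handler consumes
     * apf_reason and the nested fault overwrites CR2.  No ordering of
     * the two loads gives both the outer and the nested #PF a
     * consistent (CR2, apf_reason) pair.
     */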
Cc: stable(a)vger.kernel.org
Signed-off-by: Andy Lutomirski <luto(a)kernel.org>
---
arch/x86/kernel/kvm.c | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 93ab0cbd304e..e6f2aefa298b 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -318,11 +318,26 @@ static void kvm_guest_cpu_init(void)
pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
-#ifdef CONFIG_PREEMPTION
- pa |= KVM_ASYNC_PF_SEND_ALWAYS;
-#endif
pa |= KVM_ASYNC_PF_ENABLED;
+ /*
+ * We do not set KVM_ASYNC_PF_SEND_ALWAYS. With the current
+ * KVM paravirt ABI, the following scenario is possible:
+ *
+ * #PF: async page fault (KVM_PV_REASON_PAGE_NOT_PRESENT)
+ *  NMI before CR2 or KVM_PF_REASON_PAGE_NOT_PRESENT is read
+ *   NMI accesses user memory, e.g. due to perf
+ *    #PF: normal page fault
+ *     #PF reads CR2 and apf_reason -- apf_reason should be 0
+ *
+ * outer #PF reads CR2 and apf_reason -- apf_reason should be
+ * KVM_PV_REASON_PAGE_NOT_PRESENT
+ *
+ * There is no possible way that both reads of CR2 and
+ * apf_reason get the correct values. Fixing this would
+ * require paravirt ABI changes.
+ */
+
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
--
2.24.1
Root Complex Integrated Endpoints (RCiEPs) do not have a Root Port above
the device, so there is no upstream bridge to match settings against.
pci_configure_mps() should simply program the maximum supported value for
MaxPayload size in Device Control, and likewise for MaxReadReq, unless
pci=pcie_bus_peer2peer (PCIE_BUS_PEER2PEER) is specified on the kernel
command line.
When MPS is configured lower than the device supports, it can result in
reduced performance.
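For reference, the Max_Payload_Size Supported field in DEVCAP encodes a
power-of-two multiple of 128 bytes, which is what the computation in the
patch relies on:

    /* DEVCAP[2:0] (dev->pcie_mpss): 0 -> 128B, 1 -> 256B, 2 -> 512B,
     *                               3 -> 1024B, 4 -> 2048B, 5 -> 4096B
     * e.g. an RCiEP reporting pcie_mpss == 2 gets pcie_set_mps(dev, 512).
     */
    mps = 128 << dev->pcie_mpss;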
Fixes: 9dae3a97297f ("PCI: Move MPS configuration check to pci_configure_device()")
Signed-off-by: Ashok Raj <ashok.raj(a)intel.com>
Tested-by: Dave Jiang <dave.jiang(a)intel.com>
To: Bjorn Helgaas <bhelgaas(a)google.com>
To: linux-pci(a)vger.kernel.org
Cc: linux-kernel(a)vger.kernel.org
Cc: stable(a)vger.kernel.org
Cc: Ashok Raj <ashok.raj(a)intel.com>
---
drivers/pci/probe.c | 23 ++++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index eeff8a07..a738b1c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1895,13 +1895,34 @@ static void pci_configure_mps(struct pci_dev *dev)
struct pci_dev *bridge = pci_upstream_bridge(dev);
int mps, mpss, p_mps, rc;
- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ if (!pci_is_pcie(dev))
return;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
if (dev->is_virtfn)
return;
+ /*
+ * If this is a Root Complex Integrated Endpoint, simply program
+ * the max value from DEVCAP. No additional lookup is necessary.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ mps = 128;
+ else
+ mps = 128 << dev->pcie_mpss;
+ rc = pcie_set_mps(dev, mps);
+ if (rc) {
+ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps);
+ return;
+ }
+ }
+
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
--
2.7.4
When the driver is configured to enable BB credit recovery, the switch
displays the setting as Inactive; if the link bounces, it switches to
Active.
During link-up processing, the driver currently issues an MBX_READ_SPARAM
followed by an MBX_CONFIG_LINK. These mailbox commands are queued and
executed one at a time, with completions processed by the worker thread.
Since MBX_READ_SPARAM is executed BEFORE MBX_CONFIG_LINK, the BB_SC_N bit
is never set in the returned values. BB credit recovery status only gets
set after the driver requests the feature in CONFIG_LINK, which is done
after link-up. Thus READ_SPARAM must be ordered after CONFIG_LINK.
Fix by reordering so that READ_SPARAM is issued after CONFIG_LINK.
Also add an HBA_DEFER_FLOGI flag so that any FLOGI handling waits until
READ_SPARAM completes, ensuring the proper BB credit value is set in the
FLOGI payload.
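With the patch, the link-up flow on a non-FCoE port with BB credit
recovery enabled becomes (simplified):

    lpfc_mbx_process_link_up():
        set HBA_DEFER_FLOGI        /* bbcredit_support && cfg_enable_bbcr */
        issue MBX_CONFIG_LINK      /* requests BB credit recovery */
        issue MBX_READ_SPARAM      /* CSPs now come back with BB_SC_N set */
    lpfc_mbx_cmpl_local_config_link():
        HBA_DEFER_FLOGI set -> skip lpfc_initial_flogi()
    lpfc_mbx_cmpl_read_sparam():
        send the deferred FLOGI, clear HBA_DEFER_FLOGI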
Fixes: 6bfb16208298 ("scsi: lpfc: Fix configuration of BB credit recovery in service parameters")
Cc: <stable(a)vger.kernel.org> # v5.4+
Signed-off-by: Dick Kennedy <dick.kennedy(a)broadcom.com>
Signed-off-by: James Smart <jsmart2021(a)gmail.com>
---
drivers/scsi/lpfc/lpfc.h | 1 +
drivers/scsi/lpfc/lpfc_hbadisc.c | 59 ++++++++++++++++++++++++++--------------
2 files changed, 40 insertions(+), 20 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 04d73e2be373..3f2cb17c4574 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -749,6 +749,7 @@ struct lpfc_hba {
* capability
*/
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
+#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index dcc8999c6a68..6a2bdae0e52a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1163,13 +1163,16 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Start discovery by sending a FLOGI. port_state is identically
- * LPFC_FLOGI while waiting for FLOGI cmpl
+ * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
+ * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
*/
- if (vport->port_state != LPFC_FLOGI)
- lpfc_initial_flogi(vport);
- else if (vport->fc_flag & FC_PT2PT)
- lpfc_disc_start(vport);
-
+ if (vport->port_state != LPFC_FLOGI) {
+ if (!(phba->hba_flag & HBA_DEFER_FLOGI))
+ lpfc_initial_flogi(vport);
+ } else {
+ if (vport->fc_flag & FC_PT2PT)
+ lpfc_disc_start(vport);
+ }
return;
out:
@@ -3094,6 +3097,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Check if sending the FLOGI is being deferred until after we get
+ * up-to-date CSPs from MBX_READ_SPARAM.
+ */
+ if (phba->hba_flag & HBA_DEFER_FLOGI) {
+ lpfc_initial_flogi(vport);
+ phba->hba_flag &= ~HBA_DEFER_FLOGI;
+ }
return;
out:
@@ -3224,6 +3235,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
lpfc_linkup(phba);
+ sparam_mbox = NULL;
+
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!cfglink_mbox)
+ goto out;
+ vport->port_state = LPFC_LOCAL_CFG_LINK;
+ lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->vport = vport;
+ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
+
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
goto out;
@@ -3244,20 +3272,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!cfglink_mbox)
- goto out;
- vport->port_state = LPFC_LOCAL_CFG_LINK;
- lpfc_config_link(phba, cfglink_mbox);
- cfglink_mbox->vport = vport;
- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
- goto out;
- }
- } else {
+ if (phba->hba_flag & HBA_FCOE_MODE) {
vport->port_state = LPFC_VPORT_UNKNOWN;
/*
* Add the driver's default FCF record at FCF index 0 now. This
@@ -3314,6 +3329,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
+ } else {
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+ !(phba->link_flag & LS_LOOPBACK_MODE))
+ phba->hba_flag |= HBA_DEFER_FLOGI;
}
/* Prepare for LINK up registrations */
--
2.13.7
boot_aggregate is the first entry of IMA measurement list. Its purpose is
to link pre-boot measurements to IMA measurements. As IMA was designed to
work with a TPM 1.2, the SHA1 PCR bank was always selected even if a
TPM 2.0 with support for stronger hash algorithms is available.
This patch first tries to find a PCR bank with the IMA default hash
algorithm. If it does not find it, it selects the SHA256 PCR bank for
TPM 2.0 and SHA1 for TPM 1.2. Ultimately, it selects SHA1 also for TPM 2.0
if the SHA256 PCR bank is not found.
If none of the PCR banks above can be found, boot_aggregate file digest is
filled with zeros, as for TPM bypass, making it impossible to perform a
remote attestation of the system.
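For illustration, the fallback order can be expressed as a standalone
sketch (hypothetical select_bank() helper; the real loop in
ima_calc_boot_aggregate() walks ima_tpm_chip->allocated_banks[]):

    #include <stdio.h>

    enum hash_algo { HASH_ALGO_SHA1, HASH_ALGO_SHA256, HASH_ALGO_SHA512 };

    /* Exact match on the IMA default algorithm wins; otherwise prefer
     * SHA256, then SHA1.  Returns -1 if no usable bank exists (the
     * TPM-bypass case, where boot_aggregate is zeroed).
     */
    static int select_bank(const enum hash_algo *banks, int nr,
                           enum hash_algo ima_algo)
    {
        int i, bank_idx = -1;

        for (i = 0; i < nr; i++) {
            if (banks[i] == ima_algo)
                return i;
            if (banks[i] == HASH_ALGO_SHA256)
                bank_idx = i;
            if (bank_idx == -1 && banks[i] == HASH_ALGO_SHA1)
                bank_idx = i;
        }
        return bank_idx;
    }

    int main(void)
    {
        enum hash_algo banks[] = { HASH_ALGO_SHA1, HASH_ALGO_SHA256 };

        /* IMA default SHA512 has no bank -> falls back to SHA256 (index 1) */
        printf("selected bank: %d\n",
               select_bank(banks, 2, HASH_ALGO_SHA512));
        return 0;
    }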
Changelog
v3:
- Remove option to select the first PCR bank and select SHA1 as fallback
choice also for TPM 2.0 (suggested by Mimi)
v1:
- add Mimi's comments
- if there is no PCR bank for the IMA default hash algorithm use SHA256
(suggested by James Bottomley)
Cc: stable(a)vger.kernel.org # 5.1.x
Fixes: 879b589210a9 ("tpm: retrieve digest size of unknown algorithms with PCR read")
Reported-by: Jerry Snitselaar <jsnitsel(a)redhat.com>
Suggested-by: James Bottomley <James.Bottomley(a)HansenPartnership.com>
Signed-off-by: Roberto Sassu <roberto.sassu(a)huawei.com>
---
security/integrity/ima/ima_crypto.c | 47 +++++++++++++++++++++++++----
security/integrity/ima/ima_init.c | 22 +++++++++++---
2 files changed, 58 insertions(+), 11 deletions(-)
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 423c84f95a14..8e445a671225 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -655,18 +655,29 @@ static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
}
/*
- * Calculate the boot aggregate hash
+ * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
+ * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
+ * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
+ * allowing firmware to configure and enable different banks.
+ *
+ * Knowing which TPM bank is read to calculate the boot_aggregate digest
+ * needs to be conveyed to a verifier. For this reason, use the same
+ * hash algorithm for reading the TPM PCRs as for calculating the boot
+ * aggregate digest as stored in the measurement list.
*/
-static int __init ima_calc_boot_aggregate_tfm(char *digest,
+static int __init ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
struct crypto_shash *tfm)
{
- struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} };
+ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
int rc;
u32 i;
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
+ pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
+ d.alg_id);
+
rc = crypto_shash_init(shash);
if (rc != 0)
return rc;
@@ -675,7 +686,8 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, &d);
/* now accumulate with current aggregate */
- rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
}
if (!rc)
crypto_shash_final(shash, digest);
@@ -685,14 +697,37 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
struct crypto_shash *tfm;
- int rc;
+ u16 crypto_id, alg_id;
+ int rc, i, bank_idx = -1;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (crypto_id == hash->algo) {
+ bank_idx = i;
+ break;
+ }
+
+ if (crypto_id == HASH_ALGO_SHA256)
+ bank_idx = i;
+
+ if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
+ bank_idx = i;
+ }
+
+ if (bank_idx == -1) {
+ pr_err("No suitable TPM algorithm for boot aggregate\n");
+ return 0;
+ }
+
+ hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
tfm = ima_alloc_tfm(hash->algo);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
hash->length = crypto_shash_digestsize(tfm);
- rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+ alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
ima_free_tfm(tfm);
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 567468188a61..fc1e1002b48d 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -25,7 +25,7 @@ struct tpm_chip *ima_tpm_chip;
/* Add the boot aggregate to the IMA measurement list and extend
* the PCR register.
*
- * Calculate the boot aggregate, a SHA1 over tpm registers 0-7,
+ * Calculate the boot aggregate, a hash over tpm registers 0-7,
* assuming a TPM chip exists, and zeroes if the TPM chip does not
* exist. Add the boot aggregate measurement to the measurement
* list and extend the PCR register.
@@ -49,15 +49,27 @@ static int __init ima_add_boot_aggregate(void)
int violation = 0;
struct {
struct ima_digest_data hdr;
- char digest[TPM_DIGEST_SIZE];
+ char digest[TPM_MAX_DIGEST_SIZE];
} hash;
memset(iint, 0, sizeof(*iint));
memset(&hash, 0, sizeof(hash));
iint->ima_hash = &hash.hdr;
- iint->ima_hash->algo = HASH_ALGO_SHA1;
- iint->ima_hash->length = SHA1_DIGEST_SIZE;
-
+ iint->ima_hash->algo = ima_hash_algo;
+ iint->ima_hash->length = hash_digest_size[ima_hash_algo];
+
+ /*
+ * With TPM 2.0 hash agility, TPM chips could support multiple TPM
+ * PCR banks, allowing firmware to configure and enable different
+ * banks. The SHA1 bank is not necessarily enabled.
+ *
+ * Use the same hash algorithm for reading the TPM PCRs as for
+ * calculating the boot aggregate digest. Preference is given to
+ * the configured IMA default hash algorithm. Otherwise, use the
+ * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2.
+ * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank
+ * is not found.
+ */
if (ima_tpm_chip) {
result = ima_calc_boot_aggregate(&hash.hdr);
if (result < 0) {
--
2.17.1
The -modesetting ddx has a totally broken idea of how atomic works:
- doesn't disable old connectors, assuming they get auto-disable like
with the legacy setcrtc
- assumes ASYNC_FLIP is wired through for the atomic ioctl
- not a single call to TEST_ONLY
IOW, the implementation is a 1:1 translation of legacy ioctls to
atomic, which is a) broken b) pointless.
We already have bugs in both i915 and amdgpu-DC where this prevents us
from enabling neat features.
If anyone ever cares about atomic in X we can easily add a new atomic
level (req->value == 2) for X to get back the shiny toys.
Since these broken versions of -modesetting have been shipping,
there's really no other way to get out of this bind.
References: https://gitlab.freedesktop.org/xorg/xserver/issues/629
References: https://gitlab.freedesktop.org/xorg/xserver/merge_requests/180
Cc: Maarten Lankhorst <maarten.lankhorst(a)linux.intel.com>
Cc: Michel Dänzer <michel(a)daenzer.net>
Cc: Alex Deucher <alexdeucher(a)gmail.com>
Cc: Adam Jackson <ajax(a)redhat.com>
Cc: Sean Paul <sean(a)poorly.run>
Cc: David Airlie <airlied(a)linux.ie>
Cc: stable(a)vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter(a)intel.com>
---
drivers/gpu/drm/drm_ioctl.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 2c120c58f72d..1cb7b4c3c87c 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -334,6 +334,9 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->universal_planes = req->value;
break;
case DRM_CLIENT_CAP_ATOMIC:
+ /* The modesetting DDX has a totally broken idea of atomic. */
+ if (strstr(current->comm, "X"))
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EOPNOTSUPP;
if (req->value > 1)
--
2.23.0
Avi Kivity reports that on fuse filesystems running in a user namespace,
asynchronous fsync fails with EOVERFLOW.
The reason is that f_op->fsync() is called with the creds of the kthread
performing aio work instead of the creds of the process originally
submitting IOCB_CMD_FSYNC.
Fuse sends the creds of the caller in the request header and it needs to
translate the uid and gid into the server's user namespace. Since the
kthread is running in init_user_ns, the translation will fail and the
operation returns an error.
It can be argued that fsync doesn't actually need any creds, but just
zeroing out those fields in the header (as with requests that currently
don't take creds) is a backward compatibility risk.
Instead of working around this issue in fuse, solve the core of the problem
by calling the filesystem with the proper creds.
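The fix applies the kernel's standard creds-handoff pattern, condensed
here from the hunks below:

    /* submission, in the caller's context (aio_fsync()): */
    req->creds = prepare_creds();           /* snapshot submitter's creds */
    if (!req->creds)
            return -ENOMEM;

    /* execution, on the aio worker (aio_fsync_work()): */
    old_cred = override_creds(req->creds);  /* act as the submitter */
    vfs_fsync(req->file, req->datasync);
    revert_creds(old_cred);                 /* restore kthread creds */
    put_cred(req->creds);                   /* drop the snapshot */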
Reported-by: Avi Kivity <avi(a)scylladb.com>
Tested-by: Giuseppe Scrivano <gscrivan(a)redhat.com>
Fixes: c9582eb0ff7d ("fuse: Fail all requests with invalid uids or gids")
Cc: stable(a)vger.kernel.org # 4.18+
Signed-off-by: Miklos Szeredi <mszeredi(a)redhat.com>
---
fs/aio.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/fs/aio.c b/fs/aio.c
index 0d9a559d488c..37828773e2fe 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -176,6 +176,7 @@ struct fsync_iocb {
struct file *file;
struct work_struct work;
bool datasync;
+ struct cred *creds;
};
struct poll_iocb {
@@ -1589,8 +1590,11 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
static void aio_fsync_work(struct work_struct *work)
{
struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
+ const struct cred *old_cred = override_creds(iocb->fsync.creds);
iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+ revert_creds(old_cred);
+ put_cred(iocb->fsync.creds);
iocb_put(iocb);
}
@@ -1604,6 +1608,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
if (unlikely(!req->file->f_op->fsync))
return -EINVAL;
+ req->creds = prepare_creds();
+ if (!req->creds)
+ return -ENOMEM;
+
req->datasync = datasync;
INIT_WORK(&req->work, aio_fsync_work);
schedule_work(&req->work);
--
2.21.0
Commit a408e4a86b36 ("ima: open a new file instance if no read
permissions") tries to create a new file descriptor to calculate a file
digest if the file has not been opened with the O_RDONLY flag. However,
if a new file descriptor cannot be obtained, it sets the FMODE_READ flag
in file->f_flags instead of file->f_mode.
This patch fixes this issue by replacing f_flags with f_mode as it was
before that commit.
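The distinction matters because the two fields use different bit
namespaces (values from include/uapi/asm-generic/fcntl.h and
include/linux/fs.h):

    f->f_flags: open(2) O_* flags;  O_WRONLY   = 00000001 (octal)
    f->f_mode:  FMODE_* bits;       FMODE_READ = 0x1

So f->f_flags |= FMODE_READ in effect set O_WRONLY on the file, and the
cleanup path's f->f_flags &= ~FMODE_READ cleared that unrelated
access-mode bit.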
Cc: stable(a)vger.kernel.org # 4.20.x
Fixes: a408e4a86b36 ("ima: open a new file instance if no read permissions")
Signed-off-by: Roberto Sassu <roberto.sassu(a)huawei.com>
---
security/integrity/ima/ima_crypto.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 423c84f95a14..8ab17aa867dd 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -436,7 +436,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
*/
pr_info_ratelimited("Unable to reopen file for reading.\n");
f = file;
- f->f_flags |= FMODE_READ;
+ f->f_mode |= FMODE_READ;
modified_flags = true;
} else {
new_file_instance = true;
@@ -456,7 +456,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
if (new_file_instance)
fput(f);
else if (modified_flags)
- f->f_flags &= ~FMODE_READ;
+ f->f_mode &= ~FMODE_READ;
return rc;
}
--
2.17.1
The command ring and cursor ring use different notify port addresses,
defined as QXL_IO_NOTIFY_CMD and QXL_IO_NOTIFY_CURSOR respectively.
However, in qxl_device_init() we use QXL_IO_NOTIFY_CMD to create both the
command ring and the cursor ring. This doesn't cause any problems now,
because QEMU's behavior on QXL_IO_NOTIFY_CMD and QXL_IO_NOTIFY_CURSOR is
the same. However, QEMU's behavior may change in the future, so let's fix
it.
P.S.: In the X.org QXL driver, the notify port address of the cursor ring
is correct.
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Huacai Chen <chenhc(a)lemote.com>
---
drivers/gpu/drm/qxl/qxl_kms.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bfc1631..9bdbe0d 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -218,7 +218,7 @@ int qxl_device_init(struct qxl_device *qdev,
&(qdev->ram_header->cursor_ring_hdr),
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
- qdev->io_base + QXL_IO_NOTIFY_CMD,
+ qdev->io_base + QXL_IO_NOTIFY_CURSOR,
false,
&qdev->cursor_event);
--
2.7.0