From: Corey Minyard <cminyard@mvista.com>
[ Upstream commit 6b8526d3abc02c08a2f888e8c20b7ac9e5776dfe ]
In error cases a NULL pointer can be passed to memcpy(). The length will always be zero in that case, so it doesn't really matter, but check for NULL anyway to be more precise and to avoid static analysis errors.
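For context: passing a NULL source to memcpy() is undefined behavior in C even when the length is zero, which is what static analysis tools flag. A minimal standalone sketch of the guard pattern (illustrative only, not the driver code; names are made up):

  #include <stdio.h>
  #include <string.h>

  /* Copy up to cap bytes, tolerating src == NULL in error paths. */
  static size_t safe_copy(char *dst, const char *src, size_t len, size_t cap)
  {
          if (!src)
                  return 0;       /* treat a NULL source as "no data" */
          if (len > cap)
                  len = cap;      /* never overrun the destination */
          memcpy(dst, src, len);
          return len;
  }

  int main(void)
  {
          char buf[16];

          /* memcpy(buf, NULL, 0) would be UB; the guard avoids it entirely. */
          printf("copied %zu bytes\n", safe_copy(buf, NULL, 0, sizeof(buf)));
          printf("copied %zu bytes\n", safe_copy(buf, "hello", 5, sizeof(buf)));
          return 0;
  }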
Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/char/ipmi/ipmi_ssif.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 22c6a2e612360..8ac390c2b5147 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
 	msg = ssif_info->curr_msg;
 	if (msg) {
+		if (data) {
+			if (len > IPMI_MAX_MSG_LENGTH)
+				len = IPMI_MAX_MSG_LENGTH;
+			memcpy(msg->rsp, data, len);
+		} else {
+			len = 0;
+		}
 		msg->rsp_size = len;
-		if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
-			msg->rsp_size = IPMI_MAX_MSG_LENGTH;
-		memcpy(msg->rsp, data, msg->rsp_size);
 		ssif_info->curr_msg = NULL;
 	}
From: Sean Paul <seanpaul@chromium.org>
[ Upstream commit db735fc4036bbe1fbe606819b5f0ff26cc76cdff ]
Turning on CONFIG_DMA_API_DEBUG_SG results in the following error:
[   12.078665] msm ae00000.mdss: DMA-API: mapping sg segment longer than device claims to support [len=3526656] [max=65536]
[   12.089870] WARNING: CPU: 6 PID: 334 at /mnt/host/source/src/third_party/kernel/v4.19/kernel/dma/debug.c:1301 debug_dma_map_sg+0x1dc/0x318
[   12.102655] Modules linked in: joydev
[   12.106442] CPU: 6 PID: 334 Comm: frecon Not tainted 4.19.0 #2
[   12.112450] Hardware name: Google Cheza (rev3+) (DT)
[   12.117566] pstate: 60400009 (nZCv daif +PAN -UAO)
[   12.122506] pc : debug_dma_map_sg+0x1dc/0x318
[   12.126995] lr : debug_dma_map_sg+0x1dc/0x318
[   12.131487] sp : ffffff800cc3ba80
[   12.134913] x29: ffffff800cc3ba80 x28: 0000000000000000
[   12.140395] x27: 0000000000000004 x26: 0000000000000004
[   12.145868] x25: ffffff8008e55b18 x24: 0000000000000000
[   12.151337] x23: 00000000ffffffff x22: ffffff800921c000
[   12.156809] x21: ffffffc0fa75b080 x20: ffffffc0f7195090
[   12.162280] x19: ffffffc0f1c53280 x18: 0000000000000000
[   12.167749] x17: 0000000000000000 x16: 0000000000000000
[   12.173218] x15: 0000000000000000 x14: 0720072007200720
[   12.178689] x13: 0720072007200720 x12: 0720072007200720
[   12.184161] x11: 0720072007200720 x10: 0720072007200720
[   12.189641] x9 : ffffffc0f1fc6b60 x8 : 0000000000000000
[   12.195110] x7 : ffffff8008132ce0 x6 : 0000000000000000
[   12.200585] x5 : 0000000000000000 x4 : ffffff8008134734
[   12.206058] x3 : ffffff800cc3b830 x2 : ffffffc0f1fc6240
[   12.211532] x1 : 25045a74f48a7400 x0 : 25045a74f48a7400
[   12.217006] Call trace:
[   12.219535]  debug_dma_map_sg+0x1dc/0x318
[   12.223671]  get_pages+0x19c/0x20c
[   12.227177]  msm_gem_fault+0x64/0xfc
[   12.230874]  __do_fault+0x3c/0x140
[   12.234383]  __handle_mm_fault+0x70c/0xdb8
[   12.238603]  handle_mm_fault+0xac/0xc4
[   12.242473]  do_page_fault+0x1bc/0x3d4
[   12.246342]  do_translation_fault+0x54/0x88
[   12.250652]  do_mem_abort+0x60/0xf0
[   12.254250]  el0_da+0x20/0x24
[   12.257317] irq event stamp: 67260
[   12.260828] hardirqs last enabled at (67259): [<ffffff8008132d0c>] console_unlock+0x214/0x608
[   12.269693] hardirqs last disabled at (67260): [<ffffff8008080e0c>] do_debug_exception+0x5c/0x178
[   12.278820] softirqs last enabled at (67256): [<ffffff8008081664>] __do_softirq+0x4d4/0x520
[   12.287510] softirqs last disabled at (67249): [<ffffff80080be574>] irq_exit+0xa8/0x100
[   12.295742] ---[ end trace e63cfc40c313ffab ]---
The root of the problem is that the default segment size for an sgt is (UINT_MAX & PAGE_MASK), while the default segment size for device DMA is 64K. As such, if you compare the two, you would deduce that the sg segment will overflow the device's capacity. In reality, the hardware can accommodate the larger sg segments; the device just isn't initializing its max segment size properly. This patch initializes the max segment size for the mdss device, which gets rid of that pesky warning.
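As a rough, self-contained illustration of that comparison (values taken from the splat above; the real check lives in the kernel's DMA debug code and is simplified here):

  #include <stdio.h>
  #include <limits.h>

  int main(void)
  {
          unsigned long sg_default = UINT_MAX & ~(4096UL - 1); /* scatterlist default cap */
          unsigned long dev_default = 65536;   /* fallback when dev->dma_parms is unset */
          unsigned long seg_len = 3526656;     /* the segment from the warning above */

          /* DMA-API debug flags any segment longer than the device's claimed max: */
          if (seg_len > dev_default)
                  printf("DMA-API: mapping sg segment longer than device claims to support [len=%lu] [max=%lu]\n",
                         seg_len, dev_default);
          printf("scatterlist's own cap is far larger: %lu\n", sg_default);
          return 0;
  }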
Reported-by: Stephen Boyd <swboyd@chromium.org>
Tested-by: Stephen Boyd <swboyd@chromium.org>
Tested-by: Sai Prakash Ranjan <saiprakash.ranjan@codeaurora.org>
Reviewed-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20200121111813.REPOST.1.I92c66...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/msm/msm_drv.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c84f0a8b3f2ce..b73fbb65e14b2 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	if (ret)
 		goto err_msm_uninit;
 
+	if (!dev->dma_parms) {
+		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					      GFP_KERNEL);
+		if (!dev->dma_parms)
+			return -ENOMEM;
+	}
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
 	msm_gem_shrinker_init(ddev);
 
 	switch (get_mdp_ver(pdev)) {
From: Scott Wood <swood@redhat.com>
[ Upstream commit 488603b815a7514c7009e6fc339d74ed4a30f343 ]
This will be used in the next patch to get a loadavg update from nohz cpus. The delta check is skipped because idle_sched_class doesn't update se.exec_start.
Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/1578736419-14628-2-git-send-email-swood@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/sched/core.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dfaefb175ba05..5cbf64db4e0a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3667,22 +3667,24 @@ static void sched_tick_remote(struct work_struct *work)
 	 * statistics and checks timeslices in a time-independent way, regardless
 	 * of when exactly it is running.
 	 */
-	if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+	if (!tick_nohz_tick_stopped_cpu(cpu))
 		goto out_requeue;
 
 	rq_lock_irq(rq, &rf);
 	curr = rq->curr;
-	if (is_idle_task(curr) || cpu_is_offline(cpu))
+	if (cpu_is_offline(cpu))
 		goto out_unlock;
 
 	update_rq_clock(rq);
-	delta = rq_clock_task(rq) - curr->se.exec_start;
 
-	/*
-	 * Make sure the next tick runs within a reasonable
-	 * amount of time.
-	 */
-	WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+	if (!is_idle_task(curr)) {
+		/*
+		 * Make sure the next tick runs within a reasonable
+		 * amount of time.
+		 */
+		delta = rq_clock_task(rq) - curr->se.exec_start;
+		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+	}
 	curr->sched_class->task_tick(rq, curr, 0);
 
 out_unlock:
From: "Peter Zijlstra (Intel)" peterz@infradead.org
[ Upstream commit ebc0f83c78a2d26384401ecf2d2fa48063c0ee27 ]
The way loadavg is tracked during nohz only pays attention to the load upon entering nohz. This can be particularly noticeable if full nohz is entered while non-idle, and then the cpu goes idle and stays that way for a long time.
Use the remote tick to ensure that full nohz cpus report their deltas within a reasonable time.
[ swood: Added changelog and removed recheck of stopped tick. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/1578736419-14628-3-git-send-email-swood@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/linux/sched/nohz.h |  2 ++
 kernel/sched/core.c        |  4 +++-
 kernel/sched/loadavg.c     | 33 +++++++++++++++++++++++----------
 3 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 1abe91ff6e4a2..6d67e9a5af6bb 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { }
 
 #ifdef CONFIG_NO_HZ_COMMON
 void calc_load_nohz_start(void);
+void calc_load_nohz_remote(struct rq *rq);
 void calc_load_nohz_stop(void);
 #else
 static inline void calc_load_nohz_start(void) { }
+static inline void calc_load_nohz_remote(struct rq *rq) { }
 static inline void calc_load_nohz_stop(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5cbf64db4e0a3..a6eeca7021621 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3675,6 +3675,7 @@ static void sched_tick_remote(struct work_struct *work)
 	if (cpu_is_offline(cpu))
 		goto out_unlock;
 
+	curr = rq->curr;
 	update_rq_clock(rq);
 
 	if (!is_idle_task(curr)) {
@@ -3687,10 +3688,11 @@ static void sched_tick_remote(struct work_struct *work)
 	}
 	curr->sched_class->task_tick(rq, curr, 0);
 
+	calc_load_nohz_remote(rq);
 out_unlock:
 	rq_unlock_irq(rq, &rf);
-
 out_requeue:
+
 	/*
 	 * Run the remote tick once per second (1Hz). This arbitrary
 	 * frequency is large enough to avoid overload but short enough
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 28a516575c181..de22da666ac73 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -231,16 +231,11 @@ static inline int calc_load_read_idx(void)
 	return calc_load_idx & 1;
 }
 
-void calc_load_nohz_start(void)
+static void calc_load_nohz_fold(struct rq *rq)
 {
-	struct rq *this_rq = this_rq();
 	long delta;
 
-	/*
-	 * We're going into NO_HZ mode, if there's any pending delta, fold it
-	 * into the pending NO_HZ delta.
-	 */
-	delta = calc_load_fold_active(this_rq, 0);
+	delta = calc_load_fold_active(rq, 0);
 	if (delta) {
 		int idx = calc_load_write_idx();
 
@@ -248,6 +243,24 @@ void calc_load_nohz_start(void)
 	}
 }
 
+void calc_load_nohz_start(void)
+{
+	/*
+	 * We're going into NO_HZ mode, if there's any pending delta, fold it
+	 * into the pending NO_HZ delta.
+	 */
+	calc_load_nohz_fold(this_rq());
+}
+
+/*
+ * Keep track of the load for NOHZ_FULL, must be called between
+ * calc_load_nohz_{start,stop}().
+ */
+void calc_load_nohz_remote(struct rq *rq)
+{
+	calc_load_nohz_fold(rq);
+}
+
 void calc_load_nohz_stop(void)
 {
 	struct rq *this_rq = this_rq();
@@ -268,7 +281,7 @@ void calc_load_nohz_stop(void)
 	this_rq->calc_load_update += LOAD_FREQ;
 }
 
-static long calc_load_nohz_fold(void)
+static long calc_load_nohz_read(void)
 {
 	int idx = calc_load_read_idx();
 	long delta = 0;
@@ -323,7 +336,7 @@ static void calc_global_nohz(void)
 }
 #else /* !CONFIG_NO_HZ_COMMON */
 
-static inline long calc_load_nohz_fold(void) { return 0; }
+static inline long calc_load_nohz_read(void) { return 0; }
 static inline void calc_global_nohz(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
@@ -346,7 +359,7 @@ void calc_global_load(unsigned long ticks)
 	/*
 	 * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
 	 */
-	delta = calc_load_nohz_fold();
+	delta = calc_load_nohz_read();
 	if (delta)
 		atomic_long_add(delta, &calc_load_tasks);
From: Vincent Guittot <vincent.guittot@linaro.org>
[ Upstream commit 2a4b03ffc69f2dedc6388e9a6438b5f4c133a40d ]
When a running task is moved onto a throttled task group and there is no other task enqueued on the CPU, the task can keep running at 100% CPU regardless of the bandwidth allocated to the group, even though its cfs_rq is throttled. Furthermore, the group entity of the cfs_rq and its parents are not enqueued but only set as curr on their respective cfs_rqs.
We have the following sequence:
sched_move_task
 - dequeue_task: dequeue task and group_entities.
 - put_prev_task: put task and group entities.
 - sched_change_group: move task to new group.
 - enqueue_task: enqueue only task but not group entities because cfs_rq is throttled.
 - set_next_task: set task and group_entities as current sched_entity of their cfs_rq.
Another impact is that the runnable_load_avg of the root cfs_rq stays null because the group entities are not enqueued. This situation persists until an "external" event triggers a reschedule. Let's trigger one immediately instead.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Ben Segall <bsegall@google.com>
Link: https://lkml.kernel.org/r/1579011236-31256-1-git-send-email-vincent.guittot@...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/sched/core.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a6eeca7021621..be146a1220e33 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7057,8 +7057,15 @@ void sched_move_task(struct task_struct *tsk)
 
 	if (queued)
 		enqueue_task(rq, tsk, queue_flags);
-	if (running)
+	if (running) {
 		set_next_task(rq, tsk);
+		/*
+		 * After changing group, the running task may have joined a
+		 * throttled one but it's still the running task. Trigger a
+		 * resched to make sure that task can still run.
+		 */
+		resched_curr(rq);
+	}
 
 	task_rq_unlock(rq, tsk, &rf);
 }
From: Jeff Moyer <jmoyer@redhat.com>
[ Upstream commit 96222d53842dfe54869ec4e1b9d4856daf9105a2 ]
fstests generic/471 reports a failure when run with MOUNT_OPTIONS="-o dax". The reason is that the initial pwrite to an empty file with the RWF_NOWAIT flag set does not return -EAGAIN. It turns out that dax_iomap_rw doesn't pass that flag through to iomap_apply.
With this patch applied, generic/471 passes for me.
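A sketch of the case generic/471 exercises, as a standalone userspace program (the mount point /mnt/dax is hypothetical): the first pwritev2() with RWF_NOWAIT on an empty file should fail with EAGAIN because the write would have to allocate blocks.

  #define _GNU_SOURCE
  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/uio.h>
  #include <unistd.h>

  int main(void)
  {
          /* hypothetical path on a filesystem mounted with -o dax */
          int fd = open("/mnt/dax/testfile", O_CREAT | O_TRUNC | O_WRONLY, 0644);
          char data[4096] = { 'x' };
          struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
          ssize_t n;

          if (fd < 0)
                  return 1;
          /* RWF_NOWAIT: fail with EAGAIN instead of blocking on allocation */
          n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
          if (n < 0 && errno == EAGAIN)
                  printf("EAGAIN, as expected for a write that needs allocation\n");
          else
                  printf("unexpected result: %zd (errno %d)\n", n, errno);
          close(fd);
          return 0;
  }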
Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/x49r1z86e1d.fsf@segfault.boston.devel.redhat.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/dax.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/fs/dax.c b/fs/dax.c
index 2cc43cd914eb8..cc56313c6b3b9 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1207,6 +1207,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		lockdep_assert_held(&inode->i_rwsem);
 	}
 
+	if (iocb->ki_flags & IOCB_NOWAIT)
+		flags |= IOMAP_NOWAIT;
+
 	while (iov_iter_count(iter)) {
 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
 				iter, dax_iomap_actor);
From: Johannes Berg <johannes.berg@intel.com>
[ Upstream commit a04564c99bb4a92f805a58e56b2d22cc4978f152 ]
We only use the parsing CRC for checking if a beacon changed, and elements with an ID > 63 cannot be represented in the filter. Thus, like we did before with WMM and Cisco vendor elements, just statically add these forgotten items to the CRC:
 - WLAN_EID_VHT_OPERATION
 - WLAN_EID_OPMODE_NOTIF
I guess that in most cases when VHT/HE operation change, the HT operation also changed, and so the change was picked up, but we did notice that pure operating mode notification changes were ignored.
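The "ID > 63" limit exists because the CRC filter is a 64-bit bitmap with one bit per element ID, so IDs such as WLAN_EID_VHT_OPERATION (192) and WLAN_EID_OPMODE_NOTIF (199) can never be selected through it. A standalone sketch of that constraint (variable names are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  #define WLAN_EID_HT_OPERATION   61      /* fits in a 64-bit filter */
  #define WLAN_EID_VHT_OPERATION  192     /* cannot be represented */

  static int representable(unsigned int eid)
  {
          return eid < 64;        /* one bit per element ID in a u64 */
  }

  int main(void)
  {
          uint64_t filter = 0;

          if (representable(WLAN_EID_HT_OPERATION))
                  filter |= 1ULL << WLAN_EID_HT_OPERATION;
          printf("HT operation in filter: %d\n",
                 (int)((filter >> WLAN_EID_HT_OPERATION) & 1));
          printf("VHT operation representable: %d\n",
                 representable(WLAN_EID_VHT_OPERATION));
          return 0;
  }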
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/20200131111300.891737-22-luca@coelho.fi
[restrict to VHT for the mac80211 branch]
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/mac80211/util.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 32a7a53833c01..739e90555d8b9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1063,16 +1063,22 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 				elem_parse_failed = true;
 			break;
 		case WLAN_EID_VHT_OPERATION:
-			if (elen >= sizeof(struct ieee80211_vht_operation))
+			if (elen >= sizeof(struct ieee80211_vht_operation)) {
 				elems->vht_operation = (void *)pos;
-			else
-				elem_parse_failed = true;
+				if (calc_crc)
+					crc = crc32_be(crc, pos - 2, elen + 2);
+				break;
+			}
+			elem_parse_failed = true;
 			break;
 		case WLAN_EID_OPMODE_NOTIF:
-			if (elen > 0)
+			if (elen > 0) {
 				elems->opmode_notif = pos;
-			else
-				elem_parse_failed = true;
+				if (calc_crc)
+					crc = crc32_be(crc, pos - 2, elen + 2);
+				break;
+			}
+			elem_parse_failed = true;
 			break;
 		case WLAN_EID_MESH_ID:
 			elems->mesh_id = pos;
From: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
[ Upstream commit bfb7bac3a8f47100ebe7961bd14e924c96e21ca7 ]
When preparing ethtool drvinfo, check if wiphy driver is defined before dereferencing it. Driver may not exist, e.g. if wiphy is attached to a virtual platform device.
Signed-off-by: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
Link: https://lore.kernel.org/r/20200203105644.28875-1-sergey.matyukevich.os@quant...
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/wireless/ethtool.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index a9c0f368db5d2..24e18405cdb48 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -7,9 +7,13 @@
 void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct device *pdev = wiphy_dev(wdev->wiphy);
 
-	strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
-		sizeof(info->driver));
+	if (pdev->driver)
+		strlcpy(info->driver, pdev->driver->name,
+			sizeof(info->driver));
+	else
+		strlcpy(info->driver, "N/A", sizeof(info->driver));
 
 	strlcpy(info->version, init_utsname()->release, sizeof(info->version));
From: Harald Freudenberger <freude@linux.ibm.com>
[ Upstream commit fcd98d4002539f1e381916fc1b6648938c1eac76 ]
The internal statistics counters for the total number of requests processed per card and per queue used plain ints, so they wrap after a rather large number of processed crypto requests. This patch introduces 64-bit counters, which should last much longer but may still wrap eventually. The sysfs attributes request_count for card and queue, which previously used a plain integer format, now display the counter value with %llu.
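To put the wrap in perspective, a standalone bit of arithmetic (not driver code): at a sustained 50,000 requests per second a 32-bit counter wraps in roughly a day, while a 64-bit counter lasts for millions of years.

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t c32 = UINT32_MAX;
          uint64_t c64 = UINT32_MAX;

          c32++;  /* wraps to 0 */
          c64++;  /* keeps counting */
          printf("32-bit after ++: %u, 64-bit after ++: %llu\n",
                 c32, (unsigned long long)c64);

          /* time for a u32 to wrap at 50000 requests/s */
          printf("~%.1f hours\n", (double)UINT32_MAX / 50000.0 / 3600.0);
          return 0;
  }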
This is not a security relevant fix. The int overflow which happened is not in any way exploitable as a security breach.
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/s390/crypto/ap_bus.h     |  4 ++--
 drivers/s390/crypto/ap_card.c    |  8 ++++----
 drivers/s390/crypto/ap_queue.c   |  6 +++---
 drivers/s390/crypto/zcrypt_api.c | 16 +++++++++-------
 4 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index bb35ba4a8d243..4348fdff1c61e 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -162,7 +162,7 @@ struct ap_card {
 	unsigned int functions;		/* AP device function bitfield. */
 	int queue_depth;		/* AP queue depth.*/
 	int id;				/* AP card number. */
-	atomic_t total_request_count;	/* # requests ever for this AP device.*/
+	atomic64_t total_request_count;	/* # requests ever for this AP device.*/
 };
 
 #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
@@ -179,7 +179,7 @@ struct ap_queue {
 	enum ap_state state;		/* State of the AP device. */
 	int pendingq_count;		/* # requests on pendingq list. */
 	int requestq_count;		/* # requests on requestq list. */
-	int total_request_count;	/* # requests ever for this AP device.*/
+	u64 total_request_count;	/* # requests ever for this AP device.*/
 	int request_timeout;		/* Request timeout in jiffies. */
 	struct timer_list timeout;	/* Timer for request timeouts. */
 	struct list_head pendingq;	/* List of message sent to AP queue. */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 63b4cc6cd7e59..e85bfca1ed163 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
 				  char *buf)
 {
 	struct ap_card *ac = to_ap_card(dev);
-	unsigned int req_cnt;
+	u64 req_cnt;
 
 	req_cnt = 0;
 	spin_lock_bh(&ap_list_lock);
-	req_cnt = atomic_read(&ac->total_request_count);
+	req_cnt = atomic64_read(&ac->total_request_count);
 	spin_unlock_bh(&ap_list_lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
 }
 
 static ssize_t request_count_store(struct device *dev,
@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
 	for_each_ap_queue(aq, ac)
 		aq->total_request_count = 0;
 	spin_unlock_bh(&ap_list_lock);
-	atomic_set(&ac->total_request_count, 0);
+	atomic64_set(&ac->total_request_count, 0);
 
 	return count;
 }
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 37c3bdc3642dc..a317ab4849320 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
 				  char *buf)
 {
 	struct ap_queue *aq = to_ap_queue(dev);
-	unsigned int req_cnt;
+	u64 req_cnt;
 
 	spin_lock_bh(&aq->lock);
 	req_cnt = aq->total_request_count;
 	spin_unlock_bh(&aq->lock);
-	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
 }
 
 static ssize_t request_count_store(struct device *dev,
@@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
 	list_add_tail(&ap_msg->list, &aq->requestq);
 	aq->requestq_count++;
 	aq->total_request_count++;
-	atomic_inc(&aq->card->total_request_count);
+	atomic64_inc(&aq->card->total_request_count);
 	/* Send/receive as many request from the queue as possible. */
 	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 9157e728a362d..7fa0262e91af0 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -605,8 +605,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
 	weight += atomic_read(&zc->load);
 	pref_weight += atomic_read(&pref_zc->load);
 	if (weight == pref_weight)
-		return atomic_read(&zc->card->total_request_count) >
-			atomic_read(&pref_zc->card->total_request_count);
+		return atomic64_read(&zc->card->total_request_count) >
+			atomic64_read(&pref_zc->card->total_request_count);
 	return weight > pref_weight;
 }
 
@@ -1216,11 +1216,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
 	spin_unlock(&zcrypt_list_lock);
 }
 
-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
 {
 	struct zcrypt_card *zc;
 	struct zcrypt_queue *zq;
 	int card;
+	u64 cnt;
 
 	memset(reqcnt, 0, sizeof(int) * max_adapters);
 	spin_lock(&zcrypt_list_lock);
@@ -1232,8 +1233,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
 			    || card >= max_adapters)
 				continue;
 			spin_lock(&zq->queue->lock);
-			reqcnt[card] = zq->queue->total_request_count;
+			cnt = zq->queue->total_request_count;
 			spin_unlock(&zq->queue->lock);
+			reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
 		}
 	}
 	local_bh_enable();
@@ -1411,9 +1413,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		return 0;
 	}
 	case ZCRYPT_PERDEV_REQCNT: {
-		int *reqcnt;
+		u32 *reqcnt;
 
-		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
 		if (!reqcnt)
 			return -ENOMEM;
 		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
@@ -1470,7 +1472,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 	}
 	case Z90STAT_PERDEV_REQCNT: {
 		/* the old ioctl supports only 64 adapters */
-		int reqcnt[MAX_ZDEV_CARDIDS];
+		u32 reqcnt[MAX_ZDEV_CARDIDS];
 
 		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
 		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
From: Bjørn Mork <bjorn@mork.no>
[ Upstream commit 88bf54603f6f2c137dfee1abf6436ceac3528d2d ]
Commit f25e1392fdb5 removed the support for the pre-production variant of the Dell DW5821e to avoid probing another USB interface unnecessarily. However, the pre-production samples are found in the wild, and this lack of support is causing problems for users of such samples. It is therefore necessary to support both variants.
Matching on both interfaces 0 and 1 is not expected to cause any problem with either variant, as only the QMI function will be probed successfully on either. Interface 1 will be rejected based on the HID class for the production variant:
T:  Bus=01 Lev=03 Prnt=04 Port=00 Cnt=01 Dev#= 16 Spd=480 MxCh= 0
D:  Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs=  2
P:  Vendor=413c ProdID=81d7 Rev=03.18
S:  Manufacturer=DELL
S:  Product=DW5821e Snapdragon X20 LTE
S:  SerialNumber=0123456789ABCDEF
C:  #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA
I:  If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
I:  If#= 1 Alt= 0 #EPs= 1 Cls=03(HID  ) Sub=00 Prot=00 Driver=usbhid
I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
I:  If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
I:  If#= 5 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
And interface 0 will be rejected based on too few endpoints for the pre-production variant:
T:  Bus=01 Lev=02 Prnt=02 Port=03 Cnt=03 Dev#=  7 Spd=480 MxCh= 0
D:  Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs=  2
P:  Vendor=413c ProdID=81d7 Rev= 3.18
S:  Manufacturer=DELL
S:  Product=DW5821e Snapdragon X20 LTE
S:  SerialNumber=0123456789ABCDEF
C:  #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=500mA
I:  If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=
I:  If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
I:  If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
I:  If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
I:  If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option

Fixes: f25e1392fdb5 ("qmi_wwan: fix interface number for DW5821e production firmware")
Link: https://whrl.pl/Rf0vNk
Reported-by: Lars Melin <larsm17@gmail.com>
Cc: Aleksander Morgado <aleksander@aleksander.es>
Signed-off-by: Bjørn Mork <bjorn@mork.no>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/usb/qmi_wwan.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9485c8d1de8a3..839cef720cf64 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1363,6 +1363,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 0)},	/* Dell Wireless 5821e */
+	{QMI_FIXED_INTF(0x413c, 0x81d7, 1)},	/* Dell Wireless 5821e preproduction config */
 	{QMI_FIXED_INTF(0x413c, 0x81e0, 0)},	/* Dell Wireless 5821e with eSIM support*/
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
From: Bjørn Mork <bjorn@mork.no>
[ Upstream commit 00516d13d4cfa56ce39da144db2dbf08b09b9357 ]
We have been using the fact that the QMI and DIAG functions usually are the only ones with class/subclass/protocol being ff/ff/ff on Quectel modems. This has allowed us to match the QMI function without knowing the exact interface number, which can vary depending on firmware configuration.
The ability to silently reject the DIAG function, which is usually handled by the option driver, is important for this method to work. This is done based on the knowledge that it has exactly 2 bulk endpoints. QMI function control interfaces will have either 3 or 1 endpoint. This rule is universal so the quirk condition can be removed.
The fixed layouts known from the Gobi1k and Gobi2k modems have been gradually replaced by more dynamic layouts, and many vendors now use configurable layouts without changing device IDs. Renaming the class/subclass/protocol matching macro makes it more obvious that this is now not Quectel specific anymore.
Cc: Kristian Evensen <kristian.evensen@gmail.com>
Cc: Aleksander Morgado <aleksander@aleksander.es>
Signed-off-by: Bjørn Mork <bjorn@mork.no>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/usb/qmi_wwan.c | 42 ++++++++++++++------------------------
 1 file changed, 15 insertions(+), 27 deletions(-)

diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 839cef720cf64..3b7a3b8a5e067 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -61,7 +61,6 @@ enum qmi_wwan_flags {
 
 enum qmi_wwan_quirks {
 	QMI_WWAN_QUIRK_DTR = 1 << 0,	/* needs "set DTR" request */
-	QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1,	/* check num. endpoints */
 };
 
 struct qmimux_hdr {
@@ -916,16 +915,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
 	.data           = QMI_WWAN_QUIRK_DTR,
 };
 
-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
-	.description	= "WWAN/QMI device",
-	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
-	.bind		= qmi_wwan_bind,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.rx_fixup       = qmi_wwan_rx_fixup,
-	.data           = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
-};
-
 #define HUAWEI_VENDOR_ID	0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
@@ -946,14 +935,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
 #define QMI_GOBI_DEVICE(vend, prod) \
 	QMI_FIXED_INTF(vend, prod, 0)
 
-/* Quectel does not use fixed interface numbers on at least some of their
- * devices. We need to check the number of endpoints to ensure that we bind to
- * the correct interface.
+/* Many devices have QMI and DIAG functions which are distinguishable
+ * from other vendor specific functions by class, subclass and
+ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
+ * and is silently rejected when probed.
+ *
+ * This makes it possible to match dynamically numbered QMI functions
+ * as seen on e.g. many Quectel modems.
  */
-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+#define QMI_MATCH_FF_FF_FF(vend, prod) \
 	USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
 				      USB_SUBCLASS_VENDOR_SPEC, 0xff), \
-	.driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+	.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
 
 static const struct usb_device_id products[] = {
 	/* 1. CDC ECM like devices match on the control interface */
@@ -1059,10 +1052,10 @@ static const struct usb_device_id products[] = {
 					      USB_CLASS_VENDOR_SPEC, 1, 7),
 	 .driver_info = (unsigned long)&qmi_wwan_info,
 	},
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)},	/* Quectel EC25, EC20 R2.0 Mini PCIe */
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)},	/* Quectel EP06/EG06/EM06 */
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)},	/* Quectel RM500Q-GL */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)},	/* Quectel EC25, EC20 R2.0 Mini PCIe */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)},	/* Quectel EP06/EG06/EM06 */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)},	/* Quectel RM500Q-GL */
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -1455,7 +1448,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 {
 	struct usb_device_id *id = (struct usb_device_id *)prod;
 	struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
-	const struct driver_info *info;
 
 	/* Workaround to enable dynamic IDs. This disables usbnet
 	 * blacklisting functionality. Which, if required, can be
@@ -1491,12 +1483,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 	 * different. Ignore the current interface if the number of endpoints
 	 * equals the number for the diag interface (two).
 	 */
-	info = (void *)id->driver_info;
-
-	if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
-		if (desc->bNumEndpoints == 2)
-			return -ENODEV;
-	}
+	if (desc->bNumEndpoints == 2)
+		return -ENODEV;
 
 	return usbnet_probe(intf, id);
 }
From: Trond Myklebust <trondmy@gmail.com>
[ Upstream commit cf5b4059ba7197d6cef9c0e024979d178ed8c8ec ]
We want to make sure that we revalidate the dentry if and only if we've done an OPEN by filename. In order to avoid races with remote changes to the directory on the server, we want to save the verifier before calling OPEN. The exception is if the server returned a delegation with our OPEN, as we then know that the filename can't have changed on the server.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Reviewed-by: Benjamin Coddington <bcodding@gmail.com>
Tested-by: Benjamin Coddington <bcodding@gmail.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/nfs/nfs4file.c |  1 -
 fs/nfs/nfs4proc.c | 18 ++++++++++++++++--
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 339663d04bf83..54f1c1f626fc5 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -86,7 +86,6 @@ nfs4_file_open(struct inode *inode, struct file *filp)
 	if (inode != d_inode(dentry))
 		goto out_drop;
 
-	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
 	nfs_file_set_open_context(filp, ctx);
 	nfs_fscache_open_file(inode, filp);
 	err = 0;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f808fb34b1101..6b29703d2fe1e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2962,10 +2962,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
 	struct dentry *dentry;
 	struct nfs4_state *state;
 	fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
+	struct inode *dir = d_inode(opendata->dir);
+	unsigned long dir_verifier;
 	unsigned int seq;
 	int ret;
 
 	seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+	dir_verifier = nfs_save_change_attribute(dir);
 
 	ret = _nfs4_proc_open(opendata, ctx);
 	if (ret != 0)
@@ -2993,8 +2996,19 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
 			dput(ctx->dentry);
 			ctx->dentry = dentry = alias;
 		}
-		nfs_set_verifier(dentry,
-				nfs_save_change_attribute(d_inode(opendata->dir)));
+	}
+
+	switch(opendata->o_arg.claim) {
+	default:
+		break;
+	case NFS4_OPEN_CLAIM_NULL:
+	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+		if (!opendata->rpc_done)
+			break;
+		if (opendata->o_res.delegation_type != 0)
+			dir_verifier = nfs_save_change_attribute(dir);
+		nfs_set_verifier(dentry, dir_verifier);
 	}
 
 	/* Parse layoutget results before we check for access */
From: John Garry <john.garry@huawei.com>
[ Upstream commit 0ca2c0319a7bce0e152b51b866979d62dc261e48 ]
Even though an SMMUv3 PMCG implementation may use an MSI as its interrupt source, the kernel would still complain that it does not find the wired (GSIV) interrupt in this case:
root@(none)$ dmesg | grep arm-smmu-v3-pmcg | grep "not found"
[   59.237219] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.8.auto: IRQ index 0 not found
[   59.322841] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.9.auto: IRQ index 0 not found
[   59.422155] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.10.auto: IRQ index 0 not found
[   59.539014] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.11.auto: IRQ index 0 not found
[   59.640329] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.12.auto: IRQ index 0 not found
[   59.743112] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.13.auto: IRQ index 0 not found
[   59.880577] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.14.auto: IRQ index 0 not found
[   60.017528] arm-smmu-v3-pmcg arm-smmu-v3-pmcg.15.auto: IRQ index 0 not found
Use platform_get_irq_optional() to silence the warning.
If neither interrupt source is found, then the driver will still warn that IRQ setup errored and the probe will fail.
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/perf/arm_smmuv3_pmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 191f410cf35cd..2f8787276d9b8 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -772,7 +772,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 		smmu_pmu->reloc_base = smmu_pmu->reg_base;
 	}
 
-	irq = platform_get_irq(pdev, 0);
+	irq = platform_get_irq_optional(pdev, 0);
 	if (irq > 0)
 		smmu_pmu->irq = irq;
From: Peter Zijlstra <peterz@infradead.org>
[ Upstream commit be993e44badc448add6a18d6f12b20615692c4c3 ]
The __patch_text() function already applies __opcode_to_mem_*(), so when __opcode_to_mem_*() is not the identity (BE*), it is applied twice, wrecking the instruction.
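A standalone demonstration of why the double conversion is destructive: where the conversion is not the identity (big-endian configurations) it amounts to a byte swap, and swapping twice returns the CPU-order value, leaving the wrong byte order in memory (swab32() is paraphrased here; the kernel has its own helpers):

  #include <stdint.h>
  #include <stdio.h>

  static uint32_t swab32(uint32_t x)
  {
          return (x >> 24) | ((x >> 8) & 0xff00) |
                 ((x << 8) & 0xff0000) | (x << 24);
  }

  int main(void)
  {
          uint32_t opcode = 0xe1a00000;      /* ARM "mov r0, r0" */
          uint32_t once   = swab32(opcode);  /* correct memory image */
          uint32_t twice  = swab32(once);    /* double conversion */

          printf("opcode %08x, once %08x, twice %08x\n", opcode, once, twice);
          printf("twice == opcode: %s (instruction wrecked in memory)\n",
                 twice == opcode ? "yes" : "no");
          return 0;
  }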
Fixes: 42e51f187f86 ("arm/ftrace: Use __patch_text()")
Reported-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm/kernel/ftrace.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index bda949fd84e8b..93caf757f1d5d 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -81,13 +81,10 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
 {
 	unsigned long replaced;
 
-	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
 		old = __opcode_to_mem_thumb32(old);
-		new = __opcode_to_mem_thumb32(new);
-	} else {
+	else
 		old = __opcode_to_mem_arm(old);
-		new = __opcode_to_mem_arm(new);
-	}
 
 	if (validate) {
 		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
From: Suren Baghdasaryan <surenb@google.com>
[ Upstream commit 6fcca0fa48118e6d63733eb4644c6cd880c15b8f ]
Issuing write() with the count parameter set to 0 on any file under /proc/pressure/ will cause an OOB write because of the access to buf[buf_size-1] when NUL-termination is performed. Fix this by checking that the buffer size is non-zero.
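The arithmetic of the bug, sketched as a standalone program with simplified names: with nbytes == 0, buf_size becomes 0 and the NUL terminator would land at buf[-1].

  #include <stdio.h>
  #include <string.h>

  #define MIN(a, b) ((a) < (b) ? (a) : (b))

  /* simplified shape of the kernel function's buffer handling */
  static int parse(const char *input, size_t nbytes)
  {
          char buf[32];
          size_t buf_size;

          if (!nbytes)
                  return -1;      /* the fix: reject zero-length writes */

          buf_size = MIN(nbytes, sizeof(buf));
          memcpy(buf, input, buf_size);
          buf[buf_size - 1] = '\0';  /* with buf_size == 0 this is buf[-1] */
          printf("parsed: %s\n", buf);
          return 0;
  }

  int main(void)
  {
          parse("some 150000 1000000", 20);
          parse("", 0);   /* rejected instead of corrupting the stack */
          return 0;
  }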
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lkml.kernel.org/r/20200203212216.7076-1-surenb@google.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/sched/psi.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index ce8f6748678a2..9154e745f0978 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1199,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
 	if (static_branch_likely(&psi_disabled))
 		return -EOPNOTSUPP;
 
+	if (!nbytes)
+		return -EINVAL;
+
 	buf_size = min(nbytes, sizeof(buf));
 	if (copy_from_user(buf, user_buf, buf_size))
 		return -EFAULT;
From: Kan Liang <kan.liang@linux.intel.com>
[ Upstream commit eda23b387f6c4bb2971ac7e874a09913f533b22c ]
Elkhart Lake also uses a Tremont CPU. From the perspective of the Intel PMU, nothing has changed compared with Jacobsville. Share the perf code with Jacobsville.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1580236279-35492-1-git-send-email-kan.liang@linux....
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/events/intel/core.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index fcef678c34230..c531e3f3269ed 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4746,6 +4746,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case INTEL_FAM6_ATOM_TREMONT_D:
+	case INTEL_FAM6_ATOM_TREMONT:
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
From: Kan Liang <kan.liang@linux.intel.com>
[ Upstream commit ecf71fbccb9ac5cb964eb7de59bb9da3755b7885 ]
Tremont is Intel's successor to Goldmont Plus. From the perspective of the Intel cstate residency counters, nothing has changed compared with Goldmont Plus and Goldmont.
Share glm_cstates with Goldmont Plus and Goldmont. Update the comments for Tremont.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1580236279-35492-2-git-send-email-kan.liang@linux....
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/events/intel/cstate.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index e1daf4151e116..4814c964692cb 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -40,17 +40,18 @@
  * Model specific counters:
  *	MSR_CORE_C1_RES: CORE C1 Residency Counter
  *			 perf code: 0x00
- *			 Available model: SLM,AMT,GLM,CNL
+ *			 Available model: SLM,AMT,GLM,CNL,TNT
  *			 Scope: Core (each processor core has a MSR)
  *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *			       perf code: 0x01
  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
- *						CNL,KBL,CML
+ *						CNL,KBL,CML,TNT
  *			       Scope: Core
  *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
  *			       perf code: 0x02
  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
- *						SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
+ *						SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
+ *						TNT
  *			       Scope: Core
  *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *			       perf code: 0x03
@@ -60,17 +61,18 @@
  *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
  *			      perf code: 0x00
  *			      Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
- *					       KBL,CML,ICL,TGL
+ *					       KBL,CML,ICL,TGL,TNT
  *			      Scope: Package (physical package)
  *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
  *			      perf code: 0x01
  *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
- *					       GLM,CNL,KBL,CML,ICL,TGL
+ *					       GLM,CNL,KBL,CML,ICL,TGL,TNT
  *			      Scope: Package (physical package)
  *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
  *			      perf code: 0x02
- *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- *					       SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
+ *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
+ *					       SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
+ *					       TNT
  *			      Scope: Package (physical package)
  *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
  *			      perf code: 0x03
@@ -87,7 +89,8 @@
  *			      Scope: Package (physical package)
  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *			       perf code: 0x06
- *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
+ *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
+ *						TNT
  *			       Scope: Package (physical package)
  *
  */
@@ -640,8 +643,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates),
-
 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D, glm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT, glm_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates),
From: Kan Liang <kan.liang@linux.intel.com>
[ Upstream commit 0aa0e0d6b34b89649e6b5882a7e025a0eb9bd832 ]
Tremont is Intel's successor to Goldmont Plus. SMI_COUNT MSR is also supported.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1580236279-35492-3-git-send-email-kan.liang@linux....
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/events/msr.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 6f86650b3f77d..a949f6f55991d 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -75,8 +75,9 @@ static bool test_intel(int idx, void *data)
 
 	case INTEL_FAM6_ATOM_GOLDMONT:
 	case INTEL_FAM6_ATOM_GOLDMONT_D:
-
 	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+	case INTEL_FAM6_ATOM_TREMONT_D:
+	case INTEL_FAM6_ATOM_TREMONT:
 
 	case INTEL_FAM6_XEON_PHI_KNL:
 	case INTEL_FAM6_XEON_PHI_KNM:
From: Xiubo Li <xiubli@redhat.com>
[ Upstream commit 8e4473bb50a1796c9c32b244e5dbc5ee24ead937 ]
In O_APPEND & O_DIRECT mode, data from different writers may end up overlapping, since both writers take the shared lock.
For example, both Writer1 and Writer2 are in O_APPEND and O_DIRECT mode:
          Writer1                       Writer2

     shared_lock()                 shared_lock()
     getattr(CAP_SIZE)             getattr(CAP_SIZE)
     iocb->ki_pos = EOF            iocb->ki_pos = EOF
     write(data1)                  write(data2)
     shared_unlock()               shared_unlock()
The data2 will overlap the data1 from the same file offset, the old EOF.
Switch to exclusive lock instead when O_APPEND is specified.
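The check added below uses the common "flag A set and flag B clear" bitmask idiom: mask with both bits, compare against just one. A standalone sketch with made-up flag values:

  #include <stdio.h>

  #define F_DIRECT  (1u << 0)     /* demo values, not the kernel's */
  #define F_APPEND  (1u << 1)

  /* true only when DIRECT is set and APPEND is clear */
  static int direct_and_not_append(unsigned int flags)
  {
          return (flags & (F_DIRECT | F_APPEND)) == F_DIRECT;
  }

  int main(void)
  {
          printf("DIRECT alone:  %d\n", direct_and_not_append(F_DIRECT));
          printf("DIRECT|APPEND: %d\n", direct_and_not_append(F_DIRECT | F_APPEND));
          printf("APPEND alone:  %d\n", direct_and_not_append(F_APPEND));
          return 0;
  }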
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/ceph/file.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 11929d2bb594c..cd09e63d682b7 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1418,6 +1418,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct ceph_cap_flush *prealloc_cf;
 	ssize_t count, written = 0;
 	int err, want, got;
+	bool direct_lock = false;
 	loff_t pos;
 	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
 
@@ -1428,8 +1429,11 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (!prealloc_cf)
 		return -ENOMEM;
 
+	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
+		direct_lock = true;
+
 retry_snap:
-	if (iocb->ki_flags & IOCB_DIRECT)
+	if (direct_lock)
 		ceph_start_io_direct(inode);
 	else
 		ceph_start_io_write(inode);
@@ -1519,14 +1523,15 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 		/* we might need to revert back to that point */
 		data = *from;
-		if (iocb->ki_flags & IOCB_DIRECT) {
+		if (iocb->ki_flags & IOCB_DIRECT)
 			written = ceph_direct_read_write(iocb, &data, snapc,
 							 &prealloc_cf);
-			ceph_end_io_direct(inode);
-		} else {
+		else
 			written = ceph_sync_write(iocb, &data, pos, snapc);
+		if (direct_lock)
+			ceph_end_io_direct(inode);
+		else
 			ceph_end_io_write(inode);
-		}
 		if (written > 0)
 			iov_iter_advance(from, written);
 		ceph_put_snap_context(snapc);
@@ -1577,7 +1582,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 	goto out_unlocked;
 out:
-	if (iocb->ki_flags & IOCB_DIRECT)
+	if (direct_lock)
 		ceph_end_io_direct(inode);
 	else
 		ceph_end_io_write(inode);
From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
[ Upstream commit f24667779b5348279e5e4328312a141a730a1fc7 ]
frame-inversion is a "flag", not a "uint32". This patch fixes it.
Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Reviewed-by: Patrice Chotard <patrice.chotard@st.com>
Signed-off-by: Patrice Chotard <patrice.chotard@st.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arm/boot/dts/stihxxx-b2120.dtsi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
index 60e11045ad762..d051f080e52ec 100644
--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
+++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
@@ -46,7 +46,7 @@
 			/* DAC */
 			format = "i2s";
 			mclk-fs = <256>;
-			frame-inversion = <1>;
+			frame-inversion;
 			cpu {
 				sound-dai = <&sti_uni_player2>;
 			};
From: Sung Lee <sung.lee@amd.com>
[ Upstream commit df36f6cf23ada812930afa8ee76681d4ad307c61 ]
[WHY] The optimized_required flag is needed to set watermarks and clocks lower in certain conditions. This flag is set to true and then set to false while programming the front end in dcn20.
[HOW] Do not set the flag to false while disabling plane.
Signed-off-by: Sung Lee <sung.lee@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 937a8ba811603..e933f6a369f92 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -493,7 +493,6 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 		dpp->funcs->dpp_dppclk_control(dpp, false, false);
 
 	hubp->power_gated = true;
-	dc->optimized_required = false;
 
 	/* We're powering off, no need to optimize */
 	dc->hwss.plane_atomic_power_down(dc,
 			pipe_ctx->plane_res.dpp,
From: Krishnamraju Eraparaju <krishna2@chelsio.com>
[ Upstream commit 663218a3e715fd9339d143a3e10088316b180f4f ]
Warnings like the one below can fill up dmesg while tearing down RDMA connections. Hence, remove the unwanted WARN_ON.
WARNING: CPU: 6 PID: 0 at drivers/infiniband/sw/siw/siw_cm.c:1229 siw_cm_llp_data_ready+0xc1/0xd0 [siw]
RIP: 0010:siw_cm_llp_data_ready+0xc1/0xd0 [siw]
Call Trace:
 <IRQ>
 tcp_data_queue+0x226/0xb40
 tcp_rcv_established+0x220/0x620
 tcp_v4_do_rcv+0x12a/0x1e0
 tcp_v4_rcv+0xb05/0xc00
 ip_local_deliver_finish+0x69/0x210
 ip_local_deliver+0x6b/0xe0
 ip_rcv+0x273/0x362
 __netif_receive_skb_core+0xb35/0xc30
 netif_receive_skb_internal+0x3d/0xb0
 napi_gro_frags+0x13b/0x200
 t4_ethrx_handler+0x433/0x7d0 [cxgb4]
 process_responses+0x318/0x580 [cxgb4]
 napi_rx_handler+0x14/0x100 [cxgb4]
 net_rx_action+0x149/0x3b0
 __do_softirq+0xe3/0x30a
 irq_exit+0x100/0x110
 do_IRQ+0x7f/0xe0
 common_interrupt+0xf/0xf
 </IRQ>
Link: https://lore.kernel.org/r/20200207141429.27927-1-krishna2@chelsio.com
Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/infiniband/sw/siw/siw_cm.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 0454561718d95..31aa41d85ccff 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 
 	cep = sk_to_cep(sk);
-	if (!cep) {
-		WARN_ON(1);
+	if (!cep)
 		goto out;
-	}
+
 	siw_dbg_cep(cep, "state: %d\n", cep->state);
 
 	switch (cep->state) {
From: Aric Cyr <aric.cyr@amd.com>
[ Upstream commit 2b63d0ec0daf79ba503fa8bfa25e07dc3da274f3 ]
[Why] Engine can be NULL in some cases, so we must not acquire it.
[How] Check for NULL engine before acquiring.
Signed-off-by: Aric Cyr <aric.cyr@amd.com>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index c3f9f4185ce8d..cf877238fff9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -386,7 +386,7 @@ static bool acquire(
 {
 	enum gpio_result result;
 
-	if (!is_engine_available(engine))
+	if ((engine == NULL) || !is_engine_available(engine))
 		return false;
 
 	result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
From: Yongqiang Sun <yongqiang.sun@amd.com>
[ Upstream commit 6c81917a0485ee2a1be0dc23321ac10ecfd9578b ]
[Why] Underflow is observed when plugging in a 4K@60 monitor alongside a 1366x768 eDP panel because DPPCLK is too low.
[How] Limit minimum DPPCLK to 100MHz.
Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Eric Yang <eric.yang2@amd.com>
Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 787f94d815f42..dd92f9c295b45 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -91,6 +91,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 		rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
 	}
 
+	// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
+	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+		if (new_clocks->dppclk_khz < 100000)
+			new_clocks->dppclk_khz = 100000;
+	}
+
 	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
 		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
 			dpp_clock_lowered = true;
From: Isabel Zhang <isabel.zhang@amd.com>
[ Upstream commit c134c3cabae46a56ab2e1f5e5fa49405e1758838 ]
[Why] Starting from 14nm, the PLL is built into the PHY and the PLL is mapped to the PHY on a 1-to-1 basis. In the code, the DP port was mapped to a PLL that was not initialized. This causes a DP-to-HDMI dongle to not light up the display.
[How] Initializations added for PLL2 when creating resources.
Signed-off-by: Isabel Zhang <isabel.zhang@amd.com>
Reviewed-by: Eric Yang <eric.yang2@amd.com>
Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index b0e5e64df2127..161bf7caf3ae0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -57,6 +57,7 @@
 #include "dcn20/dcn20_dccg.h"
 #include "dcn21_hubbub.h"
 #include "dcn10/dcn10_resource.h"
+#include "dce110/dce110_resource.h"
 
 #include "dcn20/dcn20_dwb.h"
 #include "dcn20/dcn20_mmhubbub.h"
@@ -824,6 +825,7 @@ static const struct dc_debug_options debug_defaults_diags = {
 enum dcn20_clk_src_array_id {
 	DCN20_CLK_SRC_PLL0,
 	DCN20_CLK_SRC_PLL1,
+	DCN20_CLK_SRC_PLL2,
 	DCN20_CLK_SRC_TOTAL_DCN21
 };
 
@@ -1492,6 +1494,10 @@ static bool construct(
 			dcn21_clock_source_create(ctx, ctx->dc_bios,
 				CLOCK_SOURCE_COMBO_PHY_PLL1,
 				&clk_src_regs[1], false);
+	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
+			dcn21_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL2,
+				&clk_src_regs[2], false);
 
 	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
From: Daniel Kolesa <daniel@octaforge.org>
[ Upstream commit 416611d9b6eebaeae58ed26cc7d23131c69126b1 ]
On PowerPC, the compiler will tag object files with whether they use hard or soft float FP ABI and whether they use 64 or 128-bit long double ABI. On systems with 64-bit long double ABI, a tag will get emitted whenever a double is used, as on those systems a long double is the same as a double. This will prevent linkage as other files are being compiled with hard-float.
On ppc64, this code will never actually get used for the time being, as the only currently existing hardware using it are the Renoir APUs. Therefore, until this is testable and can be fixed properly, at least make sure the build will not fail.
Signed-off-by: Daniel Kolesa <daniel@octaforge.org>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index b864869cc7e3e..6fa7422c51da5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -91,6 +91,12 @@ ifdef CONFIG_DRM_AMD_DC_DCN2_1
 ###############################################################################
 CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
 
+# prevent build errors regarding soft-float vs hard-float FP ABI tags
+# this code is currently unused on ppc64, as it applies to Renoir APUs only
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
+endif
+
 AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
From: Thierry Reding <treding@nvidia.com>
[ Upstream commit 6f4ecbe284df5f22e386a640d9a4b32cede62030 ]
If only Tegra194 support is enabled, the tegra30_fuse_read() and tegra30_fuse_init() functions are not declared and cause a build failure. Add Tegra194 to the preprocessor guard to make sure these functions are available for Tegra194-only builds as well.
Link: https://lore.kernel.org/r/20200203143114.3967295-1-thierry.reding@gmail.com
Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/soc/tegra/fuse/fuse-tegra30.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index be9424a871734..9c3ef0a02fd4e 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -35,7 +35,8 @@
     defined(CONFIG_ARCH_TEGRA_124_SOC) || \
     defined(CONFIG_ARCH_TEGRA_132_SOC) || \
     defined(CONFIG_ARCH_TEGRA_210_SOC) || \
-    defined(CONFIG_ARCH_TEGRA_186_SOC)
+    defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+    defined(CONFIG_ARCH_TEGRA_194_SOC)
 static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
 {
 	if (WARN_ON(!fuse->base))
From: Brett Creeley brett.creeley@intel.com
[ Upstream commit f27f37a04a69890ac85d9155f03ee2d23b678d8f ]
Commit d9d6a9aed3f6 ("i40e: Fix virtchnl_queue_select bitmap validation") introduced a necessary change for verifying how queue bitmaps from the iavf driver get validated. Unfortunately, the conditional was reversed. Fix this.
Fixes: d9d6a9aed3f6 ("i40e: Fix virtchnl_queue_select bitmap validation") Signed-off-by: Brett Creeley brett.creeley@intel.com Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3515ace0f0201..38042d610f82c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2363,7 +2363,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; }
- if (i40e_vc_validate_vqs_bitmaps(vqs)) { + if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2425,7 +2425,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; }
- if (i40e_vc_validate_vqs_bitmaps(vqs)) { + if (!i40e_vc_validate_vqs_bitmaps(vqs)) { aq_ret = I40E_ERR_PARAM; goto error_param; }
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit 91a65b7d3ed8450f31ab717a65dcb5f9ceb5ab02 ]
When ethtool -X is called without an hkey, ena_com_fill_hash_function() is called with key=NULL, which is passed to memcpy(), causing a crash.
This commit fixes this issue by checking key is not NULL.
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_com.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index ea62604fdf8ca..e54c44fdcaa73 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2297,15 +2297,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
switch (func) { case ENA_ADMIN_TOEPLITZ: - if (key_len > sizeof(hash_key->key)) { - pr_err("key len (%hu) is bigger than the max supported (%zu)\n", - key_len, sizeof(hash_key->key)); - return -EINVAL; + if (key) { + if (key_len != sizeof(hash_key->key)) { + pr_err("key len (%hu) doesn't equal the supported size (%zu)\n", + key_len, sizeof(hash_key->key)); + return -EINVAL; + } + memcpy(hash_key->key, key, key_len); + rss->hash_init_val = init_val; + hash_key->keys_num = key_len >> 2; } - - memcpy(hash_key->key, key, key_len); - rss->hash_init_val = init_val; - hash_key->keys_num = key_len >> 2; break; case ENA_ADMIN_CRC32: rss->hash_init_val = init_val;
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit 2a6e5fa2f4c25b66c763428a3e65363214946931 ]
From the documentation of round_jiffies():
"Rounds a time delta in the future (in jiffies) up or down to (approximately) full seconds. This is useful for timers for which the exact time they fire does not matter too much, as long as they fire approximately every X seconds. By rounding these timers to whole seconds, all such timers will fire at the same time, rather than at various times spread out. The goal of this is to have the CPU wake up less, which saves power."
There are 2 parts to this patch:
================================

Part 1:
-------
In our case we need timer_service to be called approximately every X=1 seconds, and the exact time does not matter, so using round_jiffies() is the right way to go.
Therefore we add round_jiffies() to the mod_timer() in ena_timer_service().
Part 2:
-------
round_jiffies() is used in check_for_missing_keep_alive() when computing the jiffies value at which the keep_alive timeout expires. Here it is actually a mistake to use round_jiffies(), because we want the exact time when keep_alive should expire and not an approximate rounded time, which can cause early, false-positive timeouts.
Therefore we remove round_jiffies() in the calculation of keep_alive_expired() in check_for_missing_keep_alive().
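A minimal sketch of the two patterns side by side (the timer, field, and handler names are placeholders, not the driver's): round a periodic timer whose exact firing time is irrelevant, but never round a deadline that is compared against jiffies.

static void my_timer_service(struct timer_list *t)
{
        /* ... periodic housekeeping ... */

        /* Part 1: rounding batches the wakeup with other whole-second
         * timers; the exact firing time does not matter here */
        mod_timer(&my_timer, round_jiffies(jiffies + HZ));
}

static void my_check_keep_alive(void)
{
        /* Part 2: a deadline -- use the exact expiry; rounding could
         * move it earlier and produce false-positive timeouts */
        unsigned long expired = my_last_keep_alive + my_keep_alive_timeout;

        if (time_is_before_jiffies(expired))
                my_handle_timeout();
}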
Fixes: 82ef30f13be0 ("net: ena: add hardware hints capability to the driver") Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index b4a145220abae..f0cddf250cfd5 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3035,8 +3035,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) return;
- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies + - adapter->keep_alive_timeout); + keep_alive_expired = adapter->last_keep_alive_jiffies + + adapter->keep_alive_timeout; if (unlikely(time_is_before_jiffies(keep_alive_expired))) { netif_err(adapter, drv, adapter->netdev, "Keep alive watchdog timeout.\n"); @@ -3138,7 +3138,7 @@ static void ena_timer_service(struct timer_list *t) }
/* Reset the timer */ - mod_timer(&adapter->timer_service, jiffies + HZ); + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); }
static int ena_calc_io_queue_num(struct pci_dev *pdev,
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit cf6d17fde93bdda23c9b02dd5906a12bf8c55209 ]
The current implementation of the driver calls skb_tx_timestamp() to add a software tx timestamp to the skb; however, the software-transmit capability is not reported in ethtool -T.
This commit updates the ethtool structure to report the software-transmit capability in ethtool -T using the standard ethtool_op_get_ts_info(). This function reports all software timestamping capabilities (tx and rx), as well as setting phc_index = -1. phc_index is the index of the PTP hardware clock device that will be used for hardware timestamps. Since we don't have such a device in ENA, using the default -1 value is the correct setting.
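For reference, the standard helper amounts to roughly the following (paraphrased from the core ethtool code, so treat it as a sketch rather than the exact upstream source):

int ethtool_op_get_ts_info(struct net_device *dev,
                           struct ethtool_ts_info *info)
{
        /* report software tx/rx timestamping capabilities only ... */
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE;
        /* ... and no PTP hardware clock device */
        info->phc_index = -1;
        return 0;
}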
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Ezequiel Lara Gomez ezegomez@amazon.com Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 8c1c73b0ced74..4472e41bd7825 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -805,6 +805,7 @@ static const struct ethtool_ops ena_ethtool_ops = { .get_channels = ena_get_channels, .get_tunable = ena_get_tunable, .set_tunable = ena_set_tunable, + .get_ts_info = ethtool_op_get_ts_info, };
void ena_set_ethtool_ops(struct net_device *netdev)
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit 0d1c3de7b8c78a5e44b74b62ede4a63629f5d811 ]
Bug description: When running "ethtool -x <if_name>" the key shows up as all zeros.
When we use "ethtool -X <if_name> hfunc toeplitz hkey some:random:key" to set the key and then try to retrieve it using "ethtool -x <if_name>" then we return the correct key because we return the one we saved.
Bug cause: We don't fetch the key from the device but instead return the key that we have saved internally which is by default set to zero upon allocation.
Fix: This commit fixes the issue by initializing the key to a random value using netdev_rss_key_fill().
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_com.c | 15 +++++++++++++++ drivers/net/ethernet/amazon/ena/ena_com.h | 1 + 2 files changed, 16 insertions(+)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index e54c44fdcaa73..d6b894b06fa30 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -1041,6 +1041,19 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev, feature_ver); }
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) +{ + struct ena_admin_feature_rss_flow_hash_control *hash_key = + (ena_dev->rss).hash_key; + + netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key)); + /* The key is stored in the device in u32 array + * as well as the API requires the key to be passed in this + * format. Thus the size of our array should be divided by 4 + */ + hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32); +} + static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; @@ -2631,6 +2644,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) if (unlikely(rc)) goto err_hash_key;
+ ena_com_hash_key_fill_default_key(ena_dev); + rc = ena_com_hash_ctrl_init(ena_dev); if (unlikely(rc)) goto err_hash_ctrl; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 0ce37d54ed108..9b5bd28ed0ac6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -44,6 +44,7 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/wait.h> +#include <linux/netdevice.h>
#include "ena_common_defs.h" #include "ena_admin_defs.h"
From: Sameeh Jubran sameehj@amazon.com
[ Upstream commit 6a4f7dc82d1e3abd3feb0c60b5041056fcd9880c ]
Currently we allocate the key whether the device supports setting the key or not. This commit adds a check to the allocation function and handles the error accordingly.
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_com.c | 24 ++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index d6b894b06fa30..6f758ece86f60 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -1057,6 +1057,20 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_flow_hash_control *hash_key; + struct ena_admin_get_feat_resp get_resp; + int rc; + + hash_key = (ena_dev->rss).hash_key; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + ena_dev->rss.hash_key_dma_addr, + sizeof(ena_dev->rss.hash_key), 0); + if (unlikely(rc)) { + hash_key = NULL; + return -EOPNOTSUPP; + }
rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), @@ -2640,11 +2654,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) if (unlikely(rc)) goto err_indr_tbl;
+ /* The following function might return unsupported in case the + * device doesn't support setting the key / hash function. We can safely + * ignore this error and have indirection table support only. + */ rc = ena_com_hash_key_allocate(ena_dev); - if (unlikely(rc)) + if (unlikely(rc) && rc != -EOPNOTSUPP) goto err_hash_key; - - ena_com_hash_key_fill_default_key(ena_dev); + else if (rc != -EOPNOTSUPP) + ena_com_hash_key_fill_default_key(ena_dev);
rc = ena_com_hash_ctrl_init(ena_dev); if (unlikely(rc))
From: Sameeh Jubran sameehj@amazon.com
[ Upstream commit 0c8923c0a64fb5d14bebb9a9065d2dc25ac5e600 ]
On old hardware, getting / setting the hash function is not supported while getting / setting the indirection table is.
This commit enables us to still show the indirection table on older hardware by setting the hash function and key to NULL.
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 4472e41bd7825..52a3decff34a4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -648,7 +648,21 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (rc) return rc;
+ /* We call this function in order to check if the device + * supports getting/setting the hash function. + */ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key); + + if (rc) { + if (rc == -EOPNOTSUPP) { + key = NULL; + hfunc = NULL; + rc = 0; + } + + return rc; + } + if (rc) return rc;
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit 4844470d472d660c26149ad764da2406adb13423 ]
The device receives, stores and retrieves the hash function value as bits and not as their enum value.
The bug:
* In ena_com_set_hash_function() we set cmd.u.flow_hash_func.selected_func to the bit value of rss->hash_func. (1 << rss->hash_func)
* In ena_com_get_hash_function() we retrieve the hash function and store its bit value in rss->hash_func. (Now the bit value of rss->hash_func is stored in rss->hash_func instead of its enum value)
The fix:
This commit fixes the issue by converting the hash function values retrieved from the device to the matching enum value of the set bit, using ffs(). ffs() finds the first set bit's index in a word. Since the function returns 1 for the LSB's index, we need to subtract 1 from the returned value (note that BIT(0) is 1).
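A runnable userspace sketch of the conversion (the bit position is illustrative, not a real ENA enum value):

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int selected_func = 1u << 2;   /* device reports a bit, e.g. BIT(2) */
        int hash_func = ffs(selected_func);     /* ffs(0x4) == 3: 1-based index */

        if (hash_func)
                hash_func--;                    /* back to the 0-based enum value */
        printf("enum value: %d\n", hash_func);  /* prints 2 */
        return 0;
}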
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_com.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 6f758ece86f60..8ab192cb26b74 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2370,7 +2370,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, if (unlikely(rc)) return rc;
- rss->hash_func = get_resp.u.flow_hash_func.selected_func; + /* ffs() returns 1 in case the lsb is set */ + rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); + if (rss->hash_func) + rss->hash_func--; + if (func) *func = rss->hash_func;
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit 92569fd27f5cb0ccbdf7c7d70044b690e89a0277 ]
The indirection table has the indices of the Rx queues. When we store it during set indirection operation, we convert the indices to our internal representation of the indices.
Our internal representation of the indices is: even indices for Tx and odd indices for Rx, where every Tx/Rx pair is in consecutive order starting from 0. For example, if the driver has 3 queues (3 for Tx and 3 for Rx) then the indices are as follows:

0  1  2  3  4  5
Tx Rx Tx Rx Tx Rx
The BUG:
The issue is that when we satisfy a get request for the indirection table, we don't convert the indices back to the original representation.

The FIX:
Simply apply the inverse function to the indices of the indirection table after we get it.
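A runnable sketch of the round trip, using the macros the patch adds:

#include <stdio.h>

#define ENA_IO_RXQ_IDX(q)                       (2 * (q) + 1)
#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)       (((q) - 1) / 2)

int main(void)
{
        for (int q = 0; q < 3; q++) {
                int internal = ENA_IO_RXQ_IDX(q);       /* 1, 3, 5 */

                printf("Rx queue %d -> internal %d -> back to %d\n",
                       q, internal,
                       ENA_IO_RXQ_IDX_TO_COMBINED_IDX(internal));
        }
        return 0;
}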
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 24 ++++++++++++++++++- drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 ++ 2 files changed, 25 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 52a3decff34a4..446873bed382b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -636,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev) return ENA_HASH_KEY_SIZE; }
+static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int i, rc; + + if (!indir) + return 0; + + rc = ena_com_indirect_table_get(ena_dev, indir); + if (rc) + return rc; + + /* Our internal representation of the indices is: even indices + * for Tx and uneven indices for Rx. We need to convert the Rx + * indices to be consecutive + */ + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) + indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]); + + return rc; +} + static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { @@ -644,7 +666,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 func; int rc;
- rc = ena_com_indirect_table_get(adapter->ena_dev, indir); + rc = ena_indirection_table_get(adapter, indir); if (rc) return rc;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 72ee51a82ec71..dc02950a96b8d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -127,6 +127,8 @@
#define ENA_IO_TXQ_IDX(q) (2 * (q)) #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) +#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2) +#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
#define ENA_MGMNT_IRQ_IDX 0 #define ENA_IO_IRQ_FIRST_IDX 1
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit e3f89f91e98ce07dc0f121a3b70d21aca749ba39 ]
The function ena_com_ind_tbl_convert_from_device() has an overflow bug, as explained below. Either way, this function is not needed at all, since we never retrieve the indirection table from the device, which makes the conversion unnecessary.
The bug:
The for loop iterates over all io_sq_queues; once i passes the actual number of used queues, io_sq_queues[i].idx equals 0, since those entries are uninitialized. This causes the following code to be executed until the end of the loop:
dev_idx_to_host_tbl[0] = i;
This results in dev_idx_to_host_tbl[0] being equal to ENA_TOTAL_NUM_QUEUES - 1.
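A runnable userspace sketch of the bug (queue counts are illustrative): every unused entry has idx == 0, so slot 0 of the mapping table is overwritten on each of those iterations and ends up holding the last loop index.

#include <stdio.h>

#define ENA_TOTAL_NUM_QUEUES 8

struct io_sq { unsigned short idx; };

int main(void)
{
        /* only 3 queues in use; the rest stay zero-initialized */
        struct io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES] = {
                { .idx = 0 }, { .idx = 1 }, { .idx = 2 },
        };
        unsigned short dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (unsigned short)-1 };

        for (int i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
                dev_idx_to_host_tbl[io_sq_queues[i].idx] = i;

        /* prints 7 (ENA_TOTAL_NUM_QUEUES - 1) instead of 0 */
        printf("dev_idx_to_host_tbl[0] = %u\n", dev_idx_to_host_tbl[0]);
        return 0;
}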
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_com.c | 28 ----------------------- 1 file changed, 28 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 8ab192cb26b74..74743fd8a1e0a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -1281,30 +1281,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) return 0; }
-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) -{ - u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; - struct ena_rss *rss = &ena_dev->rss; - u8 idx; - u16 i; - - for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) - dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; - - for (i = 0; i < 1 << rss->tbl_log_size; i++) { - if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) - return -EINVAL; - idx = (u8)rss->rss_ind_tbl[i].cq_idx; - - if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) - return -EINVAL; - - rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; - } - - return 0; -} - static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 intr_delay_resolution) { @@ -2638,10 +2614,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) if (!ind_tbl) return 0;
- rc = ena_com_ind_tbl_convert_from_device(ena_dev); - if (unlikely(rc)) - return rc; - for (i = 0; i < (1 << rss->tbl_log_size); i++) ind_tbl[i] = rss->host_rss_ind_tbl[i];
From: Sameeh Jubran sameehj@amazon.com
[ Upstream commit 886d2089276e40d460731765083a741c5c762461 ]
Up until kernel 4.11 there was no enum defined for the crc32 hash in ethtool, thus the xor enum was used for supporting crc32.
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 446873bed382b..b0151139abb18 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -693,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, func = ETH_RSS_HASH_TOP; break; case ENA_ADMIN_CRC32: - func = ETH_RSS_HASH_XOR; + func = ETH_RSS_HASH_CRC32; break; default: netif_err(adapter, drv, netdev, @@ -739,7 +739,7 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, case ETH_RSS_HASH_TOP: func = ENA_ADMIN_TOEPLITZ; break; - case ETH_RSS_HASH_XOR: + case ETH_RSS_HASH_CRC32: func = ENA_ADMIN_CRC32; break; default:
From: Arthur Kiyanovski akiyano@amazon.com
[ Upstream commit c207979f5ae10ed70aff1bb13f39f0736973de99 ]
comp_ctx can be NULL in a very rare case when an admin command is executed during the execution of ena_remove().
The bug scenario is as follows:
* ena_destroy_device() sets the comp_ctx to be NULL
* An admin command is executed before executing unregister_netdev(); this can still happen because our device can still receive callbacks from the netdev infrastructure such as ethtool commands.
* When attempting to access the comp_ctx, the bug occurs since it's set to NULL
Fix: Added a check that comp_ctx is not NULL
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)") Signed-off-by: Sameeh Jubran sameehj@amazon.com Signed-off-by: Arthur Kiyanovski akiyano@amazon.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/amazon/ena/ena_com.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 74743fd8a1e0a..304531332e70a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue, static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, u16 command_id, bool capture) { + if (unlikely(!queue->comp_ctx)) { + pr_err("Completion context is NULL\n"); + return NULL; + } + if (unlikely(command_id >= queue->q_depth)) { pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n", command_id, queue->q_depth);
From: Firo Yang firo.yang@suse.com
[ Upstream commit 0f90522591fd09dd201065c53ebefdfe3c6b55cb ]
In recent months, our customer reported several kernel crashes, all preceded by the following message:

NETDEV WATCHDOG: eth2 (enic): transmit queue 0 timed out

Error message of one of those crashes:

BUG: unable to handle kernel paging request at ffffffffa007e090
After analyzing several vmcores, I found that most of the crashes are caused by memory corruption, and all the corrupted memory areas are overwritten by data from network packets. Moreover, I also found that the tx queues were enabled across the watchdog reset.
After going through the source code, I found that in enic_stop(), the tx queues stopped by netif_tx_disable() could be woken up in a small time window between netif_tx_disable() and napi_disable() by the following code path:

napi_poll->
  enic_poll_msix_wq->
    vnic_cq_service->
      enic_wq_service->
        netif_wake_subqueue(enic->netdev, q_number)->
          test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)

In turn, the upper network stack could queue skbs to the ENIC NIC through enic_hard_start_xmit(), and this might introduce a race condition.
Our customer confirmed that this kind of kernel crash has not occurred in over 90 days since they applied this patch.
Signed-off-by: Firo Yang firo.yang@suse.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/cisco/enic/enic_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index acb2856936d20..6e2ab10ad2e6f 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev) napi_disable(&enic->napi[i]);
netif_carrier_off(netdev); - netif_tx_disable(netdev); if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) for (i = 0; i < enic->wq_count; i++) napi_disable(&enic->napi[enic_cq_wq(enic, i)]); + netif_tx_disable(netdev);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_del_station_addr(enic);
From: Bruce Allan bruce.w.allan@intel.com
[ Upstream commit cf8fc2a0863f9ff27ebd2efcdb1f7d378b9fb8a6 ]
After a reset, the Unit Load Status bits in the GLNVM_ULD register that are checked for completion should read 0x7FF before continuing. Update the mask that is checked (minus the three reserved bits that are always set).
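For reference, the seven DONE bits the patch ORs into the mask work out as follows (a runnable sketch; note that GLNVM_ULD_PE_DONE_M, bit 10, is defined in the patch but deliberately left out of the mask):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
        unsigned int uld_mask = BIT(0) | BIT(1) |       /* PCIER_DONE, PCIER_DONE_1 */
                                BIT(3) | BIT(4) |       /* CORER_DONE, GLOBR_DONE */
                                BIT(5) |                /* POR_DONE */
                                BIT(8) | BIT(9);        /* POR_DONE_1, PCIER_DONE_2 */

        printf("ICE_RESET_DONE_MASK = 0x%03X\n", uld_mask);     /* 0x33B */
        return 0;
}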
Signed-off-by: Bruce Allan bruce.w.allan@intel.com Signed-off-by: Tony Nguyen anthony.l.nguyen@intel.com Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/intel/ice/ice_common.c | 17 ++++++++++++----- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 6 ++++++ 2 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 3a6b3950eb0e2..171f0b6254073 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -934,7 +934,7 @@ void ice_deinit_hw(struct ice_hw *hw) */ enum ice_status ice_check_reset(struct ice_hw *hw) { - u32 cnt, reg = 0, grst_delay; + u32 cnt, reg = 0, grst_delay, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. @@ -956,13 +956,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw) return ICE_ERR_RESET_FAILED; }
-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \ - GLNVM_ULD_GLOBR_DONE_M) +#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ + GLNVM_ULD_PCIER_DONE_1_M |\ + GLNVM_ULD_CORER_DONE_M |\ + GLNVM_ULD_GLOBR_DONE_M |\ + GLNVM_ULD_POR_DONE_M |\ + GLNVM_ULD_POR_DONE_1_M |\ + GLNVM_ULD_PCIER_DONE_2_M) + + uld_mask = ICE_RESET_DONE_MASK;
/* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { - reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK; - if (reg == ICE_RESET_DONE_MASK) { + reg = rd32(hw, GLNVM_ULD) & uld_mask; + if (reg == uld_mask) { ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); break; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 152fbd556e9b4..9138b19de87e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -273,8 +273,14 @@ #define GLNVM_GENS_SR_SIZE_S 5 #define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5) #define GLNVM_ULD 0x000B6008 +#define GLNVM_ULD_PCIER_DONE_M BIT(0) +#define GLNVM_ULD_PCIER_DONE_1_M BIT(1) #define GLNVM_ULD_CORER_DONE_M BIT(3) #define GLNVM_ULD_GLOBR_DONE_M BIT(4) +#define GLNVM_ULD_POR_DONE_M BIT(5) +#define GLNVM_ULD_POR_DONE_1_M BIT(8) +#define GLNVM_ULD_PCIER_DONE_2_M BIT(9) +#define GLNVM_ULD_PE_DONE_M BIT(10) #define GLPCI_CNF2 0x000BE004 #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1) #define PF_FUNC_RID 0x0009E880
From: Frank Sorenson sorenson@redhat.com
[ Upstream commit f52aa79df43c4509146140de0241bc21a4a3b4c7 ]
A number of the debug statements output file or directory mode in hex. Change these to print using octal.
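A runnable sketch of why octal is the right base here: the same mode is unrecognizable in hex but matches chmod notation in octal.

#include <stdio.h>

int main(void)
{
        unsigned short mode = 0755;     /* rwxr-xr-x */

        printf("hex:   0x%hx\n", mode); /* 0x1ed -- not obviously a mode */
        printf("octal: %04ho\n", mode); /* 0755  -- matches chmod notation */
        return 0;
}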
Signed-off-by: Frank Sorenson sorenson@redhat.com Signed-off-by: Steve French stfrench@microsoft.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/cifs/cifsacl.c | 4 ++-- fs/cifs/connect.c | 2 +- fs/cifs/inode.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index f842944a5c76a..1619af216677c 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode, ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS)) *pmode |= (S_IXUGO & (*pbits_to_set));
- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode); + cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode); return; }
@@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use, if (mode & S_IXUGO) *pace_flags |= SET_FILE_EXEC_RIGHTS;
- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n", + cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n", mode, *pace_flags); return; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 02451d085ddd0..28af1f7523f34 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -4092,7 +4092,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, cifs_sb->mnt_gid = pvolume_info->linux_gid; cifs_sb->mnt_file_mode = pvolume_info->file_mode; cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; - cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n", + cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n", cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ed59e4a8db598..aafcd79c47722 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1586,7 +1586,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) struct TCP_Server_Info *server; char *full_path;
- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n", + cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n", mode, inode);
cifs_sb = CIFS_SB(inode->i_sb);
From: Coly Li colyli@suse.de
[ Upstream commit 0b96da639a4874311e9b5156405f69ef9fc3bef8 ]
When running a cache set, all the bcache btree nodes of this cache set will be checked by bch_btree_check(). If the bcache btree is very large, iterating all the btree nodes will occupy too much system memory and the bcache registering process might be selected and killed by the system OOM killer. kthread_run() will fail if the current process has a pending signal, therefore the kthread creation in run_cache_set() for the gc and allocator kernel threads will very probably fail for a very large bcache btree.
Indeed such OOM is safe and the registering process will exit after the registration is done. Therefore this patch flushes pending signals during the cache set start up, specifically in bch_cache_allocator_start() and bch_gc_thread_start(), to make sure run_cache_set() won't fail for a large cached data set.
Signed-off-by: Coly Li colyli@suse.de Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/md/bcache/alloc.c | 18 ++++++++++++++++-- drivers/md/bcache/btree.c | 13 +++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index a1df0d95151c6..8bc1faf71ff2f 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -67,6 +67,7 @@ #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/random.h> +#include <linux/sched/signal.h> #include <trace/events/bcache.h>
#define MAX_OPEN_BUCKETS 128 @@ -733,8 +734,21 @@ int bch_open_buckets_alloc(struct cache_set *c)
int bch_cache_allocator_start(struct cache *ca) { - struct task_struct *k = kthread_run(bch_allocator_thread, - ca, "bcache_allocator"); + struct task_struct *k; + + /* + * In case previous btree check operation occupies too many + * system memory for bcache btree node cache, and the + * registering process is selected by OOM killer. Here just + * ignore the SIGKILL sent by OOM killer if there is, to + * avoid kthread_run() being failed by pending signals. The + * bcache registering process will exit after the registration + * done. + */ + if (signal_pending(current)) + flush_signals(current); + + k = kthread_run(bch_allocator_thread, ca, "bcache_allocator"); if (IS_ERR(k)) return PTR_ERR(k);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 46a8b5a91c386..a6e05503b7723 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -34,6 +34,7 @@ #include <linux/random.h> #include <linux/rcupdate.h> #include <linux/sched/clock.h> +#include <linux/sched/signal.h> #include <linux/rculist.h> #include <linux/delay.h> #include <trace/events/bcache.h> @@ -1908,6 +1909,18 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c) { + /* + * In case previous btree check operation occupies too many + * system memory for bcache btree node cache, and the + * registering process is selected by OOM killer. Here just + * ignore the SIGKILL sent by OOM killer if there is, to + * avoid kthread_run() being failed by pending signals. The + * bcache registering process will exit after the registration + * done. + */ + if (signal_pending(current)) + flush_signals(current); + c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); return PTR_ERR_OR_ZERO(c->gc_thread); }
From: Sergey Matyukevich sergey.matyukevich.os@quantenna.com
[ Upstream commit ea75080110a4c1fa011b0a73cb8f42227143ee3e ]
The nl80211_policy is missing for NL80211_ATTR_STATUS_CODE attribute. As a result, for strictly validated commands, it's assumed to not be supported.
Signed-off-by: Sergey Matyukevich sergey.matyukevich.os@quantenna.com Link: https://lore.kernel.org/r/20200213131608.10541-2-sergey.matyukevich.os@quant... Signed-off-by: Johannes Berg johannes.berg@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- net/wireless/nl80211.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c74646b7a751f..1930513f541e1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -437,6 +437,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG }, [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG }, [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, + [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 },
From: Shay Bar shay.bar@celeno.com
[ Upstream commit 33181ea7f5a62a17fbe55f0f73428ecb5e686be8 ]
Before this patch, STAs would set a new width of 160/80+80 MHz based on AP capability only. This is wrong because the STA may not support > 80MHz BW. The fix is to verify that the STA has 160/80+80 MHz capability before increasing its width to > 80MHz.
The "support_80_80" and "support_160" setting is based on: "Table 9-272 — Setting of the Supported Channel Width Set subfield and Extended NSS BW Support subfield at a STA transmitting the VHT Capabilities Information field"
From "Draft P802.11REVmd_D3.0.pdf"
Signed-off-by: Aviad Brikman aviad.brikman@celeno.com Signed-off-by: Shay Bar shay.bar@celeno.com Link: https://lore.kernel.org/r/20200210130728.23674-1-shay.bar@celeno.com Signed-off-by: Johannes Berg johannes.berg@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- net/mac80211/util.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 739e90555d8b9..decd46b383938 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2993,10 +2993,22 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, int cf0, cf1; int ccfs0, ccfs1, ccfs2; int ccf0, ccf1; + u32 vht_cap; + bool support_80_80 = false; + bool support_160 = false;
if (!oper || !htop) return false;
+ vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap; + support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK | + IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)); + support_80_80 = ((vht_cap & + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) || + (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && + vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) || + ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >> + IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1)); ccfs0 = oper->center_freq_seg0_idx; ccfs1 = oper->center_freq_seg1_idx; ccfs2 = (le16_to_cpu(htop->operation_mode) & @@ -3024,10 +3036,10 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, unsigned int diff;
diff = abs(ccf1 - ccf0); - if (diff == 8) { + if ((diff == 8) && support_160) { new.width = NL80211_CHAN_WIDTH_160; new.center_freq1 = cf1; - } else if (diff > 8) { + } else if ((diff > 8) && support_80_80) { new.width = NL80211_CHAN_WIDTH_80P80; new.center_freq2 = cf1; }
From: Yufeng Mo moyufeng@huawei.com
[ Upstream commit d0db7ed397517c8b2be24a0d1abfa15df776908e ]
In the current process, the management table is missing after the IMP reset. This patch adds the management table to the reset process.
Fixes: f5aac71c0327 ("net: hns3: add manager table initialization for hardware") Signed-off-by: Yufeng Mo moyufeng@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 162881005a6df..0c3c63aed2c06 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9437,6 +9437,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; }
+ ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to reinit manager table, ret = %d\n", ret); + return ret; + } + ret = hclge_init_fd_config(hdev); if (ret) { dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
From: Guangbin Huang huangguangbin2@huawei.com
[ Upstream commit 47327c9315b2f3ae4ab659457977a26669631f20 ]
The IPv6 address defined in struct in6_addr is specified as big endian, but there is no specified endianness in struct hclge_fd_rule_tuples, so it will cause a problem if memcpy() is used directly to copy the ipv6 address between these two structures, since this field in struct hclge_fd_rule_tuples is little endian.
This patch fixes this problem by using be32_to_cpu() to convert endian of IPv6 address of struct in6_addr before copying.
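A runnable userspace sketch of the conversion, using ntohl() as a stand-in for be32_to_cpu() (the address is illustrative):

#include <stdio.h>
#include <arpa/inet.h>  /* htonl()/ntohl() */

int main(void)
{
        /* first 32 bits of 2001:db8:: in wire (big-endian) order */
        unsigned int be_word = htonl(0x20010db8);

        /* the tuple field is host-order, so each 32-bit word must be
         * byte-swapped, not copied verbatim with memcpy() */
        printf("host order: 0x%08x\n", ntohl(be_word)); /* 0x20010db8 */
        return 0;
}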
Fixes: d93ed94fbeaf ("net: hns3: add aRFS support for PF") Signed-off-by: Guangbin Huang huangguangbin2@huawei.com Signed-off-by: Huazhong Tan tanhuazhong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 0c3c63aed2c06..c01cf8ef69df9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -6003,6 +6003,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle, static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, struct hclge_fd_rule_tuples *tuples) { +#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 +#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 + tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); tuples->ip_proto = fkeys->basic.ip_proto; tuples->dst_port = be16_to_cpu(fkeys->ports.dst); @@ -6011,12 +6014,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); } else { - memcpy(tuples->src_ip, - fkeys->addrs.v6addrs.src.in6_u.u6_addr32, - sizeof(tuples->src_ip)); - memcpy(tuples->dst_ip, - fkeys->addrs.v6addrs.dst.in6_u.u6_addr32, - sizeof(tuples->dst_ip)); + int i; + + for (i = 0; i < IPV6_SIZE; i++) { + tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); + tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); + } } }
From: Anton Eidelman anton@lightbitslabs.com
[ Upstream commit 2d570a7c0251c594489a2c16b82b14ae30345c03 ]
When nvme_tcp_io_work() fails to send to socket due to connection close/reset, error_recovery work is triggered from nvme_tcp_state_change() socket callback. This cancels all the active requests in the tagset, which requeues them.
The failed request, however, was ended and thus requeued individually as well unless send returned -EPIPE. Another return code to be treated the same way is -ECONNRESET.
Double requeue caused BUG_ON(blk_queued_rq(rq)) in blk_mq_requeue_request() from either the individual requeue of the failed request or the bulk requeue from blk_mq_tagset_busy_iter(, nvme_cancel_request, );
Signed-off-by: Anton Eidelman anton@lightbitslabs.com Reviewed-by: Sagi Grimberg sagi@grimberg.me Signed-off-by: Keith Busch kbusch@kernel.org Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/tcp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 7544be84ab358..a870144542159 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1054,7 +1054,12 @@ static void nvme_tcp_io_work(struct work_struct *w) } else if (unlikely(result < 0)) { dev_err(queue->ctrl->ctrl.device, "failed to send request %d\n", result); - if (result != -EPIPE) + + /* + * Fail the request unless peer closed the connection, + * in which case error recovery flow will complete all. + */ + if ((result != -EPIPE) && (result != -ECONNRESET)) nvme_tcp_fail_request(queue->request); nvme_tcp_done_send_req(queue); return;
From: Nigel Kirkland nigel.kirkland@broadcom.com
[ Upstream commit 97b2512ad000a409b4073dd1a71e4157d76675cb ]
Delayed keep alive work is queued on system workqueue and may be cancelled via nvme_stop_keep_alive from nvme_reset_wq, nvme_fc_wq or nvme_wq.
check_flush_dependency() detects mismatched attributes between the work-queue context used to cancel the keep alive work and system-wq. Specifically, system-wq does not have the WQ_MEM_RECLAIM flag, whereas the contexts used to cancel keep alive work do have the WQ_MEM_RECLAIM flag.
Example warning:
workqueue: WQ_MEM_RECLAIM nvme-reset-wq:nvme_fc_reset_ctrl_work [nvme_fc] is flushing !WQ_MEM_RECLAIM events:nvme_keep_alive_work [nvme_core]
To avoid the flags mismatch, delayed keep alive work is queued on nvme_wq.
However, this creates a secondary concern where work and a request to cancel that work may be in the same work queue - namely err_work in the rdma and tcp transports, which will want to flush/cancel the keep alive work that will now be on nvme_wq.
After reviewing the transports, it looks like err_work can be moved to nvme_reset_wq. In fact, that aligns them better with the transition into RESETTING and performing related reset work in nvme_reset_wq.
Change nvme-rdma and nvme-tcp to perform err_work in nvme_reset_wq.
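The rule being enforced, in sketch form (the queue, work item, and handler names are hypothetical, not the nvme driver's): work that a WQ_MEM_RECLAIM context may flush or cancel synchronously must itself run on a WQ_MEM_RECLAIM workqueue, or check_flush_dependency() warns.

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct delayed_work ka_work;

static void ka_fn(struct work_struct *work)
{
        /* ... send keep-alive ... */
}

static int my_init(void)
{
        /* a WQ_MEM_RECLAIM queue gets a rescuer thread, so anything a
         * reclaim-safe context waits on must also live on such a queue */
        my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ka_work, ka_fn);
        /* queue here, not on the system workqueue, so a later
         * cancel_delayed_work_sync() from a WQ_MEM_RECLAIM reset
         * context does not trip the dependency check */
        queue_delayed_work(my_wq, &ka_work, 5 * HZ);
        return 0;
}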
Signed-off-by: Nigel Kirkland nigel.kirkland@broadcom.com Signed-off-by: James Smart jsmart2021@gmail.com Reviewed-by: Sagi Grimberg sagi@grimberg.me Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Keith Busch kbusch@kernel.org Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/core.c | 10 +++++----- drivers/nvme/host/rdma.c | 2 +- drivers/nvme/host/tcp.c | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e703827d27e9c..7dacfd102a992 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); * nvme_reset_wq - hosts nvme reset works * nvme_delete_wq - hosts nvme delete works * - * nvme_wq will host works such are scan, aen handling, fw activation, - * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq + * nvme_wq will host works such as scan, aen handling, fw activation, + * keep-alive, periodic reconnects etc. nvme_reset_wq * runs reset works which also flush works hosted on nvme_wq for * serialization purposes. nvme_delete_wq host controller deletion * works which flush reset works for serialization. @@ -972,7 +972,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) startka = true; spin_unlock_irqrestore(&ctrl->lock, flags); if (startka) - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); }
static int nvme_keep_alive(struct nvme_ctrl *ctrl) @@ -1002,7 +1002,7 @@ static void nvme_keep_alive_work(struct work_struct *work) dev_dbg(ctrl->device, "reschedule traffic based keep-alive timer\n"); ctrl->comp_seen = false; - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); return; }
@@ -1019,7 +1019,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) if (unlikely(ctrl->kato == 0)) return;
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ); }
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index cb4c3000a57e8..4ff51da3b13fa 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) return;
- queue_work(nvme_wq, &ctrl->err_work); + queue_work(nvme_reset_wq, &ctrl->err_work); }
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index a870144542159..244984420b41b 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) return;
- queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work); + queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); }
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
From: Keith Busch kbusch@kernel.org
[ Upstream commit fa46c6fb5d61b1f17b06d7c6ef75478b576304c7 ]
Many users have reported nvme triggered irq_startup() warnings during shutdown. The driver uses the nvme queue's irq to synchronize scanning for completions, and enabling an interrupt affined to only offline CPUs triggers the alarming warning.
Move the final CQE check to after disabling the device and all registered interrupts have been torn down so that we do not have any IRQ to synchronize.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=206509 Reviewed-by: Sagi Grimberg sagi@grimberg.me Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Keith Busch kbusch@kernel.org Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/pci.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 14d513087a14b..858805929b2cd 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1413,6 +1413,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) nvme_poll_irqdisable(nvmeq, -1); }
+/* + * Called only on a device that has been disabled and after all other threads + * that can check this device's completion queues have synced. This is the + * last chance for the driver to see a natural completion before + * nvme_cancel_request() terminates all incomplete requests. + */ +static void nvme_reap_pending_cqes(struct nvme_dev *dev) +{ + u16 start, end; + int i; + + for (i = dev->ctrl.queue_count - 1; i > 0; i--) { + nvme_process_cq(&dev->queues[i], &start, &end, -1); + nvme_complete_cqes(&dev->queues[i], start, end); + } +} + static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, int entry_size) { @@ -2248,11 +2265,6 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) if (timeout == 0) return false;
- /* handle any remaining CQEs */ - if (opcode == nvme_admin_delete_cq && - !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags)) - nvme_poll_irqdisable(nvmeq, -1); - sent--; if (nr_queues) goto retry; @@ -2441,6 +2453,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) nvme_suspend_io_queues(dev); nvme_suspend_queue(&dev->queues[0]); nvme_pci_disable(dev); + nvme_reap_pending_cqes(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);