From: "Leonidas P. Papadakos" papadakospan@gmail.com
[ Upstream commit 924726888f660b2a86382a5dd051ec9ca1b18190 ]
The rk3328-roc-cc board exhibits TX stability issues with large packets, as does the rock64 board, which was fixed with this patch: https://patchwork.kernel.org/patch/10178969/
A similar patch was merged for the rk3328-roc-cc here: https://patchwork.kernel.org/patch/10804863/ but it doesn't include the tx/rx_delay tweaks, and I find that they help with an issue where large transfers would bring the ethernet link down, causing regular link resets.
Signed-off-by: Leonidas P. Papadakos papadakospan@gmail.com Signed-off-by: Heiko Stuebner heiko@sntech.de Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index 99d0d9912950..a91f87df662e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts @@ -107,8 +107,8 @@ snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - tx_delay = <0x25>; - rx_delay = <0x11>; + tx_delay = <0x24>; + rx_delay = <0x18>; status = "okay"; };
From: Kai-Heng Feng kai.heng.feng@canonical.com
[ Upstream commit 94a9992f7dbdfb28976b565af220e0c4a117144a ]
Commit 71f6fa90a353 ("HID: increase maximum global item tag report size to 256") increases the max report size from 128 to 256.
We also need to update the maximum report size in hid_field_extract(), otherwise it complains about and truncates a now-valid report size: [ 406.165461] hid-sensor-hub 001F:8086:22D8.0002: hid_field_extract() called with n (192) > 32! (kworker/5:1)
BugLink: https://bugs.launchpad.net/bugs/1818547 Fixes: 71f6fa90a353 ("HID: increase maximum global item tag report size to 256") Signed-off-by: Kai-Heng Feng kai.heng.feng@canonical.com Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/hid/hid-core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9993b692598f..860e21ec6a49 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n) u32 hid_field_extract(const struct hid_device *hid, u8 *report, unsigned offset, unsigned n) { - if (n > 32) { - hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", + if (n > 256) { + hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n", n, current->comm); - n = 32; + n = 256; }
return __extract(report, offset, n);
From: Kangjie Lu kjlu@umn.edu
[ Upstream commit 6c44b15e1c9076d925d5236ddadf1318b0a25ce2 ]
create_singlethread_workqueue() may fail and return NULL. The fix checks the return value to avoid a NULL pointer dereference, and moves the call to create_singlethread_workqueue() earlier to avoid resource-release issues.
Signed-off-by: Kangjie Lu kjlu@umn.edu Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/hid/hid-logitech-hidpp.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index f040c8a7f9a9..199cc256e9d9 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) kfree(data); return -ENOMEM; } + data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); + if (!data->wq) { + kfree(data->effect_ids); + kfree(data); + return -ENOMEM; + } + data->hidpp = hidpp; data->feature_index = feature_index; data->version = version; @@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) /* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */ - data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */
From: "He, Bo" bo.he@intel.com
[ Upstream commit cef0d4948cb0a02db37ebfdc320e127c77ab1637 ]
There is a race condition that could happen if hid_debug_rdesc_show() is running while hdev is in the process of going away (device removal, system suspend, etc.), which could result in a NULL pointer dereference:
BUG: unable to handle kernel paging request at 0000000783316040 CPU: 1 PID: 1512 Comm: getevent Tainted: G U O 4.19.20-quilt-2e5dc0ac-00029-gc455a447dd55 #1 RIP: 0010:hid_dump_device+0x9b/0x160 Call Trace: hid_debug_rdesc_show+0x72/0x1d0 seq_read+0xe0/0x410 full_proxy_read+0x5f/0x90 __vfs_read+0x3a/0x170 vfs_read+0xa0/0x150 ksys_read+0x58/0xc0 __x64_sys_read+0x1a/0x20 do_syscall_64+0x55/0x110 entry_SYSCALL_64_after_hwframe+0x49/0xbe
Grab driver_input_lock to make sure the input device exists throughout the whole process of dumping the rdesc.
[jkosina@suse.cz: update changelog a bit] Signed-off-by: "he, bo" bo.he@intel.com Signed-off-by: "Zhang, Jun" jun.zhang@intel.com Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/hid/hid-debug.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index ac9fda1b5a72..1384e57182af 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p) seq_printf(f, "\n\n");
/* dump parsed data and input mappings */ + if (down_interruptible(&hdev->driver_input_lock)) + return 0; + hid_dump_device(hdev, f); seq_printf(f, "\n"); hid_dump_input_mapping(hdev, f);
+ up(&hdev->driver_input_lock); + return 0; }
From: Stephen Boyd swboyd@chromium.org
[ Upstream commit d6752e185c3168771787a02dc6a55f32260943cc ]
If we encounter a failure during suspend where this RTC was programmed to wake up the system from suspend, but that wakeup couldn't be configured because the system didn't support wakeup interrupts, we'll run into the following warning:
Unbalanced IRQ 166 wake disable WARNING: CPU: 7 PID: 3071 at kernel/irq/manage.c:669 irq_set_irq_wake+0x108/0x278
This happens because the suspend process isn't aborted when the RTC fails to configure the wakeup IRQ. Instead, we continue suspending the system and then another suspend callback fails the suspend process and "unwinds" the previously suspended drivers by calling their resume callbacks. When we get back to resuming this RTC driver, we'll call disable_irq_wake() on an IRQ that hasn't been configured for wake.
Let's just fail suspend/resume here if we can't configure the system to wake and the user has chosen to wake up with this device. This fixes the warning and makes the code more robust in case there are systems out there that can't wake up from suspend on this line but the user has chosen to do so.
Cc: Enric Balletbo i Serra enric.balletbo@collabora.com Cc: Evan Green evgreen@chromium.org Cc: Benson Leung bleung@chromium.org Cc: Guenter Roeck groeck@chromium.org Signed-off-by: Stephen Boyd swboyd@chromium.org Acked-By: Benson Leung bleung@chromium.org Signed-off-by: Alexandre Belloni alexandre.belloni@bootlin.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/rtc/rtc-cros-ec.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c index e5444296075e..4d6bf9304ceb 100644 --- a/drivers/rtc/rtc-cros-ec.c +++ b/drivers/rtc/rtc-cros-ec.c @@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev)) - enable_irq_wake(cros_ec_rtc->cros_ec->irq); + return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0; } @@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev)) - disable_irq_wake(cros_ec_rtc->cros_ec->irq); + return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
return 0; }
From: Geert Uytterhoeven geert+renesas@glider.be
[ Upstream commit 15d82d22498784966df8e4696174a16b02cc1052 ]
When no alarm has been programmed on RSK-RZA1, an error message is printed during boot:
rtc rtc0: invalid alarm value: 2019-03-14T255:255:255
sh_rtc_read_alarm_value() returns 0xff when querying a hardware alarm field that is not enabled. __rtc_read_alarm() validates the received alarm values and fills in missing fields when needed. While 0xff is handled fine for the year, month, and day fields, where it is treated as out-of-range and corrected, this is not the case for the hour, minute, and second fields, where -1 is expected for a missing field.
Fix this by returning -1 instead, as this value is handled fine for all fields.
Signed-off-by: Geert Uytterhoeven geert+renesas@glider.be Signed-off-by: Alexandre Belloni alexandre.belloni@bootlin.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/rtc/rtc-sh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index d417b203cbc5..1d3de2a3d1a4 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) { unsigned int byte; - int value = 0xff; /* return 0xff for ignored values */ + int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off); if (byte & AR_ENB) {
From: Julia Lawall Julia.Lawall@lip6.fr
[ Upstream commit 30645307e5d2c8a4caf978558c66121ac91ad17e ]
Add an of_node_put when a tested device node is not available.
The semantic patch that fixes this problem is as follows (http://coccinelle.lip6.fr):
// <smpl> @@ identifier f; local idexpression e; expression x; @@
e = f(...); ... when != of_node_put(e) when != x = e when != e = x when any if (<+...of_device_is_available(e)...+>) { ... when != of_node_put(e) ( return e; | + of_node_put(e); return ...; ) } // </smpl>
Fixes: e0c827aca0730 ("drm/omap: Populate DSS children in omapdss driver") Signed-off-by: Julia Lawall Julia.Lawall@lip6.fr Signed-off-by: Tony Lindgren tony@atomide.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm/mach-omap2/display.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 1444b4b4bd9f..439e143cad7b 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -250,8 +250,10 @@ static int __init omapdss_init_of(void) if (!node) return 0;
- if (!of_device_is_available(node)) + if (!of_device_is_available(node)) { + of_node_put(node); return 0; + }
pdev = of_find_device_by_node(node);
From: Sven Eckelmann sven@narfation.org
[ Upstream commit 4ba104f468bbfc27362c393815d03aa18fb7a20f ]
batadv_hash_remove() is a function which searches the hashtable for an entry, using a needle, a hashtable bucket selection function and a compare function. It locks the bucket list and deletes an entry when the compare function matches it against the needle. It returns the pointer to the matching hlist_node, or NULL when no entry matches the needle.
batadv_bla_del_claim() is not itself protected in any way against another function modifying the hashtable between the search for the entry and the call to batadv_hash_remove(). It can therefore happen that the entry no longer exists, or that the deleted entry is not the same object as the needle. In such a situation, the reference counter (for the reference stored in the hashtable) must not be reduced for the needle. Instead, the reference counter of the actually removed entry has to be reduced.
Otherwise the reference counter will underflow and the object might be freed before all its references were dropped. The kref helpers reported this problem as:
refcount_t: underflow; use-after-free.
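A standalone model (plain userspace C; the struct, the names and the reference counts are invented for illustration, this is not batman-adv code) of why dropping the "hash" reference on the needle instead of on the entry that was actually removed can underflow:

#include <stdio.h>

struct entry { const char *name; int refcount; };

static void put(struct entry *e)
{
	e->refcount--;
	if (e->refcount < 0)
		printf("%s: refcount underflow -> possible use-after-free\n", e->name);
	else
		printf("%s: refcount now %d\n", e->name, e->refcount);
}

int main(void)
{
	/* the entry found by the lookup; the lookup took one reference, but
	 * another CPU has already removed it from the hash, so its hash
	 * reference is gone */
	struct entry claim = { "claim", 1 };

	/* a different entry with the same key that is now stored in the
	 * hash (one reference held by the hash) */
	struct entry other = { "other", 1 };

	/* the hash removal drops the hash's reference of "other" */
	struct entry *removed = &other;

	/* buggy pattern: both puts land on the needle */
	put(&claim);	/* meant to be "the reference from the hash" */
	put(&claim);	/* the lookup reference -> underflow */

	/* fixed pattern: put(removed) for the hash reference,
	 * put(&claim) for the lookup reference */
	(void)removed;
	return 0;
}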
Fixes: 23721387c409 ("batman-adv: add basic bridge loop avoidance code") Signed-off-by: Sven Eckelmann sven@narfation.org Signed-off-by: Simon Wunderlich sw@simonwunderlich.de Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- net/batman-adv/bridge_loop_avoidance.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 5fdde2947802..cf2bcea7df82 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, const u8 *mac, const unsigned short vid) { struct batadv_bla_claim search_claim, *claim; + struct batadv_bla_claim *claim_removed_entry; + struct hlist_node *claim_removed_node;
ether_addr_copy(search_claim.addr, mac); search_claim.vid = vid; @@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, mac, batadv_print_vid(vid));
- batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, - batadv_choose_claim, claim); - batadv_claim_put(claim); /* reference from the hash is gone */ + claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash, + batadv_compare_claim, + batadv_choose_claim, claim); + if (!claim_removed_node) + goto free_claim;
+ /* reference from the hash is gone */ + claim_removed_entry = hlist_entry(claim_removed_node, + struct batadv_bla_claim, hash_entry); + batadv_claim_put(claim_removed_entry); + +free_claim: /* don't need the reference from hash_find() anymore */ batadv_claim_put(claim); }
From: Sven Eckelmann sven@narfation.org
[ Upstream commit 3d65b9accab4a7ed5038f6df403fbd5e298398c7 ]
batadv_hash_remove() is a function which searches the hashtable for an entry, using a needle, a hashtable bucket selection function and a compare function. It locks the bucket list and deletes an entry when the compare function matches it against the needle. It returns the pointer to the matching hlist_node, or NULL when no entry matches the needle.
batadv_tt_local_remove() is not itself protected in any way against another function modifying the hashtable between the search for the entry and the call to batadv_hash_remove(). It can therefore happen that the entry no longer exists, or that the deleted entry is not the same object as the needle. In such a situation, the reference counter (for the reference stored in the hashtable) must not be reduced for the needle. Instead, the reference counter of the actually removed entry has to be reduced.
Otherwise the reference counter will underflow and the object might be freed before all its references were dropped. The kref helpers reported this problem as:
refcount_t: underflow; use-after-free.
Fixes: ef72706a0543 ("batman-adv: protect tt_local_entry from concurrent delete events") Signed-off-by: Sven Eckelmann sven@narfation.org Signed-off-by: Simon Wunderlich sw@simonwunderlich.de Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- net/batman-adv/translation-table.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 8dcd4968cde7..2ee87d3ca6d0 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1337,9 +1337,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid, const char *message, bool roaming) { + struct batadv_tt_local_entry *tt_removed_entry; struct batadv_tt_local_entry *tt_local_entry; u16 flags, curr_flags = BATADV_NO_FLAGS; - void *tt_entry_exists; + struct hlist_node *tt_removed_node;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) @@ -1368,15 +1369,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, */ batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
- tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, + tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash, batadv_compare_tt, batadv_choose_tt, &tt_local_entry->common); - if (!tt_entry_exists) + if (!tt_removed_node) goto out;
- /* extra call to free the local tt entry */ - batadv_tt_local_entry_put(tt_local_entry); + /* drop reference of remove hash entry */ + tt_removed_entry = hlist_entry(tt_removed_node, + struct batadv_tt_local_entry, + common.hash_entry); + batadv_tt_local_entry_put(tt_removed_entry);
out: if (tt_local_entry)
From: Sven Eckelmann sven@narfation.org
[ Upstream commit f131a56880d10932931e74773fb8702894a94a75 ]
batadv_hash_remove() is a function which searches the hashtable for an entry, using a needle, a hashtable bucket selection function and a compare function. It locks the bucket list and deletes an entry when the compare function matches it against the needle. It returns the pointer to the matching hlist_node, or NULL when no entry matches the needle.
batadv_tt_global_free() is not itself protected in any way against another function modifying the hashtable between the search for the entry and the call to batadv_hash_remove(). It can therefore happen that the entry no longer exists, or that the deleted entry is not the same object as the needle. In such a situation, the reference counter (for the reference stored in the hashtable) must not be reduced for the needle. Instead, the reference counter of the actually removed entry has to be reduced.
Otherwise the reference counter will underflow and the object might be freed before all its references were dropped. The kref helpers reported this problem as:
refcount_t: underflow; use-after-free.
Fixes: 7683fdc1e886 ("batman-adv: protect the local and the global trans-tables with rcu") Reported-by: Martin Weinelt martin@linuxlounge.net Signed-off-by: Sven Eckelmann sven@narfation.org Acked-by: Antonio Quartulli a@unstable.cc Signed-off-by: Simon Wunderlich sw@simonwunderlich.de Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- net/batman-adv/translation-table.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 2ee87d3ca6d0..6ec0e67be560 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, struct batadv_tt_global_entry *tt_global, const char *message) { + struct batadv_tt_global_entry *tt_removed_entry; + struct hlist_node *tt_removed_node; + batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting global tt entry %pM (vid: %d): %s\n", tt_global->common.addr, batadv_print_vid(tt_global->common.vid), message);
- batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, - batadv_choose_tt, &tt_global->common); - batadv_tt_global_entry_put(tt_global); + tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash, + batadv_compare_tt, + batadv_choose_tt, + &tt_global->common); + if (!tt_removed_node) + return; + + /* drop reference of remove hash entry */ + tt_removed_entry = hlist_entry(tt_removed_node, + struct batadv_tt_global_entry, + common.hash_entry); + batadv_tt_global_entry_put(tt_removed_entry); }
/**
From: Anders Roxell anders.roxell@linaro.org
[ Upstream commit ca8c3b922e7032aff6cc3fd05548f4df1f3df90e ]
When CONFIG_CFG80211 isn't enabled, the compiler correctly warns that 'sinfo.pertid' may be used uninitialized. The same can also happen for other error conditions that the compiler does not warn about.
net/batman-adv/bat_v_elp.c: In function ‘batadv_v_elp_get_throughput.isra.0’: include/net/cfg80211.h:6370:13: warning: ‘sinfo.pertid’ may be used uninitialized in this function [-Wmaybe-uninitialized] kfree(sinfo->pertid); ~~~~~^~~~~~~~
Rework so that we only release '&sinfo' if cfg80211_get_station returns zero.
Fixes: 7d652669b61d ("batman-adv: release station info tidstats") Signed-off-by: Anders Roxell anders.roxell@linaro.org Signed-off-by: Sven Eckelmann sven@narfation.org Signed-off-by: Simon Wunderlich sw@simonwunderlich.de Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- net/batman-adv/bat_v_elp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index ef0dec20c7d8..5da183b2f4c9 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
- /* free the TID stats immediately */ - cfg80211_sinfo_release_content(&sinfo); + if (!ret) { + /* free the TID stats immediately */ + cfg80211_sinfo_release_content(&sinfo); + }
dev_put(real_netdev); if (ret == -ENOENT) {
From: Douglas Anderson dianders@chromium.org
[ Upstream commit d040e4e8deeaa8257d6aa260e29ad69832b5d630 ]
The device tree compiler yells like this: Warning (unit_address_vs_reg): /gpu-opp-table/opp@100000000: node has a unit name, but no reg property
Let's match the cpu opp node names and use a dash.
Signed-off-by: Douglas Anderson dianders@chromium.org Reviewed-by: Matthias Kaehlcke mka@chromium.org Signed-off-by: Heiko Stuebner heiko@sntech.de Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm/boot/dts/rk3288.dtsi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 09868dcee34b..df0c5456c94f 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -1282,27 +1282,27 @@ gpu_opp_table: gpu-opp-table { compatible = "operating-points-v2";
- opp@100000000 { + opp-100000000 { opp-hz = /bits/ 64 <100000000>; opp-microvolt = <950000>; }; - opp@200000000 { + opp-200000000 { opp-hz = /bits/ 64 <200000000>; opp-microvolt = <950000>; }; - opp@300000000 { + opp-300000000 { opp-hz = /bits/ 64 <300000000>; opp-microvolt = <1000000>; }; - opp@400000000 { + opp-400000000 { opp-hz = /bits/ 64 <400000000>; opp-microvolt = <1100000>; }; - opp@500000000 { + opp-500000000 { opp-hz = /bits/ 64 <500000000>; opp-microvolt = <1200000>; }; - opp@600000000 { + opp-600000000 { opp-hz = /bits/ 64 <600000000>; opp-microvolt = <1250000>; };
From: Axel Lin axel.lin@ingics.com
[ Upstream commit 13e8a05b922457761ddef39cfff6231bd4ed9eef ]
Set .owner to prevent the module from being unloaded while it is in use.
Signed-off-by: Axel Lin axel.lin@ingics.com Fixes: d903779b58be ("reset: meson: add meson audio arb driver") Signed-off-by: Philipp Zabel p.zabel@pengutronix.de Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/reset/reset-meson-audio-arb.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 91751617b37a..c53a2185a039 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c @@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); arb->rstc.ops = &meson_audio_arb_rstc_ops; arb->rstc.of_node = dev->of_node; + arb->rstc.owner = THIS_MODULE;
/* * Enable general :
From: Tony Lindgren tony@atomide.com
[ Upstream commit 7d56bedb2730dc2ea8abf0fd7240ee99ecfee3c9 ]
We must not use legacy clock defines for dts clkctrl clocks as the offsets will be wrong.
Fixes: 87fc89ced3a7 ("ARM: dts: am335x: Move l4 child devices to probe them with ti-sysc") Cc: Tero Kristo t-kristo@ti.com Signed-off-by: Tony Lindgren tony@atomide.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm/boot/dts/am33xx-l4.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 7b818d9d2eab..8396faa9ac28 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -1763,7 +1763,7 @@ reg = <0xcc000 0x4>; reg-names = "rev"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>; + clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; @@ -1786,7 +1786,7 @@ reg = <0xd0000 0x4>; reg-names = "rev"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>; + clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; #size-cells = <1>;
From: Jacob Keller jacob.e.keller@intel.com
[ Upstream commit b3ccbbce1e455b8454d3935eb9ae0a5f18939e24 ]
Commit 0ac30ce43323 ("i40e: fix up 32 bit timespec references", 2017-07-26) claims to be cleaning up references to 32-bit timespecs.
The actual contents of the commit make no sense, as it converts a call to timespec64_add into timespec64_add_ns. This would be OK if (a) the change were documented in the commit message, and (b) timespec64_add_ns supported negative numbers.
timespec64_add_ns doesn't work with signed deltas, because the implementation is based around iter_div_u64_rem. This change resulted in a regression where i40e_ptp_adjtime would interpret small negative adjustments as large positive additions, resulting in incorrect behavior.
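As a standalone illustration (plain userspace C, not driver code) of the signedness problem: timespec64_add_ns() takes an unsigned nanosecond count, so a small negative delta is converted into a huge positive one:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t delta = -1000;		/* a small negative adjustment */
	uint64_t ns = (uint64_t)delta;	/* the implicit conversion at the call */

	/* prints 18446744073709550616 - the "large positive addition"
	 * the clock was effectively asked to perform */
	printf("%llu\n", (unsigned long long)ns);
	return 0;
}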
This commit doesn't appear to fix anything, is not well explained, and introduces a bug, so let's just revert it.
Reverts: 0ac30ce43323 ("i40e: fix up 32 bit timespec references", 2017-07-26) Signed-off-by: Jacob Keller jacob.e.keller@intel.com Reviewed-by: Arnd Bergmann arnd@arndb.de Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5fb4353c742b..31575c0bb884 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now; + struct timespec64 now, then;
+ then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now, NULL); - timespec64_add_ns(&now, delta); + now = timespec64_add(now, then); i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock);
From: Arvind Sankar niveditas98@gmail.com
[ Upstream commit dabb8338be533c18f50255cf39ff4f66d4dabdbe ]
The runtime_suspend device callbacks are not supposed to save configuration state or change the power state. Commit fb29f76cc566 ("igb: Fix an issue that PME is not enabled during runtime suspend") changed the driver to not save configuration state during runtime suspend, however the driver callback still put the device into a low-power state. This causes a warning in the pci pm core and results in pci_pm_runtime_suspend not calling pci_save_state or pci_finish_runtime_suspend.
Fix this by not changing the power state either, leaving that to pci pm core, and make the same change for suspend callback as well.
Also move a couple of defines into the appropriate header file instead of inline in the .c file.
Fixes: fb29f76cc566 ("igb: Fix an issue that PME is not enabled during runtime suspend") Signed-off-by: Arvind Sankar niveditas98@gmail.com Reviewed-by: Kai-Heng Feng kai.heng.feng@canonical.com Tested-by: Aaron Brown aaron.f.brown@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- .../net/ethernet/intel/igb/e1000_defines.h | 2 + drivers/net/ethernet/intel/igb/igb_main.c | 57 +++---------------- 2 files changed, 10 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 01fcfc6f3415..d2e2c50ce257 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -194,6 +194,8 @@ /* enable link status from external LINK_0 and LINK_1 pins */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7137e7f9c7f3..21ccadb720d1 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8755,9 +8755,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif + bool wake;
rtnl_lock(); netif_device_detach(netdev); @@ -8770,14 +8768,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, igb_clear_interrupt_scheme(adapter); rtnl_unlock();
-#ifdef CONFIG_PM - if (!runtime) { - retval = pci_save_state(pdev); - if (retval) - return retval; - } -#endif - status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -8794,10 +8784,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, }
ctrl = rd32(E1000_CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; wr32(E1000_CTRL, ctrl);
@@ -8811,12 +8797,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, wr32(E1000_WUFC, 0); }
- *enable_wake = wufc || adapter->en_mng_pt; - if (!*enable_wake) + wake = wufc || adapter->en_mng_pt; + if (!wake) igb_power_down_link(adapter); else igb_power_up_link(adapter);
+ if (enable_wake) + *enable_wake = wake; + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ @@ -8859,22 +8848,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev) { - int retval; - bool wake; - struct pci_dev *pdev = to_pci_dev(dev); - - retval = __igb_shutdown(pdev, &wake, 0); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 0); }
static int __maybe_unused igb_resume(struct device *dev) @@ -8945,22 +8919,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __igb_shutdown(pdev, &wake, 1); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 1); }
static int __maybe_unused igb_runtime_resume(struct device *dev)
From: Ivan Vecera ivecera@redhat.com
[ Upstream commit 7ec52b9df7d7472240fa96223185894b1897aeb0 ]
The ixgbe driver ignores errors returned from mdiobus_register(), leaving adapter->mii_bus non-NULL and the MDIO bus state as MDIOBUS_ALLOCATED. This triggers a BUG from mdiobus_unregister() during the ixgbe_remove() call.
Fixes: 8fa10ef01260 ("ixgbe: register a mdiobus") Signed-off-by: Ivan Vecera ivecera@redhat.com Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index cc4907f9ff02..2fb97967961c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) struct pci_dev *pdev = adapter->pdev; struct device *dev = &adapter->netdev->dev; struct mii_bus *bus; + int err = -ENODEV;
- adapter->mii_bus = devm_mdiobus_alloc(dev); - if (!adapter->mii_bus) + bus = devm_mdiobus_alloc(dev); + if (!bus) return -ENOMEM;
- bus = adapter->mii_bus; - switch (hw->device_id) { /* C3000 SoCs */ case IXGBE_DEV_ID_X550EM_A_KR: @@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) */ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
- return mdiobus_register(bus); + err = mdiobus_register(bus); + if (!err) { + adapter->mii_bus = bus; + return 0; + }
ixgbe_no_mii_bus: devm_mdiobus_free(dev, bus); - adapter->mii_bus = NULL; - return -ENODEV; + return err; }
/**
From: Stefan Assmann sassmann@kpanic.de
[ Upstream commit f669d24f3dd00beab452c0fc9257f6a942ffca9b ]
The current WoL check in i40e is broken. The code comment says only magic packet is supported, so only check for that.
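A standalone sketch of the broken condition (the WAKE_* values are copied here only for illustration): since '|' binds tighter than '&&' and the two inequalities can never both be false, the old check rejected every non-zero wolopts, including a plain WAKE_MAGIC request:

#include <stdio.h>

#define WAKE_MAGIC	0x20
#define WAKE_FILTER	0x80

static int old_check(unsigned int wolopts)
{
	/* parses as: wolopts && ((wolopts != WAKE_MAGIC) | (wolopts != WAKE_FILTER)) */
	return wolopts && (wolopts != WAKE_MAGIC) | (wolopts != WAKE_FILTER);
}

static int new_check(unsigned int wolopts)
{
	/* reject only when something other than magic packet is requested */
	return wolopts & ~WAKE_MAGIC;
}

int main(void)
{
	/* the old check wrongly rejects (returns non-zero for) WAKE_MAGIC */
	printf("old: %d, new: %d\n", old_check(WAKE_MAGIC), new_check(WAKE_MAGIC));
	return 0;
}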
Fixes: 540a152da762 (i40e/ixgbe/igb: fail on new WoL flag setting WAKE_MAGICSECURE)
Signed-off-by: Stefan Assmann sassmann@kpanic.de Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a6bc7847346b..5d544e661445 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2378,8 +2378,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP;
/* only magic packet is supported */ - if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) - | (wol->wolopts != WAKE_FILTER)) + if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP;
/* is this a new value? */
From: Yue Haibing yuehaibing@huawei.com
[ Upstream commit 01ca667133d019edc9f0a1f70a272447c84ec41f ]
Syzkaller report this:
kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN PTI CPU: 0 PID: 4378 Comm: syz-executor.0 Tainted: G C 5.0.0+ #5 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 RIP: 0010:__lock_acquire+0x95b/0x3200 kernel/locking/lockdep.c:3573 Code: 00 0f 85 28 1e 00 00 48 81 c4 08 01 00 00 5b 5d 41 5c 41 5d 41 5e 41 5f c3 4c 89 ea 48 b8 00 00 00 00 00 fc ff df 48 c1 ea 03 <80> 3c 02 00 0f 85 cc 24 00 00 49 81 7d 00 e0 de 03 a6 41 bc 00 00 RSP: 0018:ffff8881e3c07a40 EFLAGS: 00010002 RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000010 RSI: 0000000000000000 RDI: 0000000000000080 RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000 R10: ffff8881e3c07d98 R11: ffff8881c7f21f80 R12: 0000000000000001 R13: 0000000000000080 R14: 0000000000000000 R15: 0000000000000001 FS: 00007fce2252e700(0000) GS:ffff8881f2400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fffc7eb0228 CR3: 00000001e5bea002 CR4: 00000000007606f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: lock_acquire+0xff/0x2c0 kernel/locking/lockdep.c:4211 __mutex_lock_common kernel/locking/mutex.c:925 [inline] __mutex_lock+0xdf/0x1050 kernel/locking/mutex.c:1072 drain_workqueue+0x24/0x3f0 kernel/workqueue.c:2934 destroy_workqueue+0x23/0x630 kernel/workqueue.c:4319 __do_sys_delete_module kernel/module.c:1018 [inline] __se_sys_delete_module kernel/module.c:961 [inline] __x64_sys_delete_module+0x30c/0x480 kernel/module.c:961 do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x462e99 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fce2252dc58 EFLAGS: 00000246 ORIG_RAX: 00000000000000b0 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000020000140 RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007fce2252e6bc R13: 00000000004bcca9 R14: 00000000006f6b48 R15: 00000000ffffffff
If alloc_workqueue() fails, we should return -ENOMEM; otherwise we may trigger this NULL pointer dereference while unloading the driver.
Reported-by: Hulk Robot hulkci@huawei.com Fixes: 0a38c17a21a0 ("fm10k: Remove create_workqueue") Signed-off-by: Yue Haibing yuehaibing@huawei.com Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 6fd15a734324..58f02c85f2fe 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) /* create driver workqueue */ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name); + if (!fm10k_workqueue) + return -ENOMEM;
fm10k_dbg_init();
From: Alan Kao alankao@andestech.com
[ Upstream commit dbee9c9c45846f003ec2f819710c2f4835630a6a ]
A store to an 8-byte variable in RV32 is divided into two sw instructions in the put_user() macro. The current fixup returns execution flow to the second sw instead of the instruction after it.
This patch fixes the fixup code to match that of the load access part.
Signed-off-by: Alan Kao alankao@andestech.com Cc: Greentime Hu greentime@andestech.com Cc: Vincent Chen deanbo422@gmail.com Signed-off-by: Palmer Dabbelt palmer@sifive.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/riscv/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 637b896894fc..aa82df30e38a 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -301,7 +301,7 @@ do { \ " .balign 4\n" \ "4:\n" \ " li %0, %6\n" \ - " jump 2b, %1\n" \ + " jump 3b, %1\n" \ " .previous\n" \ " .section __ex_table,"a"\n" \ " .balign " RISCV_SZPTR "\n" \
From: Jeffrey Hugo jeffrey.l.hugo@gmail.com
[ Upstream commit 2bafa1e9625400bec4c840a168d70ba52607a58d ]
Similar to commit edfc3722cfef ("HID: quirks: Fix keyboard + touchpad on Toshiba Click Mini not working"), the Lenovo Miix 630 has a combo keyboard/touchpad device with a vid:pid of 04F3:0400, which is shared with Elan touchpads. The combo on the Miix 630 has an ACPI id of QTEC0001, which is not claimed by the elan_i2c driver, so key on that, similar to what was done for the Toshiba Click Mini.
Signed-off-by: Jeffrey Hugo jeffrey.l.hugo@gmail.com Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/hid/hid-quirks.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 94088c0ed68a..e24790c988c0 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -744,7 +744,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, @@ -1025,6 +1024,10 @@ bool hid_ignore(struct hid_device *hdev) if (hdev->product == 0x0401 && strncmp(hdev->name, "ELAN0800", 8) != 0) return true; + /* Same with product id 0x0400 */ + if (hdev->product == 0x0400 && + strncmp(hdev->name, "QTEC0001", 8) != 0) + return true; break; }
From: Xi Wang wangxi11@huawei.com
[ Upstream commit 669efc76b317b3aa550ffbf0b79d064cb00a5f96 ]
The rules for configuring search paths in Kbuild have changed, which leads to errors when compiling hns3 with the following command:
make O=DIR M=drivers/net/ethernet/hisilicon/hns3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c:11:10: fatal error: hnae3.h: No such file or directory
This patch fixes it by adding the $(srctree)/ prefix to the search paths.
Signed-off-by: Xi Wang wangxi11@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index fffe8c1c45d3..0fb61d440d3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -3,7 +3,7 @@ # Makefile for the HISILICON network device drivers. #
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index fb93bbd35845..6193f8fa7cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -3,7 +3,7 @@ # Makefile for the HISILICON network device drivers. #
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file
From: Jesper Dangaard Brouer brouer@redhat.com
[ Upstream commit 676e4a6fe703f2dae699ee9d56f14516f9ada4ea ]
We want to avoid leaking pointer info from xdp_frame (that is placed in top of frame) like commit 6dfb970d3dbd ("xdp: avoid leaking info stored in frame data on page reuse"), and followup commit 97e19cce05e5 ("bpf: reserve xdp_frame size in xdp headroom") that reserve this headroom.
These changes also affected how cpumap constructed SKBs: as the xdpf->headroom size changed, the skb data starting point was in effect shifted by 32 bytes (sizeof(struct xdp_frame)). This was still okay, as the cpumap frame_size calculation also included xdpf->headroom, which was reduced by the same amount.
A bug was introduced in commit 77ea5f4cbe20 ("bpf/cpumap: make sure frame_size for build_skb is aligned if headroom isn't"), where xdpf->headroom became part of the SKB_DATA_ALIGN round-up. This round-up to find the frame_size is in principle still correct, as it does not exceed the 2048-byte frame_size (which is the max for ixgbe and i40e), but the 32-byte offset of pkt_data_start puts this over the 2048-byte limit. This causes skb_shared_info to spill into the next frame. It is a little hard to trigger, as the SKB needs to use more than 15 skb_shinfo->frags[] as far as I can calculate. This does happen in practice for TCP streams when skb_try_coalesce() kicks in.
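As a rough standalone calculation (the struct sizes and headroom below are assumptions for illustration, not values taken from a particular kernel build), showing how the 32-byte offset pushes the end of the skb data area past the 2048-byte frame:

#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SMP_CACHE_BYTES		64	/* assumed */
#define SKB_DATA_ALIGN(x)	ALIGN_UP((x), SMP_CACHE_BYTES)

int main(void)
{
	unsigned int frame = 2048;	/* frame backing the packet (ixgbe/i40e max) */
	unsigned int xdp_frame_sz = 32;	/* sizeof(struct xdp_frame), assumed */
	unsigned int shinfo = 320;	/* SKB_DATA_ALIGN(sizeof(skb_shared_info)), assumed */
	unsigned int headroom = 256 - xdp_frame_sz;	/* xdpf->headroom after the reservation */
	unsigned int len = frame - 256 - shinfo;	/* a large packet that still fits the frame */

	unsigned int frame_size = SKB_DATA_ALIGN(len + headroom) + shinfo;

	/* build_skb() was handed xdpf->data - xdpf->headroom, i.e. 32 bytes
	 * past the true start of the frame, so the skb data area ends at: */
	printf("frame_size = %u, end offset = %u (> %u)\n",
	       frame_size, xdp_frame_sz + frame_size, frame);
	return 0;
}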
KASAN can be used to detect these wrong memory accesses, I've seen: BUG: KASAN: use-after-free in skb_try_coalesce+0x3cb/0x760 BUG: KASAN: wild-memory-access in skb_release_data+0xe2/0x250
The veth driver also constructs an SKB from an xdp_frame in this way, but it is not affected, as it doesn't reserve/deduct the room (used by xdp_frame) from the SKB headroom. Instead it clears the pointers via xdp_scrub_frame() and allows the SKB to use this area.
The fix in this patch is to do the same as veth and instead allow the SKB to (re)use the area occupied by xdp_frame, by clearing it via xdp_scrub_frame(). (This does kill the idea of the SKB being able to access (mem) info from this area, but I guess it was a bad idea anyhow, and it was already killed by the veth changes.)
Fixes: 77ea5f4cbe20 ("bpf/cpumap: make sure frame_size for build_skb is aligned if headroom isn't") Signed-off-by: Jesper Dangaard Brouer brouer@redhat.com Signed-off-by: Alexei Starovoitov ast@kernel.org Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- kernel/bpf/cpumap.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 8974b3755670..3c18260403dd 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work) static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) { + unsigned int hard_start_headroom; unsigned int frame_size; void *pkt_data_start; struct sk_buff *skb;
+ /* Part of headroom was reserved to xdpf */ + hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom; + /* build_skb need to place skb_shared_info after SKB end, and * also want to know the memory "truesize". Thus, need to * know the memory frame size backing xdp_buff. @@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, * is not at a fixed memory location, with mixed length * packets, which is bad for cache-line hotness. */ - frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) + + frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- pkt_data_start = xdpf->data - xdpf->headroom; + pkt_data_start = xdpf->data - hard_start_headroom; skb = build_skb(pkt_data_start, frame_size); if (!skb) return NULL;
- skb_reserve(skb, xdpf->headroom); + skb_reserve(skb, hard_start_headroom); __skb_put(skb, xdpf->len); if (xdpf->metasize) skb_metadata_set(skb, xdpf->metasize); @@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, * - RX ring dev queue index (skb_record_rx_queue) */
+ /* Allow SKB to reuse area used by xdp_frame */ + xdp_scrub_frame(xdpf); + return skb; }
From: Roi Dayan roid@mellanox.com
[ Upstream commit 5c1d260ed10cf08dd7a0299c103ad0a3f9a9f7a1 ]
The esw offloads structures share a union with the legacy mode structs. Reset the offloads struct to zero in init to protect from null assumptions made by the legacy mode code.
Signed-off-by: Roi Dayan roid@mellanox.com Reviewed-by: Or Gerlitz ogerlitz@mellanox.com Signed-off-by: Saeed Mahameed saeedm@mellanox.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d4e6fe5b9300..ce5766a26baa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1402,6 +1402,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) { int err;
+ memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_offloads_fdb_tables(esw, nvports);
From: Omri Kahalon omrik@mellanox.com
[ Upstream commit eca4a928585ac08147e5cc8e2111ecbc6279ee31 ]
Traditionally, the PF (Physical Function), which resides on vport 0, was the E-switch manager. Since the ECPF (Embedded CPU Physical Function), which resides on vport 0xfffe, was introduced as the E-switch manager, the assumption that the E-switch manager is on vport 0 is incorrect.
Since the eswitch code already uses the actual vport value, all we need is to always set other_vport=1.
Signed-off-by: Omri Kahalon omrik@mellanox.com Reviewed-by: Max Gurtovoy maxg@mellanox.com Signed-off-by: Saeed Mahameed saeedm@mellanox.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 13c48883ed61..619f96940b65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -81,8 +81,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
@@ -110,8 +109,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(modify_esw_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); }
From: Konstantin Khorenko khorenko@virtuozzo.com
[ Upstream commit 18bebc6dd3281955240062655a4df35eef2c46b3 ]
Bonding expects an Ethernet hwaddr for its slave, but it can be longer than 6 bytes - an InfiniBand interface, for example.
# cat /sys/devices/<skipped>/net/ib0/address 80:00:02:08:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:be:5d:e1
# cat /sys/devices/<skipped>/net/ib0/bonding_slave/perm_hwaddr 80:00:02:08:fe:80
So print full hwaddr in sysfs "bonding_slave/perm_hwaddr" as well.
Signed-off-by: Konstantin Khorenko khorenko@virtuozzo.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/bonding/bond_sysfs_slave.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 2f120b2ffef0..4985268e2273 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) { - return sprintf(buf, "%pM\n", slave->perm_hwaddr); + return sprintf(buf, "%*phC\n", + slave->dev->addr_len, + slave->perm_hwaddr); } static SLAVE_ATTR_RO(perm_hwaddr);
From: Aaro Koskinen aaro.koskinen@nokia.com
[ Upstream commit 583e6361414903c5206258a30e5bd88cb03c0254 ]
We always program the maximum DMA buffer size into the receive descriptor, although the allocated size may be less. E.g. with the default MTU size we allocate only 1536 bytes. If somebody sends us a bigger frame, then memory may get corrupted.
Fix by using exact buffer sizes.
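A rough standalone illustration (the sizes are assumptions, not taken from a specific configuration) of the overrun the old code allowed:

#include <stdio.h>

int main(void)
{
	unsigned int alloc_sz  = 1536;	/* buffer actually allocated for the default MTU, assumed */
	unsigned int desc_sz   = 2047;	/* BUF_SIZE_2KiB - 1 advertised in the descriptor before the fix */
	unsigned int frame_len = 1800;	/* an oversized frame arriving from the wire */

	if (frame_len <= desc_sz && frame_len > alloc_sz)
		printf("DMA writes %u bytes past the %u-byte buffer\n",
		       frame_len - alloc_sz, alloc_sz);
	return 0;
}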
Signed-off-by: Aaro Koskinen aaro.koskinen@nokia.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- .../net/ethernet/stmicro/stmmac/descs_com.h | 22 ++++++++++++------- .../ethernet/stmicro/stmmac/dwmac4_descs.c | 2 +- .../ethernet/stmicro/stmmac/dwxgmac2_descs.c | 2 +- .../net/ethernet/stmicro/stmmac/enh_desc.c | 10 ++++++--- drivers/net/ethernet/stmicro/stmmac/hwif.h | 2 +- .../net/ethernet/stmicro/stmmac/norm_desc.c | 10 ++++++--- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 6 +++-- 7 files changed, 35 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 40d6356a7e73..3dfb07a78952 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -29,11 +29,13 @@ /* Specific functions used for Ring mode */
/* Enhanced descriptors */ -static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, + int bfsize) { - p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - << ERDES1_BUFFER2_SIZE_SHIFT) - & ERDES1_BUFFER2_SIZE_MASK); + if (bfsize == BUF_SIZE_16KiB) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB + << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK);
if (end) p->des1 |= cpu_to_le32(ERDES1_END_RING); @@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) }
/* Normal descriptors */ -static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) { - p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) - << RDES1_BUFFER2_SIZE_SHIFT) - & RDES1_BUFFER2_SIZE_MASK); + if (bfsize >= BUF_SIZE_2KiB) { + int bfsize2; + + bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK); + }
if (end) p->des1 |= cpu_to_le32(RDES1_END_RING); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 736e29635b77..313a58b68fee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -296,7 +296,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, }
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { dwmac4_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 1d858fdec997..98fa471da7c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, }
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { dwxgmac2_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 5ef91a790f9d..e8855e6adb48 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -259,15 +259,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, }
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_8KiB); + p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE) ehn_desc_rx_set_on_chain(p); else - ehn_desc_rx_set_on_ring(p, end); + ehn_desc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic) p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 92b8944f26e3..5bb00234d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -33,7 +33,7 @@ struct dma_extended_desc; struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, - int end); + int end, int bfsize); /* DMA TX descriptor ring initialization */ void (*init_tx_desc)(struct dma_desc *p, int mode, int end); /* Invoked by the xmit function to prepare the tx descriptor */ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index de65bb29feba..c55a9815b394 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -135,15 +135,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, }
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, - int end) + int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE) ndesc_rx_set_on_chain(p, end); else - ndesc_rx_set_on_ring(p, end); + ndesc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic) p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 019ab99e65bb..7ee545b27af2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1114,11 +1114,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); else stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); }
/**
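For readers skimming the diff above, the following is a small standalone userspace sketch, not driver code: BUF_SIZE_2KiB and min() are re-declared locally, split() is a local helper, and the bfsize values are made-up examples. It shows how the patched descriptor setup derives the buffer-1/buffer-2 sizes from the real DMA buffer size, whereas the old code always advertised the hardware maximum (2 KiB or 8 KiB) regardless of the buffer actually allocated.

#include <stdio.h>

#define BUF_SIZE_2KiB 2048
#define min(a, b) ((a) < (b) ? (a) : (b))

static void split(int bfsize)
{
    int bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
    int bfsize2 = 0;

    /* buffer 2 is only used when the DMA buffer exceeds what buffer 1 can hold */
    if (bfsize >= BUF_SIZE_2KiB)
        bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);

    printf("bfsize=%4d -> buffer1=%4d buffer2=%4d\n", bfsize, bfsize1, bfsize2);
}

int main(void)
{
    split(1536);    /* typical default buffer: fits in buffer1, buffer2 unused */
    split(4096);    /* large buffer: split across both descriptor buffers */
    return 0;
}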
From: Aaro Koskinen aaro.koskinen@nokia.com
[ Upstream commit 972c9be784e077bc56472c78243e0326e525b689 ]
Ratelimit RX error logs.
Signed-off-by: Aaro Koskinen aaro.koskinen@nokia.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7ee545b27af2..e41136d57bfa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3407,9 +3407,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) * ignored */ if (frame_len > priv->dma_buf_sz) { - netdev_err(priv->dev, - "len %d larger than size (%d)\n", - frame_len, priv->dma_buf_sz); + if (net_ratelimit()) + netdev_err(priv->dev, + "len %d larger than size (%d)\n", + frame_len, priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; break; } @@ -3466,9 +3467,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) } else { skb = rx_q->rx_skbuff[entry]; if (unlikely(!skb)) { - netdev_err(priv->dev, - "%s: Inconsistent Rx chain\n", - priv->dev->name); + if (net_ratelimit()) + netdev_err(priv->dev, + "%s: Inconsistent Rx chain\n", + priv->dev->name); priv->dev->stats.rx_dropped++; break; }
From: Aaro Koskinen aaro.koskinen@nokia.com
[ Upstream commit 07b3975352374c3f5ebb4a42ef0b253fe370542d ]
Currently, if we drop a packet, we exit from the NAPI loop before the budget is consumed. In some situations this will make the RX processing stall, e.g. when flood pinging the system with oversized packets, as the erroneous packets are not dropped efficiently.
If we drop a packet, we should just continue to the next one as long as the budget allows.
Signed-off-by: Aaro Koskinen aaro.koskinen@nokia.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e41136d57bfa..ca36f05de476 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3328,9 +3328,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; - unsigned int entry = rx_q->cur_rx; + unsigned int next_entry = rx_q->cur_rx; int coe = priv->hw->rx_csum; - unsigned int next_entry; unsigned int count = 0; bool xmac;
@@ -3348,10 +3347,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { - int status; + int entry, status; struct dma_desc *p; struct dma_desc *np;
+ entry = next_entry; + if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + entry); else @@ -3412,7 +3413,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) "len %d larger than size (%d)\n", frame_len, priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; - break; + continue; }
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 @@ -3447,7 +3448,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dev_warn(priv->device, "packet dropped\n"); priv->dev->stats.rx_dropped++; - break; + continue; }
dma_sync_single_for_cpu(priv->device, @@ -3472,7 +3473,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) "%s: Inconsistent Rx chain\n", priv->dev->name); priv->dev->stats.rx_dropped++; - break; + continue; } prefetch(skb->data - NET_IP_ALIGN); rx_q->rx_skbuff[entry] = NULL; @@ -3507,7 +3508,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; } - entry = next_entry; }
stmmac_rx_refill(priv, queue);
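To illustrate the reasoning above, here is a minimal standalone simulation, not driver code: the budget, packet pattern, and the poll_queue()/packet_is_bad() helpers are made up. With break, a stream of bad packets ends every poll after a single descriptor, while continue keeps consuming the budget and drains the backlog.

#include <stdio.h>
#include <stdbool.h>

#define BUDGET 8

static bool packet_is_bad(int i) { return i % 2 == 0; } /* every other packet is bad */

static int poll_queue(int start, int total, bool continue_on_drop)
{
    int count = 0, entry = start;

    while (count < BUDGET && entry < total) {
        count++;
        if (packet_is_bad(entry)) {
            entry++;
            if (continue_on_drop)
                continue;   /* keep using the budget */
            break;          /* old behaviour: give up early */
        }
        entry++;            /* deliver a good packet */
    }
    return entry;           /* next entry to process on the following poll */
}

int main(void)
{
    printf("break:    next entry is %d\n", poll_queue(0, 32, false));
    printf("continue: next entry is %d\n", poll_queue(0, 32, true));
    return 0;
}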
From: Aaro Koskinen aaro.koskinen@nokia.com
[ Upstream commit 1b746ce8b397e58f9e40ce5c63b7198de6930482 ]
If we have error bits set, the discard_frame status will get overwritten by the checksum bit checks, which might set the status back to a good one. Fix by checking the COE status only if the frame is good.
Signed-off-by: Aaro Koskinen aaro.koskinen@nokia.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index e8855e6adb48..c42ef6c729c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -231,9 +231,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, * It doesn't match with the information reported into the databook. * At any rate, we need to understand if the CSUM hw computation is ok * and report this info to the upper layers. */ - ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), - !!(rdes0 & RDES0_FRAME_TYPE), - !!(rdes0 & ERDES0_RX_MAC_ADDR)); + if (likely(ret == good_frame)) + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++;
From: Aaro Koskinen aaro.koskinen@nokia.com
[ Upstream commit 8ac0c24fe1c256af6644caf3d311029440ec2fbd ]
Packets without the last descriptor set should be dropped early. If we receive a frame larger than the DMA buffer, the HW will continue using the next descriptor. The driver mistakes these for individual frames, and sometimes a truncated frame (without the LD set) may look like a valid packet.
This fixes a strange issue where the system replies to a 4098-byte ping although the MTU/DMA buffer size is set to 4096, and yet at the same time it is logging an oversized packet.
Signed-off-by: Aaro Koskinen aaro.koskinen@nokia.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index c42ef6c729c0..5202d6ad7919 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, if (unlikely(rdes0 & RDES0_OWN)) return dma_own;
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++;
From: Aaro Koskinen aaro.koskinen@nokia.com
[ Upstream commit 057a0c5642a2ff2db7c421cdcde34294a23bf37b ]
This log is harmful as it can trigger multiple times per packet. Delete it.
Signed-off-by: Aaro Koskinen aaro.koskinen@nokia.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index c55a9815b394..b7dd4e3c760d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { - pr_warn("%s: Oversized frame spanned multiple buffers\n", - __func__); stats->rx_length_errors++; return discard_frame; }
From: Al Viro viro@zeniv.linux.org.uk
[ Upstream commit 4fdcfab5b5537c21891e22e65996d4d0dd8ab4ca ]
Free the symlink body after the same RCU delay we have for freeing the struct inode itself, so that traversal during an RCU pathwalk wouldn't step into freed memory.
Signed-off-by: Al Viro viro@zeniv.linux.org.uk Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- fs/jffs2/readinode.c | 5 ----- fs/jffs2/super.c | 5 ++++- 2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 389ea53ea487..bccfc40b3a74 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
- if (f->target) { - kfree(f->target); - f->target = NULL; - } - fds = f->dents; while(fds) { fd = fds; diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index bb6ae387469f..05d892c79339 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb) static void jffs2_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); - kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + + kfree(f->target); + kmem_cache_free(jffs2_inode_cachep, f); }
static void jffs2_destroy_inode(struct inode *inode)
From: Al Viro viro@zeniv.linux.org.uk
[ Upstream commit 93b919da64c15b90953f96a536e5e61df896ca57 ]
The symlink body shouldn't be freed without an RCU delay. Switch debugfs to ->destroy_inode() and use of call_rcu(); free both the inode and symlink body in the callback. Similar to the solution for bpf, only here it's even more obvious that ->evict_inode() can be dropped.
Signed-off-by: Al Viro viro@zeniv.linux.org.uk Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- fs/debugfs/inode.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 29c68c5d44d5..c4a4fc6f1a95 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) return 0; }
-static void debugfs_evict_inode(struct inode *inode) +static void debugfs_i_callback(struct rcu_head *head) { - truncate_inode_pages_final(&inode->i_data); - clear_inode(inode); + struct inode *inode = container_of(head, struct inode, i_rcu); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); + free_inode_nonrcu(inode); +} + +static void debugfs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, debugfs_i_callback); }
static const struct super_operations debugfs_super_operations = { .statfs = simple_statfs, .remount_fs = debugfs_remount, .show_options = debugfs_show_options, - .evict_inode = debugfs_evict_inode, + .destroy_inode = debugfs_destroy_inode, };
static void debugfs_release_dentry(struct dentry *dentry)
From: Andreas Kemnade andreas@kemnade.info
[ Upstream commit 20bb907f7dc82ecc9e135ad7067ac7eb69c81222 ]
Since commit 6e2bd956936 ("i2c: omap: Use noirq system sleep pm ops to idle device for suspend") on gta04 we have handle_twl4030_pih() called in situations where pm_runtime_get() in i2c-omap.c returns -EACCES.
[ 86.474365] Freezing remaining freezable tasks ... (elapsed 0.002 seconds) done. [ 86.485473] printk: Suspending console(s) (use no_console_suspend to debug) [ 86.555572] Disabling non-boot CPUs ... [ 86.555664] Successfully put all powerdomains to target state [ 86.563720] twl: Read failed (mod 1, reg 0x01 count 1) [ 86.563751] twl4030: I2C error -13 reading PIH ISR [ 86.563812] twl: Read failed (mod 1, reg 0x01 count 1) [ 86.563812] twl4030: I2C error -13 reading PIH ISR [ 86.563873] twl: Read failed (mod 1, reg 0x01 count 1) [ 86.563903] twl4030: I2C error -13 reading PIH ISR
This happens when we wake up via something behind the twl4030 (power button or RTC alarm). This goes on for minutes until the system is finally resumed. Disable the irq on suspend and enable it on resume to avoid having i2c access problems when the irq registers are checked.
Fixes: 6e2bd956936 ("i2c: omap: Use noirq system sleep pm ops to idle device for suspend") Signed-off-by: Andreas Kemnade andreas@kemnade.info Tested-by: Tony Lindgren tony@atomide.com Signed-off-by: Lee Jones lee.jones@linaro.org Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/mfd/twl-core.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+)
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 299016bc46d9..104477b512a2 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -1245,6 +1245,28 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) return status; }
+static int __maybe_unused twl_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + disable_irq(client->irq); + + return 0; +} + +static int __maybe_unused twl_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + enable_irq(client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume); + static const struct i2c_device_id twl_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ @@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = { /* One Client Driver , 4 Clients */ static struct i2c_driver twl_driver = { .driver.name = DRIVER_NAME, + .driver.pm = &twl_dev_pm_ops, .id_table = twl_ids, .probe = twl_probe, .remove = twl_remove,
From: Shenghui Wang shhuiw@foxmail.com
[ Upstream commit b9a1ff504b9492ad6beb7d5606e0e3365d4d8499 ]
kfree() can leak the hctx->fq->flush_rq field.
Reviewed-by: Ming Lei ming.lei@redhat.com Signed-off-by: Shenghui Wang shhuiw@foxmail.com Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 16f9675c57e6..97eba6d23425 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2341,7 +2341,7 @@ static int blk_mq_init_hctx(struct request_queue *q, return 0;
free_fq: - kfree(hctx->fq); + blk_free_flush_queue(hctx->fq); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx);
From: Alexandre Belloni alexandre.belloni@bootlin.com
[ Upstream commit 882c5e552ffd06856de42261460f46e18319d259 ]
The DA9063AD doesn't support alarms on any seconds and its granularity is the minute. Set uie_unsupported in that case.
Reported-by: Wolfram Sang wsa+renesas@sang-engineering.com Reported-by: Geert Uytterhoeven geert+renesas@glider.be Reviewed-by: Wolfram Sang wsa+renesas@sang-engineering.com Tested-by: Wolfram Sang wsa+renesas@sang-engineering.com Acked-by: Steve Twiss stwiss.opensource@diasemi.com Signed-off-by: Alexandre Belloni alexandre.belloni@bootlin.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/rtc/rtc-da9063.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index b4e054c64bad..69b54e5556c0 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c @@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev) da9063_data_to_tm(data, &rtc->alarm_time, rtc); rtc->rtc_sync = false;
+ /* + * TODO: some models have alarms on a minute boundary but still support + * real hardware interrupts. Add this once the core supports it. + */ + if (config->rtc_data_start != RTC_SEC) + rtc->rtc_dev->uie_unsupported = 1; + irq_alarm = platform_get_irq_byname(pdev, "ALARM"); ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, da9063_alarm_event,
From: Dmitry Torokhov dmitry.torokhov@gmail.com
[ Upstream commit ce856634af8cda3490947df8ac1ef5843e6356af ]
According to HUTRR89, usage 0x1cb from the consumer page was assigned to allow launching a desktop-aware assistant application, so let's add the mapping.
Signed-off-by: Dmitry Torokhov dmitry.torokhov@gmail.com Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/hid/hid-input.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 59a5608b8dc0..ff92a7b2fc89 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -995,6 +995,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x1b8: map_key_clear(KEY_VIDEO); break; case 0x1bc: map_key_clear(KEY_MESSENGER); break; case 0x1bd: map_key_clear(KEY_INFO); break; + case 0x1cb: map_key_clear(KEY_ASSISTANT); break; case 0x201: map_key_clear(KEY_NEW); break; case 0x202: map_key_clear(KEY_OPEN); break; case 0x203: map_key_clear(KEY_CLOSE); break;
From: Louis Taylor louis@kragniz.eu
[ Upstream commit 426b046b748d1f47e096e05bdcc6fb4172791307 ]
When compiling with -Wformat, clang emits the following warnings:
drivers/vfio/pci/vfio_pci.c:1601:5: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~
drivers/vfio/pci/vfio_pci.c:1601:13: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~
drivers/vfio/pci/vfio_pci.c:1601:21: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~~~~
drivers/vfio/pci/vfio_pci.c:1601:32: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~~~~
drivers/vfio/pci/vfio_pci.c:1605:5: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~
drivers/vfio/pci/vfio_pci.c:1605:13: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~
drivers/vfio/pci/vfio_pci.c:1605:21: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~~~~
drivers/vfio/pci/vfio_pci.c:1605:32: warning: format specifies type 'unsigned short' but the argument has type 'unsigned int' [-Wformat] vendor, device, subvendor, subdevice, ^~~~~~~~~ The types of these arguments are unconditionally defined, so this patch updates the format character to the correct ones for unsigned ints.
Link: https://github.com/ClangBuiltLinux/linux/issues/378 Signed-off-by: Louis Taylor louis@kragniz.eu Reviewed-by: Nick Desaulniers ndesaulniers@google.com Signed-off-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/vfio/pci/vfio_pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index ff60bd1ea587..eb8fc8ccffc6 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -1597,11 +1597,11 @@ static void __init vfio_pci_fill_ids(void) rc = pci_add_dynid(&vfio_pci_driver, vendor, device, subvendor, subdevice, class, class_mask, 0); if (rc) - pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n", + pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n", vendor, device, subvendor, subdevice, class, class_mask, rc); else - pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n", + pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n", vendor, device, subvendor, subdevice, class, class_mask); }
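As a quick illustration of the warning being fixed, this standalone snippet (hypothetical vendor/device values, not the driver code) reproduces the mismatch when built with clang -Wformat: the %hx conversions expect a promoted unsigned short, while the arguments are unsigned ints, so only the %x variants match the argument types.

#include <stdio.h>

int main(void)
{
    unsigned int vendor = 0x8086, device = 0x10d3;

    printf("[%04hx:%04hx]\n", vendor, device);  /* clang -Wformat warns here */
    printf("[%04x:%04x]\n", vendor, device);    /* matches the unsigned int arguments */
    return 0;
}

The printed values are the same in both cases because they fit in 16 bits; only the format/argument type agreement differs.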
From: Alex Williamson alex.williamson@redhat.com
[ Upstream commit 492855939bdb59c6f947b0b5b44af9ad82b7e38c ]
Memory backed DMA mappings are accounted against a user's locked memory limit, including multiple mappings of the same memory. This accounting bounds the number of such mappings that a user can create. However, DMA mappings that are not backed by memory, such as DMA mappings of device MMIO via mmaps, do not make use of page pinning and therefore do not count against the user's locked memory limit. These mappings still consume memory, but the memory is not well associated to the process for the purpose of oom killing a task.
To add bounding on this use case, we introduce a limit to the total number of concurrent DMA mappings that a user is allowed to create. This limit is exposed as a tunable module option where the default value of 64K is expected to be well in excess of any reasonable use case (a large virtual machine configuration would typically only make use of tens of concurrent mappings).
This fixes CVE-2019-3882.
Reviewed-by: Eric Auger eric.auger@redhat.com Tested-by: Eric Auger eric.auger@redhat.com Reviewed-by: Peter Xu peterx@redhat.com Reviewed-by: Cornelia Huck cohuck@redhat.com Signed-off-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/vfio/vfio_iommu_type1.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 73652e21efec..d0f731c9920a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -58,12 +58,18 @@ module_param_named(disable_hugepages, MODULE_PARM_DESC(disable_hugepages, "Disable VFIO IOMMU support for IOMMU hugepages.");
+static unsigned int dma_entry_limit __read_mostly = U16_MAX; +module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); +MODULE_PARM_DESC(dma_entry_limit, + "Maximum number of user DMA mappings per container (65535)."); + struct vfio_iommu { struct list_head domain_list; struct vfio_domain *external_domain; /* domain for external user */ struct mutex lock; struct rb_root dma_list; struct blocking_notifier_head notifier; + unsigned int dma_avail; bool v2; bool nesting; }; @@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) vfio_unlink_dma(iommu, dma); put_task_struct(dma->task); kfree(dma); + iommu->dma_avail++; }
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) @@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, goto out_unlock; }
+ if (!iommu->dma_avail) { + ret = -ENOSPC; + goto out_unlock; + } + dma = kzalloc(sizeof(*dma), GFP_KERNEL); if (!dma) { ret = -ENOMEM; goto out_unlock; }
+ iommu->dma_avail--; dma->iova = iova; dma->vaddr = vaddr; dma->prot = prot; @@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->domain_list); iommu->dma_list = RB_ROOT; + iommu->dma_avail = dma_entry_limit; mutex_init(&iommu->lock); BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
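A minimal standalone sketch of the accounting scheme described above, with a tiny made-up limit, no locking, and local stand-in helpers (vfio_map()/vfio_unmap() are not the real vfio functions): mappings fail with -ENOSPC once the budget is used up and become possible again after an unmap.

#include <errno.h>
#include <stdio.h>

static unsigned int dma_avail = 3;  /* stand-in for the dma_entry_limit budget */

static int vfio_map(void)
{
    if (!dma_avail)
        return -ENOSPC;   /* same error the patched ioctl path returns */
    dma_avail--;
    return 0;
}

static void vfio_unmap(void)
{
    dma_avail++;
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        printf("map %d -> %d\n", i, vfio_map());  /* the 4th and 5th calls fail */
    vfio_unmap();
    printf("after unmap -> %d\n", vfio_map());    /* succeeds again */
    return 0;
}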
From: Xose Vazquez Perez xose.vazquez@gmail.com
[ Upstream commit 1cb1d2c64e812928fe0a40b8f7e74523d0283dbe ]
Blacklist "Universal Xport" LUN. It's used for in-band storage array management. Also add model to the rdac dh family.
Cc: Martin Wilck mwilck@suse.com Cc: Hannes Reinecke hare@suse.de Cc: NetApp RDAC team ng-eseries-upstream-maintainers@netapp.com Cc: Christophe Varoqui christophe.varoqui@opensvc.com Cc: James E.J. Bottomley jejb@linux.vnet.ibm.com Cc: Martin K. Petersen martin.petersen@oracle.com Cc: SCSI ML linux-scsi@vger.kernel.org Cc: DM ML dm-devel@redhat.com Signed-off-by: Xose Vazquez Perez xose.vazquez@gmail.com Reviewed-by: Martin Wilck mwilck@suse.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/scsi/scsi_devinfo.c | 1 + drivers/scsi/scsi_dh.c | 1 + 2 files changed, 2 insertions(+)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c4cbfd07b916..a08ff3bd6310 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -238,6 +238,7 @@ static struct { {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 5a58cbf3a75d..c14006ac98f9 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"NETAPP", "INF-01-00", "rdac", }, {"LSI", "INF-01-00", "rdac", }, {"ENGENIO", "INF-01-00", "rdac", }, + {"LENOVO", "DE_Series", "rdac", }, {NULL, NULL, NULL }, };
From: Michael Kelley mikelley@microsoft.com
[ Upstream commit 382e06d11e075a40b4094b6ef809f8d4bcc7ab2a ]
When the number of sub-channels offered by Hyper-V is >= the number of CPUs in the VM, calculate the correct number of sub-channels. The current code produces one too many.
This scenario arises only when the number of CPUs is artificially restricted (for example, with maxcpus=<n> on the kernel boot line), because Hyper-V normally offers a sub-channel count < number of CPUs. While the current code doesn't break, the extra sub-channel is unbalanced across the CPUs (for example, a total of 5 channels on a VM with 4 CPUs).
Signed-off-by: Michael Kelley mikelley@microsoft.com Reviewed-by: Vitaly Kuznetsov vkuznets@redhat.com Reviewed-by: Long Li longli@microsoft.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/scsi/storvsc_drv.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 84380bae20f1..e186743033f4 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) { struct device *dev = &device->device; struct storvsc_device *stor_device; - int num_cpus = num_online_cpus(); int num_sc; struct storvsc_cmd_request *request; struct vstor_packet *vstor_packet; int ret, t;
- num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns); + /* + * If the number of CPUs is artificially restricted, such as + * with maxcpus=1 on the kernel boot line, Hyper-V could offer + * sub-channels >= the number of CPUs. These sub-channels + * should not be created. The primary channel is already created + * and assigned to one CPU, so check against # CPUs - 1. + */ + num_sc = min((int)(num_online_cpus() - 1), max_chns); + if (!num_sc) + return; + stor_device = get_out_stor_device(device); if (!stor_device) return;
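A worked example of the off-by-one described above, as a standalone program (assumed 4 online CPUs and 8 offered sub-channels; min() is re-declared locally, and the variables are plain ints rather than the driver's types):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int num_online_cpus = 4, max_chns = 8;

    int old_num_sc = (max_chns > num_online_cpus) ? num_online_cpus : max_chns;
    int new_num_sc = min(num_online_cpus - 1, max_chns);

    /* old: 4 sub-channels + 1 primary = 5 channels spread over 4 CPUs (unbalanced) */
    printf("old num_sc=%d -> %d total channels\n", old_num_sc, old_num_sc + 1);
    /* new: 3 sub-channels + 1 primary = 4 channels, one per CPU */
    printf("new num_sc=%d -> %d total channels\n", new_num_sc, new_num_sc + 1);
    return 0;
}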
From: Peng Hao peng.hao2@zte.com.cn
[ Upstream commit ba5e60c9b75dec92d4c695b928f69300b17d7686 ]
of_find_device_by_node() takes a reference to the struct device when it finds a match via get_device(). When returning an error we should call put_device().
Reviewed-by: Mukesh Ojha mojha@codeaurora.org Signed-off-by: Peng Hao peng.hao2@zte.com.cn Signed-off-by: Ludovic Desroches ludovic.desroches@microchip.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm/mach-at91/pm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 51e808adb00c..2a757dcaa1a5 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam"); if (!np) - goto securam_fail; + goto securam_fail_no_ref_dev;
pdev = of_find_device_by_node(np); of_node_put(np); if (!pdev) { pr_warn("%s: failed to find securam device!\n", __func__); - goto securam_fail; + goto securam_fail_no_ref_dev; }
sram_pool = gen_pool_get(&pdev->dev, NULL); @@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void) return 0;
securam_fail: + put_device(&pdev->dev); +securam_fail_no_ref_dev: iounmap(pm_data.sfrbu); pm_data.sfrbu = NULL; return ret;
From: Dongli Zhang dongli.zhang@oracle.com
[ Upstream commit bcc816dfe51ab86ca94663c7b225f2d6eb0fddb9 ]
We would never be able to sort the list if we first reset plug->rq_count, which is used in a conditional check later.
Fixes: ce5b009cff19 ("block: improve logic around when to sort a plug list") Reviewed-by: Ming Lei ming.lei@redhat.com Signed-off-by: Dongli Zhang dongli.zhang@oracle.com Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- block/blk-mq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 97eba6d23425..5a2585d69c81 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1716,11 +1716,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) unsigned int depth;
list_splice_init(&plug->mq_list, &list); - plug->rq_count = 0;
if (plug->rq_count > 2 && plug->multiple_queues) list_sort(NULL, &list, plug_rq_cmp);
+ plug->rq_count = 0; + this_q = NULL; this_hctx = NULL; this_ctx = NULL;
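To make the ordering issue concrete, here is a standalone sketch with simplified stand-ins for the plug structure and the sort decision (struct plug and the two helpers are local to the demo): clearing rq_count before the "rq_count > 2" test makes the sort condition permanently false.

#include <stdio.h>
#include <stdbool.h>

struct plug { int rq_count; bool multiple_queues; };

static bool should_sort_buggy(struct plug *p)
{
    p->rq_count = 0;                               /* reset too early */
    return p->rq_count > 2 && p->multiple_queues;  /* always false now */
}

static bool should_sort_fixed(struct plug *p)
{
    bool sort = p->rq_count > 2 && p->multiple_queues;
    p->rq_count = 0;                               /* reset after the check */
    return sort;
}

int main(void)
{
    struct plug a = { .rq_count = 5, .multiple_queues = true };
    struct plug b = a;

    printf("buggy: %d, fixed: %d\n", should_sort_buggy(&a), should_sort_fixed(&b));
    return 0;
}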
From: Wei Li liwei391@huawei.com
[ Upstream commit 1c41860864c8ae0387ef7d44f0000e99cbb2e06d ]
When doing unwind_frame() in the context of a pseudo NMI (which requires CONFIG_ARM64_PSEUDO_NMI) and reaching the bottom of the stack (fp == 0, pc != 0), on_sdei_stack() will return true even though the SDEI ACPI table has not actually been initialized. This will cause a "NULL pointer dereference" oops later on.
Reviewed-by: Julien Thierry julien.thierry@arm.com Signed-off-by: Wei Li liwei391@huawei.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm64/kernel/sdei.c | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 5ba4465e44f0..ea94cf8f9dc6 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); unsigned long high = low + SDEI_STACK_SIZE;
+ if (!low) + return false; + if (sp < low || sp >= high) return false;
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); unsigned long high = low + SDEI_STACK_SIZE;
+ if (!low) + return false; + if (sp < low || sp >= high) return false;
From: Liubin Shu shuliubin@huawei.com
[ Upstream commit 3a39a12ad364a9acd1038ba8da67cd8430f30de4 ]
This patch is trying to fix the issue due to: [27237.844750] BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x708/0xa18[hns_enet_drv]
After hnae_queue_xmit() in hns_nic_net_xmit_hw(), an interrupt can occur and call hns_nic_tx_poll_one() to handle the completed packets and free the skb. So when execution returns to hns_nic_net_xmit_hw(), accessing skb->len causes a use-after-free.
This patch updates the tx ring statistics in hns_nic_tx_poll_one() to fix the bug.
Signed-off-by: Liubin Shu shuliubin@huawei.com Signed-off-by: Zhen Lei thunder.leizhen@huawei.com Signed-off-by: Yonglong Liu liuyonglong@huawei.com Signed-off-by: Peng Li lipeng321@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 60e7d7ae3787..e5a7c0761dbd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -376,8 +376,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, wmb(); /* commit all data before submit */ assert(skb->queue_mapping < priv->ae_handle->q_num); hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); - ring->stats.tx_pkts++; - ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -999,6 +997,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, /* issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ring->next_to_clean]); } + /* update tx ring statistics. */ + ring->stats.tx_pkts += pkts; + ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
From: Yonglong Liu liuyonglong@huawei.com
[ Upstream commit acb1ce15a61154aa501891d67ebf79bc9ea26818 ]
When the HNS driver is loaded, it always prints an error: "netif_napi_add() called with weight 256"
This is because the kernel checks the NAPI polling weights requested by drivers and it prints an error message if a driver requests a weight bigger than 64.
So use NAPI_POLL_WEIGHT to fix it.
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Signed-off-by: Peng Li lipeng321@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index e5a7c0761dbd..4cd86ba1f050 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
-#define NIC_TX_CLEAN_MAX_NUM 256 -#define NIC_RX_CLEAN_MAX_NUM 64 - #define RCB_IRQ_NOT_INITED 0 #define RCB_IRQ_INITED 1 #define HNS_BUFFER_SIZE_2048 2048 @@ -2153,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; } for (i = h->q_num; i < h->q_num * 2; i++) { @@ -2166,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; }
From: Yonglong Liu liuyonglong@huawei.com
[ Upstream commit c0b0984426814f3a9251873b689e67d34d8ccd84 ]
Rebooting the system again and again may cause a memory overwrite.
[ 15.638922] systemd[1]: Reached target Swap. [ 15.667561] tun: Universal TUN/TAP device driver, 1.6 [ 15.676756] Bridge firewalling registered [ 17.344135] Unable to handle kernel paging request at virtual address 0000000200000040 [ 17.352179] Mem abort info: [ 17.355007] ESR = 0x96000004 [ 17.358105] Exception class = DABT (current EL), IL = 32 bits [ 17.364112] SET = 0, FnV = 0 [ 17.367209] EA = 0, S1PTW = 0 [ 17.370393] Data abort info: [ 17.373315] ISV = 0, ISS = 0x00000004 [ 17.377206] CM = 0, WnR = 0 [ 17.380214] user pgtable: 4k pages, 48-bit VAs, pgdp = (____ptrval____) [ 17.386926] [0000000200000040] pgd=0000000000000000 [ 17.391878] Internal error: Oops: 96000004 [#1] SMP [ 17.396824] CPU: 23 PID: 95 Comm: kworker/u130:0 Tainted: G E 4.19.25-1.2.78.aarch64 #1 [ 17.414175] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.54 08/16/2018 [ 17.425615] Workqueue: events_unbound async_run_entry_fn [ 17.435151] pstate: 00000005 (nzcv daif -PAN -UAO) [ 17.444139] pc : __mutex_lock.isra.1+0x74/0x540 [ 17.453002] lr : __mutex_lock.isra.1+0x3c/0x540 [ 17.461701] sp : ffff000100d9bb60 [ 17.469146] x29: ffff000100d9bb60 x28: 0000000000000000 [ 17.478547] x27: 0000000000000000 x26: ffff802fb8945000 [ 17.488063] x25: 0000000000000000 x24: ffff802fa32081a8 [ 17.497381] x23: 0000000000000002 x22: ffff801fa2b15220 [ 17.506701] x21: ffff000009809000 x20: ffff802fa23a0888 [ 17.515980] x19: ffff801fa2b15220 x18: 0000000000000000 [ 17.525272] x17: 0000000200000000 x16: 0000000200000000 [ 17.534511] x15: 0000000000000000 x14: 0000000000000000 [ 17.543652] x13: ffff000008d95db8 x12: 000000000000000d [ 17.552780] x11: ffff000008d95d90 x10: 0000000000000b00 [ 17.561819] x9 : ffff000100d9bb90 x8 : ffff802fb89d6560 [ 17.570829] x7 : 0000000000000004 x6 : 00000004a1801d05 [ 17.579839] x5 : 0000000000000000 x4 : 0000000000000000 [ 17.588852] x3 : ffff802fb89d5a00 x2 : 0000000000000000 [ 17.597734] x1 : 0000000200000000 x0 : 0000000200000000 [ 17.606631] Process kworker/u130:0 (pid: 95, stack limit = 0x(____ptrval____)) [ 17.617438] Call trace: [ 17.623349] __mutex_lock.isra.1+0x74/0x540 [ 17.630927] __mutex_lock_slowpath+0x24/0x30 [ 17.638602] mutex_lock+0x50/0x60 [ 17.645295] drain_workqueue+0x34/0x198 [ 17.652623] __sas_drain_work+0x7c/0x168 [ 17.659903] sas_drain_work+0x60/0x68 [ 17.666947] hisi_sas_scan_finished+0x30/0x40 [hisi_sas_main] [ 17.676129] do_scsi_scan_host+0x70/0xb0 [ 17.683534] do_scan_async+0x20/0x228 [ 17.690586] async_run_entry_fn+0x4c/0x1d0 [ 17.697997] process_one_work+0x1b4/0x3f8 [ 17.705296] worker_thread+0x54/0x470
The call trace is not the same every time, but the overwritten address is always the same: Unable to handle kernel paging request at virtual address 0000000200000040
The root cause is that the write to the XGMAC_MAC_TX_LF_RF_CONTROL_REG register did not use the io_base offset.
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index ba4316910dea..a60f207768fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); - dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); + dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); }
/**
From: Yonglong Liu liuyonglong@huawei.com
[ Upstream commit f058e46855dcbc28edb2ed4736f38a71fd19cadb ]
ICMP6 neighbor solicitation messages will be discarded by the Hip06 chips because the forwarding pool is not set. Enabling promisc mode has the same problem.
This patch fixes the wrong forwarding table configuration for the multicast vague matching when promisc mode is enabled, and adds a forwarding pool for the forwarding table.
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- .../ethernet/hisilicon/hns/hns_dsaf_main.c | 33 +++++++++++++++---- 1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ac55db065f16..f5ff07cb2b72 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -2750,6 +2750,17 @@ int hns_dsaf_get_regs_count(void) return DSAF_DUMP_REGS_NUM; }
+static int hns_dsaf_get_port_id(u8 port) +{ + if (port < DSAF_SERVICE_NW_NUM) + return port; + + if (port >= DSAF_BASE_INNER_PORT_NUM) + return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; + + return -EINVAL; +} + static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) { struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; @@ -2815,23 +2826,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) memset(&temp_key, 0x0, sizeof(temp_key)); mask_entry.addr[0] = 0x01; hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, - port, mask_entry.addr); + 0xf, mask_entry.addr); tbl_tcam_mcast.tbl_mcast_item_vld = 1; tbl_tcam_mcast.tbl_mcast_old_en = 0;
- if (port < DSAF_SERVICE_NW_NUM) { - mskid = port; - } else if (port >= DSAF_BASE_INNER_PORT_NUM) { - mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; - } else { + /* set MAC port to handle multicast */ + mskid = hns_dsaf_get_port_id(port); + if (mskid == -EINVAL) { dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", dsaf_dev->ae_dev.name, port, mask_key.high.val, mask_key.low.val); return; } + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], + mskid % 32, 1);
+ /* set pool bit map to handle multicast */ + mskid = hns_dsaf_get_port_id(port_num); + if (mskid == -EINVAL) { + dev_err(dsaf_dev->dev, + "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n", + dsaf_dev->ae_dev.name, port_num, + mask_key.high.val, mask_key.low.val); + return; + } dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1); + memcpy(&temp_key, &mask_key, sizeof(mask_key)); hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, (struct dsaf_tbl_tcam_data *)(&mask_key),
From: Yonglong Liu liuyonglong@huawei.com
[ Upstream commit 8601a99d7c0256b7a7fdd1ab14cf6c1f1dfcadc6 ]
When the SMMU is enabled, removing the HNS driver will cause a WARNING:
[ 141.924177] WARNING: CPU: 36 PID: 2708 at drivers/iommu/dma-iommu.c:443 __iommu_dma_unmap+0xc0/0xc8 [ 141.954673] Modules linked in: hns_enet_drv(-) [ 141.963615] CPU: 36 PID: 2708 Comm: rmmod Tainted: G W 5.0.0-rc1-28723-gb729c57de95c-dirty #32 [ 141.983593] Hardware name: Huawei D05/D05, BIOS Hisilicon D05 UEFI Nemo 1.8 RC0 08/31/2017 [ 142.000244] pstate: 60000005 (nZCv daif -PAN -UAO) [ 142.009886] pc : __iommu_dma_unmap+0xc0/0xc8 [ 142.018476] lr : __iommu_dma_unmap+0xc0/0xc8 [ 142.027066] sp : ffff000013533b90 [ 142.033728] x29: ffff000013533b90 x28: ffff8013e6983600 [ 142.044420] x27: 0000000000000000 x26: 0000000000000000 [ 142.055113] x25: 0000000056000000 x24: 0000000000000015 [ 142.065806] x23: 0000000000000028 x22: ffff8013e66eee68 [ 142.076499] x21: ffff8013db919800 x20: 0000ffffefbff000 [ 142.087192] x19: 0000000000001000 x18: 0000000000000007 [ 142.097885] x17: 000000000000000e x16: 0000000000000001 [ 142.108578] x15: 0000000000000019 x14: 363139343a70616d [ 142.119270] x13: 6e75656761705f67 x12: 0000000000000000 [ 142.129963] x11: 00000000ffffffff x10: 0000000000000006 [ 142.140656] x9 : 1346c1aa88093500 x8 : ffff0000114de4e0 [ 142.151349] x7 : 6662666578303d72 x6 : ffff0000105ffec8 [ 142.162042] x5 : 0000000000000000 x4 : 0000000000000000 [ 142.172734] x3 : 00000000ffffffff x2 : ffff0000114de500 [ 142.183427] x1 : 0000000000000000 x0 : 0000000000000035 [ 142.194120] Call trace: [ 142.199030] __iommu_dma_unmap+0xc0/0xc8 [ 142.206920] iommu_dma_unmap_page+0x20/0x28 [ 142.215335] __iommu_unmap_page+0x40/0x60 [ 142.223399] hnae_unmap_buffer+0x110/0x134 [ 142.231639] hnae_free_desc+0x6c/0x10c [ 142.239177] hnae_fini_ring+0x14/0x34 [ 142.246540] hnae_fini_queue+0x2c/0x40 [ 142.254080] hnae_put_handle+0x38/0xcc [ 142.261619] hns_nic_dev_remove+0x54/0xfc [hns_enet_drv] [ 142.272312] platform_drv_remove+0x24/0x64 [ 142.280552] device_release_driver_internal+0x17c/0x20c [ 142.291070] driver_detach+0x4c/0x90 [ 142.298259] bus_remove_driver+0x5c/0xd8 [ 142.306148] driver_unregister+0x2c/0x54 [ 142.314037] platform_driver_unregister+0x10/0x18 [ 142.323505] hns_nic_dev_driver_exit+0x14/0xf0c [hns_enet_drv] [ 142.335248] __arm64_sys_delete_module+0x214/0x25c [ 142.344891] el0_svc_common+0xb0/0x10c [ 142.352430] el0_svc_handler+0x24/0x80 [ 142.359968] el0_svc+0x8/0x7c0 [ 142.366104] ---[ end trace 60ad1cd58e63c407 ]---
The tx ring buffers are mapped at xmit time and unmapped when the xmit is done. So hnae_init_ring() does not map the tx ring buffers, but hnae_fini_ring() still has an unmap operation for the tx ring buffers, which were already unmapped when the xmit finished, and that causes this WARNING.
The hnae_alloc_buffers() is called in hnae_init_ring(), so the hnae_free_buffers() should be in hnae_fini_ring(), not in hnae_free_desc().
In hnae_fini_ring(), add an is_rx_ring() check as in hnae_init_ring(). When the ring is a tx ring, add a piece of code to ensure that the tx ring buffers are unmapped.
Signed-off-by: Yonglong Liu liuyonglong@huawei.com Signed-off-by: Peng Li lipeng321@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/hisilicon/hns/hnae.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 79d03f8ee7b1..c7fa97a7e1f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -150,7 +150,6 @@ static int hnae_alloc_buffers(struct hnae_ring *ring) /* free desc along with its attached buffer */ static void hnae_free_desc(struct hnae_ring *ring) { - hnae_free_buffers(ring); dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, ring->desc_num * sizeof(ring->desc[0]), ring_to_dma_dir(ring)); @@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring) /* fini ring, also free the buffer for the ring */ static void hnae_fini_ring(struct hnae_ring *ring) { + if (is_rx_ring(ring)) + hnae_free_buffers(ring); + hnae_free_desc(ring); kfree(ring->desc_cb); ring->desc_cb = NULL;
From: Varun Prakash varun@chelsio.com
[ Upstream commit cc5a726c79158bd307150e8d4176ec79b52001ea ]
BITS_TO_LONGS() uses DIV_ROUND_UP(); because of this, the ppmax value can be greater than the number of available per-CPU page pods.
This patch removes BITS_TO_LONGS() to fix this issue.
Signed-off-by: Varun Prakash varun@chelsio.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 74849be5f004..e2919005ead3 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, ppmax = max;
/* pool size must be multiple of unsigned long */ - bmap = BITS_TO_LONGS(ppmax); + bmap = ppmax / BITS_PER_TYPE(unsigned long); + if (!bmap) + return NULL; + ppmax = (bmap * sizeof(unsigned long)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; @@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, if (reserve_factor) { ppmax_pool = ppmax / reserve_factor; pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); + if (!pool) { + ppmax_pool = 0; + reserve_factor = 0; + }
pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", ndev->name, ppmax, ppmax_pool, pool_index_max);
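A worked example of the rounding problem described above, as a standalone program (assumes a 64-bit unsigned long and a made-up per-CPU ppmax of 100; the macros are re-declared locally):

#include <stdio.h>

#define BITS_PER_LONG      (8 * sizeof(unsigned long))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(n)   DIV_ROUND_UP(n, BITS_PER_LONG)

int main(void)
{
    unsigned long ppmax = 100;  /* page pods actually available per cpu (made-up) */

    unsigned long bmap_up   = BITS_TO_LONGS(ppmax);   /* old code: rounds up */
    unsigned long bmap_down = ppmax / BITS_PER_LONG;  /* fixed code: rounds down */

    printf("round up:   %lu longs -> %lu pods (more than the %lu available)\n",
           bmap_up, (unsigned long)(bmap_up * BITS_PER_LONG), ppmax);
    printf("round down: %lu longs -> %lu pods\n",
           bmap_down, (unsigned long)(bmap_down * BITS_PER_LONG));
    return 0;
}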
From: David Rientjes rientjes@google.com
[ Upstream commit b86bc2858b389255cd44555ce4b1e427b2b770c0 ]
This ensures that the address and length provided to DBG_DECRYPT and DBG_ENCRYPT do not cause an overflow.
At the same time, pass the actual number of pages pinned in memory to sev_unpin_memory() as a cleanup.
Reported-by: Cfir Cohen cfir@google.com Signed-off-by: David Rientjes rientjes@google.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/x86/kvm/svm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index a9b8e38d78ad..4c110956d188 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -6799,7 +6799,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) struct page **src_p, **dst_p; struct kvm_sev_dbg debug; unsigned long n; - int ret, size; + unsigned int size; + int ret;
if (!sev_guest(kvm)) return -ENOTTY; @@ -6807,6 +6808,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) return -EFAULT;
+ if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr) + return -EINVAL; + if (!debug.dst_uaddr) + return -EINVAL; + vaddr = debug.src_uaddr; size = debug.len; vaddr_end = vaddr + size; @@ -6857,8 +6863,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) dst_vaddr, len, &argp->error);
- sev_unpin_memory(kvm, src_p, 1); - sev_unpin_memory(kvm, dst_p, 1); + sev_unpin_memory(kvm, src_p, n); + sev_unpin_memory(kvm, dst_p, n);
if (ret) goto err;
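A minimal standalone sketch of the wrap-around check added above (assuming 64-bit user addresses; range_ok() is a local helper for the demo, not a kernel function):

#include <stdio.h>
#include <stdint.h>

static int range_ok(uint64_t uaddr, uint64_t len)
{
    /* reject empty ranges and ranges whose end wraps past the top of the address space */
    return len != 0 && uaddr + len >= uaddr;
}

int main(void)
{
    printf("%d\n", range_ok(0x7f0000000000ULL, 0x1000));      /* 1: valid range */
    printf("%d\n", range_ok(0xfffffffffffff000ULL, 0x2000));  /* 0: addr + len wraps */
    printf("%d\n", range_ok(0x1000, 0));                      /* 0: zero length */
    return 0;
}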
From: Catalin Marinas catalin.marinas@arm.com
[ Upstream commit 298a32b132087550d3fa80641ca58323c5dfd4d9 ]
Commit 2d4f567103ff ("KVM: PPC: Introduce kvm_tmp framework") adds kvm_tmp[] into the .bss section and then frees the rest of the unused space back to the page allocator.
kernel_init kvm_guest_init kvm_free_tmp free_reserved_area free_unref_page free_unref_page_prepare
With DEBUG_PAGEALLOC=y, it will unmap those pages from the kernel. As a result, the kmemleak scan will trigger a panic when it scans the .bss section with unmapped pages.
This patch creates dedicated kmemleak objects for the .data, .bss and potentially .data..ro_after_init sections to allow partial freeing via the kmemleak_free_part() in the powerpc kvm_free_tmp() function.
Link: http://lkml.kernel.org/r/20190321171917.62049-1-catalin.marinas@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com Reported-by: Qian Cai cai@lca.pw Acked-by: Michael Ellerman mpe@ellerman.id.au (powerpc) Tested-by: Qian Cai cai@lca.pw Cc: Paul Mackerras paulus@samba.org Cc: Benjamin Herrenschmidt benh@kernel.crashing.org Cc: Avi Kivity avi@redhat.com Cc: Paolo Bonzini pbonzini@redhat.com Cc: Radim Krcmar rkrcmar@redhat.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/powerpc/kernel/kvm.c | 7 +++++++ mm/kmemleak.c | 16 +++++++++++----- 2 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 683b5b3805bd..cd381e2291df 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -22,6 +22,7 @@ #include <linux/kvm_host.h> #include <linux/init.h> #include <linux/export.h> +#include <linux/kmemleak.h> #include <linux/kvm_para.h> #include <linux/slab.h> #include <linux/of.h> @@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
static __init void kvm_free_tmp(void) { + /* + * Inform kmemleak about the hole in the .bss section since the + * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y. + */ + kmemleak_free_part(&kvm_tmp[kvm_tmp_index], + ARRAY_SIZE(kvm_tmp) - kvm_tmp_index); free_reserved_area(&kvm_tmp[kvm_tmp_index], &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 707fa5579f66..6c318f5ac234 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1529,11 +1529,6 @@ static void kmemleak_scan(void) } rcu_read_unlock();
- /* data/bss scanning */ - scan_large_block(_sdata, _edata); - scan_large_block(__bss_start, __bss_stop); - scan_large_block(__start_ro_after_init, __end_ro_after_init); - #ifdef CONFIG_SMP /* per-cpu sections scanning */ for_each_possible_cpu(i) @@ -2071,6 +2066,17 @@ void __init kmemleak_init(void) } local_irq_restore(flags);
+ /* register the data/bss sections */ + create_object((unsigned long)_sdata, _edata - _sdata, + KMEMLEAK_GREY, GFP_ATOMIC); + create_object((unsigned long)__bss_start, __bss_stop - __bss_start, + KMEMLEAK_GREY, GFP_ATOMIC); + /* only register .data..ro_after_init if not within .data */ + if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) + create_object((unsigned long)__start_ro_after_init, + __end_ro_after_init - __start_ro_after_init, + KMEMLEAK_GREY, GFP_ATOMIC); + /* * This is the point where tracking allocations is safe. Automatic * scanning is started during the late initcall. Add the early logged
From: Mike Kravetz mike.kravetz@oracle.com
[ Upstream commit 58b6e5e8f1addd44583d61b0a03c0f5519527e35 ]
When mknod is used to create a block special file in hugetlbfs, it will allocate an inode and kmalloc a 'struct resv_map' via resv_map_alloc(). inode->i_mapping->private_data will point the newly allocated resv_map. However, when the device special file is opened bd_acquire() will set inode->i_mapping to bd_inode->i_mapping. Thus the pointer to the allocated resv_map is lost and the structure is leaked.
Programs to reproduce:
  mount -t hugetlbfs nodev hugetlbfs
  mknod hugetlbfs/dev b 0 0
  exec 30<> hugetlbfs/dev
  umount hugetlbfs/
resv_map structures are only needed for inodes which can have associated page allocations. To fix the leak, only allocate resv_map for those inodes which could possibly be associated with page allocations.
Link: http://lkml.kernel.org/r/20190401213101.16476-1-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz mike.kravetz@oracle.com Reviewed-by: Andrew Morton akpm@linux-foundation.org Reported-by: Yufen Yu yuyufen@huawei.com Suggested-by: Yufen Yu yuyufen@huawei.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- fs/hugetlbfs/inode.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a7fa037b876b..a3a3d256fb0e 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -741,11 +741,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, umode_t mode, dev_t dev) { struct inode *inode; - struct resv_map *resv_map; + struct resv_map *resv_map = NULL;
- resv_map = resv_map_alloc(); - if (!resv_map) - return NULL; + /* + * Reserve maps are only needed for inodes that can have associated + * page allocations. + */ + if (S_ISREG(mode) || S_ISLNK(mode)) { + resv_map = resv_map_alloc(); + if (!resv_map) + return NULL; + }
inode = new_inode(sb); if (inode) { @@ -780,8 +786,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, break; } lockdep_annotate_inode_mutex_key(inode); - } else - kref_put(&resv_map->refs, resv_map_release); + } else { + if (resv_map) + kref_put(&resv_map->refs, resv_map_release); + }
return inode; }
From: Randy Dunlap rdunlap@infradead.org
[ Upstream commit acaf892ecbf5be7710ae05a61fd43c668f68ad95 ]
Many of the sh CPU-types have their own plat_irq_setup() and arch_init_clk_ops() functions, so these same (empty) functions in arch/sh/boards/of-generic.c are not needed and cause build errors.
If there is some case where these empty functions are needed, they can be retained by marking them as "__weak" while at the same time making builds that do not need them succeed.
Fixes these build errors:
arch/sh/boards/of-generic.o: In function `plat_irq_setup': (.init.text+0x134): multiple definition of `plat_irq_setup' arch/sh/kernel/cpu/sh2/setup-sh7619.o:(.init.text+0x30): first defined here arch/sh/boards/of-generic.o: In function `arch_init_clk_ops': (.init.text+0x118): multiple definition of `arch_init_clk_ops' arch/sh/kernel/cpu/sh2/clock-sh7619.o:(.init.text+0x0): first defined here
Link: http://lkml.kernel.org/r/9ee4e0c5-f100-86a2-bd4d-1d3287ceab31@infradead.org Signed-off-by: Randy Dunlap rdunlap@infradead.org Reported-by: kbuild test robot lkp@intel.com Cc: Takashi Iwai tiwai@suse.de Cc: Yoshinori Sato ysato@users.sourceforge.jp Cc: Rich Felker dalias@libc.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/sh/boards/of-generic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 958f46da3a79..d91065e81a4e 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c @@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
struct sh_clk_ops;
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { }
-void __init plat_irq_setup(void) +void __init __weak plat_irq_setup(void) { }
From: Will Deacon will.deacon@arm.com
[ Upstream commit 9002b21465fa4d829edfc94a5a441005cffaa972 ]
Commit 32a5ad9c2285 ("sysctl: handle overflow for file-max") hooked up min/max values for the file-max sysctl parameter via the .extra1 and .extra2 fields in the corresponding struct ctl_table entry.
Unfortunately, the minimum value points at the global 'zero' variable, which is an int. This results in a KASAN splat when accessed as a long by proc_doulongvec_minmax on 64-bit architectures:
| BUG: KASAN: global-out-of-bounds in __do_proc_doulongvec_minmax+0x5d8/0x6a0 | Read of size 8 at addr ffff2000133d1c20 by task systemd/1 | | CPU: 0 PID: 1 Comm: systemd Not tainted 5.1.0-rc3-00012-g40b114779944 #2 | Hardware name: linux,dummy-virt (DT) | Call trace: | dump_backtrace+0x0/0x228 | show_stack+0x14/0x20 | dump_stack+0xe8/0x124 | print_address_description+0x60/0x258 | kasan_report+0x140/0x1a0 | __asan_report_load8_noabort+0x18/0x20 | __do_proc_doulongvec_minmax+0x5d8/0x6a0 | proc_doulongvec_minmax+0x4c/0x78 | proc_sys_call_handler.isra.19+0x144/0x1d8 | proc_sys_write+0x34/0x58 | __vfs_write+0x54/0xe8 | vfs_write+0x124/0x3c0 | ksys_write+0xbc/0x168 | __arm64_sys_write+0x68/0x98 | el0_svc_common+0x100/0x258 | el0_svc_handler+0x48/0xc0 | el0_svc+0x8/0xc | | The buggy address belongs to the variable: | zero+0x0/0x40 | | Memory state around the buggy address: | ffff2000133d1b00: 00 00 00 00 00 00 00 00 fa fa fa fa 04 fa fa fa | ffff2000133d1b80: fa fa fa fa 04 fa fa fa fa fa fa fa 04 fa fa fa | >ffff2000133d1c00: fa fa fa fa 04 fa fa fa fa fa fa fa 00 00 00 00 | ^ | ffff2000133d1c80: fa fa fa fa 00 fa fa fa fa fa fa fa 00 00 00 00 | ffff2000133d1d00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Fix the splat by introducing an unsigned long 'zero_ul' and using that instead.
Link: http://lkml.kernel.org/r/20190403153409.17307-1-will.deacon@arm.com Fixes: 32a5ad9c2285 ("sysctl: handle overflow for file-max") Signed-off-by: Will Deacon will.deacon@arm.com Acked-by: Christian Brauner christian@brauner.io Cc: Kees Cook keescook@chromium.org Cc: Alexey Dobriyan adobriyan@gmail.com Cc: Matteo Croce mcroce@redhat.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- kernel/sysctl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 28ec71d914c7..f50f1471c119 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -126,6 +126,7 @@ static int zero; static int __maybe_unused one = 1; static int __maybe_unused two = 2; static int __maybe_unused four = 4; +static unsigned long zero_ul; static unsigned long one_ul = 1; static unsigned long long_max = LONG_MAX; static int one_hundred = 100; @@ -1723,7 +1724,7 @@ static struct ctl_table fs_table[] = { .maxlen = sizeof(files_stat.max_files), .mode = 0644, .proc_handler = proc_doulongvec_minmax, - .extra1 = &zero, + .extra1 = &zero_ul, .extra2 = &long_max, }, {
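As a plain-C illustration of the mismatch (not kernel code; the variable names simply mirror the ones above): proc_doulongvec_minmax() dereferences .extra1 as an unsigned long, so the object behind it must be at least that wide, which the 4-byte int 'zero' on a 64-bit architecture is not.

#include <stdio.h>

static int zero;               /* what .extra1 used to point at */
static unsigned long zero_ul;  /* what the handler actually dereferences */

int main(void)
{
        printf("sizeof(zero)           = %zu\n", sizeof(zero));          /* 4 on LP64 */
        printf("sizeof(zero_ul)        = %zu\n", sizeof(zero_ul));       /* 8 on LP64 */
        printf("bytes read via .extra1 = %zu\n", sizeof(unsigned long)); /* 8 on LP64 */
        /* Reading 8 bytes through a pointer to the 4-byte 'zero' is the
         * out-of-bounds access KASAN reported; zero_ul is wide enough. */
        return 0;
}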
From: John Pittman jpittman@redhat.com
[ Upstream commit 7ff684a683d777c4956fce93e60accbab2bd7696 ]
At module load, if the selected home_node value is greater than the available numa nodes, the system will crash in __alloc_pages_nodemask() due to a bad paging request. Prevent this user error crash by detecting the bad value, logging an error, and setting g_home_node back to the default of NUMA_NO_NODE.
Signed-off-by: John Pittman jpittman@redhat.com Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/block/null_blk_main.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 62c9654b9ce8..fd7a9be54595 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1749,6 +1749,11 @@ static int __init null_init(void) return -EINVAL; }
+ if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { + pr_err("null_blk: invalid home_node value\n"); + g_home_node = NUMA_NO_NODE; + } + if (g_queue_mode == NULL_Q_RQ) { pr_err("null_blk: legacy IO path no longer available\n"); return -EINVAL;
From: Guenter Roeck linux@roeck-us.net
[ Upstream commit 47b16820c490149c2923e8474048f2c6e7557cab ]
If xace hardware reports a bad version number, the error handling code in ace_setup() calls put_disk(), followed by queue cleanup. However, since the disk data structure has the queue pointer set, put_disk() also cleans and releases the queue. This results in blk_cleanup_queue() accessing an already released data structure, which in turn may result in a crash such as the following.
[ 10.681671] BUG: Kernel NULL pointer dereference at 0x00000040 [ 10.681826] Faulting instruction address: 0xc0431480 [ 10.682072] Oops: Kernel access of bad area, sig: 11 [#1] [ 10.682251] BE PAGE_SIZE=4K PREEMPT Xilinx Virtex440 [ 10.682387] Modules linked in: [ 10.682528] CPU: 0 PID: 1 Comm: swapper Tainted: G W 5.0.0-rc6-next-20190218+ #2 [ 10.682733] NIP: c0431480 LR: c043147c CTR: c0422ad8 [ 10.682863] REGS: cf82fbe0 TRAP: 0300 Tainted: G W (5.0.0-rc6-next-20190218+) [ 10.683065] MSR: 00029000 <CE,EE,ME> CR: 22000222 XER: 00000000 [ 10.683236] DEAR: 00000040 ESR: 00000000 [ 10.683236] GPR00: c043147c cf82fc90 cf82ccc0 00000000 00000000 00000000 00000002 00000000 [ 10.683236] GPR08: 00000000 00000000 c04310bc 00000000 22000222 00000000 c0002c54 00000000 [ 10.683236] GPR16: 00000000 00000001 c09aa39c c09021b0 c09021dc 00000007 c0a68c08 00000000 [ 10.683236] GPR24: 00000001 ced6d400 ced6dcf0 c0815d9c 00000000 00000000 00000000 cedf0800 [ 10.684331] NIP [c0431480] blk_mq_run_hw_queue+0x28/0x114 [ 10.684473] LR [c043147c] blk_mq_run_hw_queue+0x24/0x114 [ 10.684602] Call Trace: [ 10.684671] [cf82fc90] [c043147c] blk_mq_run_hw_queue+0x24/0x114 (unreliable) [ 10.684854] [cf82fcc0] [c04315bc] blk_mq_run_hw_queues+0x50/0x7c [ 10.685002] [cf82fce0] [c0422b24] blk_set_queue_dying+0x30/0x68 [ 10.685154] [cf82fcf0] [c0423ec0] blk_cleanup_queue+0x34/0x14c [ 10.685306] [cf82fd10] [c054d73c] ace_probe+0x3dc/0x508 [ 10.685445] [cf82fd50] [c052d740] platform_drv_probe+0x4c/0xb8 [ 10.685592] [cf82fd70] [c052abb0] really_probe+0x20c/0x32c [ 10.685728] [cf82fda0] [c052ae58] driver_probe_device+0x68/0x464 [ 10.685877] [cf82fdc0] [c052b500] device_driver_attach+0xb4/0xe4 [ 10.686024] [cf82fde0] [c052b5dc] __driver_attach+0xac/0xfc [ 10.686161] [cf82fe00] [c0528428] bus_for_each_dev+0x80/0xc0 [ 10.686314] [cf82fe30] [c0529b3c] bus_add_driver+0x144/0x234 [ 10.686457] [cf82fe50] [c052c46c] driver_register+0x88/0x15c [ 10.686610] [cf82fe60] [c09de288] ace_init+0x4c/0xac [ 10.686742] [cf82fe80] [c0002730] do_one_initcall+0xac/0x330 [ 10.686888] [cf82fee0] [c09aafd0] kernel_init_freeable+0x34c/0x478 [ 10.687043] [cf82ff30] [c0002c6c] kernel_init+0x18/0x114 [ 10.687188] [cf82ff40] [c000f2f0] ret_from_kernel_thread+0x14/0x1c [ 10.687349] Instruction dump: [ 10.687435] 3863ffd4 4bfffd70 9421ffd0 7c0802a6 93c10028 7c9e2378 93e1002c 38810008 [ 10.687637] 7c7f1b78 90010034 4bfffc25 813f008c <81290040> 75290100 4182002c 80810008 [ 10.688056] ---[ end trace 13c9ff51d41b9d40 ]---
Fix the problem by setting the disk queue pointer to NULL before calling put_disk(). A more comprehensive fix might be to rearrange the code to check the hardware version before initializing data structures, but I don't know if this would have undesirable side effects, and it would increase the complexity of backporting the fix to older kernels.
Fixes: 74489a91dd43a ("Add support for Xilinx SystemACE CompactFlash interface") Acked-by: Michal Simek michal.simek@xilinx.com Signed-off-by: Guenter Roeck linux@roeck-us.net Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/block/xsysace.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 87ccef4bd69e..32a21b8d1d85 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace) return 0;
err_read: + /* prevent double queue cleanup */ + ace->gd->queue = NULL; put_disk(ace->gd); err_alloc_disk: blk_cleanup_queue(ace->queue);
From: Kirill Smelkov kirr@nexedi.com
[ Upstream commit 10dce8af34226d90fa56746a934f8da5dcdba3df ]
Commit 9c225f2655e3 ("vfs: atomic f_pos accesses as per POSIX") added locking for file.f_pos access and in particular made concurrent read and write not possible - now both those functions take f_pos lock for the whole run, and so if e.g. a read is blocked waiting for data, write will deadlock waiting for that read to complete.
This caused a regression for stream-like files, where previously read and write could run simultaneously but after that patch could not do so anymore. See e.g. commit 581d21a2d02a ("xenbus: fix deadlock on writes to /proc/xen/xenbus"), which fixes such a regression for the particular case of /proc/xen/xenbus.
The patch that added f_pos lock in 2014 did so to guarantee POSIX thread safety for read/write/lseek and added the locking to file descriptors of all regular files. In 2014 that thread-safety problem was not new as it was already discussed earlier in 2006.
However, even though the 2006 version of Linus's patch added f_pos locking "only for files that are marked seekable with FMODE_LSEEK (thus avoiding the stream-like objects like pipes and sockets)", the 2014 version - the one that actually made it into the tree as 9c225f2655e3 - does so regardless of whether a file is seekable or not.
See
https://lore.kernel.org/lkml/53022DB1.4070805@gmail.com/ https://lwn.net/Articles/180387 https://lwn.net/Articles/180396
for historic context.
The reason it did so is probably that there are many files that are marked non-seekable, but whose read implementation actually depends on knowing the current position to correctly handle the read. Some examples:
kernel/power/user.c snapshot_read fs/debugfs/file.c u32_array_read fs/fuse/control.c fuse_conn_waiting_read + ... drivers/hwmon/asus_atk0110.c atk_debugfs_ggrp_read arch/s390/hypfs/inode.c hypfs_read_iter ...
Despite that, many nonseekable_open users implement read and write with pure stream semantics - they don't depend on the passed ppos at all. And for those cases where read could wait for something inside, it creates a situation similar to xenbus - the write can never proceed until the read is done, and the read is waiting for some, potentially external, event, for a potentially unbounded time -> deadlock.
Besides xenbus, there are 14 such places in the kernel that I've found with semantic patch (see below):
drivers/xen/evtchn.c:667:8-24: ERROR: evtchn_fops: .read() can deadlock .write() drivers/isdn/capi/capi.c:963:8-24: ERROR: capi_fops: .read() can deadlock .write() drivers/input/evdev.c:527:1-17: ERROR: evdev_fops: .read() can deadlock .write() drivers/char/pcmcia/cm4000_cs.c:1685:7-23: ERROR: cm4000_fops: .read() can deadlock .write() net/rfkill/core.c:1146:8-24: ERROR: rfkill_fops: .read() can deadlock .write() drivers/s390/char/fs3270.c:488:1-17: ERROR: fs3270_fops: .read() can deadlock .write() drivers/usb/misc/ldusb.c:310:1-17: ERROR: ld_usb_fops: .read() can deadlock .write() drivers/hid/uhid.c:635:1-17: ERROR: uhid_fops: .read() can deadlock .write() net/batman-adv/icmp_socket.c:80:1-17: ERROR: batadv_fops: .read() can deadlock .write() drivers/media/rc/lirc_dev.c:198:1-17: ERROR: lirc_fops: .read() can deadlock .write() drivers/leds/uleds.c:77:1-17: ERROR: uleds_fops: .read() can deadlock .write() drivers/input/misc/uinput.c:400:1-17: ERROR: uinput_fops: .read() can deadlock .write() drivers/infiniband/core/user_mad.c:985:7-23: ERROR: umad_fops: .read() can deadlock .write() drivers/gnss/core.c:45:1-17: ERROR: gnss_fops: .read() can deadlock .write()
In addition to the cases above, another regression caused by f_pos locking is that FUSE filesystems that implement open with the FOPEN_NONSEEKABLE flag can no longer implement bidirectional stream-like files - for the same reason as above, e.g. read can deadlock write locking on file.f_pos in the kernel.
FUSE's FOPEN_NONSEEKABLE was added in 2008 in a7c1b990f715 ("fuse: implement nonseekable open") to support OSSPD. OSSPD implements /dev/dsp in userspace with FOPEN_NONSEEKABLE flag, with corresponding read and write routines not depending on current position at all, and with both read and write being potentially blocking operations:
See
https://github.com/libfuse/osspd https://lwn.net/Articles/308445
https://github.com/libfuse/osspd/blob/14a9cff0/osspd.c#L1406 https://github.com/libfuse/osspd/blob/14a9cff0/osspd.c#L1438-L1477 https://github.com/libfuse/osspd/blob/14a9cff0/osspd.c#L1479-L1510
Corresponding libfuse example/test also describes FOPEN_NONSEEKABLE as "somewhat pipe-like files ..." with read handler not using offset. However that test implements only read without write and cannot exercise the deadlock scenario:
https://github.com/libfuse/libfuse/blob/fuse-3.4.2-3-ga1bff7d/example/poll.c#L124-L131 https://github.com/libfuse/libfuse/blob/fuse-3.4.2-3-ga1bff7d/example/poll.c#L146-L163 https://github.com/libfuse/libfuse/blob/fuse-3.4.2-3-ga1bff7d/example/poll.c#L209-L216
I've actually hit the read vs write deadlock for real while implementing my FUSE filesystem, where there is a /head/watch file for which open creates a separate bidirectional socket-like stream between the filesystem and its user, with both read and write later being performed simultaneously. And there it is semantically not easy to split the stream into two separate read-only and write-only channels:
https://lab.nexedi.com/kirr/wendelin.core/blob/f13aa600/wcfs/wcfs.go#L88-169
Let's fix this regression. The plan is:
1. We can't change nonseekable_open to include &~FMODE_ATOMIC_POS - doing so would break many in-kernel nonseekable_open users which actually use ppos in read/write handlers.
2. Add stream_open() to kernel to open stream-like non-seekable file descriptors. Read and write on such file descriptors would never use nor change ppos. And with that property on stream-like files read and write will be running without taking f_pos lock - i.e. read and write could be running simultaneously.
3. With the semantic patch, search for and convert to stream_open all in-kernel nonseekable_open users for which read and write actually do not depend on ppos and where there are no other methods in file_operations which assume @offset access.
4. Add FOPEN_STREAM to fs/fuse/ and open in-kernel file descriptors via stream_open if that bit is present in the filesystem open reply.
It was tempting to change the fs/fuse/ open handler to use stream_open instead of nonseekable_open on just the FOPEN_NONSEEKABLE flag, but grepping through Debian codesearch shows users of FOPEN_NONSEEKABLE, in particular GVFS, which actually uses the offset in its read and write handlers
https://codesearch.debian.net/search?q=-%3Enonseekable+%3D https://gitlab.gnome.org/GNOME/gvfs/blob/1.40.0-6-gcbc54396/client/gvfsfused... https://gitlab.gnome.org/GNOME/gvfs/blob/1.40.0-6-gcbc54396/client/gvfsfused... https://gitlab.gnome.org/GNOME/gvfs/blob/1.40.0-6-gcbc54396/client/gvfsfused...
so if we made such a change it would break a real user.
5. Add stream_open and FOPEN_STREAM handling to stable kernels starting from v3.14+ (the kernel where 9c225f2655 first appeared).
This will make it possible to patch OSSPD and other FUSE filesystems that provide stream-like files to return FOPEN_STREAM | FOPEN_NONSEEKABLE in their open handler and this way avoid the deadlock on all kernel versions. This should work because fs/fuse/ ignores unknown open flags returned from a filesystem, so passing FOPEN_STREAM to a kernel that is not aware of this flag cannot hurt. In turn, a kernel that is not aware of FOPEN_STREAM will be < v3.14, where just FOPEN_NONSEEKABLE is sufficient to implement streams without the read vs write deadlock.
This patch adds stream_open, converts /proc/xen/xenbus to it, and adds a semantic patch to automatically locate in-kernel places that are either required to be converted due to the read vs write deadlock, or that are just safe to convert because read and write do not use ppos and there are no other funky methods in file_operations.
Regarding the semantic patch, I've verified each generated change manually - that it is correct to convert - and each remaining nonseekable_open instance - that it is either not correct to convert there, or that it is not converted due to current stream_open.cocci limitations.
The script also does not convert files that should be valid to convert, but that currently have .llseek = noop_llseek or generic_file_llseek for unknown reasons despite the file being opened with nonseekable_open (e.g. drivers/input/mousedev.c).
Cc: Michael Kerrisk mtk.manpages@gmail.com Cc: Yongzhi Pan panyongzhi@gmail.com Cc: Jonathan Corbet corbet@lwn.net Cc: David Vrabel david.vrabel@citrix.com Cc: Juergen Gross jgross@suse.com Cc: Miklos Szeredi miklos@szeredi.hu Cc: Tejun Heo tj@kernel.org Cc: Kirill Tkhai ktkhai@virtuozzo.com Cc: Arnd Bergmann arnd@arndb.de Cc: Christoph Hellwig hch@lst.de Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org Cc: Julia Lawall Julia.Lawall@lip6.fr Cc: Nikolaus Rath Nikolaus@rath.org Cc: Han-Wen Nienhuys hanwen@google.com Signed-off-by: Kirill Smelkov kirr@nexedi.com Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- drivers/xen/xenbus/xenbus_dev_frontend.c | 4 +- fs/open.c | 18 ++ fs/read_write.c | 5 +- include/linux/fs.h | 4 + scripts/coccinelle/api/stream_open.cocci | 363 +++++++++++++++++++++++ 5 files changed, 389 insertions(+), 5 deletions(-) create mode 100644 scripts/coccinelle/api/stream_open.cocci
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index c3e201025ef0..0782ff3c2273 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp) if (xen_store_evtchn == 0) return -ENOENT;
- nonseekable_open(inode, filp); - - filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */ + stream_open(inode, filp);
u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) diff --git a/fs/open.c b/fs/open.c index f1c2f855fd43..a00350018a47 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1215,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp) }
EXPORT_SYMBOL(nonseekable_open); + +/* + * stream_open is used by subsystems that want stream-like file descriptors. + * Such file descriptors are not seekable and don't have notion of position + * (file.f_pos is always 0). Contrary to file descriptors of other regular + * files, .read() and .write() can run simultaneously. + * + * stream_open never fails and is marked to return int so that it could be + * directly used as file_operations.open . + */ +int stream_open(struct inode *inode, struct file *filp) +{ + filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS); + filp->f_mode |= FMODE_STREAM; + return 0; +} + +EXPORT_SYMBOL(stream_open); diff --git a/fs/read_write.c b/fs/read_write.c index 27b69b85d49f..3d3194e32201 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
static inline loff_t file_pos_read(struct file *file) { - return file->f_pos; + return file->f_mode & FMODE_STREAM ? 0 : file->f_pos; }
static inline void file_pos_write(struct file *file, loff_t pos) { - file->f_pos = pos; + if ((file->f_mode & FMODE_STREAM) == 0) + file->f_pos = pos; }
ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count) diff --git a/include/linux/fs.h b/include/linux/fs.h index fd423fec8d83..09ce2646c78a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -153,6 +153,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_OPENED ((__force fmode_t)0x80000) #define FMODE_CREATED ((__force fmode_t)0x100000)
+/* File is stream-like */ +#define FMODE_STREAM ((__force fmode_t)0x200000) + /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -3074,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); extern loff_t no_seek_end_llseek(struct file *, loff_t, int); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); +extern int stream_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci new file mode 100644 index 000000000000..350145da7669 --- /dev/null +++ b/scripts/coccinelle/api/stream_open.cocci @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +// Author: Kirill Smelkov (kirr@nexedi.com) +// +// Search for stream-like files that are using nonseekable_open and convert +// them to stream_open. A stream-like file is a file that does not use ppos in +// its read and write. Rationale for the conversion is to avoid deadlock in +// between read and write. + +virtual report +virtual patch +virtual explain // explain decisions in the patch (SPFLAGS="-D explain") + +// stream-like reader & writer - ones that do not depend on f_pos. +@ stream_reader @ +identifier readstream, ppos; +identifier f, buf, len; +type loff_t; +@@ + ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos) + { + ... when != ppos + } + +@ stream_writer @ +identifier writestream, ppos; +identifier f, buf, len; +type loff_t; +@@ + ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos) + { + ... when != ppos + } + + +// a function that blocks +@ blocks @ +identifier block_f; +identifier wait_event =~ "^wait_event_.*"; +@@ + block_f(...) { + ... when exists + wait_event(...) + ... when exists + } + +// stream_reader that can block inside. +// +// XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait()) +// XXX currently reader_blocks supports only direct and 1-level indirect cases. +@ reader_blocks_direct @ +identifier stream_reader.readstream; +identifier wait_event =~ "^wait_event_.*"; +@@ + readstream(...) + { + ... when exists + wait_event(...) + ... when exists + } + +@ reader_blocks_1 @ +identifier stream_reader.readstream; +identifier blocks.block_f; +@@ + readstream(...) + { + ... when exists + block_f(...) + ... when exists + } + +@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @ +identifier stream_reader.readstream; +@@ + readstream(...) { + ... + } + + +// file_operations + whether they have _any_ .read, .write, .llseek ... at all. +// +// XXX add support for file_operations xxx[N] = ... (sound/core/pcm_native.c) +@ fops0 @ +identifier fops; +@@ + struct file_operations fops = { + ... 
+ }; + +@ has_read @ +identifier fops0.fops; +identifier read_f; +@@ + struct file_operations fops = { + .read = read_f, + }; + +@ has_read_iter @ +identifier fops0.fops; +identifier read_iter_f; +@@ + struct file_operations fops = { + .read_iter = read_iter_f, + }; + +@ has_write @ +identifier fops0.fops; +identifier write_f; +@@ + struct file_operations fops = { + .write = write_f, + }; + +@ has_write_iter @ +identifier fops0.fops; +identifier write_iter_f; +@@ + struct file_operations fops = { + .write_iter = write_iter_f, + }; + +@ has_llseek @ +identifier fops0.fops; +identifier llseek_f; +@@ + struct file_operations fops = { + .llseek = llseek_f, + }; + +@ has_no_llseek @ +identifier fops0.fops; +@@ + struct file_operations fops = { + .llseek = no_llseek, + }; + +@ has_mmap @ +identifier fops0.fops; +identifier mmap_f; +@@ + struct file_operations fops = { + .mmap = mmap_f, + }; + +@ has_copy_file_range @ +identifier fops0.fops; +identifier copy_file_range_f; +@@ + struct file_operations fops = { + .copy_file_range = copy_file_range_f, + }; + +@ has_remap_file_range @ +identifier fops0.fops; +identifier remap_file_range_f; +@@ + struct file_operations fops = { + .remap_file_range = remap_file_range_f, + }; + +@ has_splice_read @ +identifier fops0.fops; +identifier splice_read_f; +@@ + struct file_operations fops = { + .splice_read = splice_read_f, + }; + +@ has_splice_write @ +identifier fops0.fops; +identifier splice_write_f; +@@ + struct file_operations fops = { + .splice_write = splice_write_f, + }; + + +// file_operations that is candidate for stream_open conversion - it does not +// use mmap and other methods that assume @offset access to file. +// +// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now. +// XXX maybe_steam.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops". +@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @ +identifier fops0.fops; +@@ + struct file_operations fops = { + }; + + +// ---- conversions ---- + +// XXX .open = nonseekable_open -> .open = stream_open +// XXX .open = func -> openfunc -> nonseekable_open + +// read & write +// +// if both are used in the same file_operations together with an opener - +// under that conditions we can use stream_open instead of nonseekable_open. +@ fops_rw depends on maybe_stream @ +identifier fops0.fops, openfunc; +identifier stream_reader.readstream; +identifier stream_writer.writestream; +@@ + struct file_operations fops = { + .open = openfunc, + .read = readstream, + .write = writestream, + }; + +@ report_rw depends on report @ +identifier fops_rw.openfunc; +position p1; +@@ + openfunc(...) { + <... + nonseekable_open@p1 + ...> + } + +@ script:python depends on report && reader_blocks @ +fops << fops0.fops; +p << report_rw.p1; +@@ +coccilib.report.print_report(p[0], + "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,)) + +@ script:python depends on report && !reader_blocks @ +fops << fops0.fops; +p << report_rw.p1; +@@ +coccilib.report.print_report(p[0], + "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) + + +@ explain_rw_deadlocked depends on explain && reader_blocks @ +identifier fops_rw.openfunc; +@@ + openfunc(...) { + <... 
+- nonseekable_open ++ nonseekable_open /* read & write (was deadlock) */ + ...> + } + + +@ explain_rw_nodeadlock depends on explain && !reader_blocks @ +identifier fops_rw.openfunc; +@@ + openfunc(...) { + <... +- nonseekable_open ++ nonseekable_open /* read & write (no direct deadlock) */ + ...> + } + +@ patch_rw depends on patch @ +identifier fops_rw.openfunc; +@@ + openfunc(...) { + <... +- nonseekable_open ++ stream_open + ...> + } + + +// read, but not write +@ fops_r depends on maybe_stream && !has_write @ +identifier fops0.fops, openfunc; +identifier stream_reader.readstream; +@@ + struct file_operations fops = { + .open = openfunc, + .read = readstream, + }; + +@ report_r depends on report @ +identifier fops_r.openfunc; +position p1; +@@ + openfunc(...) { + <... + nonseekable_open@p1 + ...> + } + +@ script:python depends on report @ +fops << fops0.fops; +p << report_r.p1; +@@ +coccilib.report.print_report(p[0], + "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) + +@ explain_r depends on explain @ +identifier fops_r.openfunc; +@@ + openfunc(...) { + <... +- nonseekable_open ++ nonseekable_open /* read only */ + ...> + } + +@ patch_r depends on patch @ +identifier fops_r.openfunc; +@@ + openfunc(...) { + <... +- nonseekable_open ++ stream_open + ...> + } + + +// write, but not read +@ fops_w depends on maybe_stream && !has_read @ +identifier fops0.fops, openfunc; +identifier stream_writer.writestream; +@@ + struct file_operations fops = { + .open = openfunc, + .write = writestream, + }; + +@ report_w depends on report @ +identifier fops_w.openfunc; +position p1; +@@ + openfunc(...) { + <... + nonseekable_open@p1 + ...> + } + +@ script:python depends on report @ +fops << fops0.fops; +p << report_w.p1; +@@ +coccilib.report.print_report(p[0], + "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,)) + +@ explain_w depends on explain @ +identifier fops_w.openfunc; +@@ + openfunc(...) { + <... +- nonseekable_open ++ nonseekable_open /* write only */ + ...> + } + +@ patch_w depends on patch @ +identifier fops_w.openfunc; +@@ + openfunc(...) { + <... +- nonseekable_open ++ stream_open + ...> + } + + +// no read, no write - don't change anything
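For context, a minimal sketch of how a stream-like character driver could adopt the new helper. The mydev_* names are hypothetical and the read/write bodies are stubbed; the point is only that neither touches *ppos, so after stream_open() clears FMODE_ATOMIC_POS they may run concurrently:

#include <linux/fs.h>
#include <linux/module.h>

static int mydev_open(struct inode *inode, struct file *filp)
{
        return stream_open(inode, filp);        /* instead of nonseekable_open() */
}

static ssize_t mydev_read(struct file *filp, char __user *buf,
                          size_t len, loff_t *ppos)
{
        /* pure stream semantics: *ppos is neither read nor updated;
         * a real driver would block until data arrives, then copy_to_user() */
        return 0;
}

static ssize_t mydev_write(struct file *filp, const char __user *buf,
                           size_t len, loff_t *ppos)
{
        /* accept everything; again *ppos is never touched */
        return len;
}

static const struct file_operations mydev_fops = {
        .owner  = THIS_MODULE,
        .open   = mydev_open,
        .read   = mydev_read,
        .write  = mydev_write,
        .llseek = no_llseek,
};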
On Wed, Apr 24, 2019 at 10:33:33AM -0400, Sasha Levin wrote:
...
I think there is a follow-on patch for this one as well, that adds the proper "stream open" logic to all of the individual locations.
But even with that, I don't think this is stable material, it should just be for 5.1 and newer kernels.
thanks,
greg k-h
On Wed, Apr 24, 2019 at 9:34 AM Greg Kroah-Hartman gregkh@linuxfoundation.org wrote:
I think there is a follow-on patch for this one as well, that adds the proper "stream open" logic to all of the individual locations.
That bulk one hasn't been applied yet, and wouldn't be appropriate for stable anyway. It's 5.2 material.
But...
But even with that, I don't think this is stable material, it should just be for 5.1 and newer kernels.
We likely *will* have a fuse-only patch that uses stream_open() and _will_ be marked for stable, but that hasn't actually happened yet either. But if/when that happens, then this infrastructure patch will indeed be needed in stable.
Linus
On Wed, Apr 24, 2019 at 09:40:53AM -0700, Linus Torvalds wrote:
On Wed, Apr 24, 2019 at 9:34 AM Greg Kroah-Hartman gregkh@linuxfoundation.org wrote:
I think there is a follow-on patch for this one as well, that adds the proper "stream open" logic to all of the individual locations.
That bulk one hasn't been applied yet, and wouldn't be appropriate for stable anyway. It's 5.2 material.
Ah, ok.
But...
But even with that, I don't think this is stable material, it should just be for 5.1 and newer kernels.
We likely *will* have a fuse-only patch that uses stream_open() and _will_ be marked for stable, but that hasn't actually happened yet either. But if/when that happens, then this infrastructure patch will indeed be needed in stable.
Ok, I'll add this to my list of things to apply sometime in the future when it shows up.
thanks,
greg k-h
On Wed, Apr 24, 2019 at 09:40:53AM -0700, Linus Torvalds wrote:
On Wed, Apr 24, 2019 at 9:34 AM Greg Kroah-Hartman gregkh@linuxfoundation.org wrote:
I think there is a follow-on patch for this one as well, that adds the proper "stream open" logic to all of the individual locations.
That bulk one hasn't been applied yet, and wouldn't be appropriate for stable anyway. It's 5.2 material.
Hm, I might be confusing something here but I see a bunch of patches that convert existing callers mentioned in this patch to use stream_open() which was introduced here.
Are these the same patches you're referring to? If so, why are they not appropriate for stable?
-- Thanks, Sasha
On Wed, Apr 24, 2019 at 10:19 AM Sasha Levin sashal@kernel.org wrote:
Hm, I might be confusing something here but I see a bunch of patches that convert existing callers mentioned in this patch to use stream_open() which was introduced here.
The only use of stream_open() upstream right now is the xenbus conversion, and that isn't actually a bugfix, because xenbus used to manually do that
filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
that stream_open() does.
So no, there isn't "a bunch of patches" anywhere.
There are *future* cleanups for 5.2 that will happen, and that might have hit linux-next. And there is at least one FUSE patch (again - pending, not upstream) that may get marked for stable.
But I see nothing right now that makes it stable material yet.
Linus
On Wed, Apr 24, 2019 at 10:26:55AM -0700, Linus Torvalds wrote:
On Wed, Apr 24, 2019 at 10:19 AM Sasha Levin sashal@kernel.org wrote:
Hm, I might be confusing something here but I see a bunch of patches that convert existing callers mentioned in this patch to use stream_open() which was introduced here.
The only use of stream_open() upstream right now is the xenbus conversion, and that isn't actually a bugfix, because xenbus used to manually do that
filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
that stream_open() does.
So no, there isn't "a bunch of patches" anywhere.
There are *future* cleanups for 5.2 that will happen, and that might have hit linux-next. And there is at least one FUSE patch (again - pending, not upstream) that may get marked for stable.
But I see nothing right now that makes it stable material yet.
Linus, thanks for explaining. Sasha, Greg, there is a FUSE patch that should be stable material that will need this stream_open() thing. That patch has just entered fuse.git#for-next today:
https://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git/commit/?id...
and will hopefully enter 5.2 when merge window opens. I agree we should not blindly backport bulk stream_open conversions as performed by stream_open.cocci, at least unless there is a bug report indicating that it is actually required for a particular driver. On the other hand both Xen and FUSE deadlocks were hit for real which justifies stable propagation for their fixes.
You can read about the deadlock regression and the plan to fix it in the original "fs: stream_open - opener for stream-like files so that read and write can run simultaneously without deadlock" patch (the 59/66 patch that was sent in this thread), or here:
https://git.kernel.org/linus/10dce8af3422
Hope it clarifies things a bit,
Kirill
From: Kirill Smelkov
Sent: 24 April 2019 19:30
...
I can also imagine drivers that expect accesses to be done using pread() and pwrite() - maybe only if the fd is shared. Provided accesses get the correct offset they can be concurrent. In fact they only need to update the offset in the file structure when they complete - they may do this already.
I know (I think) uclibc implementing pread() as lseek() + read() caused me grief - but that might just have been the extra system call overhead rather than any problems with the offset.
David
On Thu, Apr 25, 2019 at 10:04:34AM +0000, David Laight wrote:
From: Kirill Smelkov
Sent: 24 April 2019 19:30
...
I can also imagine drivers that expect accesses to be done using pread() and pwrite() - maybe only if the fd is shared. Provided accesses get the correct offset they can be concurrent. In fact they only need to update the offset in the file structure when they complete - they may do this already.
I know (I think) uclibc implementing pread() as lseek() + read() caused me grief - but that might just have been the extra system call overhead rather than any problems with the offset.
I'm not sure I understand your comment completely, but we convert to stream_open only drivers that actually do _not_ use position at all, and that were already using nonseekable_open, thus pread and pwrite were already returning -ESPIPE for them (nonseekable_open clears FMODE_{PREAD,PWRITE} and ksys_{pread,pwrite}64 check for that flag). We also convert only drivers that use no_llseek for .llseek, so lseek on those files is/was always returning -ESPIPE as well.
If a driver uses position in its read and write and has support for pread/pwrite (FMODE_PREAD and FMODE_PWRITE), pread and pwrite are already working _without_ file->f_pos locking - because those system calls do not semantically update file->f_pos at all and thus do not take file->f_pos_lock - i.e. pread/pwrite can be run simultaneously already.
If libc implements pread as lseek+read it will work for a single user case (single thread, or fd not shared between processes), but it will break because of lseek+read non-atomicity if multiple preads are simultaneously used from several threads. And also for such emulation for multiple users case there is a chance for pread vs pwrite deadlock, since those system calls are using read and write and read and write take file->f_pos_lock.
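A small userspace sketch of the emulation being discussed (a hypothetical helper, not uclibc's actual code): because the seek and the read are separate system calls, two threads sharing the fd can interleave them and read from the wrong offset, which the real pread() avoids by never touching the shared file position.

#include <sys/types.h>
#include <unistd.h>

/* Racy stand-in for pread(): only correct when the fd is not shared. */
ssize_t emulated_pread(int fd, void *buf, size_t count, off_t offset)
{
        off_t saved = lseek(fd, 0, SEEK_CUR);   /* save current position */
        if (saved == (off_t)-1)
                return -1;
        if (lseek(fd, offset, SEEK_SET) == (off_t)-1)
                return -1;
        /* Another thread may lseek()/read() the same fd here and move
         * the shared offset before or during this read. */
        ssize_t n = read(fd, buf, count);
        /* Restoring the old position is also not atomic with the read. */
        if (lseek(fd, saved, SEEK_SET) == (off_t)-1)
                return -1;
        return n;
}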
Kirill
From: Kirill Smelkov
Sent: 26 April 2019 08:46
...
I'm not sure I understand your comment completely, but we convert to stream_open only drivers that actually do _not_ use position at all, and that were already using nonseekable_open, thus pread and pwrite were already returning -ESPIPE for them (nonseekable_open clears FMODE_{PREAD,PWRITE} and ksys_{pread,pwrite}64 check for that flag). We also convert only drivers that use no_llseek for .llseek, so lseek on those files is/was always returning -ESPIPE as well.
If a driver uses position in its read and write and has support for pread/pwrite (FMODE_PREAD and FMODE_PWRITE), pread and pwrite are already working _without_ file->f_pos locking - because those system calls do not semantically update file->f_pos at all and thus do not take file->f_pos_lock - i.e. pread/pwrite can be run simultaneously already.
Looks like I knew that once :-) Mind you, 'man pread' on my system is somewhat uninformative.
Maybe pread() should always be allowed at offset 0. Then you wouldn't need all this extra logic.
If libc implements pread as lseek+read it will work for a single user case (single thread, or fd not shared between processes), but it will break because of lseek+read non-atomicity if multiple preads are simultaneously used from several threads. And also for such emulation for multiple users case there is a chance for pread vs pwrite deadlock, since those system calls are using read and write and read and write take file->f_pos_lock.
I'd actually rather the pread() failed to compile. The actual implementation did 3 lseek()s (to save and restore the offset). A user level emulation could usually get away with one lseek().
David
On Fri, Apr 26, 2019 at 11:00:01AM +0000, David Laight wrote:
From: Kirill Smelkov
Sent: 26 April 2019 08:46
...
I'm not sure I understand your comment completely, but we convert to stream_open only drivers that actually do _not_ use position at all, and that were already using nonseekable_open, thus pread and pwrite were already returning -ESPIPE for them (nonseekable_open clears FMODE_{PREAD,PWRITE} and ksys_{pread,pwrite}64 check for that flag). We also convert only drivers that use no_llseek for .llseek, so lseek on those files is/was always returning -ESPIPE as well.
If a driver uses position in its read and write and has support for pread/pwrite (FMODE_PREAD and FMODE_PWRITE), pread and pwrite are already working _without_ file->f_pos locking - because those system calls do not semantically update file->f_pos at all and thus do not take file->f_pos_lock - i.e. pread/pwrite can be run simultaneously already.
Looks like I knew that once :-) Mind you, 'man pread' on my system is somewhat uninformative.
Maybe pread() should always be allowed at offset 0. Then you wouldn't need all this extra logic.
I'm not sure I understand. Do you propose any change? If yes - what is the change you are proposing?
If libc implements pread as lseek+read it will work for a single user case (single thread, or fd not shared between processes), but it will break because of lseek+read non-atomicity if multiple preads are simultaneously used from several threads. And also for such emulation for multiple users case there is a chance for pread vs pwrite deadlock, since those system calls are using read and write and read and write take file->f_pos_lock.
I'd actually rather the pread() failed to compile.
Ok.
The actual implementation did 3 lseek()s (to save and restore the offset). A user level emulation could usually get away with one lseek().
From: Arnd Bergmann arnd@arndb.de
[ Upstream commit cd92d74d67c811dc22544430b9ac3029f5bd64c5 ]
clang warns about statically defined DMA masks from the DMA_BIT_MASK macro with length 64:
arch/arm/plat-orion/common.c:625:29: error: shift count >= width of type [-Werror,-Wshift-count-overflow] .coherent_dma_mask = DMA_BIT_MASK(64), ^~~~~~~~~~~~~~~~ include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK' #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
The ones in orion shouldn't really be 64 bit masks, so changing them to what the driver can support avoids the warning.
Signed-off-by: Arnd Bergmann arnd@arndb.de Signed-off-by: Olof Johansson olof@lixom.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm/plat-orion/common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index a2399fd66e97..1e970873439c 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = { .resource = orion_xor0_shared_resources, .dev = { .dma_mask = &orion_xor_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &orion_xor0_pdata, }, }; @@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = { .resource = orion_xor1_shared_resources, .dev = { .dma_mask = &orion_xor_dmamask, - .coherent_dma_mask = DMA_BIT_MASK(64), + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &orion_xor1_pdata, }, };
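A reduced userspace reproduction of the diagnostic (my own reduction, not kernel code): even though the (n) == 64 arm of the ternary is the one selected, clang still checks the constant shift in the other arm, which is what triggers -Wshift-count-overflow in the kernel build.

/* Build with: clang -Wshift-count-overflow -c dma_mask_demo.c */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static unsigned long long mask64 = DMA_BIT_MASK(64); /* warns: shift count >= width */
static unsigned long long mask32 = DMA_BIT_MASK(32); /* 0xffffffff, no warning */

unsigned long long pick_mask(int use64)
{
        return use64 ? mask64 : mask32;
}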
From: Arnd Bergmann arnd@arndb.de
[ Upstream commit 2125801ccce19249708ca3245d48998e70569ab8 ]
clang warns about statically defined DMA masks from the DMA_BIT_MASK macro with length 64:
arch/arm/mach-iop13xx/setup.c:303:35: error: shift count >= width of type [-Werror,-Wshift-count-overflow] static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); ^~~~~~~~~~~~~~~~ include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK' #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) ^ ~~~
The ones in iop shouldn't really be 64 bit masks, so changing them to what the driver can support avoids the warning.
Signed-off-by: Arnd Bergmann arnd@arndb.de Signed-off-by: Olof Johansson olof@lixom.net Signed-off-by: Sasha Levin (Microsoft) sashal@kernel.org --- arch/arm/mach-iop13xx/setup.c | 8 ++++---- arch/arm/mach-iop13xx/tpmi.c | 10 +++++----- arch/arm/plat-iop/adma.c | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f7301e..fe4932fda01d 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
         }
 };

-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
         .hw_id = 0,
         .pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
         .resource = iop13xx_adma_0_resources,
         .dev = {
                 .dma_mask = &iop13xx_adma_dmamask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
                 .platform_data = (void *) &iop13xx_adma_0_data,
         },
 };
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
         .resource = iop13xx_adma_1_resources,
         .dev = {
                 .dma_mask = &iop13xx_adma_dmamask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
                 .platform_data = (void *) &iop13xx_adma_1_data,
         },
 };
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
         .resource = iop13xx_adma_2_resources,
         .dev = {
                 .dma_mask = &iop13xx_adma_dmamask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
                 .platform_data = (void *) &iop13xx_adma_2_data,
         },
 };
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index db511ec2b1df..116feb6b261e 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
         }
 };

-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
 static struct platform_device iop13xx_tpmi_0_device = {
         .name = "iop-tpmi",
         .id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
         .resource = iop13xx_tpmi_0_resources,
         .dev = {
                 .dma_mask = &iop13xx_tpmi_mask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
         },
 };

@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
         .resource = iop13xx_tpmi_1_resources,
         .dev = {
                 .dma_mask = &iop13xx_tpmi_mask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
         },
 };

@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
         .resource = iop13xx_tpmi_2_resources,
         .dev = {
                 .dma_mask = &iop13xx_tpmi_mask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
         },
 };

@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
         .resource = iop13xx_tpmi_3_resources,
         .dev = {
                 .dma_mask = &iop13xx_tpmi_mask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
         },
 };

diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index a4d1f8de3b5b..d9612221e484 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
         .resource = iop3xx_dma_0_resources,
         .dev = {
                 .dma_mask = &iop3xx_adma_dmamask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
                 .platform_data = (void *) &iop3xx_dma_0_data,
         },
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
         .resource = iop3xx_dma_1_resources,
         .dev = {
                 .dma_mask = &iop3xx_adma_dmamask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
                 .platform_data = (void *) &iop3xx_dma_1_data,
         },
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
         .resource = iop3xx_aau_resources,
         .dev = {
                 .dma_mask = &iop3xx_adma_dmamask,
-                .coherent_dma_mask = DMA_BIT_MASK(64),
+                .coherent_dma_mask = DMA_BIT_MASK(32),
                 .platform_data = (void *) &iop3xx_aau_data,
         },
 };
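For readers without clang at hand, the diagnostic quoted above (which applies equally to the orion change earlier in this series) can be reproduced with nothing but the macro shown in the note. The snippet below is a stand-alone illustration, not kernel code; the typedef and names are invented here, and whether the warning fires depends on the clang version in use:

/* dma_mask_warn.c - illustrative reproduction of the diagnostic quoted above.
 * Build with something like: clang -c -Wshift-count-overflow dma_mask_warn.c
 */
typedef unsigned long long u64;

/* Macro as quoted from include/linux/dma-mapping.h in the warning. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/*
 * Even though the (n) == 64 arm is the one that gets selected, the dead
 * 1ULL << 64 still appears in the static initializer, which is what
 * clang's -Wshift-count-overflow complains about.
 */
static u64 mask64 = DMA_BIT_MASK(64);   /* warns: shift count >= width of type */
static u64 mask32 = DMA_BIT_MASK(32);   /* the form the patch switches to */

u64 dma_mask_example(void)
{
        return mask64 ^ mask32;
}

Since the commit message notes these devices shouldn't really be using 64-bit masks anyway, dropping to DMA_BIT_MASK(32) both silences the warning and states what the driver can actually support.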
From: Linus Torvalds torvalds@linux-foundation.org
[ Upstream commit b53119f13a04879c3bf502828d99d13726639ead ]
aio_poll() is not the only case that needs the file pinned; worse, while aio_read()/aio_write() can live without pinning the iocb itself, the proof is rather brittle and can easily break on later changes. So pin the iocb (and with it the file) until io_submit() is done with it, and only then drop the synchronous reference.
Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Al Viro viro@zeniv.linux.org.uk
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/aio.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 3d9669d011b9..363d7d7c8bff 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1022,6 +1022,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  * Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1034,7 +1037,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
         percpu_ref_get(&ctx->reqs);
         req->ki_ctx = ctx;
         INIT_LIST_HEAD(&req->ki_list);
-        refcount_set(&req->ki_refcnt, 0);
+        refcount_set(&req->ki_refcnt, 2);
         req->ki_eventfd = NULL;
         return req;
 }
@@ -1067,15 +1070,18 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
         return ret;
 }

+static inline void iocb_destroy(struct aio_kiocb *iocb)
+{
+        if (iocb->ki_filp)
+                fput(iocb->ki_filp);
+        percpu_ref_put(&iocb->ki_ctx->reqs);
+        kmem_cache_free(kiocb_cachep, iocb);
+}
+
 static inline void iocb_put(struct aio_kiocb *iocb)
 {
-        if (refcount_read(&iocb->ki_refcnt) == 0 ||
-            refcount_dec_and_test(&iocb->ki_refcnt)) {
-                if (iocb->ki_filp)
-                        fput(iocb->ki_filp);
-                percpu_ref_put(&iocb->ki_ctx->reqs);
-                kmem_cache_free(kiocb_cachep, iocb);
-        }
+        if (refcount_dec_and_test(&iocb->ki_refcnt))
+                iocb_destroy(iocb);
 }

 static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
@@ -1749,9 +1755,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
         INIT_LIST_HEAD(&req->wait.entry);
         init_waitqueue_func_entry(&req->wait, aio_poll_wake);

-        /* one for removal from waitqueue, one for this function */
-        refcount_set(&aiocb->ki_refcnt, 2);
-
         mask = vfs_poll(req->file, &apt.pt) & req->events;
         if (unlikely(!req->head)) {
                 /* we did not manage to set up a waitqueue, done */
@@ -1782,7 +1785,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)

         if (mask)
                 aio_poll_complete(aiocb, mask);
-        iocb_put(aiocb);
         return 0;
 }

@@ -1873,18 +1875,21 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
                 break;
         }

+        /* Done with the synchronous reference */
+        iocb_put(req);
+
         /*
          * If ret is 0, we'd either done aio_complete() ourselves or have
          * arranged for that to be done asynchronously. Anything non-zero
          * means that we need to destroy req ourselves.
          */
-        if (ret)
-                goto out_put_req;
-        return 0;
+        if (!ret)
+                return 0;
+
 out_put_req:
         if (req->ki_eventfd)
                 eventfd_ctx_put(req->ki_eventfd);
-        iocb_put(req);
+        iocb_destroy(req);
 out_put_reqs_available:
         put_reqs_available(ctx, 1);
         return ret;
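A stand-alone sketch of the lifetime rule this patch introduces may help: every request starts with two references, one owned by the submitter and one by the async completion, and whichever side drops the last one frees the object. This is an illustration under that assumption, not the kernel code; all names below are invented.

/* refcount_pin_sketch.c - illustration only, not fs/aio.c */
#include <stdatomic.h>
#include <stdlib.h>

struct request {
        atomic_int refcnt;
        long result;
};

static struct request *request_alloc(void)
{
        struct request *req = malloc(sizeof(*req));

        if (req)
                atomic_init(&req->refcnt, 2);   /* submitter + async completion */
        return req;
}

static void request_put(struct request *req)
{
        /* Only the last of the two owners actually frees the request. */
        if (atomic_fetch_sub(&req->refcnt, 1) == 1)
                free(req);
}

static void async_complete(struct request *req, long result)
{
        req->result = result;   /* record the outcome... */
        request_put(req);       /* ...and drop the completion-side reference */
}

int main(void)
{
        struct request *req = request_alloc();

        if (!req)
                return 1;
        /* The completion may run before or after the submitter's put. */
        async_complete(req, 42);
        request_put(req);       /* submitter done; like the iocb_put() at the
                                 * end of __io_submit_one() in the diff above */
        return 0;
}

On a submission failure, where no async completion will ever run, the patch instead tears the request down directly (iocb_destroy()) rather than dropping a reference.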
From: Al Viro viro@zeniv.linux.org.uk
[ Upstream commit 833f4154ed560232120bc475935ee1d6a20e159f ]
Signed-off-by: Al Viro viro@zeniv.linux.org.uk
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/aio.c | 29 +++++++----------------------
 1 file changed, 7 insertions(+), 22 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 363d7d7c8bff..ae60c29b8a98 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2002,24 +2002,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif

-/* lookup_kiocb
- * Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-        struct aio_kiocb *kiocb;
-
-        assert_spin_locked(&ctx->ctx_lock);
-
-        /* TODO: use a hash or array, this sucks. */
-        list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-                if (kiocb->ki_user_iocb == iocb)
-                        return kiocb;
-        }
-        return NULL;
-}
-
 /* sys_io_cancel:
  * Attempts to cancel an iocb previously passed to io_submit. If
  * the operation is successfully cancelled, the resulting event is
@@ -2048,10 +2030,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                 return -EINVAL;

         spin_lock_irq(&ctx->ctx_lock);
-        kiocb = lookup_kiocb(ctx, iocb);
-        if (kiocb) {
-                ret = kiocb->ki_cancel(&kiocb->rw);
-                list_del_init(&kiocb->ki_list);
+        /* TODO: use a hash or array, this sucks. */
+        list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+                if (kiocb->ki_user_iocb == iocb) {
+                        ret = kiocb->ki_cancel(&kiocb->rw);
+                        list_del_init(&kiocb->ki_list);
+                        break;
+                }
         }
         spin_unlock_irq(&ctx->ctx_lock);
From: Al Viro viro@zeniv.linux.org.uk
[ Upstream commit a9339b7855094ba11a97e8822ae038135e879e79 ]
We want to separate forming the resulting io_event from putting it into the ring buffer.
Signed-off-by: Al Viro viro@zeniv.linux.org.uk
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/aio.c | 31 +++++++++++++------------------
 1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index ae60c29b8a98..387b224217b5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -204,8 +204,7 @@ struct aio_kiocb {
         struct kioctx *ki_ctx;
         kiocb_cancel_fn *ki_cancel;

-        struct iocb __user *ki_user_iocb;       /* user's aiocb */
-        __u64 ki_user_data;                     /* user's data for completion */
+        struct io_event ki_res;

         struct list_head ki_list;       /* the aio core uses this
                                          * for cancellation */
@@ -1084,15 +1083,6 @@ static inline void iocb_put(struct aio_kiocb *iocb)
                 iocb_destroy(iocb);
 }

-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-                           long res, long res2)
-{
-        ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-        ev->data = iocb->ki_user_data;
-        ev->res = res;
-        ev->res2 = res2;
-}
-
 /* aio_complete
  * Called when the io request on the given iocb is complete.
  */
@@ -1104,6 +1094,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
         unsigned tail, pos, head;
         unsigned long flags;

+        iocb->ki_res.res = res;
+        iocb->ki_res.res2 = res2;
         /*
          * Add a completion event to the ring buffer. Must be done holding
          * ctx->completion_lock to prevent other code from messing with the tail
@@ -1120,14 +1112,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
         ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
         event = ev_page + pos % AIO_EVENTS_PER_PAGE;

-        aio_fill_event(event, iocb, res, res2);
+        *event = iocb->ki_res;

         kunmap_atomic(ev_page);
         flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

-        pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-                 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-                 res, res2);
+        pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+                 (void __user *)(unsigned long)iocb->ki_res.obj,
+                 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);

         /* after flagging the request as done, we
          * must never even look at it again
@@ -1844,8 +1836,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
                 goto out_put_req;
         }

-        req->ki_user_iocb = user_iocb;
-        req->ki_user_data = iocb->aio_data;
+        req->ki_res.obj = (u64)(unsigned long)user_iocb;
+        req->ki_res.data = iocb->aio_data;
+        req->ki_res.res = 0;
+        req->ki_res.res2 = 0;

         switch (iocb->aio_lio_opcode) {
         case IOCB_CMD_PREAD:
@@ -2019,6 +2013,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
         struct aio_kiocb *kiocb;
         int ret = -EINVAL;
         u32 key;
+        u64 obj = (u64)(unsigned long)iocb;

         if (unlikely(get_user(key, &iocb->aio_key)))
                 return -EFAULT;
@@ -2032,7 +2027,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
         spin_lock_irq(&ctx->ctx_lock);
         /* TODO: use a hash or array, this sucks. */
         list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-                if (kiocb->ki_user_iocb == iocb) {
+                if (kiocb->ki_res.obj == obj) {
                         ret = kiocb->ki_cancel(&kiocb->rw);
                         list_del_init(&kiocb->ki_list);
                         break;
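The idea is easier to see outside the kernel: the request carries its own completion record, submission fills in the identifying fields once, and completion merely records the results and copies the whole record into the ring. The sketch below is an illustration, not fs/aio.c; the struct and function names are invented.

/* event_record_sketch.c - illustration only */
#include <stdint.h>
#include <stdio.h>

struct completion_event {       /* shaped like struct io_event */
        uint64_t obj;           /* identifies the user's request */
        uint64_t data;          /* user-supplied cookie */
        int64_t res;
        int64_t res2;
};

struct request {
        struct completion_event res;    /* analogous to aio_kiocb.ki_res */
};

static void submit(struct request *req, uint64_t user_ptr, uint64_t cookie)
{
        /* The identifying half of the event is fixed at submission time. */
        req->res.obj = user_ptr;
        req->res.data = cookie;
        req->res.res = 0;
        req->res.res2 = 0;
}

static void complete(struct request *req, int64_t res,
                     struct completion_event *ring_slot)
{
        /* Completion only records the result and copies the whole event. */
        req->res.res = res;
        *ring_slot = req->res;
}

int main(void)
{
        struct request req;
        struct completion_event slot;

        submit(&req, 0x1000, 0xcafe);
        complete(&req, 512, &slot);
        printf("obj=%llx data=%llx res=%lld\n",
               (unsigned long long)slot.obj,
               (unsigned long long)slot.data,
               (long long)slot.res);
        return 0;
}

A side benefit visible in the io_cancel() hunk above: with the user iocb pointer stored as the 64-bit ki_res.obj, the cancel lookup becomes a plain integer comparison.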
From: Al Viro viro@zeniv.linux.org.uk
[ Upstream commit 2bb874c0d873d13bd9b9b9c6d7b7c4edab18c8b4 ]
Instead of having aio_complete() set ->ki_res.{res,res2}, do that explicitly in its callers, drop the reference (as aio_complete() used to do) and delay the rest until the final iocb_put().
Signed-off-by: Al Viro viro@zeniv.linux.org.uk
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/aio.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 387b224217b5..82bf5dffb272 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1077,16 +1077,10 @@ static inline void iocb_destroy(struct aio_kiocb *iocb)
         kmem_cache_free(kiocb_cachep, iocb);
 }

-static inline void iocb_put(struct aio_kiocb *iocb)
-{
-        if (refcount_dec_and_test(&iocb->ki_refcnt))
-                iocb_destroy(iocb);
-}
-
 /* aio_complete
  * Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
         struct kioctx *ctx = iocb->ki_ctx;
         struct aio_ring *ring;
@@ -1094,8 +1088,6 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
         unsigned tail, pos, head;
         unsigned long flags;

-        iocb->ki_res.res = res;
-        iocb->ki_res.res2 = res2;
         /*
          * Add a completion event to the ring buffer. Must be done holding
          * ctx->completion_lock to prevent other code from messing with the tail
@@ -1161,7 +1153,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)

         if (waitqueue_active(&ctx->wait))
                 wake_up(&ctx->wait);
-        iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+        if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+                aio_complete(iocb);
+                iocb_destroy(iocb);
+        }
 }

 /* aio_read_events_ring
@@ -1435,7 +1434,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
                 file_end_write(kiocb->ki_filp);
         }

-        aio_complete(iocb, res, res2);
+        iocb->ki_res.res = res;
+        iocb->ki_res.res2 = res2;
+        iocb_put(iocb);
 }

 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1583,11 +1584,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,

 static void aio_fsync_work(struct work_struct *work)
 {
-        struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-        int ret;
+        struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);

-        ret = vfs_fsync(req->file, req->datasync);
-        aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+        iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+        iocb_put(iocb);
 }

 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,7 +1608,8 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,

 static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
 {
-        aio_complete(iocb, mangle_poll(mask), 0);
+        iocb->ki_res.res = mangle_poll(mask);
+        iocb_put(iocb);
 }

 static void aio_poll_complete_work(struct work_struct *work)
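A condensed stand-alone sketch of the new ordering (illustration only; the names are invented, not the kernel's): whichever side finishes the I/O records the result, but the event only becomes visible, and the object is only freed, when the final reference is dropped.

/* publish_at_last_put.c - illustration only */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
        atomic_int refcnt;
        long res;
};

static void publish_event(struct request *req)
{
        /* Stands in for aio_complete() copying ki_res into the ring. */
        printf("event published, res=%ld\n", req->res);
}

static void request_put(struct request *req)
{
        if (atomic_fetch_sub(&req->refcnt, 1) == 1) {
                publish_event(req);     /* only once both sides are done */
                free(req);
        }
}

int main(void)
{
        struct request *req = malloc(sizeof(*req));

        if (!req)
                return 1;
        atomic_init(&req->refcnt, 2);

        req->res = 4096;        /* completion side records the outcome */
        request_put(req);       /* completion side's reference */
        request_put(req);       /* submitter's reference; event goes out here */
        return 0;
}

Because the submit path still holds its reference, the event can never reach the ring (and the iocb can never be reaped) while the submitter is still looking at it.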
From: Al Viro viro@zeniv.linux.org.uk
[ Upstream commit af5c72b1fc7a00aa484e90b0c4e0eeb582545634 ]
aio_poll() has to cope with several unpleasant problems:
  * requests that might stay around indefinitely need to be made visible for io_cancel(2); that must not be done to a request already completed, though.
  * in cases when ->poll() has placed us on a waitqueue, wakeup might have happened (and request completed) before ->poll() returns.
  * worse, in some early wakeup cases request might end up re-added into the queue later - we can't treat "woken up and currently not in the queue" as "it's not going to stick around indefinitely"
  * ... moreover, ->poll() might have decided not to put it on any queues to start with, and that needs to be distinguished from the previous case
  * ->poll() might have tried to put us on more than one queue. Only the first will succeed for aio poll, so we might end up missing wakeups. OTOH, we might very well notice that only after the wakeup hits and request gets completed (all before ->poll() gets around to the second poll_wait()). In that case it's too late to decide that we have an error.
req->woken was an attempt to deal with that. Unfortunately, it was broken. What we need to keep track of is not that a wakeup has happened - the thing might come back after that. It's that the async reference is already gone and won't come back, so we can't (and needn't) put the request on the list of cancellables.
The easiest case is "request hadn't been put on any waitqueues"; we can tell by seeing NULL apt.head, and in that case there won't be anything async. We should either complete the request ourselves (if vfs_poll() reports anything of interest) or return an error.
In all other cases we get exclusion with wakeups by grabbing the queue lock.
If request is currently on queue and we have something interesting from vfs_poll(), we can steal it and complete the request ourselves.
If it's on queue and vfs_poll() has not reported anything interesting, we either put it on the cancellable list, or, if we know that it hadn't been put on all queues ->poll() wanted it on, we steal it and return an error.
If it's _not_ on queue, it's either been already dealt with (in which case we do nothing), or there's aio_poll_complete_work() about to be executed. In that case we either put it on the cancellable list, or, if we know it hadn't been put on all queues ->poll() wanted it on, simulate what cancel would've done.
It's a lot more convoluted than I'd like it to be. Single-consumer APIs suck, and unfortunately aio is not an exception...
Signed-off-by: Al Viro viro@zeniv.linux.org.uk
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/aio.c | 90 +++++++++++++++++++++++++-------------------------------
 1 file changed, 40 insertions(+), 50 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 82bf5dffb272..efa13410e04e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
         struct file *file;
         struct wait_queue_head *head;
         __poll_t events;
-        bool woken;
+        bool done;
         bool cancelled;
         struct wait_queue_entry wait;
         struct work_struct work;
@@ -1606,12 +1606,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
         return 0;
 }

-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-        iocb->ki_res.res = mangle_poll(mask);
-        iocb_put(iocb);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
         struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1637,9 +1631,11 @@ static void aio_poll_complete_work(struct work_struct *work)
                 return;
         }
         list_del_init(&iocb->ki_list);
+        iocb->ki_res.res = mangle_poll(mask);
+        req->done = true;
         spin_unlock_irq(&ctx->ctx_lock);

-        aio_poll_complete(iocb, mask);
+        iocb_put(iocb);
 }

 /* assumes we are called with irqs disabled */
@@ -1667,31 +1663,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
         __poll_t mask = key_to_poll(key);
         unsigned long flags;

-        req->woken = true;
-
         /* for instances that support it check for an event match first: */
-        if (mask) {
-                if (!(mask & req->events))
-                        return 0;
+        if (mask && !(mask & req->events))
+                return 0;

+        list_del_init(&req->wait.entry);
+
+        if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                 /*
                  * Try to complete the iocb inline if we can. Use
                  * irqsave/irqrestore because not all filesystems (e.g. fuse)
                  * call this function with IRQs disabled and because IRQs
                  * have to be disabled before ctx_lock is obtained.
                  */
-                if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-                        list_del(&iocb->ki_list);
-                        spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-                        list_del_init(&req->wait.entry);
-                        aio_poll_complete(iocb, mask);
-                        return 1;
-                }
+                list_del(&iocb->ki_list);
+                iocb->ki_res.res = mangle_poll(mask);
+                req->done = true;
+                spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+                iocb_put(iocb);
+        } else {
+                schedule_work(&req->work);
         }
-
-        list_del_init(&req->wait.entry);
-        schedule_work(&req->work);
         return 1;
 }

@@ -1723,6 +1715,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
         struct kioctx *ctx = aiocb->ki_ctx;
         struct poll_iocb *req = &aiocb->poll;
         struct aio_poll_table apt;
+        bool cancel = false;
         __poll_t mask;

         /* reject any unknown events outside the normal event mask. */
@@ -1736,7 +1729,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
         req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;

         req->head = NULL;
-        req->woken = false;
+        req->done = false;
         req->cancelled = false;

         apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,36 +1742,33 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
         init_waitqueue_func_entry(&req->wait, aio_poll_wake);

         mask = vfs_poll(req->file, &apt.pt) & req->events;
-        if (unlikely(!req->head)) {
-                /* we did not manage to set up a waitqueue, done */
-                goto out;
-        }
-
         spin_lock_irq(&ctx->ctx_lock);
-        spin_lock(&req->head->lock);
-        if (req->woken) {
-                /* wake_up context handles the rest */
-                mask = 0;
+        if (likely(req->head)) {
+                spin_lock(&req->head->lock);
+                if (unlikely(list_empty(&req->wait.entry))) {
+                        if (apt.error)
+                                cancel = true;
+                        apt.error = 0;
+                        mask = 0;
+                }
+                if (mask || apt.error) {
+                        list_del_init(&req->wait.entry);
+                } else if (cancel) {
+                        WRITE_ONCE(req->cancelled, true);
+                } else if (!req->done) { /* actually waiting for an event */
+                        list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+                        aiocb->ki_cancel = aio_poll_cancel;
+                }
+                spin_unlock(&req->head->lock);
+        }
+        if (mask) { /* no async, we'd stolen it */
+                aiocb->ki_res.res = mangle_poll(mask);
                 apt.error = 0;
-        } else if (mask || apt.error) {
-                /* if we get an error or a mask we are done */
-                WARN_ON_ONCE(list_empty(&req->wait.entry));
-                list_del_init(&req->wait.entry);
-        } else {
-                /* actually waiting for an event */
-                list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-                aiocb->ki_cancel = aio_poll_cancel;
         }
-        spin_unlock(&req->head->lock);
         spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-        if (unlikely(apt.error))
-                return apt.error;
-
-        if (mask)
-                aio_poll_complete(aiocb, mask);
-        return 0;
+        iocb_put(aiocb);
+        return apt.error;
 }

 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
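The case analysis in the message above is dense, so here it is condensed into a stand-alone pure function for the submit side (illustration only; locking, the wakeup path and the real field names are omitted, and all identifiers below are invented):

/* poll_submit_cases.c - illustration of the case analysis, not fs/aio.c */
#include <stdbool.h>
#include <stdio.h>

enum submit_action {
        FINISH_SYNCHRONOUSLY,   /* we own it: report the mask or the error now */
        MAKE_CANCELLABLE,       /* stays async: expose it to io_cancel(2) */
        MARK_CANCELLED,         /* async side will run; simulate a cancel */
        LEAVE_ALONE,            /* wakeup side already completed it */
};

static enum submit_action poll_submit_decision(bool queued,      /* ->poll() used a waitqueue */
                                               bool on_queue,    /* wait entry still linked */
                                               bool mask,        /* vfs_poll() reported events */
                                               bool queue_error, /* a further queue was wanted */
                                               bool done)        /* async side already finished */
{
        if (!queued)                            /* nothing async can ever happen */
                return FINISH_SYNCHRONOUSLY;
        if (on_queue) {
                if (mask || queue_error)        /* steal it: complete or fail inline */
                        return FINISH_SYNCHRONOUSLY;
                return MAKE_CANCELLABLE;        /* actually waiting for an event */
        }
        /* off the queue: completion already happened or its work is pending */
        if (done)
                return LEAVE_ALONE;
        return queue_error ? MARK_CANCELLED : MAKE_CANCELLABLE;
}

int main(void)
{
        /* e.g. queued, still linked, no events yet, no queue error, not done */
        printf("%d\n", poll_submit_decision(true, true, false, false, false));
        return 0;
}

In the real patch these same decisions are made under ctx_lock and the waitqueue lock, which is what provides the exclusion against the wakeup path.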