This is a note to let you know that I've just added the patch titled
s390/qeth: on channel error, reject further cmd requests
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
s390-qeth-on-channel-error-reject-further-cmd-requests.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
Date: Tue, 20 Mar 2018 07:59:15 +0100
Subject: s390/qeth: on channel error, reject further cmd requests
From: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
[ Upstream commit a6c3d93963e4b333c764fde69802c3ea9eaa9d5c ]
When the IRQ handler determines that one of the cmd IO channels has
failed and schedules recovery, block any further cmd requests from
being submitted. The request would inevitably stall, and prevent the
recovery from making progress until the request times out.
This sort of error was observed after Live Guest Relocation, where
the pending IO on the READ channel intentionally gets terminated to
kick-start recovery. Simultaneously the guest executed SIOCETHTOOL,
triggering qeth to issue a QUERY CARD INFO command. The command
then stalled in the inoperable WRITE channel.
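
For illustration, a minimal userspace model of the guard this one-liner
enables (hypothetical names; the real submit-side check lives elsewhere
in qeth_core_main.c and is only sketched here):

/* Toy model: the IRQ path marks the card as broken, and the submit
 * path rejects new commands instead of letting them stall on a dead
 * channel until they time out.  Not the actual qeth code. */
#include <errno.h>
#include <stdio.h>

struct fake_card {
	int read_or_write_problem;	/* the flag the hunk below sets */
};

static void irq_saw_channel_error(struct fake_card *card)
{
	card->read_or_write_problem = 1;
	/* ...clear pending IPA commands, schedule recovery... */
}

static int submit_cmd(struct fake_card *card, const char *cmd)
{
	if (card->read_or_write_problem)
		return -EIO;	/* reject up front */
	printf("submitted %s\n", cmd);
	return 0;
}

int main(void)
{
	struct fake_card card = { 0 };

	submit_cmd(&card, "QUERY CARD INFO");
	irq_saw_channel_error(&card);
	if (submit_cmd(&card, "QUERY CARD INFO") == -EIO)
		printf("rejected while recovery is pending\n");
	return 0;
}
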
Signed-off-by: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/s390/net/qeth_core_main.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1175,6 +1175,7 @@ static void qeth_irq(struct ccw_device *
}
rc = qeth_get_problem(cdev, irb);
if (rc) {
+ card->read_or_write_problem = 1;
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
Patches currently in stable-queue which might be from jwi(a)linux.vnet.ibm.com are
queue-4.14/s390-qeth-when-thread-completes-wake-up-all-waiters.patch
queue-4.14/s390-qeth-lock-read-device-while-queueing-next-buffer.patch
queue-4.14/net-iucv-free-memory-obtained-by-kzalloc.patch
queue-4.14/s390-qeth-on-channel-error-reject-further-cmd-requests.patch
queue-4.14/s390-qeth-free-netdevice-when-removing-a-card.patch
This is a note to let you know that I've just added the patch titled
s390/qeth: lock read device while queueing next buffer
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
s390-qeth-lock-read-device-while-queueing-next-buffer.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
Date: Tue, 20 Mar 2018 07:59:14 +0100
Subject: s390/qeth: lock read device while queueing next buffer
From: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
[ Upstream commit 17bf8c9b3d499d5168537c98b61eb7a1fcbca6c2 ]
For calling ccw_device_start(), issue_next_read() needs to hold the
device's ccwlock.
This is satisfied for the IRQ handler path (where qeth_irq() gets called
under the ccwlock), but we need explicit locking for the initial call by
the MPC initialization.
Signed-off-by: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/s390/net/qeth_core_main.c | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -526,8 +526,7 @@ static inline int qeth_is_cq(struct qeth
queue == card->qdio.no_in_queues - 1;
}
-
-static int qeth_issue_next_read(struct qeth_card *card)
+static int __qeth_issue_next_read(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -558,6 +557,17 @@ static int qeth_issue_next_read(struct q
return rc;
}
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+ int ret;
+
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ ret = __qeth_issue_next_read(card);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+ return ret;
+}
+
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
@@ -1183,7 +1193,7 @@ static void qeth_irq(struct ccw_device *
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
- qeth_issue_next_read(card);
+ __qeth_issue_next_read(card);
iob = channel->iob;
index = channel->buf_no;
Patches currently in stable-queue which might be from jwi(a)linux.vnet.ibm.com are
queue-4.14/s390-qeth-when-thread-completes-wake-up-all-waiters.patch
queue-4.14/s390-qeth-lock-read-device-while-queueing-next-buffer.patch
queue-4.14/net-iucv-free-memory-obtained-by-kzalloc.patch
queue-4.14/s390-qeth-on-channel-error-reject-further-cmd-requests.patch
queue-4.14/s390-qeth-free-netdevice-when-removing-a-card.patch
This is a note to let you know that I've just added the patch titled
s390/qeth: free netdevice when removing a card
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
s390-qeth-free-netdevice-when-removing-a-card.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
Date: Tue, 20 Mar 2018 07:59:12 +0100
Subject: s390/qeth: free netdevice when removing a card
From: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
[ Upstream commit 6be687395b3124f002a653c1a50b3260222b3cd7 ]
On removal, a qeth card's netdevice is currently not properly freed
because the call chain looks as follows:
qeth_core_remove_device(card)
  lx_remove_device(card)
    unregister_netdev(card->dev)
    card->dev = NULL          !!!
  qeth_core_free_card(card)
    if (card->dev)            !!!
      free_netdev(card->dev)
Fix it by freeing the netdev straight after unregistering. This also
fixes the sysfs-driven layer switch case (qeth_dev_layer2_store()),
where the need to free the current netdevice was not considered at all.
Note that free_netdev() takes care of the netif_napi_del() for us too.
Fixes: 4a71df50047f ("qeth: new qeth device driver")
Signed-off-by: Julian Wiedmann <jwi(a)linux.vnet.ibm.com>
Reviewed-by: Ursula Braun <ubraun(a)linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/s390/net/qeth_core_main.c | 2 --
drivers/s390/net/qeth_l2_main.c | 2 +-
drivers/s390/net/qeth_l3_main.c | 2 +-
3 files changed, 2 insertions(+), 4 deletions(-)
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5061,8 +5061,6 @@ static void qeth_core_free_card(struct q
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
- if (card->dev)
- free_netdev(card->dev);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -935,8 +935,8 @@ static void qeth_l2_remove_device(struct
qeth_l2_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
return;
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3046,8 +3046,8 @@ static void qeth_l3_remove_device(struct
qeth_l3_set_offline(cgdev);
if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
Patches currently in stable-queue which might be from jwi(a)linux.vnet.ibm.com are
queue-4.14/s390-qeth-when-thread-completes-wake-up-all-waiters.patch
queue-4.14/s390-qeth-lock-read-device-while-queueing-next-buffer.patch
queue-4.14/net-iucv-free-memory-obtained-by-kzalloc.patch
queue-4.14/s390-qeth-on-channel-error-reject-further-cmd-requests.patch
queue-4.14/s390-qeth-free-netdevice-when-removing-a-card.patch
This is a note to let you know that I've just added the patch titled
rhashtable: Fix rhlist duplicates insertion
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
rhashtable-fix-rhlist-duplicates-insertion.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Paul Blakey <paulb(a)mellanox.com>
Date: Sun, 4 Mar 2018 17:29:48 +0200
Subject: rhashtable: Fix rhlist duplicates insertion
From: Paul Blakey <paulb(a)mellanox.com>
[ Upstream commit d3dcf8eb615537526bd42ff27a081d46d337816e ]
When inserting duplicate objects (those with the same key),
the current rhlist implementation messes up the chain pointers by
updating the bucket pointer instead of the previous node's next
pointer to the newly inserted node. This causes missing elements on
removal and traversal.
Fix that by properly updating the pprev pointer to point to
the correct rhash_head next pointer.
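
As a standalone illustration (plain C on a trivial singly linked list,
not rhashtable code), the same pprev technique looks like this; the
"pprev = &head->next" assignment is the part the fix restores:

#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

/* Walk the chain keeping a pointer to the previous "next" field, so a
 * duplicate can be spliced in next to its peer.  Without advancing
 * pprev past non-matching nodes, *pprev would still be the bucket head
 * and the splice would drop every node walked so far. */
static void insert_dup(struct node **bucket, struct node *nnew)
{
	struct node **pprev = bucket;
	struct node *head;

	for (head = *bucket; head; head = head->next) {
		if (head->key != nnew->key) {
			pprev = &head->next;	/* the added line */
			continue;
		}
		nnew->next = head;		/* splice in before its peer */
		*pprev = nnew;
		return;
	}
	nnew->next = *bucket;			/* no peer: link at the front */
	*bucket = nnew;
}

int main(void)
{
	struct node a = { .key = 1 }, b = { .key = 2, .next = &a };
	struct node dup = { .key = 1 };
	struct node *bucket = &b;		/* chain: 2 -> 1 */

	insert_dup(&bucket, &dup);		/* chain: 2 -> 1 -> 1 */
	for (struct node *n = bucket; n; n = n->next)
		printf("%d ", n->key);
	printf("\n");
	return 0;
}
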
Issue: 1241076
Change-Id: I86b2c140bcb4aeb10b70a72a267ff590bb2b17e7
Fixes: ca26893f05e8 ('rhashtable: Add rhlist interface')
Signed-off-by: Paul Blakey <paulb(a)mellanox.com>
Acked-by: Herbert Xu <herbert(a)gondor.apana.org.au>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
include/linux/rhashtable.h | 4 +++-
lib/rhashtable.c | 4 +++-
2 files changed, 6 insertions(+), 2 deletions(-)
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -750,8 +750,10 @@ slow_path:
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
data = rht_obj(ht, head);
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -537,8 +537,10 @@ static void *rhashtable_lookup_one(struc
if (!key ||
(ht->p.obj_cmpfn ?
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
if (!ht->rhlist)
return rht_obj(ht, head);
Patches currently in stable-queue which might be from paulb(a)mellanox.com are
queue-4.14/rhashtable-fix-rhlist-duplicates-insertion.patch
This is a note to let you know that I've just added the patch titled
qede: Fix qedr link update
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
qede-fix-qedr-link-update.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Michal Kalderon <Michal.Kalderon(a)cavium.com>
Date: Wed, 14 Mar 2018 14:56:53 +0200
Subject: qede: Fix qedr link update
From: Michal Kalderon <Michal.Kalderon(a)cavium.com>
[ Upstream commit 4609adc27175839408359822523de7247d56c87f ]
Link updates were not reported to qedr correctly, leading to cases
where a link could be down but qedr would see it as up.
In addition, once qede was loaded, link state would be up,
regardless of the actual link state.
Signed-off-by: Michal Kalderon <michal.kalderon(a)cavium.com>
Signed-off-by: Ariel Elior <ariel.elior(a)cavium.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/ethernet/qlogic/qede/qede_main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2066,8 +2066,6 @@ static int qede_load(struct qede_dev *ed
link_params.link_up = true;
edev->ops->common->set_link(edev->cdev, &link_params);
- qede_rdma_dev_event_open(edev);
-
edev->state = QEDE_STATE_OPEN;
DP_INFO(edev, "Ending successfully qede load\n");
@@ -2168,12 +2166,14 @@ static void qede_link_update(void *dev,
DP_NOTICE(edev, "Link is up\n");
netif_tx_start_all_queues(edev->ndev);
netif_carrier_on(edev->ndev);
+ qede_rdma_dev_event_open(edev);
}
} else {
if (netif_carrier_ok(edev->ndev)) {
DP_NOTICE(edev, "Link is down\n");
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
+ qede_rdma_dev_event_close(edev);
}
}
}
Patches currently in stable-queue which might be from Michal.Kalderon(a)cavium.com are
queue-4.14/qed-fix-non-tcp-packets-should-be-dropped-on-iwarp-ll2-connection.patch
queue-4.14/qede-fix-qedr-link-update.patch
This is a note to let you know that I've just added the patch titled
ppp: avoid loop in xmit recursion detection code
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
ppp-avoid-loop-in-xmit-recursion-detection-code.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Guillaume Nault <g.nault(a)alphalink.fr>
Date: Tue, 20 Mar 2018 16:49:26 +0100
Subject: ppp: avoid loop in xmit recursion detection code
From: Guillaume Nault <g.nault(a)alphalink.fr>
[ Upstream commit 6d066734e9f09cdea4a3b9cb76136db3f29cfb02 ]
We already detect situations where a PPP channel sends packets back to
its upper PPP device. While this is enough to avoid deadlocking on xmit
locks, this doesn't prevent packets from looping between the channel
and the unit.
The problem is that ppp_start_xmit() enqueues packets in ppp->file.xq
before checking for xmit recursion. Therefore, __ppp_xmit_process()
might dequeue a packet from ppp->file.xq and send it on the channel
which, in turn, loops it back on the unit. Then ppp_start_xmit()
queues the packet back to ppp->file.xq and __ppp_xmit_process() picks
it up and sends it again through the channel. Therefore, the packet
will loop between __ppp_xmit_process() and ppp_start_xmit() until some
other part of the xmit path drops it.
For L2TP, we rapidly fill the skb's headroom and pppol2tp_xmit() drops
the packet after a few iterations. But PPTP reallocates the headroom
if necessary, letting the loop run and exhaust the machine resources
(as reported in https://bugzilla.kernel.org/show_bug.cgi?id=199109).
Fix this by letting __ppp_xmit_process() enqueue the skb to
ppp->file.xq, so that we can check for recursion before adding it to
the queue. Now ppp_xmit_process() can drop the packet when recursion is
detected.
__ppp_channel_push() is a bit special. It calls __ppp_xmit_process()
without having any actual packet to send. This is used by
ppp_output_wakeup() to re-enable transmission on the parent unit (for
implementations like ppp_async.c, where the .start_xmit() function
might not consume the skb, leaving it in ppp->xmit_pending and
disabling transmission).
Therefore, __ppp_xmit_process() needs to handle the case where skb is
NULL, dequeuing as many packets as possible from ppp->file.xq.
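
A simplified, single-threaded model of that reordering (illustration
only: a plain counter stands in for the driver's per-CPU
xmit_recursion counter, and the ppp->file.xq queueing itself is
omitted):

#include <stdio.h>

static int xmit_recursion;		/* stand-in for the per-CPU counter */

static void xmit_process(const char *pkt);

/* A misbehaving channel that loops the packet straight back into the
 * transmit path, as described above. */
static void channel_send(const char *pkt)
{
	xmit_process(pkt);
}

static void xmit_process(const char *pkt)
{
	/* recursion is checked before anything would be queued, so a
	 * looping packet is dropped here instead of cycling forever */
	if (xmit_recursion) {
		printf("recursion detected, dropping %s\n", pkt);
		return;
	}
	xmit_recursion++;
	channel_send(pkt);
	xmit_recursion--;
}

int main(void)
{
	xmit_process("frame");
	return 0;
}
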
Reported-by: xu heng <xuheng333(a)zoho.com>
Fixes: 55454a565836 ("ppp: avoid dealock on recursive xmit")
Signed-off-by: Guillaume Nault <g.nault(a)alphalink.fr>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/ppp/ppp_generic.c | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -256,7 +256,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -512,13 +512,12 @@ static ssize_t ppp_write(struct file *fi
goto out;
}
- skb_queue_tail(&pf->xq, skb);
-
switch (pf->kind) {
case INTERFACE:
- ppp_xmit_process(PF_TO_PPP(pf));
+ ppp_xmit_process(PF_TO_PPP(pf), skb);
break;
case CHANNEL:
+ skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}
@@ -1264,8 +1263,8 @@ ppp_start_xmit(struct sk_buff *skb, stru
put_unaligned_be16(proto, pp);
skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
- skb_queue_tail(&ppp->file.xq, skb);
- ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp, skb);
+
return NETDEV_TX_OK;
outf:
@@ -1417,13 +1416,14 @@ static void ppp_setup(struct net_device
*/
/* Called to do any work queued up on the transmit side that can now be done */
-static void __ppp_xmit_process(struct ppp *ppp)
+static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
- struct sk_buff *skb;
-
ppp_xmit_lock(ppp);
if (!ppp->closing) {
ppp_push(ppp);
+
+ if (skb)
+ skb_queue_tail(&ppp->file.xq, skb);
while (!ppp->xmit_pending &&
(skb = skb_dequeue(&ppp->file.xq)))
ppp_send_frame(ppp, skb);
@@ -1437,7 +1437,7 @@ static void __ppp_xmit_process(struct pp
ppp_xmit_unlock(ppp);
}
-static void ppp_xmit_process(struct ppp *ppp)
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
local_bh_disable();
@@ -1445,7 +1445,7 @@ static void ppp_xmit_process(struct ppp
goto err;
(*this_cpu_ptr(ppp->xmit_recursion))++;
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, skb);
(*this_cpu_ptr(ppp->xmit_recursion))--;
local_bh_enable();
@@ -1455,6 +1455,8 @@ static void ppp_xmit_process(struct ppp
err:
local_bh_enable();
+ kfree_skb(skb);
+
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
@@ -1939,7 +1941,7 @@ static void __ppp_channel_push(struct ch
if (skb_queue_empty(&pch->file.xq)) {
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, NULL);
}
}
Patches currently in stable-queue which might be from g.nault(a)alphalink.fr are
queue-4.14/ppp-avoid-loop-in-xmit-recursion-detection-code.patch
queue-4.14/ipv6-old_dport-should-be-a-__be16-in-__ip6_datagram_connect.patch
queue-4.14/l2tp-do-not-accept-arbitrary-sockets.patch
This is a note to let you know that I've just added the patch titled
qed: Fix non TCP packets should be dropped on iWARP ll2 connection
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
qed-fix-non-tcp-packets-should-be-dropped-on-iwarp-ll2-connection.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Michal Kalderon <Michal.Kalderon(a)cavium.com>
Date: Wed, 14 Mar 2018 14:49:28 +0200
Subject: qed: Fix non TCP packets should be dropped on iWARP ll2 connection
From: Michal Kalderon <Michal.Kalderon(a)cavium.com>
[ Upstream commit 16da09047d3fb991dc48af41f6d255fd578e8ca2 ]
FW workaround. The iWARP LL2 connection expects only TCP packets to
arrive on it; the fix drops any non-TCP packets.
Fixes: b5c29ca ("qed: iWARP CM - setup a ll2 connection for handling
SYN packets")
Signed-off-by: Michal Kalderon <Michal.Kalderon(a)cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior(a)cavium.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 15 +++++++++++++++
1 file changed, 15 insertions(+)
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1663,6 +1663,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *
iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
if (eth_type == ETH_P_IP) {
+ if (iph->protocol != IPPROTO_TCP) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected ip protocol on ll2 %x\n",
+ iph->protocol);
+ return -EINVAL;
+ }
+
cm_info->local_ip[0] = ntohl(iph->daddr);
cm_info->remote_ip[0] = ntohl(iph->saddr);
cm_info->ip_version = TCP_IPV4;
@@ -1671,6 +1678,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *
*payload_len = ntohs(iph->tot_len) - ip_hlen;
} else if (eth_type == ETH_P_IPV6) {
ip6h = (struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr != IPPROTO_TCP) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected ip protocol on ll2 %x\n",
+ iph->protocol);
+ return -EINVAL;
+ }
+
for (i = 0; i < 4; i++) {
cm_info->local_ip[i] =
ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
Patches currently in stable-queue which might be from Michal.Kalderon(a)cavium.com are
queue-4.14/qed-fix-non-tcp-packets-should-be-dropped-on-iwarp-ll2-connection.patch
queue-4.14/qede-fix-qedr-link-update.patch
This is a note to let you know that I've just added the patch titled
netlink: avoid a double skb free in genlmsg_mcast()
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
netlink-avoid-a-double-skb-free-in-genlmsg_mcast.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Nicolas Dichtel <nicolas.dichtel(a)6wind.com>
Date: Wed, 14 Mar 2018 21:10:23 +0100
Subject: netlink: avoid a double skb free in genlmsg_mcast()
From: Nicolas Dichtel <nicolas.dichtel(a)6wind.com>
[ Upstream commit 02a2385f37a7c6594c9d89b64c4a1451276f08eb ]
nlmsg_multicast() always consumes the skb, thus the original skb must be
freed only when this function is called with a clone.
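
As a rough userspace model of that ownership rule (send_consume() is a
made-up stand-in for nlmsg_multicast(), which likewise always takes
ownership of the buffer it is given):

#include <stdio.h>
#include <stdlib.h>

/* Always takes ownership of (and frees) buf, whether or not delivery
 * succeeds -- just like a "consuming" kernel helper. */
static int send_consume(char *buf, int fail)
{
	free(buf);
	return fail ? -1 : 0;
}

int main(void)
{
	char *orig  = malloc(16);
	char *clone = malloc(16);

	if (send_consume(clone, 1) < 0) {
		/* clone is already gone; freeing it here would be the
		 * double free the patch removes */
		fprintf(stderr, "delivery failed\n");
	}

	/* orig is still ours, so handing it over exactly once is fine */
	return send_consume(orig, 0);
}
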
Fixes: cb9f7a9a5c96 ("netlink: ensure to loop over all netns in genlmsg_multicast_allns()")
Reported-by: Ben Hutchings <ben.hutchings(a)codethink.co.uk>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel(a)6wind.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
net/netlink/genetlink.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff
if (!err)
delivered = true;
else if (err != -ESRCH)
- goto error;
+ return err;
return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
Patches currently in stable-queue which might be from nicolas.dichtel(a)6wind.com are
queue-4.14/netlink-avoid-a-double-skb-free-in-genlmsg_mcast.patch
This is a note to let you know that I've just added the patch titled
net: use skb_to_full_sk() in skb_update_prio()
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-use-skb_to_full_sk-in-skb_update_prio.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Eric Dumazet <edumazet(a)google.com>
Date: Wed, 14 Mar 2018 09:04:16 -0700
Subject: net: use skb_to_full_sk() in skb_update_prio()
From: Eric Dumazet <edumazet(a)google.com>
[ Upstream commit 4dcb31d4649df36297296b819437709f5407059c ]
Andrei Vagin reported a KASAN: slab-out-of-bounds error in
skb_update_prio()
Since a SYNACK might be attached to a request socket, we need to
get back to the listener socket.
Since this listener is manipulated without locks, add const
qualifiers to sock_cgroup_prioidx() so that the const can also
be used in skb_update_prio()
Also add the const qualifier to sock_cgroup_classid() for consistency.
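
As a loose illustration of that lookup (made-up types and helper, not
the kernel's socket structures): a request socket carries no cgroup
data of its own, so the priority code must hop back to the full
listener socket before dereferencing it.

#include <stdio.h>

struct full_sock {
	unsigned int prioidx;		/* stands in for sk_cgrp_data */
};

struct mini_sock {			/* request socket: no cgroup data */
	struct full_sock *listener;
};

/* Return the full socket behind a packet, or NULL if there is none;
 * loosely what skb_to_full_sk() does for a SYNACK. */
static struct full_sock *to_full_sk(struct mini_sock *req,
				    struct full_sock *full)
{
	if (full)
		return full;		/* already a full socket */
	if (req)
		return req->listener;	/* SYNACK: go back to the listener */
	return NULL;
}

int main(void)
{
	struct full_sock listener = { .prioidx = 3 };
	struct mini_sock req = { .listener = &listener };
	struct full_sock *sk = to_full_sk(&req, NULL);

	if (sk)
		printf("prioidx = %u\n", sk->prioidx);
	return 0;
}
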
Fixes: ca6fb0651883 ("tcp: attach SYNACK messages to request sockets instead of listener")
Signed-off-by: Eric Dumazet <edumazet(a)google.com>
Reported-by: Andrei Vagin <avagin(a)virtuozzo.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
include/linux/cgroup-defs.h | 4 ++--
net/core/dev.c | 22 +++++++++++++++-------
2 files changed, 17 insertions(+), 9 deletions(-)
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -696,13 +696,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3224,15 +3224,23 @@ static inline int __dev_xmit_skb(struct
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+ const struct netprio_map *map;
+ const struct sock *sk;
+ unsigned int prioidx;
- if (!skb->priority && skb->sk && map) {
- unsigned int prioidx =
- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+ if (skb->priority)
+ return;
+ map = rcu_dereference_bh(skb->dev->priomap);
+ if (!map)
+ return;
+ sk = skb_to_full_sk(skb);
+ if (!sk)
+ return;
- if (prioidx < map->priomap_len)
- skb->priority = map->priomap[prioidx];
- }
+ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
Patches currently in stable-queue which might be from edumazet(a)google.com are
queue-4.14/ipv6-fix-access-to-non-linear-packet-in-ndisc_fill_redirect_hdr_option.patch
queue-4.14/skbuff-fix-not-waking-applications-when-errors-are-enqueued.patch
queue-4.14/l2tp-do-not-accept-arbitrary-sockets.patch
queue-4.14/tcp-purge-write-queue-upon-aborting-the-connection.patch
queue-4.14/net-use-skb_to_full_sk-in-skb_update_prio.patch
queue-4.14/ieee802154-6lowpan-fix-possible-null-deref-in-lowpan_device_event.patch
queue-4.14/tcp-reset-sk_send_head-in-tcp_write_queue_purge.patch
This is a note to let you know that I've just added the patch titled
net: systemport: Rewrite __bcm_sysport_tx_reclaim()
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
net-systemport-rewrite-__bcm_sysport_tx_reclaim.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Mar 28 18:38:30 CEST 2018
From: Florian Fainelli <f.fainelli(a)gmail.com>
Date: Tue, 13 Mar 2018 14:45:07 -0700
Subject: net: systemport: Rewrite __bcm_sysport_tx_reclaim()
From: Florian Fainelli <f.fainelli(a)gmail.com>
[ Upstream commit 484d802d0f2f29c335563fcac2a8facf174a1bbc ]
There is no need for complex checking between the last consumed index
and current consumed index, a simple subtraction will do.
This also eliminates the possibility of a permanent transmit queue stall
under the following conditions:
- one CPU bursts ring->size worth of traffic (up to 256 buffers), to the
point where we run out of free descriptors, so we stop the transmit
queue at the end of bcm_sysport_xmit()
- because of our locking, we have the transmit process disable
interrupts which means we can be blocking the TX reclamation process
- when TX reclamation finally runs, we will be computing the difference
between ring->c_index (last consumed index by SW) and what the HW
reports through its register
- this register is masked with (ring->size - 1) = 0xff, which will lead
to stripping the upper bits of the index (register is 16-bits wide)
- we will be computing last_tx_cn as 0, which means there is no work to
be done, and we never wake up the transmit queue, leaving it
permanently disabled.
A practical example: ring->c_index aka last_c_index = 12, we
pushed 256 entries, HW consumer index = 268, we mask it with 0xff = 12,
so last_tx_cn == 0, nothing happens.
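
That arithmetic can be checked directly; the sketch below assumes the
consumer-index mask covers the full 16-bit register (0xffff), as the
description above implies:

#include <stdio.h>

int main(void)
{
	unsigned int last_c_index = 12;		/* ring->c_index before reclaim */
	unsigned int hw_c_index = 268;		/* what the HW register reports */
	unsigned int ring_mask = 256 - 1;	/* old masking: ring->size - 1 */
	unsigned int reg_mask = 0xffff;		/* 16-bit consumer index */

	/* old logic: mask down to the ring size first, then compare */
	unsigned int c_index = hw_c_index & ring_mask;	/* 268 & 0xff = 12 */
	unsigned int last_tx_cn = c_index >= last_c_index ?
		c_index - last_c_index :
		(ring_mask + 1) - last_c_index + c_index;

	/* new logic: subtract in full register width, then mask */
	unsigned int txbds_ready = (hw_c_index - last_c_index) & reg_mask;

	printf("old last_tx_cn  = %u (queue never wakes up)\n", last_tx_cn);
	printf("new txbds_ready = %u\n", txbds_ready);
	return 0;
}
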
Fixes: 80105befdb4b ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver")
Signed-off-by: Florian Fainelli <f.fainelli(a)gmail.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/ethernet/broadcom/bcmsysport.c | 33 +++++++++++++----------------
drivers/net/ethernet/broadcom/bcmsysport.h | 2 -
2 files changed, 16 insertions(+), 19 deletions(-)
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(s
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct net_device *ndev = priv->netdev;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;
/* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_rec
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);
- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}
u64_stats_update_begin(&priv->syncp);
@@ -1406,6 +1402,7 @@ static int bcm_sysport_init_tx_ring(stru
netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
Patches currently in stable-queue which might be from f.fainelli(a)gmail.com are
queue-4.14/net-systemport-rewrite-__bcm_sysport_tx_reclaim.patch
queue-4.14/sysfs-symlink-export-sysfs_create_link_nowarn.patch
queue-4.14/net-fec-fix-unbalanced-pm-runtime-calls.patch
queue-4.14/net-phy-relax-error-checking-when-creating-sysfs-link-netdev-phydev.patch