intel_uncore_suspend() unregisters the uncore code's PMIC bus access
notifier and gets called on both normal and runtime suspend.
intel_uncore_resume_early() re-registers the notifier, but only on
normal resume. Add a new intel_uncore_runtime_resume() function which
only re-registers the notifier and call that on runtime resume.
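In condensed form, the intended pairing looks like the sketch below
(illustrative helpers with made-up names, not the actual driver
functions; the real change is in the diff that follows): suspend drops
the notifier on both suspend paths, so both resume paths must put it
back.

#include <linux/notifier.h>
#include <asm/iosf_mbi.h>

/* called on both normal and runtime suspend */
static void demo_uncore_suspend(struct notifier_block *nb)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(nb);
}

/* normal resume: re-register and do the full early resume work */
static void demo_uncore_resume_early(struct notifier_block *nb)
{
	iosf_mbi_register_pmic_bus_access_notifier(nb);
	/* ... forcewake restore, fault clearing, etc. ... */
}

/* runtime resume: only the notifier needs to come back */
static void demo_uncore_runtime_resume(struct notifier_block *nb)
{
	iosf_mbi_register_pmic_bus_access_notifier(nb);
}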
Cc: stable(a)vger.kernel.org
Reported-by: Imre Deak <imre.deak(a)intel.com>
Reviewed-by: Imre Deak <imre.deak(a)intel.com>
Signed-off-by: Hans de Goede <hdegoede(a)redhat.com>
---
drivers/gpu/drm/i915/i915_drv.c | 2 ++
drivers/gpu/drm/i915/intel_uncore.c | 6 ++++++
drivers/gpu/drm/i915/intel_uncore.h | 1 +
3 files changed, 9 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e7e9e061073b..cdff9825a3da 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2627,6 +2627,8 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
+ intel_uncore_runtime_resume(dev_priv);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index dbc5cc309cbc..9567ba385f41 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -557,6 +557,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
}
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+}
+
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
i915_modparams.enable_rc6 =
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 582771251b57..9ce079b5dd0d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -134,6 +134,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
void intel_uncore_fini(struct drm_i915_private *dev_priv);
void intel_uncore_suspend(struct drm_i915_private *dev_priv);
void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
--
2.14.3
From: K. Y. Srinivasan <kys(a)microsoft.com>
The current rescind processing code does not correctly handle the case
where the host immediately rescinds a channel that has just been
offered. In that case we can get stuck in the vmbus open call: since
the channel has been rescinded, the host will never respond, and we
block forever. Fix this problem.
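For reference, below is a condensed sketch of the completion handshake
this patch introduces (the struct and function names are made up for
illustration; the real code lives in vmbus_close() and
vmbus_onoffer_rescind() as shown in the diff): the close path of a
rescinded channel waits until the rescind handler signals, instead of
racing with offer handling that may still be in flight.

#include <linux/completion.h>

struct demo_channel {
	bool rescind;			/* set by the rescind handler */
	struct completion rescind_event;
};

/* offer time: arm the completion */
static void demo_channel_init(struct demo_channel *chan)
{
	init_completion(&chan->rescind_event);
}

/* rescind handler: wake up anyone waiting in the close path */
static void demo_on_rescind(struct demo_channel *chan)
{
	chan->rescind = true;
	complete(&chan->rescind_event);
}

/* close path: a rescinded channel waits for the handler to finish */
static void demo_close(struct demo_channel *chan)
{
	if (chan->rescind)
		wait_for_completion(&chan->rescind_event);
	/* ... proceed with the actual teardown ... */
}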
Signed-off-by: K. Y. Srinivasan <kys(a)microsoft.com>
Cc: stable(a)vger.kernel.org
---
drivers/hv/channel.c | 10 ++++++++--
drivers/hv/channel_mgmt.c | 7 ++++---
include/linux/hyperv.h | 1 +
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 19f0cf3..ba0a092 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -659,22 +659,28 @@ void vmbus_close(struct vmbus_channel *channel)
*/
return;
}
- mutex_lock(&vmbus_connection.channel_mutex);
/*
* Close all the sub-channels first and then close the
* primary channel.
*/
list_for_each_safe(cur, tmp, &channel->sc_list) {
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
- vmbus_close_internal(cur_channel);
if (cur_channel->rescind) {
+ wait_for_completion(&cur_channel->rescind_event);
+ mutex_lock(&vmbus_connection.channel_mutex);
+ vmbus_close_internal(cur_channel);
hv_process_channel_removal(
cur_channel->offermsg.child_relid);
+ } else {
+ mutex_lock(&vmbus_connection.channel_mutex);
+ vmbus_close_internal(cur_channel);
}
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
* Now close the primary.
*/
+ mutex_lock(&vmbus_connection.channel_mutex);
vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ec5454f..c21020b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -333,6 +333,7 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
return NULL;
spin_lock_init(&channel->lock);
+ init_completion(&channel->rescind_event);
INIT_LIST_HEAD(&channel->sc_list);
INIT_LIST_HEAD(&channel->percpu_list);
@@ -898,6 +899,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
/*
* Now wait for offer handling to complete.
*/
+ vmbus_rescind_cleanup(channel);
while (READ_ONCE(channel->probe_done) == false) {
/*
* We wait here until any channel offer is currently
@@ -913,7 +915,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
- vmbus_rescind_cleanup(channel);
return;
}
/*
@@ -922,7 +923,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
*/
dev = get_device(&channel->device_obj->device);
if (dev) {
- vmbus_rescind_cleanup(channel);
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
@@ -936,13 +936,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* 2. Then close the primary channel.
*/
mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_rescind_cleanup(channel);
if (channel->state == CHANNEL_OPEN_STATE) {
/*
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
hv_process_channel_removal(rescind->child_relid);
+ } else {
+ complete(&channel->rescind_event);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f3e97c5..6c93366 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -708,6 +708,7 @@ struct vmbus_channel {
u8 monitor_bit;
bool rescind; /* got rescind msg */
+ struct completion rescind_event;
u32 ringbuffer_gpadlhandle;
--
1.7.1
The mvneta controller provides an 8-bit register to update the pending
Tx descriptor counter, so at most 255 Tx descriptors can be added at
once. The current code assumes the caller of mvneta_txq_pend_desc_add()
takes care of this limit, but that is not the case: in some situations
(xmit_more flag) more than 255 descriptors are queued. When this
happens, the Tx descriptor counter register is updated with a wrong
value, which breaks the whole Tx queue management.
This patch fixes the issue by letting mvneta_txq_pend_desc_add() process
more than 255 Tx descriptors, flushing them to the register in chunks.
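The chunking logic boils down to the loop sketched below (a standalone
userspace illustration with made-up names, not the driver code itself):
the pending count is written out in pieces of at most 255.

#include <stdio.h>

#define TXQ_UPDATE_MAX	255	/* limit of the 8-bit pending field */

/* stand-in for the MVNETA_TXQ_UPDATE_REG write */
static void txq_update_write(unsigned int val)
{
	printf("write %u to the Tx update register\n", val);
}

static void txq_pend_desc_add(unsigned int pend_desc)
{
	while (pend_desc > 0) {
		unsigned int val = pend_desc < TXQ_UPDATE_MAX ?
				   pend_desc : TXQ_UPDATE_MAX;
		txq_update_write(val);
		pend_desc -= val;
	}
}

int main(void)
{
	txq_pend_desc_add(600);		/* writes 255, 255, 90 */
	return 0;
}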
Fixes: 2a90f7e1d5d0 ("net: mvneta: add xmit_more support")
Cc: stable(a)vger.kernel.org # 4.11+
Signed-off-by: Simon Guinot <simon.guinot(a)sequanux.org>
---
drivers/net/ethernet/marvell/mvneta.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 64a04975bcf8..027c08ce4e5d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
{
u32 val;
- /* Only 255 descriptors can be added at once ; Assume caller
- * process TX desriptors in quanta less than 256
- */
- val = pend_desc + txq->pending;
- mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ pend_desc += txq->pending;
+
+ /* Only 255 Tx descriptors can be added at once */
+ while (pend_desc > 0) {
+ val = min(pend_desc, 255);
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ pend_desc -= val;
+ }
txq->pending = 0;
}
@@ -2413,8 +2416,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
- if (!skb->xmit_more || netif_xmit_stopped(nq) ||
- txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+ if (!skb->xmit_more || netif_xmit_stopped(nq))
mvneta_txq_pend_desc_add(pp, txq, frags);
else
txq->pending += frags;
--
2.9.3
devm_gpiod_get_optional() returns an ERR_PTR value on failure, not NULL,
so checking its return value against NULL does not catch errors. Use
IS_ERR() instead.
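For context, a minimal sketch of the usual pattern for an optional GPIO
(illustrative probe helper, not the lan9303 code): an ERR_PTR value is a
real error to be detected with IS_ERR()/PTR_ERR(), while NULL simply
means no such GPIO was described, which is fine for an optional line.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_probe_reset_gpio(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);		/* e.g. -EPROBE_DEFER */
	if (!reset)
		dev_dbg(dev, "no reset GPIO defined\n");

	return 0;
}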
Signed-off-by: Pan Bian <bianpan2016(a)163.com>
---
drivers/net/dsa/lan9303-core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b471413..6d3fc8f 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -828,7 +828,7 @@ static void lan9303_probe_reset_gpio(struct lan9303 *chip,
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
GPIOD_OUT_LOW);
- if (!chip->reset_gpio) {
+ if (IS_ERR(chip->reset_gpio)) {
dev_dbg(chip->dev, "No reset GPIO defined\n");
return;
}
--
1.9.1
Hi Doug,
Here is the latest set of patches I have that we'd like to land in 4.15. There
are some driver fixes, including a qib fix that is marked for stable. We also
have a CM and SA patch thrown in the mix.
As always patches available for browsing at:
https://github.com/ddalessa/kernel/tree/for-4.15
---
Dennis Dalessandro (1):
IB/hfi1: Initialize bth1 in 16B rc ack builder
Don Hiatt (2):
IB/hfi1: Mask the path bits with the LMC for 16B RC Acks
IB/CM: Change sgid to IB GID when handling CM request
Jan Sokolowski (1):
IB/hfi1: Use 4096 for default active MTU in query_qp
Mike Marciniszyn (1):
IB/qib: Fix comparison error with qperf compare/swap test
Venkata Sandeep Dhanalakota (1):
IB/SA: Check dlid before SA agent queries for ClassPortInfo
drivers/infiniband/core/cm.c | 38 ++++++++++++++++++++++++++++++++++--
drivers/infiniband/core/sa_query.c | 10 +++++++++
drivers/infiniband/hw/hfi1/rc.c | 10 +++++----
drivers/infiniband/hw/hfi1/verbs.c | 2 +-
drivers/infiniband/hw/qib/qib_rc.c | 6 +++---
5 files changed, 55 insertions(+), 11 deletions(-)
--
-Denny
On Fri, 2017-10-13 at 04:36:41 UTC, Shriya wrote:
> The call to /proc/cpuinfo in turn calls cpufreq_quick_get() which
> returns the last frequency requested by the kernel, but may not
> reflect the actual frequency the processor is running at.
> This patch makes a call to cpufreq_get() instead which returns the
> current frequency reported by the hardware.
>
> Fixes : commit fb5153d05a7d ("powerpc: powernv: Implement
> ppc_md.get_proc_freq()")
> Cc: stable(a)vger.kernel.org
>
> Signed-off-by: Shriya <shriyak(a)linux.vnet.ibm.com>
Applied to powerpc next, thanks.
https://git.kernel.org/powerpc/c/cd77b5ce208c153260ed7882d8910f
cheers
Commit da2a68b3eb47 ("watchdog: Enable COMPILE_TEST where possible")
enabled building the Indy watchdog driver when COMPILE_TEST is enabled.
However, the driver references symbols that are only defined when
certain platforms are selected in the config; those platforms select
SGI_HAS_INDYDOG. Without it, link-time errors result, for example when
building a MIPS allyesconfig:
drivers/watchdog/indydog.o: In function `indydog_write':
indydog.c:(.text+0x18): undefined reference to `sgimc'
indydog.c:(.text+0x1c): undefined reference to `sgimc'
drivers/watchdog/indydog.o: In function `indydog_start':
indydog.c:(.text+0x54): undefined reference to `sgimc'
indydog.c:(.text+0x58): undefined reference to `sgimc'
drivers/watchdog/indydog.o: In function `indydog_stop':
indydog.c:(.text+0xa4): undefined reference to `sgimc'
drivers/watchdog/indydog.o:indydog.c:(.text+0xa8): more undefined
references to `sgimc' follow
make: *** [Makefile:1005: vmlinux] Error 1
Fix this by ensuring that CONFIG_INDYDOG can only be selected when the
necessary dependent platform symbols are built in.
Fixes: da2a68b3eb47 ("watchdog: Enable COMPILE_TEST where possible")
Signed-off-by: Matt Redfearn <matt.redfearn(a)mips.com>
Cc: <stable(a)vger.kernel.org> # 4.11+
---
drivers/watchdog/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ca200d1f310a..d96e2e7544fc 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1451,7 +1451,7 @@ config RC32434_WDT
config INDYDOG
tristate "Indy/I2 Hardware Watchdog"
- depends on SGI_HAS_INDYDOG || (MIPS && COMPILE_TEST)
+ depends on SGI_HAS_INDYDOG || (MIPS && COMPILE_TEST && SGI_HAS_INDYDOG)
help
Hardware driver for the Indy's/I2's watchdog. This is a
watchdog timer that will reboot the machine after a 60 second
--
2.7.4