From: Martin Kelly <mkelly(a)xevo.com>
commit c043ec1ca5baae63726aae32abbe003192bc6eec upstream.
Currently, we use int for buffer length and bytes_per_datum. However,
kfifo uses unsigned int for length and size_t for element size. We need
to make sure these match or we will have bugs related to overflow (in
the range between INT_MAX and UINT_MAX for length, for example).
In addition, set_bytes_per_datum uses size_t while bytes_per_datum is an
int, which would cause bugs for large values of bytes_per_datum.
Change buffer length to use unsigned int and bytes_per_datum to use
size_t.
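As a stand-alone illustration (a hypothetical userspace sketch, not the driver code), the overflow class being fixed shows up as soon as a length in the INT_MAX..UINT_MAX range lands in a signed field:
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* hypothetical values: a length kfifo could accept but int cannot hold */
    unsigned int requested_len = 3000000000u;   /* between INT_MAX and UINT_MAX */
    int stored_len = requested_len;             /* implementation-defined, typically negative */
    size_t bytes_per_datum = 8;

    printf("requested=%u, stored in int=%d\n", requested_len, stored_len);
    if (stored_len < 2)
        printf("a signed length field mishandles a value that kfifo could take\n");
    printf("kfifo would need %zu bytes\n", (size_t)requested_len * bytes_per_datum);
    return 0;
}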
Signed-off-by: Martin Kelly <mkelly(a)xevo.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
[bwh: Backported to 4.9:
- Drop change to iio_dma_buffer_set_length()
- Adjust filename, context]
Signed-off-by: Ben Hutchings <ben.hutchings(a)codethink.co.uk>
---
drivers/iio/buffer/kfifo_buf.c | 4 ++--
include/linux/iio/buffer.h | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 7ef9b13262a8..e44181f9eb36 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -19,7 +19,7 @@ struct iio_kfifo {
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
- int bytes_per_datum, int length)
+ size_t bytes_per_datum, unsigned int length)
{
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
@@ -71,7 +71,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
return 0;
}
-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
+static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
{
/* Avoid an invalid state */
if (length < 2)
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 70a5164f4728..821965c90070 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -61,7 +61,7 @@ struct iio_buffer_access_funcs {
int (*request_update)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*set_length)(struct iio_buffer *buffer, int length);
+ int (*set_length)(struct iio_buffer *buffer, unsigned int length);
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
@@ -96,8 +96,8 @@ struct iio_buffer_access_funcs {
* @watermark: [INTERN] number of datums to wait for poll/read.
*/
struct iio_buffer {
- int length;
- int bytes_per_datum;
+ unsigned int length;
+ size_t bytes_per_datum;
struct attribute_group *scan_el_attrs;
long *scan_mask;
bool scan_timestamp;
--
Ben Hutchings, Software Developer Codethink Ltd
https://www.codethink.co.uk/ Dale House, 35 Dale Street
Manchester, M1 2HF, United Kingdom
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From ca4c8fc97e669d7464a95e006d0ca4eadfa4e4bc Mon Sep 17 00:00:00 2001
From: Jean-Baptiste Maneyrol <jmaneyrol(a)invensense.com>
Date: Mon, 30 Apr 2018 12:14:09 +0200
Subject: [PATCH] iio: imu: inv_mpu6050: fix possible deadlock between mutex
and iio
Detected by kernel circular locking dependency checker.
We are locking iio mutex (iio_device_claim_direct_mode) after
locking our internal mutex. But when the buffer starts, iio first
locks its mutex and then we lock our internal one.
To avoid a possible deadlock, we need to use the same order
everywhere. So we change the ordering by taking the iio mutex first,
then our internal mutex.
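As a side note, the ordering rule itself can be sketched with two plain pthread mutexes (hypothetical names, not the driver code): as long as every path takes the lock standing in for the iio mutex before the one standing in for st->lock, the circular dependency reported by lockdep cannot form.
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iio_mutex = PTHREAD_MUTEX_INITIALIZER;   /* stand-in for the iio mutex */
static pthread_mutex_t drv_mutex = PTHREAD_MUTEX_INITIALIZER;   /* stand-in for st->lock */

static void read_raw_path(void)
{
    pthread_mutex_lock(&iio_mutex);    /* claim direct mode first */
    pthread_mutex_lock(&drv_mutex);    /* then the internal mutex */
    /* ... access the hardware ... */
    pthread_mutex_unlock(&drv_mutex);
    pthread_mutex_unlock(&iio_mutex);
}

static void buffer_start_path(void)
{
    pthread_mutex_lock(&iio_mutex);    /* iio core takes its mutex first */
    pthread_mutex_lock(&drv_mutex);
    /* ... enable the FIFO ... */
    pthread_mutex_unlock(&drv_mutex);
    pthread_mutex_unlock(&iio_mutex);
}

int main(void)
{
    read_raw_path();
    buffer_start_path();
    puts("consistent iio -> internal ordering, no deadlock possible");
    return 0;
}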
Fixes: 68cd6e5b206b ("iio: imu: inv_mpu6050: fix lock issues by using our own mutex")
Cc: stable(a)vger.kernel.org
Signed-off-by: Jean-Baptiste Maneyrol <jmaneyrol(a)invensense.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index aafa77766b08..7358935fb391 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -340,12 +340,9 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
int result;
int ret;
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- return result;
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto error_release;
+ return result;
switch (chan->type) {
case IIO_ANGL_VEL:
@@ -386,14 +383,11 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
result = inv_mpu6050_set_power_itg(st, false);
if (result)
goto error_power_off;
- iio_device_release_direct_mode(indio_dev);
return ret;
error_power_off:
inv_mpu6050_set_power_itg(st, false);
-error_release:
- iio_device_release_direct_mode(indio_dev);
return result;
}
@@ -407,9 +401,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&st->lock);
ret = inv_mpu6050_read_channel_data(indio_dev, chan, val);
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -532,17 +530,18 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
struct inv_mpu6050_state *st = iio_priv(indio_dev);
int result;
- mutex_lock(&st->lock);
/*
* we should only update scale when the chip is disabled, i.e.
* not running
*/
result = iio_device_claim_direct_mode(indio_dev);
if (result)
- goto error_write_raw_unlock;
+ return result;
+
+ mutex_lock(&st->lock);
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto error_write_raw_release;
+ goto error_write_raw_unlock;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -581,10 +580,9 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
}
result |= inv_mpu6050_set_power_itg(st, false);
-error_write_raw_release:
- iio_device_release_direct_mode(indio_dev);
error_write_raw_unlock:
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return result;
}
@@ -643,17 +641,18 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
return -EINVAL;
+ result = iio_device_claim_direct_mode(indio_dev);
+ if (result)
+ return result;
+
mutex_lock(&st->lock);
if (fifo_rate == st->chip_config.fifo_rate) {
result = 0;
goto fifo_rate_fail_unlock;
}
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- goto fifo_rate_fail_unlock;
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto fifo_rate_fail_release;
+ goto fifo_rate_fail_unlock;
d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
result = regmap_write(st->map, st->reg->sample_rate_div, d);
@@ -667,10 +666,9 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
fifo_rate_fail_power_off:
result |= inv_mpu6050_set_power_itg(st, false);
-fifo_rate_fail_release:
- iio_device_release_direct_mode(indio_dev);
fifo_rate_fail_unlock:
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
if (result)
return result;
The patch below does not apply to the 4.17-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From ca4c8fc97e669d7464a95e006d0ca4eadfa4e4bc Mon Sep 17 00:00:00 2001
From: Jean-Baptiste Maneyrol <jmaneyrol(a)invensense.com>
Date: Mon, 30 Apr 2018 12:14:09 +0200
Subject: [PATCH] iio: imu: inv_mpu6050: fix possible deadlock between mutex
and iio
Detected by kernel circular locking dependency checker.
We are locking iio mutex (iio_device_claim_direct_mode) after
locking our internal mutex. But when the buffer starts, iio first
locks its mutex and then we lock our internal one.
To avoid a possible deadlock, we need to use the same order
everywhere. So we change the ordering by taking the iio mutex first,
then our internal mutex.
Fixes: 68cd6e5b206b ("iio: imu: inv_mpu6050: fix lock issues by using our own mutex")
Cc: stable(a)vger.kernel.org
Signed-off-by: Jean-Baptiste Maneyrol <jmaneyrol(a)invensense.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index aafa77766b08..7358935fb391 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -340,12 +340,9 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
int result;
int ret;
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- return result;
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto error_release;
+ return result;
switch (chan->type) {
case IIO_ANGL_VEL:
@@ -386,14 +383,11 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
result = inv_mpu6050_set_power_itg(st, false);
if (result)
goto error_power_off;
- iio_device_release_direct_mode(indio_dev);
return ret;
error_power_off:
inv_mpu6050_set_power_itg(st, false);
-error_release:
- iio_device_release_direct_mode(indio_dev);
return result;
}
@@ -407,9 +401,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&st->lock);
ret = inv_mpu6050_read_channel_data(indio_dev, chan, val);
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -532,17 +530,18 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
struct inv_mpu6050_state *st = iio_priv(indio_dev);
int result;
- mutex_lock(&st->lock);
/*
* we should only update scale when the chip is disabled, i.e.
* not running
*/
result = iio_device_claim_direct_mode(indio_dev);
if (result)
- goto error_write_raw_unlock;
+ return result;
+
+ mutex_lock(&st->lock);
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto error_write_raw_release;
+ goto error_write_raw_unlock;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -581,10 +580,9 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
}
result |= inv_mpu6050_set_power_itg(st, false);
-error_write_raw_release:
- iio_device_release_direct_mode(indio_dev);
error_write_raw_unlock:
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return result;
}
@@ -643,17 +641,18 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
return -EINVAL;
+ result = iio_device_claim_direct_mode(indio_dev);
+ if (result)
+ return result;
+
mutex_lock(&st->lock);
if (fifo_rate == st->chip_config.fifo_rate) {
result = 0;
goto fifo_rate_fail_unlock;
}
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- goto fifo_rate_fail_unlock;
result = inv_mpu6050_set_power_itg(st, true);
if (result)
- goto fifo_rate_fail_release;
+ goto fifo_rate_fail_unlock;
d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
result = regmap_write(st->map, st->reg->sample_rate_div, d);
@@ -667,10 +666,9 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
fifo_rate_fail_power_off:
result |= inv_mpu6050_set_power_itg(st, false);
-fifo_rate_fail_release:
- iio_device_release_direct_mode(indio_dev);
fifo_rate_fail_unlock:
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
if (result)
return result;
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 5811375325420052fcadd944792a416a43072b7f Mon Sep 17 00:00:00 2001
From: Liu Bo <bo.li.liu(a)oracle.com>
Date: Wed, 31 Jan 2018 17:09:13 -0700
Subject: [PATCH] Btrfs: fix unexpected cow in run_delalloc_nocow
Fstests generic/475 provides a way to fail metadata reads while
checking if checksum exists for the inode inside run_delalloc_nocow(),
and csum_exist_in_range() interprets the error (-EIO) as the inode having
a checksum and makes its caller enter the cow path.
In case of free space inode, this ends up with a warning in
cow_file_range().
The same problem applies to btrfs_cross_ref_exist() since it may also
read metadata in between.
With this, run_delalloc_nocow() bails out when errors occur at the two
places.
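For reference, the return-value convention that trips the old check can be reduced to a hypothetical userspace sketch (1 = csum exists, 0 = none, negative = error); a plain truth test sends -EIO down the "exists" branch:
#include <errno.h>
#include <stdio.h>

static int csum_lookup(int simulate_read_failure)
{
    return simulate_read_failure ? -EIO : 0;
}

int main(void)
{
    int ret = csum_lookup(1);

    if (ret)        /* old check: -EIO is truthy, so the caller enters the cow path */
        printf("old check: enter cow path (ret=%d)\n", ret);
    if (ret < 0)    /* fixed check: propagate the error instead */
        printf("new check: bail out with %d\n", ret);
    return 0;
}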
cc: <stable(a)vger.kernel.org> v2.6.28+
Fixes: 17d217fe970d ("Btrfs: fix nodatasum handling in balancing code")
Signed-off-by: Liu Bo <bo.li.liu(a)oracle.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6504e63b2317..491a7397f6fa 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1256,6 +1256,8 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
list_del(&sums->list);
kfree(sums);
}
+ if (ret < 0)
+ return ret;
return 1;
}
@@ -1388,10 +1390,23 @@ static noinline int run_delalloc_nocow(struct inode *inode,
goto out_check;
if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out_check;
- if (btrfs_cross_ref_exist(root, ino,
- found_key.offset -
- extent_offset, disk_bytenr))
+ ret = btrfs_cross_ref_exist(root, ino,
+ found_key.offset -
+ extent_offset, disk_bytenr);
+ if (ret) {
+ /*
+ * ret could be -EIO if the above fails to read
+ * metadata.
+ */
+ if (ret < 0) {
+ if (cow_start != (u64)-1)
+ cur_offset = cow_start;
+ goto error;
+ }
+
+ WARN_ON_ONCE(nolock);
goto out_check;
+ }
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
@@ -1409,10 +1424,22 @@ static noinline int run_delalloc_nocow(struct inode *inode,
* this ensure that csum for a given extent are
* either valid or do not exist.
*/
- if (csum_exist_in_range(fs_info, disk_bytenr,
- num_bytes)) {
+ ret = csum_exist_in_range(fs_info, disk_bytenr,
+ num_bytes);
+ if (ret) {
if (!nolock)
btrfs_end_write_no_snapshotting(root);
+
+ /*
+ * ret could be -EIO if the above fails to read
+ * metadata.
+ */
+ if (ret < 0) {
+ if (cow_start != (u64)-1)
+ cur_offset = cow_start;
+ goto error;
+ }
+ WARN_ON_ONCE(nolock);
goto out_check;
}
if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b5c40d598f5408bd0ca22dfffa82f03cd9433f23 Mon Sep 17 00:00:00 2001
From: Omar Sandoval <osandov(a)fb.com>
Date: Tue, 22 May 2018 15:02:12 -0700
Subject: [PATCH] Btrfs: fix clone vs chattr NODATASUM race
In btrfs_clone_files(), we must check the NODATASUM flag while the
inodes are locked. Otherwise, it's possible that btrfs_ioctl_setflags()
will change the flags after we check and we can end up with a partly
checksummed file.
The race window is only a few instructions in size, between the if and
the locks, which is:
3834 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
3835 return -EISDIR;
where the setflags must be run and toggle the NODATASUM flag (provided
the file size is 0). The clone will block on the inode lock, setflags
takes the inode lock, changes flags, releases the lock and clone continues.
Not impossible but still needs a lot of bad luck to hit unintentionally.
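The pattern of the fix is the usual check-under-lock one; a hypothetical sketch (a single flag standing in for the src/dst NODATASUM comparison, pthreads standing in for the inode lock):
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
static int nodatasum_mismatch;     /* toggled by a concurrent "setflags" */

static int clone_files(void)
{
    int ret = 0;

    pthread_mutex_lock(&inode_lock);
    /* the flag is examined only while the lock is held, so setflags
     * cannot change it between the check and the clone */
    if (nodatasum_mismatch)
        ret = -1;                  /* stands in for -EINVAL */
    pthread_mutex_unlock(&inode_lock);
    return ret;
}

int main(void)
{
    printf("clone: %d\n", clone_files());
    nodatasum_mismatch = 1;        /* simulate chattr flipping NODATASUM */
    printf("clone after setflags: %d\n", clone_files());
    return 0;
}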
Fixes: 0e7b824c4ef9 ("Btrfs: don't make a file partly checksummed through file clone")
CC: stable(a)vger.kernel.org # 4.4+
Signed-off-by: Omar Sandoval <osandov(a)fb.com>
Reviewed-by: Nikolay Borisov <nborisov(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
[ update changelog ]
Signed-off-by: David Sterba <dsterba(a)suse.com>
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 743c4f1b8001..b9b779a4ab6e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3808,11 +3808,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
src->i_sb != inode->i_sb)
return -EXDEV;
- /* don't make the dst file partly checksummed */
- if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
- return -EINVAL;
-
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
return -EISDIR;
@@ -3822,6 +3817,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
inode_lock(src);
}
+ /* don't make the dst file partly checksummed */
+ if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+ (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/* determine range to clone */
ret = -EINVAL;
if (off + len > src->i_size || off + len < off)
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From ff7c9917143b3a6cf2fa61212a32d67cf259bf9c Mon Sep 17 00:00:00 2001
From: Srinivas Pandruvada <srinivas.pandruvada(a)linux.intel.com>
Date: Mon, 18 Jun 2018 12:47:45 -0700
Subject: [PATCH] cpufreq: intel_pstate: Fix scaling max/min limits with Turbo
3.0
When scaling max/min settings are changed, internally they are converted
to a ratio using the max turbo 1 core turbo frequency. This works fine
when 1 core max is same irrespective of the core. But under Turbo 3.0,
this will not be the case. For example:
Core 0: max turbo pstate: 43 (4.3GHz)
Core 1: max turbo pstate: 45 (4.5GHz)
In this case the 1-core turbo ratio will be the maximum of all, so it will be
45 (4.5GHz). Suppose scaling max is set to 4GHz (ratio 40) for all cores,
then on core 0 it will be
 = max_state * policy->max / max_freq
 = 43 * (4000000 / 4500000) = 38 (3.8GHz)
which is 200MHz less than desired.
On core 1, it will be correctly set to ratio 40 (4GHz). The same holds true
for the scaling min frequency limit. So this requires using the correct turbo
max frequency for core 0, which in this case is 4.3GHz. We need to
adjust the per-CPU cpu->pstate.turbo_freq using the maximum HWP ratio of that
core.
This change uses the HWP capability of a core to adjust the max turbo
frequency. But since Broadwell HWP doesn't use ratios in the HWP
capabilities, we have to use the legacy max 1-core turbo ratio. This is not
a problem, as the HWP capabilities don't differ among cores on Broadwell.
We need to check for a non-Broadwell CPU model when applying this change,
though.
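The arithmetic from the example can be reproduced in a few lines (hypothetical userspace sketch, values taken from the changelog, ratio steps assumed to be 100MHz):
#include <stdio.h>

int main(void)
{
    unsigned int core0_max_state = 43;         /* this core's real max ratio (4.3GHz) */
    unsigned int pkg_turbo_khz   = 4500000;    /* package-wide 1-core turbo (4.5GHz) */
    unsigned int core0_turbo_khz = 4300000;    /* this core's HWP max frequency */
    unsigned int policy_max_khz  = 4000000;    /* user asks for 4.0GHz */

    unsigned int wrong = core0_max_state * policy_max_khz / pkg_turbo_khz;   /* 38 */
    unsigned int right = core0_max_state * policy_max_khz / core0_turbo_khz; /* 40 */

    printf("ratio using package turbo freq: %u (%.1f GHz)\n", wrong, wrong * 0.1);
    printf("ratio using per-core HWP max:   %u (%.1f GHz)\n", right, right * 0.1);
    return 0;
}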
Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada(a)linux.intel.com>
Cc: 4.6+ <stable(a)vger.kernel.org> # 4.6+
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki(a)intel.com>
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1de5ec8d5ea3..ece120da3353 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -294,6 +294,7 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;
@@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
- cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ if (hwp_active && !hwp_mode_bdw) {
+ unsigned int phy_max, current_max;
+
+ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ }
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2467,28 +2476,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
+#define INTEL_PSTATE_HWP_BROADWELL 0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
- { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+ ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+ ICPU_HWP(X86_MODEL_ANY, 0),
{}
};
static int __init intel_pstate_init(void)
{
+ const struct x86_cpu_id *id;
int rc;
if (no_load)
return -ENODEV;
- if (x86_match_cpu(hwp_support_ids)) {
+ id = x86_match_cpu(hwp_support_ids);
+ if (id) {
copy_cpu_funcs(&core_funcs);
if (!no_hwp) {
hwp_active++;
+ hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
}
} else {
- const struct x86_cpu_id *id;
-
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 988a35f8da1dec5a8cd2788054d1e717be61bf25 Mon Sep 17 00:00:00 2001
From: Tetsuo Handa <penguin-kernel(a)I-love.SAKURA.ne.jp>
Date: Fri, 11 May 2018 19:54:19 +0900
Subject: [PATCH] printk: fix possible reuse of va_list variable
I noticed that there is a possibility that printk_safe_log_store() causes
a kernel oops because the "args" parameter is passed to vsnprintf() again when
atomic_cmpxchg() detects that we raced. Fix this by using va_copy().
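The rule being applied is plain C stdarg semantics: a va_list may only be walked once, so any retry has to format from a va_copy()'d copy. A self-contained sketch with hypothetical helper names (not the printk code):
#include <stdarg.h>
#include <stdio.h>

static int log_store(char *buf, size_t len, const char *fmt, va_list args)
{
    va_list ap;
    int n = 0;
    int attempt;

    for (attempt = 0; attempt < 2; attempt++) {  /* pretend the first attempt raced */
        va_copy(ap, args);                       /* never consume 'args' directly */
        n = vsnprintf(buf, len, fmt, ap);
        va_end(ap);
    }
    return n;
}

static int log_printf(char *buf, size_t len, const char *fmt, ...)
{
    va_list args;
    int n;

    va_start(args, fmt);
    n = log_store(buf, len, fmt, args);
    va_end(args);
    return n;
}

int main(void)
{
    char buf[64];

    log_printf(buf, sizeof(buf), "pid=%d comm=%s", 42, "demo");
    puts(buf);
    return 0;
}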
Link: http://lkml.kernel.org/r/201805112002.GIF21216.OFVHFOMLJtQFSO@I-love.SAKURA…
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Steven Rostedt <rostedt(a)goodmis.org>
Cc: dvyukov(a)google.com
Cc: syzkaller(a)googlegroups.com
Cc: fengguang.wu(a)intel.com
Cc: linux-kernel(a)vger.kernel.org
Signed-off-by: Tetsuo Handa <penguin-kernel(a)I-love.SAKURA.ne.jp>
Fixes: 42a0bb3f71383b45 ("printk/nmi: generic solution for safe printk in NMI")
Cc: 4.7+ <stable(a)vger.kernel.org> # v4.7+
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky(a)gmail.com>
Signed-off-by: Petr Mladek <pmladek(a)suse.com>
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 3e3c2004bb23..449d67edfa4b 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -82,6 +82,7 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
{
int add;
size_t len;
+ va_list ap;
again:
len = atomic_read(&s->len);
@@ -100,7 +101,9 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
if (!len)
smp_rmb();
- add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
+ va_copy(ap, args);
+ add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
+ va_end(ap);
if (!add)
return 0;
The patch below does not apply to the 4.17-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From c2deae44616dab0112d965a0dc72d053b5727b4b Mon Sep 17 00:00:00 2001
From: Koen Vandeputte <koen.vandeputte(a)ncentric.com>
Date: Mon, 15 Jan 2018 10:36:08 +0100
Subject: [PATCH] PCI: dwc: Fix enumeration end when reaching root subordinate
The subordinate value indicates the highest bus number which can be
reached downstream through a certain device.
Commit a20c7f36bd3d ("PCI: Do not allocate more buses than available in
parent")
ensures that downstream devices cannot assign bus numbers higher than the
upstream device subordinate number, which was indeed illogical.
By default, dw_pcie_setup_rc() inits the Root Complex subordinate to a
value of 0x01.
Due to this, combined with the above commit, enumeration stops digging
deeper downstream as soon as bus number 0x01 has been assigned, which is
always the case for a bridge device.
This results in all devices behind a bridge bus remaining undetected, as
these would be connected to bus 0x02 or higher.
Fix this by initializing the RC to a subordinate value of 0xff, which does
not alter hardware behaviour in any way, but informs the probing function
pci_scan_bridge(), which later reads this value back from the register.
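For reference, the value 0x00ff0100 is just the type 1 header bus-number bytes packed into one dword (byte 0 = primary, byte 1 = secondary, byte 2 = subordinate, byte 3 = secondary latency timer); a hypothetical decode sketch:
#include <stdio.h>
#include <stdint.h>

static void decode(uint32_t val)
{
    printf("0x%08x -> primary=%u secondary=%u subordinate=0x%02x\n",
           val, val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff);
}

int main(void)
{
    decode(0x00010100);    /* old value: subordinate capped at bus 0x01 */
    decode(0x00ff0100);    /* fixed value: subordinate allows buses up to 0xff */
    return 0;
}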
The following nasty errors during boot are also fixed by this:
[ 0.459145] pci_bus 0000:02: busn_res: can not insert [bus 02-ff]
under [bus 01] (conflicts with (null) [bus 01])
...
[ 0.464515] pci_bus 0000:03: [bus 03] partially hidden behind bridge
0000:01 [bus 01]
...
[ 0.464892] pci_bus 0000:04: [bus 04] partially hidden behind bridge
0000:01 [bus 01]
...
[ 0.466488] pci_bus 0000:05: [bus 05] partially hidden behind bridge
0000:01 [bus 01]
[ 0.466506] pci_bus 0000:02: busn_res: [bus 02-ff] end is updated to
05
[ 0.466517] pci_bus 0000:02: busn_res: can not insert [bus 02-05]
under [bus 01] (conflicts with (null) [bus 01])
[ 0.466534] pci_bus 0000:02: [bus 02-05] partially hidden behind
bridge 0000:01 [bus 01]
Fixes: a20c7f36bd3d ("PCI: Do not allocate more buses than available in parent")
Tested-by: Niklas Cassel <niklas.cassel(a)axis.com>
Tested-by: Fabio Estevam <fabio.estevam(a)nxp.com>
Tested-by: Sebastian Reichel <sebastian.reichel(a)collabora.co.uk>
Signed-off-by: Koen Vandeputte <koen.vandeputte(a)ncentric.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi(a)arm.com>
Reviewed-by: Mika Westerberg <mika.westerberg(a)linux.intel.com>
Acked-by: Lucas Stach <l.stach(a)pengutronix.de>
Cc: <stable(a)vger.kernel.org> # 4.15
Cc: Binghui Wang <wangbinghui(a)hisilicon.com>
Cc: Bjorn Helgaas <bhelgaas(a)google.com>
Cc: Jesper Nilsson <jesper.nilsson(a)axis.com>
Cc: Jianguo Sun <sunjianguo1(a)huawei.com>
Cc: Jingoo Han <jingoohan1(a)gmail.com>
Cc: Kishon Vijay Abraham I <kishon(a)ti.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi(a)arm.com>
Cc: Lucas Stach <l.stach(a)pengutronix.de>
Cc: Mika Westerberg <mika.westerberg(a)linux.intel.com>
Cc: Minghuan Lian <minghuan.Lian(a)freescale.com>
Cc: Mingkai Hu <mingkai.hu(a)freescale.com>
Cc: Murali Karicheri <m-karicheri2(a)ti.com>
Cc: Pratyush Anand <pratyush.anand(a)gmail.com>
Cc: Richard Zhu <hongxing.zhu(a)nxp.com>
Cc: Roy Zang <tie-fei.zang(a)freescale.com>
Cc: Shawn Guo <shawn.guo(a)linaro.org>
Cc: Stanimir Varbanov <svarbanov(a)mm-sol.com>
Cc: Thomas Petazzoni <thomas.petazzoni(a)free-electrons.com>
Cc: Xiaowei Song <songxiaowei(a)hisilicon.com>
Cc: Zhou Wang <wangzhou1(a)hisilicon.com>
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 8de2d5c69b1d..dc9303abda42 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* setup bus numbers */
val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
val &= 0xff000000;
- val |= 0x00010100;
+ val |= 0x00ff0100;
dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
/* setup command register */
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 3f77f244d8ec28e3a0a81240ffac7d626390060c Mon Sep 17 00:00:00 2001
From: Martin Kaiser <martin(a)kaiser.cx>
Date: Mon, 18 Jun 2018 22:41:03 +0200
Subject: [PATCH] mtd: rawnand: mxc: set spare area size register explicitly
The v21 version of the NAND flash controller contains a Spare Area Size
Register (SPAS) at offset 0x10. Its setting defaults to the maximum
spare area size of 218 bytes. The size that is set in this register is
used by the controller when it calculates the ECC bytes internally in
hardware.
Usually, this register is updated from settings in the IIM fuses when
the system is booting from NAND flash. For other boot media, however,
the SPAS register remains at the default setting, which may not work for
the particular flash chip on the board. The same goes for flash chips
whose configuration cannot be set in the IIM fuses (e.g. chips with 2k
sector size and 128 bytes spare area size can't be configured in the IIM
fuses on imx25 systems).
Set the SPAS register explicitly during the preset operation. Derive the
register value from mtd->oobsize that was detected during probe by
decoding the flash chip's ID bytes.
While at it, rename the define for the spare area register's offset to
NFC_V21_RSLTSPARE_AREA. The register at offset 0x10 on v1 controllers is
different from the register on v21 controllers.
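The register value itself is simple arithmetic; a hypothetical sketch of the oobsize-to-SPAS conversion (the register takes the spare area size in 16-bit half-words):
#include <stdio.h>

int main(void)
{
    unsigned int oobsizes[] = { 64, 128, 218 };   /* hypothetical oobsize values in bytes */
    unsigned int i;

    for (i = 0; i < sizeof(oobsizes) / sizeof(oobsizes[0]); i++)
        printf("oobsize %3u bytes -> SPAS value %u half-words\n",
               oobsizes[i], oobsizes[i] / 2);
    return 0;
}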
Fixes: d484018 ("mtd: mxc_nand: set NFC registers after reset")
Cc: stable(a)vger.kernel.org
Signed-off-by: Martin Kaiser <martin(a)kaiser.cx>
Reviewed-by: Sascha Hauer <s.hauer(a)pengutronix.de>
Reviewed-by: Miquel Raynal <miquel.raynal(a)bootlin.com>
Signed-off-by: Boris Brezillon <boris.brezillon(a)bootlin.com>
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 45786e707b7b..26cef218bb43 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -48,7 +48,7 @@
#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
+ /* spare area size in 16-bit half-words */
+ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
/* Unlock the internal RAM Buffer */
writew(0x2, NFC_V1_V2_CONFIG);
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 3f77f244d8ec28e3a0a81240ffac7d626390060c Mon Sep 17 00:00:00 2001
From: Martin Kaiser <martin(a)kaiser.cx>
Date: Mon, 18 Jun 2018 22:41:03 +0200
Subject: [PATCH] mtd: rawnand: mxc: set spare area size register explicitly
The v21 version of the NAND flash controller contains a Spare Area Size
Register (SPAS) at offset 0x10. Its setting defaults to the maximum
spare area size of 218 bytes. The size that is set in this register is
used by the controller when it calculates the ECC bytes internally in
hardware.
Usually, this register is updated from settings in the IIM fuses when
the system is booting from NAND flash. For other boot media, however,
the SPAS register remains at the default setting, which may not work for
the particular flash chip on the board. The same goes for flash chips
whose configuration cannot be set in the IIM fuses (e.g. chips with 2k
sector size and 128 bytes spare area size can't be configured in the IIM
fuses on imx25 systems).
Set the SPAS register explicitly during the preset operation. Derive the
register value from mtd->oobsize that was detected during probe by
decoding the flash chip's ID bytes.
While at it, rename the define for the spare area register's offset to
NFC_V21_RSLTSPARE_AREA. The register at offset 0x10 on v1 controllers is
different from the register on v21 controllers.
Fixes: d484018 ("mtd: mxc_nand: set NFC registers after reset")
Cc: stable(a)vger.kernel.org
Signed-off-by: Martin Kaiser <martin(a)kaiser.cx>
Reviewed-by: Sascha Hauer <s.hauer(a)pengutronix.de>
Reviewed-by: Miquel Raynal <miquel.raynal(a)bootlin.com>
Signed-off-by: Boris Brezillon <boris.brezillon(a)bootlin.com>
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 45786e707b7b..26cef218bb43 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -48,7 +48,7 @@
#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
+ /* spare area size in 16-bit half-words */
+ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
/* Unlock the internal RAM Buffer */
writew(0x2, NFC_V1_V2_CONFIG);
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From e9893e6fa932f42c90c4ac5849fa9aa0f0f00a34 Mon Sep 17 00:00:00 2001
From: Abhishek Sahu <absahu(a)codeaurora.org>
Date: Wed, 13 Jun 2018 14:32:36 +0530
Subject: [PATCH] mtd: rawnand: fix return value check for bad block status
A positive return value from read_oob() is creating false BAD
blocks. For some NAND controllers, OOB bytes will be
protected with ECC and read_oob() will return the number of bitflips.
If there is any bitflip in the ECC-protected OOB bytes of the BAD block
status page, then that block gets treated as BAD.
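A hypothetical sketch of the convention being fixed (negative = real I/O error, positive = number of corrected bitflips, only the marker byte decides whether the block is bad):
#include <stdio.h>

static int block_is_bad(int read_oob_res, unsigned char marker)
{
    if (read_oob_res < 0)
        return read_oob_res;            /* real I/O error, propagate it */
    if (read_oob_res > 0)
        printf("%d bitflip(s) corrected, OOB data still valid\n", read_oob_res);
    return marker != 0xff;              /* bad only if the marker byte says so */
}

int main(void)
{
    printf("res=2,  marker=0xff -> bad=%d\n", block_is_bad(2, 0xff));
    printf("res=-5, marker=0xff -> bad=%d\n", block_is_bad(-5, 0xff));
    return 0;
}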
Fixes: c120e75e0e7d ("mtd: nand: use read_oob() instead of cmdfunc() for bad block check")
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Abhishek Sahu <absahu(a)codeaurora.org>
Reviewed-by: Miquel Raynal <miquel.raynal(a)bootlin.com>
Signed-off-by: Boris Brezillon <boris.brezillon(a)bootlin.com>
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 10c4f9919850..b01d15ec4c56 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
for (; page < page_end; page++) {
res = chip->ecc.read_oob(mtd, chip, page);
- if (res)
+ if (res < 0)
return res;
bad = chip->oob_poi[chip->badblockpos];
The patch below does not apply to the 4.17-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 79ca484b613041ca223f74b34608bb6f5221724b Mon Sep 17 00:00:00 2001
From: Tokunori Ikegami <ikegami(a)allied-telesis.co.jp>
Date: Wed, 30 May 2018 18:32:29 +0900
Subject: [PATCH] mtd: cfi_cmdset_0002: Change erase functions to check chip
good only
Currently the functions check both chip ready and chip good.
But chip ready is not enough to determine the operation status,
so change them to check chip good instead.
The retry logic is kept so that the existing error handling remains.
Signed-off-by: Tokunori Ikegami <ikegami(a)allied-telesis.co.jp>
Reviewed-by: Joakim Tjernlund <Joakim.Tjernlund(a)infinera.com>
Cc: Chris Packham <chris.packham(a)alliedtelesis.co.nz>
Cc: Brian Norris <computersforpeace(a)gmail.com>
Cc: David Woodhouse <dwmw2(a)infradead.org>
Cc: Boris Brezillon <boris.brezillon(a)free-electrons.com>
Cc: Marek Vasut <marek.vasut(a)gmail.com>
Cc: Richard Weinberger <richard(a)nod.at>
Cc: Cyrille Pitchen <cyrille.pitchen(a)wedev4u.fr>
Cc: linux-mtd(a)lists.infradead.org
Cc: stable(a)vger.kernel.org
Signed-off-by: Boris Brezillon <boris.brezillon(a)bootlin.com>
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 217db661943d..d8e3a737c62f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -2294,12 +2294,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->erase_suspended = 0;
}
- if (chip_ready(map, adr))
+ if (chip_good(map, adr, map_word_ff(map)))
break;
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__);
+ ret = -EIO;
break;
}
@@ -2307,15 +2308,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
- if (!chip_good(map, adr, map_word_ff(map))) {
+ if (ret) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
goto retry;
-
- ret = -EIO;
+ }
}
chip->state = FL_READY;
@@ -2388,7 +2389,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->erase_suspended = 0;
}
- if (chip_ready(map, adr)) {
+ if (chip_good(map, adr, map_word_ff(map))) {
xip_enable(map, chip, adr);
break;
}
@@ -2397,6 +2398,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__);
+ ret = -EIO;
break;
}
@@ -2404,15 +2406,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
- if (!chip_good(map, adr, map_word_ff(map))) {
+ if (ret) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
goto retry;
-
- ret = -EIO;
+ }
}
chip->state = FL_READY;
The patch below does not apply to the 4.17-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 85a82e28b023de9b259a86824afbd6ba07bd6475 Mon Sep 17 00:00:00 2001
From: Tokunori Ikegami <ikegami(a)allied-telesis.co.jp>
Date: Wed, 30 May 2018 18:32:27 +0900
Subject: [PATCH] mtd: cfi_cmdset_0002: Change definition naming to retry write
operation
The definition can be used for other program and erase operations also.
So change the naming to MAX_RETRIES from MAX_WORD_RETRIES.
Signed-off-by: Tokunori Ikegami <ikegami(a)allied-telesis.co.jp>
Reviewed-by: Joakim Tjernlund <Joakim.Tjernlund(a)infinera.com>
Cc: Chris Packham <chris.packham(a)alliedtelesis.co.nz>
Cc: Brian Norris <computersforpeace(a)gmail.com>
Cc: David Woodhouse <dwmw2(a)infradead.org>
Cc: Boris Brezillon <boris.brezillon(a)free-electrons.com>
Cc: Marek Vasut <marek.vasut(a)gmail.com>
Cc: Richard Weinberger <richard(a)nod.at>
Cc: Cyrille Pitchen <cyrille.pitchen(a)wedev4u.fr>
Cc: linux-mtd(a)lists.infradead.org
Cc: stable(a)vger.kernel.org
Signed-off-by: Boris Brezillon <boris.brezillon(a)bootlin.com>
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 75d2c16029fd..62ada405fe93 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -42,7 +42,7 @@
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0
-#define MAX_WORD_RETRIES 3
+#define MAX_RETRIES 3
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
@@ -1646,7 +1646,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_WORD_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES)
goto retry;
ret = -EIO;
@@ -2105,7 +2105,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_WORD_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES)
goto retry;
ret = -EIO;
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 1bc0299d976e000ececc6acd76e33b4582646cb7 Mon Sep 17 00:00:00 2001
From: Mike Marciniszyn <mike.marciniszyn(a)intel.com>
Date: Thu, 31 May 2018 11:30:09 -0700
Subject: [PATCH] IB/hfi1: Fix user context tail allocation for DMA_RTAIL
The following code fails to allocate a buffer for the
tail address that the hardware DMAs into when the user
context DMA_RTAIL is set.
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
}
So the rcvhdrtail_kvaddr would then be NULL.
The mmap logic fails to check for a NULL rcvhdrtail_kvaddr.
The fix is to test for both the user and kernel DMA_RTAIL options
during the allocation, as well as testing for a NULL
rcvhdrtail_kvaddr during the mmap processing.
Additionally, all downstream testing of the capmask for DMA_RTAIL
has been eliminated in favor of testing rcvhdrtail_kvaddr.
Cc: <stable(a)vger.kernel.org> # 4.9.x
Reviewed-by: Michael J. Ruhl <michael.j.ruhl(a)intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn(a)intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro(a)intel.com>
Signed-off-by: Jason Gunthorpe <jgg(a)mellanox.com>
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 68580cb2ae1e..f75080d63142 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6841,7 +6841,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
}
rcvmask = HFI1_RCVCTRL_CTXT_ENB;
/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
- rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+ rcvmask |= rcd->rcvhdrtail_kvaddr ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
hfi1_rcvctrl(dd, rcvmask, rcd);
hfi1_rcd_put(rcd);
@@ -8367,7 +8367,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
u32 tail;
int present;
- if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
+ if (!rcd->rcvhdrtail_kvaddr)
present = (rcd->seq_cnt ==
rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
else /* is RDMA rtail */
@@ -11843,7 +11843,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
rcd->rcvhdrq_dma);
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
+ if (rcd->rcvhdrtail_kvaddr)
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
rcd->rcvhdrqtailaddr_dma);
rcd->seq_cnt = 1;
@@ -11923,7 +11923,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
+ if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
/* See comment on RcvCtxtCtrl.TailUpd above */
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c9d23c37a371..0fc4aa9455c3 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -505,7 +505,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EINVAL;
goto done;
}
- if (flags & VM_WRITE) {
+ if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
ret = -EPERM;
goto done;
}
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 3feecf926322..4a478ee0a79b 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1853,7 +1853,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
u64 reg;
if (!rcd->rcvhdrq) {
- dma_addr_t dma_hdrqtail;
gfp_t gfp_flags;
/*
@@ -1878,13 +1877,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
goto bail;
}
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
+ HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
- gfp_flags);
+ &dd->pcidev->dev, PAGE_SIZE,
+ &rcd->rcvhdrqtailaddr_dma, gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
- rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
}
rcd->rcvhdrq_size = amt;
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From a93a0a31111231bb1949f4a83b17238f0fa32d6a Mon Sep 17 00:00:00 2001
From: "Michael J. Ruhl" <michael.j.ruhl(a)intel.com>
Date: Wed, 2 May 2018 06:43:07 -0700
Subject: [PATCH] IB/hfi1: Reorder incorrect send context disable
User send context integrity bits are cleared before the context is
disabled. If the send context is still processing data, any packets
that need those integrity bits will cause an error and halt the send
context.
During the disable handling, the driver waits for the context to drain.
If the context is halted, the driver will eventually time out because
the context won't drain and then incorrectly bounce the link.
Reorder the bit clearing and the context disable.
Examine the software state and send context status as well as the
egress status to determine if a send context is in the halted state.
Promote the check macros to static functions for consistency with the
new check and to follow kernel style.
Remove an unused define that refers to the egress timeout.
Cc: <stable(a)vger.kernel.org> # 4.9.x
Reviewed-by: Mitko Haralanov <mitko.haralanov(a)intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn(a)intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl(a)intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro(a)intel.com>
Signed-off-by: Doug Ledford <dledford(a)redhat.com>
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1b778fd16a32..c9d23c37a371 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -689,8 +689,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
* checks to default and disable the send context.
*/
if (uctxt->sc) {
- set_pio_integrity(uctxt->sc);
sc_disable(uctxt->sc);
+ set_pio_integrity(uctxt->sc);
}
hfi1_free_ctxt_rcv_groups(uctxt);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 40dac4d16eb8..9cac15d10c4f 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -50,8 +50,6 @@
#include "qp.h"
#include "trace.h"
-#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
-
#define SC(name) SEND_CTXT_##name
/*
* Send Context functions
@@ -961,15 +959,40 @@ void sc_disable(struct send_context *sc)
}
/* return SendEgressCtxtStatus.PacketOccupancy */
-#define packet_occupancy(r) \
- (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
- >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
+static u64 packet_occupancy(u64 reg)
+{
+ return (reg &
+ SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
+ >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
+}
/* is egress halted on the context? */
-#define egress_halted(r) \
- ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
+static bool egress_halted(u64 reg)
+{
+ return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
+}
-/* wait for packet egress, optionally pause for credit return */
+/* is the send context halted? */
+static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
+{
+ return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
+ SC(STATUS_CTXT_HALTED_SMASK));
+}
+
+/**
+ * sc_wait_for_packet_egress
+ * @sc: valid send context
+ * @pause: wait for credit return
+ *
+ * Wait for packet egress, optionally pause for credit return
+ *
+ * Egress halt and Context halt are not necessarily the same thing, so
+ * check for both.
+ *
+ * NOTE: The context halt bit may not be set immediately. Because of this,
+ * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW
+ * context bit to determine if the context is halted.
+ */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
struct hfi1_devdata *dd = sc->dd;
@@ -981,8 +1004,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
reg_prev = reg;
reg = read_csr(dd, sc->hw_context * 8 +
SEND_EGRESS_CTXT_STATUS);
- /* done if egress is stopped */
- if (egress_halted(reg))
+ /* done if any halt bits, SW or HW are set */
+ if (sc->flags & SCF_HALTED ||
+ is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
break;
reg = packet_occupancy(reg);
if (reg == 0)
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 08bb558ac11ab944e0539e78619d7b4c356278bd Mon Sep 17 00:00:00 2001
From: Jack Morgenstein <jackm(a)dev.mellanox.co.il>
Date: Wed, 23 May 2018 15:30:30 +0300
Subject: [PATCH] IB/core: Make testing MR flags for writability a static
inline function
Make the MR writability flags check, which is performed in umem.c,
a static inline function in the file ib_verbs.h.
This allows the function to be used by low-level infiniband drivers.
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Jason Gunthorpe <jgg(a)mellanox.com>
Signed-off-by: Jack Morgenstein <jackm(a)dev.mellanox.co.il>
Signed-off-by: Leon Romanovsky <leonro(a)mellanox.com>
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2b6c9b516070..d76455edd292 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -119,16 +119,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
- /*
- * We ask for writable memory if any of the following
- * access flags are set. "Local write" and "remote write"
- * obviously require write access. "Remote atomic" can do
- * things like fetch and add, which will modify memory, and
- * "MW bind" can change permissions by binding a window.
- */
- umem->writable = !!(access &
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+ umem->writable = ib_access_writable(access);
if (access & IB_ACCESS_ON_DEMAND) {
ret = ib_umem_odp_get(context, umem, access);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9fc8a825aa28..20fa5c591e81 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3734,6 +3734,20 @@ static inline int ib_check_mr_access(int flags)
return 0;
}
+static inline bool ib_access_writable(int access_flags)
+{
+ /*
+ * We have writable memory backing the MR if any of the following
+ * access flags are set. "Local write" and "remote write" obviously
+ * require write access. "Remote atomic" can do things like fetch and
+ * add, which will modify memory, and "MW bind" can change permissions
+ * by binding a window.
+ */
+ return access_flags &
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
+}
+
/**
* ib_check_mr_status: lightweight check of MR status.
* This routine may provide status checks on a selected
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 3ab2011ea368ec3433ad49e1b9e1c7b70d2e65df Mon Sep 17 00:00:00 2001
From: Tadeusz Struk <tadeusz.struk(a)intel.com>
Date: Tue, 22 May 2018 14:37:18 -0700
Subject: [PATCH] tpm: fix race condition in tpm_common_write()
There is a race condition in the tpm_common_write() function allowing
two threads on the same /dev/tpm<N>, or two different applications
on the same /dev/tpmrm<N>, to overwrite each other's commands/responses.
Fix this by taking the priv->buffer_mutex early in the function.
Also convert priv->data_pending from an atomic to a regular size_t
type. There is no need for it to be atomic since it is only touched
under the protection of the priv->buffer_mutex.
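The resulting pattern can be sketched in userspace with pthreads (hypothetical names, error codes stand in for -E2BIG/-EBUSY): the pending length is only ever touched with the buffer mutex held, so it needs no atomics and no check happens before the lock is taken.
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t data_pending;            /* plain size_t, always under buffer_mutex */
static char data_buffer[256];

static int common_write(const char *src, size_t len)
{
    if (len > sizeof(data_buffer))
        return -1;                     /* stands in for -E2BIG, checked before locking */

    pthread_mutex_lock(&buffer_mutex);
    if (data_pending != 0) {           /* checked under the same lock that sets it */
        pthread_mutex_unlock(&buffer_mutex);
        return -2;                     /* stands in for -EBUSY */
    }
    memcpy(data_buffer, src, len);
    data_pending = len;
    pthread_mutex_unlock(&buffer_mutex);
    return (int)len;
}

int main(void)
{
    printf("first write: %d\n", common_write("cmd", 3));
    printf("second write before a read: %d\n", common_write("cmd", 3));
    return 0;
}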
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable(a)vger.kernel.org
Signed-off-by: Tadeusz Struk <tadeusz.struk(a)intel.com>
Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen(a)linux.intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen(a)linux.intel.com>
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 230b99288024..e4a04b2d3c32 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work)
struct file_priv *priv = container_of(work, struct file_priv, work);
mutex_lock(&priv->buffer_mutex);
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
}
@@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
struct file_priv *priv)
{
priv->chip = chip;
- atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->work, timeout_work);
@@ -58,29 +57,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
- ssize_t ret_size;
- ssize_t orig_ret_size;
+ ssize_t ret_size = 0;
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
- ret_size = atomic_read(&priv->data_pending);
- if (ret_size > 0) { /* relay data */
- orig_ret_size = ret_size;
- if (size < ret_size)
- ret_size = size;
+ mutex_lock(&priv->buffer_mutex);
- mutex_lock(&priv->buffer_mutex);
+ if (priv->data_pending) {
+ ret_size = min_t(ssize_t, size, priv->data_pending);
rc = copy_to_user(buf, priv->data_buffer, ret_size);
- memset(priv->data_buffer, 0, orig_ret_size);
+ memset(priv->data_buffer, 0, priv->data_pending);
if (rc)
ret_size = -EFAULT;
- mutex_unlock(&priv->buffer_mutex);
+ priv->data_pending = 0;
}
- atomic_set(&priv->data_pending, 0);
-
+ mutex_unlock(&priv->buffer_mutex);
return ret_size;
}
@@ -91,17 +85,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
size_t in_size = size;
ssize_t out_size;
+ if (in_size > TPM_BUFSIZE)
+ return -E2BIG;
+
+ mutex_lock(&priv->buffer_mutex);
+
/* Cannot perform a write until the read has cleared either via
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
- if (atomic_read(&priv->data_pending) != 0)
+ if (priv->data_pending != 0) {
+ mutex_unlock(&priv->buffer_mutex);
return -EBUSY;
-
- if (in_size > TPM_BUFSIZE)
- return -E2BIG;
-
- mutex_lock(&priv->buffer_mutex);
+ }
if (copy_from_user
(priv->data_buffer, (void __user *) buf, in_size)) {
@@ -132,7 +128,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return out_size;
}
- atomic_set(&priv->data_pending, out_size);
+ priv->data_pending = out_size;
mutex_unlock(&priv->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
@@ -149,5 +145,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
file->private_data = NULL;
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
}
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index ba3b6f9dacf7..b24cfb4d3ee1 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -8,7 +8,7 @@ struct file_priv {
struct tpm_chip *chip;
/* Data passed to and from the tpm via the read/write calls */
- atomic_t data_pending;
+ size_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 2f872ddcdb1e8e2186162616cea4581b8403849d Mon Sep 17 00:00:00 2001
From: Miquel Raynal <miquel.raynal(a)bootlin.com>
Date: Tue, 22 May 2018 11:40:28 +0200
Subject: [PATCH] arm64: dts: marvell: fix CP110 ICU node size
ICU size in CP110 is not 0x10 but at least 0x440 bytes long (from the
specification).
Fixes: 6ef84a827c37 ("arm64: dts: marvell: enable GICP and ICU on Armada 7K/8K")
Cc: stable(a)vger.kernel.org
Signed-off-by: Miquel Raynal <miquel.raynal(a)bootlin.com>
Reviewed-by: Thomas Petazzoni <thomas.petazzoni(a)bootlin.com>
Signed-off-by: Gregory CLEMENT <gregory.clement(a)bootlin.com>
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
index ed2f1237ea1e..8259b32f0ced 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
@@ -149,7 +149,7 @@
CP110_LABEL(icu): interrupt-controller@1e0000 {
compatible = "marvell,cp110-icu";
- reg = <0x1e0000 0x10>;
+ reg = <0x1e0000 0x440>;
#interrupt-cells = <3>;
interrupt-controller;
msi-parent = <&gicp>;
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From ac9816dcbab53c57bcf1d7b15370b08f1e284318 Mon Sep 17 00:00:00 2001
From: Akshay Adiga <akshay.adiga(a)linux.vnet.ibm.com>
Date: Wed, 16 May 2018 17:32:14 +0530
Subject: [PATCH] powerpc/powernv/cpuidle: Init all present cpus for deep
states
Init all present cpus for deep states instead of "all possible" cpus.
Init fails if a possible cpu is guarded, resulting in only
non-deep states being available for cpuidle/hotplug.
Stewart says, this means that for single threaded workloads, if you
guard out a CPU core you'll not get WoF (Workload Optimised
Frequency), which means that performance goes down when you wouldn't
expect it to.
Fixes: 77b54e9f213f ("powernv/powerpc: Add winkle support for offline cpus")
Cc: stable(a)vger.kernel.org # v3.19+
Signed-off-by: Akshay Adiga <akshay.adiga(a)linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe(a)ellerman.id.au>
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 1f12ab1e6030..1c5d0675b43c 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -79,7 +79,7 @@ static int pnv_save_sprs_for_deep_states(void)
uint64_t msr_val = MSR_IDLE;
uint64_t psscr_val = pnv_deepest_stop_psscr_val;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
uint64_t pir = get_hard_smp_processor_id(cpu);
uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];
@@ -814,7 +814,7 @@ static int __init pnv_init_idle_states(void)
int cpu;
pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
int base_cpu = cpu_first_thread_sibling(cpu);
int idx = cpu_thread_in_core(cpu);
int i;