From: Paul Davey <paul.davey@alliedtelesis.co.nz>
The MHI driver does not work on big endian architectures. The controller never transitions into mission mode. This appears to be due to the modem device expecting the various contexts and transfer rings to have fields in little endian order in memory, but the driver constructs them in native endianness.
Fix MHI event, channel and command contexts and TRE handling macros to use explicit conversion to little endian. Mark fields in relevant structures as little endian to document this requirement.
Fixes: a6e2e3522f29 ("bus: mhi: core: Add support for PM state transitions")
Fixes: 6cd330ae76ff ("bus: mhi: core: Add support for ringing channel/event ring doorbells")
Signed-off-by: Paul Davey <paul.davey@alliedtelesis.co.nz>
Cc: stable@vger.kernel.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/core/debugfs.c  |  26 +++----
 drivers/bus/mhi/core/init.c     |  36 +++++-----
 drivers/bus/mhi/core/internal.h | 119 ++++++++++++++++----------------
 drivers/bus/mhi/core/main.c     |  22 +++---
 drivers/bus/mhi/core/pm.c       |   4 +-
 5 files changed, 104 insertions(+), 103 deletions(-)
diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c index 858d7516410b..d818586c229d 100644 --- a/drivers/bus/mhi/core/debugfs.c +++ b/drivers/bus/mhi/core/debugfs.c @@ -60,16 +60,16 @@ static int mhi_debugfs_events_show(struct seq_file *m, void *d) }
seq_printf(m, "Index: %d intmod count: %lu time: %lu", - i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >> + i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >> EV_CTX_INTMODC_SHIFT, - (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >> + (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >> EV_CTX_INTMODT_SHIFT);
- seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase, - er_ctxt->rlen); + seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase), + le64_to_cpu(er_ctxt->rlen));
- seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp, - er_ctxt->wp); + seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp), + le64_to_cpu(er_ctxt->wp));
seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, &mhi_event->db_cfg.db_val); @@ -106,18 +106,18 @@ static int mhi_debugfs_channels_show(struct seq_file *m, void *d)
seq_printf(m, "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", - mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg & + mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, - (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >> - CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg & + (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >> + CHAN_CTX_BRSTMODE_SHIFT, (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT);
- seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype, - chan_ctxt->erindex); + seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype), + le32_to_cpu(chan_ctxt->erindex));
seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", - chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp, - chan_ctxt->wp); + le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen), + le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp));
seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n", ring->rp, ring->wp, diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index af484b03558a..4bd62f32695d 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -293,17 +293,17 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) if (mhi_chan->offload_ch) continue;
- tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); tmp &= ~CHAN_CTX_BRSTMODE_MASK; tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); tmp &= ~CHAN_CTX_POLLCFG_MASK; tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); - chan_ctxt->chcfg = tmp; + chan_ctxt->chcfg = cpu_to_le32(tmp);
- chan_ctxt->chtype = mhi_chan->type; - chan_ctxt->erindex = mhi_chan->er_index; + chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); + chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
mhi_chan->ch_state = MHI_CH_STATE_DISABLED; mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; @@ -328,14 +328,14 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) if (mhi_event->offload_ev) continue;
- tmp = er_ctxt->intmod; + tmp = le32_to_cpu(er_ctxt->intmod); tmp &= ~EV_CTX_INTMODC_MASK; tmp &= ~EV_CTX_INTMODT_MASK; tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); - er_ctxt->intmod = tmp; + er_ctxt->intmod = cpu_to_le32(tmp);
- er_ctxt->ertype = MHI_ER_TYPE_VALID; - er_ctxt->msivec = mhi_event->irq; + er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); + er_ctxt->msivec = cpu_to_le32(mhi_event->irq); mhi_event->db_cfg.db_mode = true;
ring->el_size = sizeof(struct mhi_tre); @@ -349,9 +349,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) * ring is empty */ ring->rp = ring->wp = ring->base; - er_ctxt->rbase = ring->iommu_base; + er_ctxt->rbase = cpu_to_le64(ring->iommu_base); er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; - er_ctxt->rlen = ring->len; + er_ctxt->rlen = cpu_to_le64(ring->len); ring->ctxt_wp = &er_ctxt->wp; }
@@ -378,9 +378,9 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) goto error_alloc_cmd;
ring->rp = ring->wp = ring->base; - cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; - cmd_ctxt->rlen = ring->len; + cmd_ctxt->rlen = cpu_to_le64(ring->len); ring->ctxt_wp = &cmd_ctxt->wp; }
@@ -581,10 +581,10 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, chan_ctxt->rp = 0; chan_ctxt->wp = 0;
- tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = tmp; + chan_ctxt->chcfg = cpu_to_le32(tmp);
/* Update to all cores */ smp_wmb(); @@ -618,14 +618,14 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, return -ENOMEM; }
- tmp = chan_ctxt->chcfg; + tmp = le32_to_cpu(chan_ctxt->chcfg); tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); - chan_ctxt->chcfg = tmp; + chan_ctxt->chcfg = cpu_to_le32(tmp);
- chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; - chan_ctxt->rlen = tre_ring->len; + chan_ctxt->rlen = cpu_to_le64(tre_ring->len); tre_ring->ctxt_wp = &chan_ctxt->wp;
tre_ring->rp = tre_ring->wp = tre_ring->base; diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index e2e10474a9d9..fa64340a8997 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -209,14 +209,14 @@ extern struct bus_type mhi_bus_type; #define EV_CTX_INTMODT_MASK GENMASK(31, 16) #define EV_CTX_INTMODT_SHIFT 16 struct mhi_event_ctxt { - __u32 intmod; - __u32 ertype; - __u32 msivec; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); + __le32 intmod; + __le32 ertype; + __le32 msivec; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); };
#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) @@ -227,25 +227,25 @@ struct mhi_event_ctxt { #define CHAN_CTX_POLLCFG_SHIFT 10 #define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) struct mhi_chan_ctxt { - __u32 chcfg; - __u32 chtype; - __u32 erindex; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); + __le32 chcfg; + __le32 chtype; + __le32 erindex; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); };
struct mhi_cmd_ctxt { - __u32 reserved0; - __u32 reserved1; - __u32 reserved2; - - __u64 rbase __packed __aligned(4); - __u64 rlen __packed __aligned(4); - __u64 rp __packed __aligned(4); - __u64 wp __packed __aligned(4); + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); };
struct mhi_ctxt { @@ -258,8 +258,8 @@ struct mhi_ctxt { };
struct mhi_tre { - u64 ptr; - u32 dword[2]; + __le64 ptr; + __le32 dword[2]; };
struct bhi_vec_entry { @@ -277,57 +277,58 @@ enum mhi_cmd_type { /* No operation command */ #define MHI_TRE_CMD_NOOP_PTR (0) #define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) +#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16))
/* Channel reset command */ #define MHI_TRE_CMD_RESET_PTR (0) #define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_RESET_CHAN << 16)) +#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_RESET_CHAN << 16)))
/* Channel stop command */ #define MHI_TRE_CMD_STOP_PTR (0) #define MHI_TRE_CMD_STOP_DWORD0 (0) -#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_STOP_CHAN << 16)) +#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_STOP_CHAN << 16)))
/* Channel start command */ #define MHI_TRE_CMD_START_PTR (0) #define MHI_TRE_CMD_START_DWORD0 (0) -#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ - (MHI_CMD_START_CHAN << 16)) +#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \ + (MHI_CMD_START_CHAN << 16)))
-#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) +#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF)
/* Event descriptor macros */ -#define MHI_TRE_EV_PTR(ptr) (ptr) -#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) -#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) -#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) -#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) -#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) -#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) +#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) +#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) +#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) +#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF)
/* Transfer descriptor macros */ -#define MHI_TRE_DATA_PTR(ptr) (ptr) -#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) -#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ - | (ieot << 9) | (ieob << 8) | chain) +#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) (cpu_to_le32((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain))
/* RSC transfer descriptor macros */ -#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) -#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) -#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) +#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) +#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16))
enum mhi_pkt_type { MHI_PKT_TYPE_INVALID = 0x0, @@ -500,7 +501,7 @@ struct state_transition { struct mhi_ring { dma_addr_t dma_handle; dma_addr_t iommu_base; - u64 *ctxt_wp; /* point to ctxt wp */ + __le64 *ctxt_wp; /* point to ctxt wp */ void *pre_aligned; void *base; void *rp; diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index ffde617f93a3..85f4f7c8d7c6 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -114,7 +114,7 @@ void mhi_ring_er_db(struct mhi_event *mhi_event) struct mhi_ring *ring = &mhi_event->ring;
mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, - ring->db_addr, *ring->ctxt_wp); + ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); }
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) @@ -123,7 +123,7 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) struct mhi_ring *ring = &mhi_cmd->ring;
db = ring->iommu_base + (ring->wp - ring->base); - *ring->ctxt_wp = db; + *ring->ctxt_wp = cpu_to_le64(db); mhi_write_db(mhi_cntrl, ring->db_addr, db); }
@@ -140,7 +140,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, * before letting h/w know there is new element to fetch. */ dma_wmb(); - *ring->ctxt_wp = db; + *ring->ctxt_wp = cpu_to_le64(db);
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, db); @@ -432,7 +432,7 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev) struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; struct mhi_ring *ev_ring = &mhi_event->ring; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); void *dev_rp;
if (!is_valid_ring_ptr(ev_ring, ptr)) { @@ -537,14 +537,14 @@ static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
/* Update the WP */ ring->wp += ring->el_size; - ctxt_wp = *ring->ctxt_wp + ring->el_size; + ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
if (ring->wp >= (ring->base + ring->len)) { ring->wp = ring->base; ctxt_wp = ring->iommu_base; }
- *ring->ctxt_wp = ctxt_wp; + *ring->ctxt_wp = cpu_to_le64(ctxt_wp);
/* Update the RP */ ring->rp += ring->el_size; @@ -801,7 +801,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 chan; int count = 0; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
/* * This is a quick check to avoid unnecessary event processing @@ -940,7 +940,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
- ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); @@ -970,7 +970,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, int count = 0; u32 chan; struct mhi_chan *mhi_chan; - dma_addr_t ptr = er_ctxt->rp; + dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) return -EIO; @@ -1011,7 +1011,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
- ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); @@ -1533,7 +1533,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, /* mark all stale events related to channel as STALE event */ spin_lock_irqsave(&mhi_event->lock, flags);
- ptr = er_ctxt->rp; + ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 4aae0baea008..c35c5ddc7220 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -218,7 +218,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) continue;
ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update all cores */ smp_wmb();
@@ -420,7 +420,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) continue;
ring->wp = ring->base + ring->len - ring->el_size; - *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update to all cores */ smp_wmb();
On 2/12/22 12:20 PM, Manivannan Sadhasivam wrote:
From: Paul Davey <paul.davey@alliedtelesis.co.nz>
The MHI driver does not work on big endian architectures. The controller never transitions into mission mode. This appears to be due to the modem device expecting the various contexts and transfer rings to have fields in little endian order in memory, but the driver constructs them in native endianness.
Yes, this is true.
Fix MHI event, channel and command contexts and TRE handling macros to use explicit conversion to little endian. Mark fields in relevant structures as little endian to document this requirement.
Basically every field in the external interface whose size is greater than one byte must have its endianness noted. From what I can tell, you did that for all of the exposed structures defined in "drivers/bus/mhi/core/internal.h", which is good.
*However* some of the *constants* were defined the wrong way.
Basically, all of the constant values should be expressed in host byte order. And any needed byte swapping should be done at the time the value is read from memory--immediately. That way, we isolate that activity to the one place we interface with the possibly "foreign" format, and from then on, everything may be assumed to be in natural (CPU) byte order.
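As a rough illustration of what I mean (a minimal userspace sketch, using htole32()/le32toh() from <endian.h> to stand in for the kernel's cpu_to_le32()/le32_to_cpu(); the struct and mask names only mimic the driver, they are not the real definitions):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define EV_CTX_INTMODT_MASK	0xffff0000u
#define EV_CTX_INTMODT_SHIFT	16

struct ev_ctxt {
	uint32_t intmod;	/* stored little endian in shared memory */
};

int main(void)
{
	/* Pretend the device wrote an interrupt moderation time of 50. */
	struct ev_ctxt ctxt = { .intmod = htole32(50 << EV_CTX_INTMODT_SHIFT) };

	/* Swap once, right where the "foreign" memory is read ... */
	uint32_t tmp = le32toh(ctxt.intmod);

	/* ... and from here on everything is in CPU byte order. */
	printf("intmodt = %u\n", (tmp & EV_CTX_INTMODT_MASK) >> EV_CTX_INTMODT_SHIFT);

	return 0;
}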
I will point out what I mean, below.
Fixes: a6e2e3522f29 ("bus: mhi: core: Add support for PM state transitions")
Fixes: 6cd330ae76ff ("bus: mhi: core: Add support for ringing channel/event ring doorbells")
Signed-off-by: Paul Davey <paul.davey@alliedtelesis.co.nz>
Cc: stable@vger.kernel.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/core/debugfs.c  |  26 +++----
 drivers/bus/mhi/core/init.c     |  36 +++++-----
 drivers/bus/mhi/core/internal.h | 119 ++++++++++++++++----------------
 drivers/bus/mhi/core/main.c     |  22 +++---
 drivers/bus/mhi/core/pm.c       |   4 +-
 5 files changed, 104 insertions(+), 103 deletions(-)
diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c index 858d7516410b..d818586c229d 100644 --- a/drivers/bus/mhi/core/debugfs.c +++ b/drivers/bus/mhi/core/debugfs.c @@ -60,16 +60,16 @@ static int mhi_debugfs_events_show(struct seq_file *m, void *d) }
These look fine, because they're doing the conversion of the fields just as they're read from memory.
seq_printf(m, "Index: %d intmod count: %lu time: %lu",
-		   i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >>
+		   i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >>
		   EV_CTX_INTMODC_SHIFT,
-		   (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >>
+		   (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >>
		   EV_CTX_INTMODT_SHIFT);
. . .
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index af484b03558a..4bd62f32695d 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -293,17 +293,17 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) if (mhi_chan->offload_ch) continue;
-	tmp = chan_ctxt->chcfg;
+	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
Note that CHAN_CTX_CHSTATE_MASK, etc. here are assumed to be in CPU byte order. This is good, and that pattern is followed for a bunch more code that I've omitted.
tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); tmp &= ~CHAN_CTX_BRSTMODE_MASK; tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); tmp &= ~CHAN_CTX_POLLCFG_MASK; tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
-	chan_ctxt->chcfg = tmp;
+	chan_ctxt->chcfg = cpu_to_le32(tmp);

-	chan_ctxt->chtype = mhi_chan->type;
-	chan_ctxt->erindex = mhi_chan->er_index;
+	chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
+	chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
mhi_chan->ch_state = MHI_CH_STATE_DISABLED; mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
. . .
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index e2e10474a9d9..fa64340a8997 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -209,14 +209,14 @@ extern struct bus_type mhi_bus_type; #define EV_CTX_INTMODT_MASK GENMASK(31, 16) #define EV_CTX_INTMODT_SHIFT 16 struct mhi_event_ctxt {
- __u32 intmod;
- __u32 ertype;
- __u32 msivec;
- __u64 rbase __packed __aligned(4);
- __u64 rlen __packed __aligned(4);
- __u64 rp __packed __aligned(4);
- __u64 wp __packed __aligned(4);
These are all good.
+	__le32 intmod;
+	__le32 ertype;
+	__le32 msivec;
+
+	__le64 rbase __packed __aligned(4);
+	__le64 rlen __packed __aligned(4);
+	__le64 rp __packed __aligned(4);
+	__le64 wp __packed __aligned(4);
 };
This is separate from the subject of this patch, but I'm pretty sure the entire structure (rather than all of those fields) can be defined with the __packed and __aligned(4) attributes to achieve the same effect.
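In other words, something like this (not build-tested; whether the resulting layout is really identical would want verifying, e.g. with pahole or a static assert, before relying on it):

struct mhi_event_ctxt {
	__le32 intmod;
	__le32 ertype;
	__le32 msivec;

	__le64 rbase;
	__le64 rlen;
	__le64 rp;
	__le64 wp;
} __packed __aligned(4);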
#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
. . .
@@ -277,57 +277,58 @@ enum mhi_cmd_type { /* No operation command */ #define MHI_TRE_CMD_NOOP_PTR (0) #define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) +#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16))
This just looks wrong to me. The original definition should be fine, but then where it's *used* it should be passed to cpu_to_le32(). I realize this might be a special case, where these "DWORD" values are getting written out to command ring elements, but even so, the byte swapping that's happening is important and should be made obvious in the code using these symbols.
This comment applies to many more similar definitions below. I don't know; maybe it looks cumbersome if it's done in the code, but I still think it's better to consistently define symbols like this in CPU byte order and do the conversions explicitly only when the values are read from or written to "foreign" (external interface) memory.
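Concretely, something along these lines (a hypothetical sketch, not the actual driver code; the write site is only meant to show where the swap would move to):

/* Definition stays in CPU byte order ... */
#define MHI_TRE_CMD_RESET_DWORD1(chid)	(((chid) << 24) | \
					 (MHI_CMD_RESET_CHAN << 16))

/* ... and the byte swap is visible where the ring element is written. */
cmd_tre->dword[0] = cpu_to_le32(MHI_TRE_CMD_RESET_DWORD0);
cmd_tre->dword[1] = cpu_to_le32(MHI_TRE_CMD_RESET_DWORD1(chan));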
Outside of this issue, the remainder of the patch looks OK to me.
-Alex
/* Channel reset command */ #define MHI_TRE_CMD_RESET_PTR (0) #define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_RESET_CHAN << 16))
+#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \
(MHI_CMD_RESET_CHAN << 16)))
/* Channel stop command */ #define MHI_TRE_CMD_STOP_PTR (0) #define MHI_TRE_CMD_STOP_DWORD0 (0) -#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_STOP_CHAN << 16))
+#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \
(MHI_CMD_STOP_CHAN << 16)))
/* Channel start command */ #define MHI_TRE_CMD_START_PTR (0) #define MHI_TRE_CMD_START_DWORD0 (0) -#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_START_CHAN << 16))
+#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \
(MHI_CMD_START_CHAN << 16)))
-#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) +#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) /* Event descriptor macros */ -#define MHI_TRE_EV_PTR(ptr) (ptr) -#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) -#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) -#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) -#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) -#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) -#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) +#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) +#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) +#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) +#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF) /* Transfer descriptor macros */ -#define MHI_TRE_DATA_PTR(ptr) (ptr) -#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) -#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
- | (ieot << 9) | (ieob << 8) | chain)
+#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) (cpu_to_le32((2 << 16) | (bei << 10) \
- | (ieot << 9) | (ieob << 8) | chain))
/* RSC transfer descriptor macros */ -#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) -#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) -#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) +#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) +#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16)) enum mhi_pkt_type { MHI_PKT_TYPE_INVALID = 0x0, @@ -500,7 +501,7 @@ struct state_transition { struct mhi_ring { dma_addr_t dma_handle; dma_addr_t iommu_base;
- u64 *ctxt_wp; /* point to ctxt wp */
+	__le64 *ctxt_wp; /* point to ctxt wp */
	void *pre_aligned;
	void *base;
	void *rp;
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index ffde617f93a3..85f4f7c8d7c6 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -114,7 +114,7 @@ void mhi_ring_er_db(struct mhi_event *mhi_event) struct mhi_ring *ring = &mhi_event->ring; mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
-				     ring->db_addr, *ring->ctxt_wp);
+				     ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
 }
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) @@ -123,7 +123,7 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) struct mhi_ring *ring = &mhi_cmd->ring; db = ring->iommu_base + (ring->wp - ring->base);
-	*ring->ctxt_wp = db;
+	*ring->ctxt_wp = cpu_to_le64(db);
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
 }
@@ -140,7 +140,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, * before letting h/w know there is new element to fetch. */ dma_wmb();
- *ring->ctxt_wp = db;
+	*ring->ctxt_wp = cpu_to_le64(db);
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, db); @@ -432,7 +432,7 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev) struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr = er_ctxt->rp;
+	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
	void *dev_rp;
if (!is_valid_ring_ptr(ev_ring, ptr)) { @@ -537,14 +537,14 @@ static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, /* Update the WP */ ring->wp += ring->el_size;
- ctxt_wp = *ring->ctxt_wp + ring->el_size;
+	ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size;
if (ring->wp >= (ring->base + ring->len)) { ring->wp = ring->base; ctxt_wp = ring->iommu_base; }
- *ring->ctxt_wp = ctxt_wp;
+	*ring->ctxt_wp = cpu_to_le64(ctxt_wp);
/* Update the RP */ ring->rp += ring->el_size; @@ -801,7 +801,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 chan; int count = 0;
- dma_addr_t ptr = er_ctxt->rp;
+	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
/* * This is a quick check to avoid unnecessary event processing @@ -940,7 +940,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
-		ptr = er_ctxt->rp;
+		ptr = le64_to_cpu(er_ctxt->rp);
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
@@ -970,7 +970,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, int count = 0; u32 chan; struct mhi_chan *mhi_chan;
- dma_addr_t ptr = er_ctxt->rp;
+	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) return -EIO; @@ -1011,7 +1011,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
-		ptr = er_ctxt->rp;
+		ptr = le64_to_cpu(er_ctxt->rp);
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
@@ -1533,7 +1533,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, /* mark all stale events related to channel as STALE event */ spin_lock_irqsave(&mhi_event->lock, flags);
- ptr = er_ctxt->rp;
+		ptr = le64_to_cpu(er_ctxt->rp);
		if (!is_valid_ring_ptr(ev_ring, ptr)) {
			dev_err(&mhi_cntrl->mhi_dev->dev,
				"Event ring rp points outside of the event ring\n");
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 4aae0baea008..c35c5ddc7220 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -218,7 +218,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size;
-	*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+	*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
	/* Update all cores */
	smp_wmb();
@@ -420,7 +420,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size;
-	*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+	*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
	/* Update to all cores */
	smp_wmb();
On Tue, Feb 15, 2022 at 02:02:01PM -0600, Alex Elder wrote:
On 2/12/22 12:20 PM, Manivannan Sadhasivam wrote:
From: Paul Davey <paul.davey@alliedtelesis.co.nz>
The MHI driver does not work on big endian architectures. The controller never transitions into mission mode. This appears to be due to the modem device expecting the various contexts and transfer rings to have fields in little endian order in memory, but the driver constructs them in native endianness.
Yes, this is true.
Fix MHI event, channel and command contexts and TRE handling macros to use explicit conversion to little endian. Mark fields in relevant structures as little endian to document this requirement.
Basically every field in the external interface whose size is greater than one byte must have its endianness noted. From what I can tell, you did that for all of the exposed structures defined in "drivers/bus/mhi/core/internal.h", which is good.
*However* some of the *constants* were defined the wrong way.
Basically, all of the constant values should be expressed in host byte order. And any needed byte swapping should be done at the time the value is read from memory--immediately. That way, we isolate that activity to the one place we interface with the possibly "foreign" format, and from then on, everything may be assumed to be in natural (CPU) byte order.
Well, I did think about it, but I convinced myself that doing the conversion in the code rather than in the defines makes the code look messy. Also, in some places it just makes things look complicated. More below:
I will point out what I mean, below.
Fixes: a6e2e3522f29 ("bus: mhi: core: Add support for PM state transitions")
Fixes: 6cd330ae76ff ("bus: mhi: core: Add support for ringing channel/event ring doorbells")
Signed-off-by: Paul Davey <paul.davey@alliedtelesis.co.nz>
Cc: stable@vger.kernel.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/core/debugfs.c  |  26 +++----
 drivers/bus/mhi/core/init.c     |  36 +++++-----
 drivers/bus/mhi/core/internal.h | 119 ++++++++++++++++----------------
 drivers/bus/mhi/core/main.c     |  22 +++---
 drivers/bus/mhi/core/pm.c       |   4 +-
 5 files changed, 104 insertions(+), 103 deletions(-)
[...]
@@ -277,57 +277,58 @@ enum mhi_cmd_type { /* No operation command */ #define MHI_TRE_CMD_NOOP_PTR (0) #define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) +#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16))
This just looks wrong to me. The original definition should be fine, but then where it's *used* it should be passed to cpu_to_le32(). I realize this might be a special case, where these "DWORD" values are getting written out to command ring elements, but even so, the byte swapping that's happening is important and should be made obvious in the code using these symbols.
This comment applies to many more similar definitions below. I don't know; maybe it looks cumbersome if it's done in the code, but I still think it's better to consistently define symbols like this in CPU byte order and do the conversions explicitly only when the values are read from or written to "foreign" (external interface) memory.
Defines like MHI_TRE_GET_CMD_CHID are what make the conversion look messy to me. Here we first extract the DWORD from the TRE and then do the shifting and masking to get the CHID.
So without splitting the DWORD extraction and the GET_CHID macros into separate steps, we can't just do the conversion in the code. And we may end up doing the conversion in the defines just for these special cases, but that would break the uniformity.
So IMO it looks better if we trust the defines to do the conversion themselves.
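Just to spell out the two shapes being compared (a sketch, not code from the patch):

/* conversion inside the helpers keeps the call sites uniform: */
cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);	/* stores LE */
type = MHI_TRE_GET_CMD_TYPE(cmd_tre);			/* reads LE */

/* doing it in the code forces the DWORD extraction into a separate step: */
dword1 = le32_to_cpu(cmd_tre->dword[1]);
type = (dword1 >> 16) & 0xFF;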
Please let me know if you think otherwise.
Thanks, Mani
Outside of this issue, the remainder of the patch looks OK to me.
-Alex
/* Channel reset command */ #define MHI_TRE_CMD_RESET_PTR (0) #define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_RESET_CHAN << 16))
+#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \
/* Channel stop command */ #define MHI_TRE_CMD_STOP_PTR (0) #define MHI_TRE_CMD_STOP_DWORD0 (0)(MHI_CMD_RESET_CHAN << 16)))
-#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_STOP_CHAN << 16))
+#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \
/* Channel start command */ #define MHI_TRE_CMD_START_PTR (0) #define MHI_TRE_CMD_START_DWORD0 (0)(MHI_CMD_STOP_CHAN << 16)))
-#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_START_CHAN << 16))
+#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \
(MHI_CMD_START_CHAN << 16)))
-#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) +#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) /* Event descriptor macros */ -#define MHI_TRE_EV_PTR(ptr) (ptr) -#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) -#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) -#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) -#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) -#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) -#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) +#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) +#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) +#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) +#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF) /* Transfer descriptor macros */ -#define MHI_TRE_DATA_PTR(ptr) (ptr) -#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) -#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
- | (ieot << 9) | (ieob << 8) | chain)
+#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) (cpu_to_le32((2 << 16) | (bei << 10) \
- | (ieot << 9) | (ieob << 8) | chain)) /* RSC transfer descriptor macros */
-#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) -#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) -#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) +#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) +#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16)) enum mhi_pkt_type { MHI_PKT_TYPE_INVALID = 0x0, @@ -500,7 +501,7 @@ struct state_transition { struct mhi_ring { dma_addr_t dma_handle; dma_addr_t iommu_base;
- u64 *ctxt_wp; /* point to ctxt wp */
- __le64 *ctxt_wp; /* point to ctxt wp */ void *pre_aligned; void *base; void *rp;
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index ffde617f93a3..85f4f7c8d7c6 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -114,7 +114,7 @@ void mhi_ring_er_db(struct mhi_event *mhi_event) struct mhi_ring *ring = &mhi_event->ring; mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
ring->db_addr, *ring->ctxt_wp);
} void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
@@ -123,7 +123,7 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) struct mhi_ring *ring = &mhi_cmd->ring; db = ring->iommu_base + (ring->wp - ring->base);
- *ring->ctxt_wp = db;
- *ring->ctxt_wp = cpu_to_le64(db); mhi_write_db(mhi_cntrl, ring->db_addr, db); }
@@ -140,7 +140,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, * before letting h/w know there is new element to fetch. */ dma_wmb();
- *ring->ctxt_wp = db;
- *ring->ctxt_wp = cpu_to_le64(db); mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, db);
@@ -432,7 +432,7 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev) struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr = er_ctxt->rp;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); void *dev_rp; if (!is_valid_ring_ptr(ev_ring, ptr)) {
@@ -537,14 +537,14 @@ static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, /* Update the WP */ ring->wp += ring->el_size;
- ctxt_wp = *ring->ctxt_wp + ring->el_size;
- ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size; if (ring->wp >= (ring->base + ring->len)) { ring->wp = ring->base; ctxt_wp = ring->iommu_base; }
- *ring->ctxt_wp = ctxt_wp;
- *ring->ctxt_wp = cpu_to_le64(ctxt_wp); /* Update the RP */ ring->rp += ring->el_size;
@@ -801,7 +801,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 chan; int count = 0;
- dma_addr_t ptr = er_ctxt->rp;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); /*
- This is a quick check to avoid unnecessary event processing
@@ -940,7 +940,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
ptr = er_ctxt->rp;
if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n");ptr = le64_to_cpu(er_ctxt->rp);
@@ -970,7 +970,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, int count = 0; u32 chan; struct mhi_chan *mhi_chan;
- dma_addr_t ptr = er_ctxt->rp;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) return -EIO;
@@ -1011,7 +1011,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
ptr = er_ctxt->rp;
if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n");ptr = le64_to_cpu(er_ctxt->rp);
@@ -1533,7 +1533,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, /* mark all stale events related to channel as STALE event */ spin_lock_irqsave(&mhi_event->lock, flags);
- ptr = er_ctxt->rp;
- ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n");
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 4aae0baea008..c35c5ddc7220 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -218,7 +218,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size;
*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
/* Update all cores */ smp_wmb();*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
@@ -420,7 +420,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size;
*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
/* Update to all cores */ smp_wmb();*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
On 2/16/22 1:04 AM, Manivannan Sadhasivam wrote:
On Tue, Feb 15, 2022 at 02:02:01PM -0600, Alex Elder wrote:
On 2/12/22 12:20 PM, Manivannan Sadhasivam wrote:
From: Paul Davey <paul.davey@alliedtelesis.co.nz>
The MHI driver does not work on big endian architectures. The controller never transitions into mission mode. This appears to be due to the modem device expecting the various contexts and transfer rings to have fields in little endian order in memory, but the driver constructs them in native endianness.
Yes, this is true.
Fix MHI event, channel and command contexts and TRE handling macros to use explicit conversion to little endian. Mark fields in relevant structures as little endian to document this requirement.
Basically every field in the external interface whose size is greater than one byte must have its endianness noted. From what I can tell, you did that for all of the exposed structures defined in "drivers/bus/mhi/core/internal.h", which is good.
*However* some of the *constants* were defined the wrong way.
Basically, all of the constant values should be expressed in host byte order. And any needed byte swapping should be done at the time the value is read from memory--immediately. That way, we isolate that activity to the one place we interface with the possibly "foreign" format, and from then on, everything may be assumed to be in natural (CPU) byte order.
Well, I did think about it, but I convinced myself that doing the conversion in the code rather than in the defines makes the code look messy. Also, in some places it just makes things look complicated. More below:
I thought this might be the case.
I will point out what I mean, below.
Fixes: a6e2e3522f29 ("bus: mhi: core: Add support for PM state transitions")
Fixes: 6cd330ae76ff ("bus: mhi: core: Add support for ringing channel/event ring doorbells")
Signed-off-by: Paul Davey <paul.davey@alliedtelesis.co.nz>
Cc: stable@vger.kernel.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/core/debugfs.c  |  26 +++----
 drivers/bus/mhi/core/init.c     |  36 +++++-----
 drivers/bus/mhi/core/internal.h | 119 ++++++++++++++++----------------
 drivers/bus/mhi/core/main.c     |  22 +++---
 drivers/bus/mhi/core/pm.c       |   4 +-
 5 files changed, 104 insertions(+), 103 deletions(-)
[...]
@@ -277,57 +277,58 @@ enum mhi_cmd_type { /* No operation command */ #define MHI_TRE_CMD_NOOP_PTR (0) #define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) +#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16))
This just looks wrong to me. The original definition should be fine, but then where it's *used* it should be passed to cpu_to_le32(). I realize this might be a special case, where these "DWORD" values are getting written out to command ring elements, but even so, the byte swapping that's happening is important and should be made obvious in the code using these symbols.
This comment applies to many more similar definitions below. I don't know; maybe it looks cumbersome if it's done in the code, but I still think it's better to consistently define symbols like this in CPU byte order and do the conversions explicitly only when the values are read from or written to "foreign" (external interface) memory.
Defines like MHI_TRE_GET_CMD_CHID are what make the conversion look messy to me. Here we first extract the DWORD from the TRE and then do the shifting and masking to get the CHID.
I didn't say so, but I don't really like those defines either. I personally would rather see the field values get extracted in open code rather than like this, because they're actually pretty simple operations. But I understand; sometimes things just "look complicated" if you do them certain ways (even if they are simple).
I did it in a certain way in the IPA code and I find that preferable to the use of the "DWORD" definitions you're using. I also stand by my belief that it's preferable to not hide the byte swaps in macro definitions.
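For what it's worth, the kind of open-coded extraction I have in mind would be roughly this (hypothetical field masks, not anything lifted from the IPA or MHI code; FIELD_GET() is from <linux/bitfield.h>):

	u32 dword1 = le32_to_cpu(tre->dword[1]);
	u8 chid = FIELD_GET(GENMASK(31, 24), dword1);
	u8 type = FIELD_GET(GENMASK(23, 16), dword1);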
You use this for reading/writing the command/transfer/event ring elements (only) though, and you do that consistently.
So without splitting the DWORD extraction and the GET_CHID macros into separate steps, we can't just do the conversion in the code. And we may end up doing the conversion in the defines just for these special cases, but that would break the uniformity.
So IMO it looks better if we trust the defines to do the conversion themselves.
Please let me know if you think otherwise.
I'm OK with it. I'm not convinced, but I won't object...
-Alex
Thanks, Mani
Outside of this issue, the remainder of the patch looks OK to me.
-Alex
/* Channel reset command */ #define MHI_TRE_CMD_RESET_PTR (0) #define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_RESET_CHAN << 16))
+#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \
/* Channel stop command */ #define MHI_TRE_CMD_STOP_PTR (0) #define MHI_TRE_CMD_STOP_DWORD0 (0)(MHI_CMD_RESET_CHAN << 16)))
-#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_STOP_CHAN << 16))
+#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \
/* Channel start command */ #define MHI_TRE_CMD_START_PTR (0) #define MHI_TRE_CMD_START_DWORD0 (0)(MHI_CMD_STOP_CHAN << 16)))
-#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
(MHI_CMD_START_CHAN << 16))
+#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \
(MHI_CMD_START_CHAN << 16)))
-#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) +#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) /* Event descriptor macros */ -#define MHI_TRE_EV_PTR(ptr) (ptr) -#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) -#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) -#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) -#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) -#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) -#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) -#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) -#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) -#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) +#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) +#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) +#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) +#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) +#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) +#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) +#define MHI_TRE_GET_EV_LINKSPEED(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF) /* Transfer descriptor macros */ -#define MHI_TRE_DATA_PTR(ptr) (ptr) -#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) -#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
- | (ieot << 9) | (ieob << 8) | chain)
+#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) +#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) (cpu_to_le32((2 << 16) | (bei << 10) \
- | (ieot << 9) | (ieob << 8) | chain)) /* RSC transfer descriptor macros */
-#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) -#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) -#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) +#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) +#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16)) enum mhi_pkt_type { MHI_PKT_TYPE_INVALID = 0x0, @@ -500,7 +501,7 @@ struct state_transition { struct mhi_ring { dma_addr_t dma_handle; dma_addr_t iommu_base;
- u64 *ctxt_wp; /* point to ctxt wp */
- __le64 *ctxt_wp; /* point to ctxt wp */ void *pre_aligned; void *base; void *rp;
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index ffde617f93a3..85f4f7c8d7c6 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -114,7 +114,7 @@ void mhi_ring_er_db(struct mhi_event *mhi_event) struct mhi_ring *ring = &mhi_event->ring; mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
ring->db_addr, *ring->ctxt_wp);
} void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
@@ -123,7 +123,7 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) struct mhi_ring *ring = &mhi_cmd->ring; db = ring->iommu_base + (ring->wp - ring->base);
- *ring->ctxt_wp = db;
- *ring->ctxt_wp = cpu_to_le64(db); mhi_write_db(mhi_cntrl, ring->db_addr, db); }
@@ -140,7 +140,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, * before letting h/w know there is new element to fetch. */ dma_wmb();
- *ring->ctxt_wp = db;
- *ring->ctxt_wp = cpu_to_le64(db); mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, db);
@@ -432,7 +432,7 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev) struct mhi_event_ctxt *er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr = er_ctxt->rp;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); void *dev_rp; if (!is_valid_ring_ptr(ev_ring, ptr)) {
@@ -537,14 +537,14 @@ static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, /* Update the WP */ ring->wp += ring->el_size;
- ctxt_wp = *ring->ctxt_wp + ring->el_size;
- ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size; if (ring->wp >= (ring->base + ring->len)) { ring->wp = ring->base; ctxt_wp = ring->iommu_base; }
- *ring->ctxt_wp = ctxt_wp;
- *ring->ctxt_wp = cpu_to_le64(ctxt_wp); /* Update the RP */ ring->rp += ring->el_size;
@@ -801,7 +801,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 chan; int count = 0;
- dma_addr_t ptr = er_ctxt->rp;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); /*
- This is a quick check to avoid unnecessary event processing
@@ -940,7 +940,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
ptr = er_ctxt->rp;
ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n");
@@ -970,7 +970,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, int count = 0; u32 chan; struct mhi_chan *mhi_chan;
- dma_addr_t ptr = er_ctxt->rp;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) return -EIO;
@@ -1011,7 +1011,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); local_rp = ev_ring->rp;
ptr = er_ctxt->rp;
ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n");
@@ -1533,7 +1533,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, /* mark all stale events related to channel as STALE event */ spin_lock_irqsave(&mhi_event->lock, flags);
- ptr = er_ctxt->rp;
- ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) { dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n");
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 4aae0baea008..c35c5ddc7220 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -218,7 +218,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size;
*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update all cores */ smp_wmb();
@@ -420,7 +420,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) continue; ring->wp = ring->base + ring->len - ring->el_size;
*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); /* Update to all cores */ smp_wmb();