Initial addition of the files for the CoreSight CTI driver.
Signed-off-by: Mike Leach <mike.leach@linaro.org> --- drivers/hwtracing/coresight/coresight-cti.c | 2025 +++++++++++++++++++ drivers/hwtracing/coresight/coresight-cti.h | 195 ++ 2 files changed, 2220 insertions(+) create mode 100644 drivers/hwtracing/coresight/coresight-cti.c create mode 100644 drivers/hwtracing/coresight/coresight-cti.h
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c new file mode 100644 index 000000000000..d06e19f111e2 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cti.c @@ -0,0 +1,2025 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 Linaro Limited, All rights reserved. + * Author: Mike Leach mike.leach@linaro.org + */ + +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/smp.h> +#include <linux/sysfs.h> +#include <linux/stat.h> +#include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/coresight.h> +#include <linux/coresight-pmu.h> +#include <linux/pm_wakeup.h> +#include <linux/amba/bus.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/perf_event.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <asm/sections.h> +#include <asm/local.h> + +#include "coresight-cti.h" + +#define csdev_to_cti_drvdata(csdev) \ + dev_get_drvdata(csdev->dev.parent) + +/* CTI device tree trigger connection node keyword */ +#define CTI_DT_CONNS "trig-conns" +/* CTI device tree connection property keywords */ +#define CTI_DT_V8ARCH "arm,cti-v8-arch" +#define CTI_DT_TRIGIN_SIGS "arm,trig-in-sigs" +#define CTI_DT_TRIGOUT_SIGS "arm,trig-out-sigs" +#define CTI_DT_FILTER_OUT_SIGS "arm,trig-filters" +#define CTI_DT_CSDEV_ASSOC "arm,cs-dev-assoc" +#define CTI_DT_CTM_ID "arm,cti-ctm-id" +#define CTI_DT_CONN_NAME "arm,trig-conn-name" + + +/** + * CTI devices can be associated with a PE, or be connected to CoreSight + * hardware. We have a list of all CTIs, and a subset array associated + * with individual PEs for pwr management as they will power up in the + * PE power domain. 
+ * + * At this point we assume that the none CPU CTIs are always powered as + * we do with sinks etc. + * + * We leave the client to figure out if all the CTIs are interconnected with + * the same CTM, in general this is the case but does not always have to be. + */ +struct ect_node { + struct cti_drvdata *cti_drv; + struct list_head next; +}; + +/* net of CTI devices connected via CTM */ +LIST_HEAD(ect_net); + +/* protect the list */ +static DEFINE_MUTEX(ect_mutex); + +/* quick reference for CPU CTIs */ +static struct cti_drvdata *cti_cpu_drv[NR_CPUS]; + +/* number of registered CTI devices */ +static int cti_count; + +/* number of cpu associated CTI devices in use */ +static int cti_cpu_count; + +/* CTI / cpu hotplug notification ID */ +static enum cpuhp_state cti_hp_online; + +/* cpu powered and enabled */ +#define CTI_PWR_ENA(p_cfg) (p_cfg->hw_enabled && p_cfg->hw_powered) + +/* write set of regs to hardware - call with spinlock claimed */ +static void cti_write_all_hw_regs(struct cti_drvdata *drvdata) +{ + struct cti_config *config = &drvdata->config; + int i; + + CS_UNLOCK(drvdata->base); + + /* disable CTI before writing registers */ + writel_relaxed(0, drvdata->base + CTICONTROL); + + /* write the CTI trigger registers */ + for (i = 0; i < config->nr_trig_max; i++) { + writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN(i)); + writel_relaxed(config->ctiouten[i], + drvdata->base + CTIOUTEN(i)); + } + + /* other regs */ + writel_relaxed(config->ctigate, drvdata->base + CTIGATE); + writel_relaxed(config->asicctl, drvdata->base + ASICCTL); + writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET); + + /* re-enable CTI */ + writel_relaxed(1, drvdata->base + CTICONTROL); + + CS_LOCK(drvdata->base); +} + +static void cti_enable_hw_smp_call(void *info) +{ + struct cti_drvdata *drvdata = info; + + cti_write_all_hw_regs(drvdata); +} + +/* write regs to hardware and enable */ +static int cti_enable_hw(void *info) +{ + struct cti_drvdata *drvdata = info; + 
struct cti_config *config = &drvdata->config; + int rc = 0; + + pm_runtime_get_sync(drvdata->dev); + spin_lock(&drvdata->spinlock); + + /* no need to do anything if enabled or unpowered*/ + if (config->hw_enabled || !config->hw_powered) { + spin_unlock(&drvdata->spinlock); + pm_runtime_put(drvdata->dev); + return 0; + } + + if (drvdata->ctidev.cpu >= 0) { + dev_info(drvdata->dev, "cti enable smp call for cpu %d\n", + drvdata->ctidev.cpu); + rc = smp_call_function_single(drvdata->ctidev.cpu, + cti_enable_hw_smp_call, + drvdata, 1); + } else { + dev_info(drvdata->dev, "cti enable not cpu call\n"); + cti_write_all_hw_regs(drvdata); + } + if (!rc) + config->hw_enabled = true; + spin_unlock(&drvdata->spinlock); + return 0; +} + +/* disable hardware */ +static int cti_disable_hw(void *info) +{ + struct cti_drvdata *drvdata = info; + struct cti_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + /* no need to do anything if disabled or cpu unpowered*/ + if (!config->hw_enabled || !config->hw_powered) { + spin_unlock(&drvdata->spinlock); + return 0; + } + + CS_UNLOCK(drvdata->base); + + /* disable CTI */ + writel_relaxed(0, drvdata->base + CTICONTROL); + config->hw_enabled = false; + + CS_LOCK(drvdata->base); + spin_unlock(&drvdata->spinlock); + pm_runtime_put(drvdata->dev); + return 0; +} + +static void +cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value) +{ + CS_UNLOCK(drvdata->base); + writel_relaxed(value, drvdata->base+offset); + CS_LOCK(drvdata->base); +} + +static void cti_set_default_config(struct cti_config *config) +{ + /* Most regs default to 0 as zalloc'ed except...*/ + config->trig_filter_enable = 1; + config->ctigate = ((u32)0x1 << config->nr_ctm_channels) - 1; + atomic_set(&config->enable_req_count, 0); +} + +static void cti_write_intack(struct device *dev, u32 ackval) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + 
/* write if enabled */ + if (CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, CTIINTACK, ackval); + spin_unlock(&drvdata->spinlock); +} + +/* need to update cross references once CTI csdev has been generated. */ +static void cti_update_conn_xrefs(struct cti_drvdata *drvdata) +{ + struct cti_trig_con *tc; + struct cti_device *ctidev = &drvdata->ctidev; + + list_for_each_entry(tc, &ctidev->trig_cons, node) { + if (tc->con_dev) { + tc->con_dev->ect_dev = drvdata->csdev; + dev_info(drvdata->dev, + "Setting assoc csdev (%px) with (%px)\n", + tc->con_dev, drvdata->csdev); + } + } +} + +/* connection information */ +static int cti_add_connection_entry(struct cti_drvdata *drvdata, + struct cti_trig_grp *in_info, + struct cti_trig_grp *out_info, + struct coresight_device *csdev, + const char *assoc_dev_name) +{ + struct cti_trig_con *tc; + struct cti_device *cti_dev = &drvdata->ctidev; + + tc = kzalloc(sizeof(struct cti_trig_con), GFP_KERNEL); + if (tc == 0) + return -ENOMEM; + + tc->con_dev = csdev; + /* prefer actual associated CS device dev name to supplied value - + * which is likely to be node name / other conn name + */ + if (csdev) + tc->con_dev_name = kstrdup(dev_name(&csdev->dev), GFP_KERNEL); + else if (assoc_dev_name != NULL) + tc->con_dev_name = kstrdup(assoc_dev_name, GFP_KERNEL); + tc->con_in = in_info; + tc->con_out = out_info; + list_add_tail(&tc->node, &cti_dev->trig_cons); + cti_dev->nr_trig_con++; + + /* add connection usage bit info to overall info */ + drvdata->config.trig_in_use |= in_info->used_mask; + drvdata->config.trig_out_use |= out_info->used_mask; + + return 0; +} + +/* add a default connection if nothing else is specified. 
+ * single connection based on max in/out info, no assoc device + */ +static int cti_add_default_connection(struct cti_drvdata *drvdata) +{ + struct cti_trig_grp *in, *out; + int n_trigs = drvdata->config.nr_trig_max; + u32 n_trig_mask = (0x1 << n_trigs) - 1; + + in = kzalloc(sizeof(struct cti_trig_grp), GFP_KERNEL); + if (!in) + return -ENOMEM; + + out = kzalloc(sizeof(struct cti_trig_grp), GFP_KERNEL); + if (!out) + return -ENOMEM; + + dev_info(drvdata->dev, "default connection: trigs %d, trig_mask %08x\n", + n_trigs, n_trig_mask); + + /* assume max trigs for in and out, all used */ + in->nr_sigs = n_trigs; + in->used_mask = n_trig_mask; + in->sig_names = NULL; + out->nr_sigs = n_trigs; + out->used_mask = n_trig_mask; + out->sig_names = NULL; + return cti_add_connection_entry(drvdata, in, out, NULL, "default"); +} + +/* create an architecturally defined v8 connection + * must hava a cpu, can have an ETM + */ +static int of_cti_create_v8_connections(struct cti_drvdata *drvdata, + struct device_node *np) +{ + struct cti_device *cti_dev = &drvdata->ctidev; + struct cti_trig_grp *in, *out; + int cpuid = 0; + struct device_node *cs_np; + struct coresight_device *csdev = NULL; + char cpu_name_str[16]; + const char *assoc_name = NULL; + + cpuid = of_coresight_ect_get_cpu(np); + if (cpuid < 0) { + dev_warn(drvdata->dev, "CTI v8 DT binding no cpu\n"); + return -EINVAL; + } + cti_dev->cpu = cpuid; + + /* do the v8 cpu connection */ + in = kzalloc(sizeof(struct cti_trig_grp), GFP_KERNEL); + if (!in) + return -ENOMEM; + + out = kzalloc(sizeof(struct cti_trig_grp), GFP_KERNEL); + if (!out) + return -ENOMEM; + + /* v8 PE CTI config... */ + in->nr_sigs = 3; + in->used_mask = 0x7; + in->sig_names = NULL; + out->nr_sigs = 3; + out->used_mask = 0x7; + out->sig_names = NULL; + scnprintf(cpu_name_str, 16, "cpu%d", cpuid); + cti_add_connection_entry(drvdata, in, out, NULL, cpu_name_str); + + drvdata->config.trig_out_filter = 0x1; /* filter dbgreq */ + + /* v8 ETM associated config... 
*/ + cs_np = of_parse_phandle(np, CTI_DT_CSDEV_ASSOC, 0); + if (cs_np) { + in = kzalloc(sizeof(struct cti_trig_grp), GFP_KERNEL); + if (!in) + return -ENOMEM; + + out = kzalloc(sizeof(struct cti_trig_grp), GFP_KERNEL); + if (!out) + return -ENOMEM; + in->nr_sigs = 4; + in->used_mask = 0xF0; + in->sig_names = NULL; + out->nr_sigs = 4; + out->used_mask = 0xF0; + out->sig_names = NULL; + dev_info(drvdata->dev, "v8 finding assoc dev %s\n", + cs_np->full_name); + csdev = of_coresight_get_cs_device_by_node(cs_np); + if (csdev) + assoc_name = dev_name(&csdev->dev); + else + assoc_name = cs_np->full_name; + cti_add_connection_entry(drvdata, in, out, csdev, assoc_name); + of_node_put(cs_np); + } + return 0; +} + +static int of_cti_read_trig_group(struct cti_trig_grp **trig_grp_ptr, + struct device_node *np, + const char *grp_name, + int max_trigs) +{ + int items, err = 0; + u32 value, pidx; + struct cti_trig_grp *grp; + + grp = kzalloc(sizeof(struct cti_trig_grp), GFP_KERNEL); + if (!grp) + return -ENOMEM; + *trig_grp_ptr = grp; + + items = of_property_count_elems_of_size(np, grp_name, 4); + /* if the property doesn't exist or has no values, then return + * an empty connection group + */ + if (items < 0) + return 0; + + if (items > max_trigs) + return -EINVAL; + + /* set the number of signals and usage mask */ + for (pidx = 0; pidx < items; pidx++) { + err = of_property_read_u32_index(np, grp_name, pidx, &value); + if (err) + return err; + grp->nr_sigs++; + grp->used_mask |= (0x1 << value); + } + return 0; +} + +static int of_cti_create_connection(struct device *dev, + struct device_node *np, + struct cti_drvdata *drvdata) +{ + struct cti_trig_grp *in = 0, *out = 0, *filter = 0; + int cpuid = -1, err = 0, trig_max = drvdata->config.nr_trig_max; + struct device_node *cs_np; + struct coresight_device *csdev = NULL; + const char *assoc_name = "unknown"; + char cpu_name_str[16]; + + /* look for the signals properties. 
*/ + err = of_cti_read_trig_group(&in, np, CTI_DT_TRIGIN_SIGS, trig_max); + if (err) + return err; + err = of_cti_read_trig_group(&out, np, CTI_DT_TRIGOUT_SIGS, trig_max); + if (err) + return err; + err = of_cti_read_trig_group(&filter, np, CTI_DT_FILTER_OUT_SIGS, + trig_max); + if (err) + return err; + + /* read the connection name if set - may be overridden by later */ + of_property_read_string(np, CTI_DT_CONN_NAME, &assoc_name); + + /* associated cpu ? */ + cpuid = of_coresight_ect_get_cpu(np); + drvdata->ctidev.cpu = cpuid; + if (cpuid >= 0) { + scnprintf(cpu_name_str, 16, "cpu%d", cpuid); + assoc_name = cpu_name_str; + } else { + /* associated device ? */ + cs_np = of_parse_phandle(np, CTI_DT_CSDEV_ASSOC, 0); + if (cs_np) { + dev_info(dev, "finding assoc dev %s\n", + cs_np->full_name); + csdev = of_coresight_get_cs_device_by_node(cs_np); + if (csdev) /* use device name if csdev found */ + assoc_name = dev_name(&csdev->dev); + else /* otherwise node name for later association */ + assoc_name = cs_np->full_name; + of_node_put(cs_np); + } + } + /* set up a connection */ + err = cti_add_connection_entry(drvdata, in, out, csdev, assoc_name); + /* note any filter info */ + if (!err) + drvdata->config.trig_out_filter |= filter->used_mask; + kfree(filter); + return err; +} + +/* get the hardware configuration & connection data. 
*/ +static int of_cti_get_hw_data(struct device *dev, + struct device_node *np, + struct cti_drvdata *drvdata) +{ + int rc = 0; + struct cti_device *cti_dev = &drvdata->ctidev; + struct device_node *nc = NULL; + + /* get any CTM ID - defaults to 0 */ + of_property_read_u32(np, CTI_DT_CTM_ID, &cti_dev->ctm_id); + + if (of_property_read_bool(np, CTI_DT_V8ARCH)) { + rc = of_cti_create_v8_connections(drvdata, np); + } else { + for_each_child_of_node(np, nc) { + if (of_node_cmp(nc->name, CTI_DT_CONNS) != 0) + continue; + dev_info(dev, "dt found node: %s %s\n", + nc->name, nc->full_name); + rc = of_cti_create_connection(dev, nc, drvdata); + if (rc != 0) + return rc; + } + } + + /* if no connections, just add a single default based on max IN-OUT */ + if (cti_dev->nr_trig_con == 0) + rc = cti_add_default_connection(drvdata); + return rc; +} + +static int +cti_match_con_name(struct cti_device *ctidev, const char *node_name, + const char *csdev_name) +{ + int found = 0; + struct cti_trig_con *trig_con; + + list_for_each_entry(trig_con, &ctidev->trig_cons, node) { + if (trig_con->con_dev_name) { + if (!strcmp(node_name, trig_con->con_dev_name)) { + found = 1; + /* match: so swap in csdev name */ + kfree(trig_con->con_dev_name); + trig_con->con_dev_name = + kstrdup(csdev_name, GFP_KERNEL); + goto cti_con_name_match; + } + } + } +cti_con_name_match: + return found; +} + +/* search the cti list to add an associated CTI into the supplied CS device + * This will set the association if CTI declared before the CS device + */ +int cti_add_assoc_to_csdev(struct coresight_device *csdev) +{ + struct ect_node *ect_item; + struct cti_device *ctidev; + const char *node_name = NULL, *csdev_name; + + /* exit early for no CTIs or self referencing ECT devices.*/ + if (!cti_count) { + dev_info(&csdev->dev, "no CTI to check\n"); + return 0; + } + + if (!csdev->dev.parent->of_node) { + dev_info(&csdev->dev, "No parent of_node pointer\n"); + return 0; + } + + node_name = 
csdev->dev.parent->of_node->full_name; + + if (!node_name) { + dev_info(&csdev->dev, "%s - bad node name\n", __func__); + return 0; + } + + dev_info(&csdev->dev, "%s - looking for %s\n", __func__, node_name); + + if (!cti_count || (csdev->type == CORESIGHT_DEV_TYPE_ECT)) { + dev_info(&csdev->dev, "no CTI or skip self ref CTI\n"); + return 0; + } + + csdev_name = dev_name(&csdev->dev); + /* for each CTI in list... */ + mutex_lock(&ect_mutex); + list_for_each_entry(ect_item, &ect_net, next) { + ctidev = &ect_item->cti_drv->ctidev; + if (cti_match_con_name(ctidev, node_name, csdev_name)) { + csdev->ect_dev = ect_item->cti_drv->csdev; + dev_info(&csdev->dev, "Found!\n"); + goto cti_found; + } + } +cti_found: + mutex_unlock(&ect_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(cti_add_assoc_to_csdev); + +/* attach/detach channel from trigger - write through if enabled. */ +static int cti_channel_trig_op(struct device *dev, + enum cti_chan_op op, + enum cti_trig_dir direction, + u32 channel_idx, + u32 trigger_idx) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + u32 trig_bitmask; + u32 chan_bitmask; + u32 reg_value; + int reg_offset; + + dev_info(dev, "chan_trig_op: op %d, dir %d, chan %d, trig %d\n", + op, direction, channel_idx, trigger_idx); + + /* ensure indexes in range */ + if ((channel_idx >= config->nr_ctm_channels) || + (trigger_idx >= config->nr_trig_max)) + return -EINVAL; + + trig_bitmask = 0x1 << trigger_idx; + + /* ensure registered triggers and not out filtered */ + if (direction == CTI_TRIG_IN) { + if (!(trig_bitmask & config->trig_in_use)) + return -EINVAL; + } else { + if (!(trig_bitmask & config->trig_out_use)) + return -EINVAL; + + if ((config->trig_filter_enable) && + (config->trig_out_filter & trig_bitmask)) + return -EINVAL; + } + + /* update the local register values */ + chan_bitmask = 0x1 << channel_idx; + reg_offset = (direction == CTI_TRIG_IN ? 
CTIINEN(trigger_idx) : + CTIOUTEN(trigger_idx)); + + spin_lock(&drvdata->spinlock); + + /* read - modify write - the trigger / channel enable value */ + reg_value = (direction == CTI_TRIG_IN ? config->ctiinen[trigger_idx] : + config->ctiouten[trigger_idx]); + reg_value = (op == CTI_CHAN_ATTACH) ? reg_value | chan_bitmask : + reg_value & (~chan_bitmask); + + /* write local copy */ + if (direction == CTI_TRIG_IN) + config->ctiinen[trigger_idx] = reg_value; + else + config->ctiouten[trigger_idx] = reg_value; + + /* write through if enabled */ + if (CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, reg_offset, reg_value); + spin_unlock(&drvdata->spinlock); + return 0; +} + +static int cti_channel_gate_op(struct device *dev, + enum cti_chan_gate_op op, + u32 channel_idx) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + u32 chan_bitmask; + u32 reg_value; + int err = 0; + + if (channel_idx >= config->nr_ctm_channels) + return -EINVAL; + + chan_bitmask = 0x1 << channel_idx; + + spin_lock(&drvdata->spinlock); + reg_value = config->ctigate; + switch (op) { + case CTI_GATE_CHAN_ENABLE: + reg_value |= chan_bitmask; + break; + + case CTI_GATE_CHAN_DISABLE: + reg_value &= ~chan_bitmask; + break; + + case CTI_GATE_CHAN_ENABLE_ALL: + reg_value = (0x1 << config->nr_ctm_channels) - 1; + break; + + case CTI_GATE_CHAN_DISABLE_ALL: + reg_value = 0x0; + break; + + default: + err = -EINVAL; + break; + } + if (err == 0) { + config->ctigate = reg_value; + if (CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, CTIGATE, reg_value); + } + spin_unlock(&drvdata->spinlock); + return err; +} + +static int cti_channel_setop(struct device *dev, + enum cti_chan_set_op op, + u32 channel_idx) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + u32 chan_bitmask; + u32 reg_value; + u32 reg_offset; + int err = 0; + + if (channel_idx >= config->nr_ctm_channels) + return 
-EINVAL; + + chan_bitmask = 0x1 << channel_idx; + + spin_lock(&drvdata->spinlock); + reg_value = config->ctiappset; + switch (op) { + case CTI_CHAN_SET: + config->ctiappset |= chan_bitmask; + reg_value = config->ctiappset; + reg_offset = CTIAPPSET; + break; + + case CTI_CHAN_CLR: + config->ctiappset &= ~chan_bitmask; + reg_value = chan_bitmask; + reg_offset = CTIAPPCLEAR; + break; + + case CTI_CHAN_PULSE: + config->ctiappset &= ~chan_bitmask; + reg_value = chan_bitmask; + reg_offset = CTIAPPPULSE; + break; + + default: + err = -EINVAL; + break; + } + + if ((err == 0) && CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, reg_offset, reg_value); + spin_unlock(&drvdata->spinlock); + + return err; +} + +/** cti ect operations **/ +static int cti_enable(struct coresight_device *csdev, void *__unused) +{ + int rc; + struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev); + + atomic_inc(&drvdata->config.enable_req_count); + rc = cti_enable_hw(drvdata); + if (rc) + atomic_dec(&drvdata->config.enable_req_count); + + return rc; +} + +static int cti_disable(struct coresight_device *csdev, void *__unused) +{ + int rc = 0; + struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev); + + if (!atomic_dec_return(&drvdata->config.enable_req_count)) { + rc = cti_disable_hw(drvdata); + if (rc) + atomic_inc(&drvdata->config.enable_req_count); + } + return rc; +} + +const struct coresight_ops_ect cti_ops_ect = { + .enable = cti_enable, + .disable = cti_disable, +}; + +const struct coresight_ops cti_ops = { + .ect_ops = &cti_ops_ect, +}; + +/** attributes **/ + +#define coresight_cti_reg(name, offset) \ + coresight_simple_reg32(struct cti_drvdata, name, offset) + +/* show a simple 32 bit value. 
if pval is NULL then live read, + * otherwise read from supplied pointer only + */ +static ssize_t cti_reg32_show(struct device *dev, char *buf, + u32 *pval, int reg_offset) +{ + unsigned long val = 0; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + if (pval) { + val = (unsigned long)*pval; + } else if ((reg_offset >= 0) && CTI_PWR_ENA(config)) { + CS_UNLOCK(drvdata->base); + val = readl_relaxed(drvdata->base + reg_offset); + CS_LOCK(drvdata->base); + } + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +/* store a simple 32 bit value. + * if pval not NULL, then copy to here too, + * if reg_offset >= 0 then write through if enabled. + */ +static ssize_t cti_reg32_store(struct device *dev, const char *buf, + size_t size, u32 *pval, + int reg_offset) +{ + unsigned long val; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* local store */ + if (pval) + *pval = (u32)val; + + /* write through of offset and enabled */ + if ((reg_offset >= 0) && CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, reg_offset, val); + spin_unlock(&drvdata->spinlock); + return size; +} + +/* basic attributes */ +static ssize_t enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int enable_req; + bool enabled, powered, cpuid; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + ssize_t size = 0; + + enable_req = atomic_read(&drvdata->config.enable_req_count); + spin_lock(&drvdata->spinlock); + powered = drvdata->config.hw_powered; + enabled = drvdata->config.hw_enabled; + cpuid = drvdata->ctidev.cpu; + spin_unlock(&drvdata->spinlock); + + if (powered) { + size = scnprintf(buf, PAGE_SIZE, "cti %s; cpu%d powered;\n", + enabled ? 
"enabled" : "disabled", cpuid); + } else if (cpuid >= 0) { + size = scnprintf(buf, PAGE_SIZE, "cti %s; cpu%d unpowered;\n", + enable_req ? "enable req" : "disable req", cpuid); + } else { + size = scnprintf(buf, PAGE_SIZE, "cti %s; no assoc cpu;\n", + enabled ? "enabled" : "disabled"); + } + return size; +} + +static ssize_t enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret = 0; + unsigned long val; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + if (val) + ret = cti_enable(drvdata->csdev, NULL); + else + ret = cti_disable(drvdata->csdev, NULL); + if (ret) + return ret; + return size; +} +static DEVICE_ATTR_RW(enable); + +static ssize_t ctmid_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return scnprintf(buf, PAGE_SIZE, "0x%x\n", drvdata->ctidev.ctm_id); +} +static DEVICE_ATTR_RO(ctmid); + +#define LF_ADJUST_BUF_PTRS(n) \ + do { buf_pos += n; buf_size -= n; \ + if (buf_size <= 1) \ + goto lf_buf_full; \ + } while (0) + +static ssize_t list_features_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int used = 0, sig_idx, con_idx, nr_trig_max; + /* buffer vars */ + int buf_size = PAGE_SIZE; + char *buf_pos = buf; + char *con_name; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *cfg = &drvdata->config; + struct cti_device *ctidev = &drvdata->ctidev; + u32 sig_mask; + struct cti_trig_con *con; + + /* basic feature info */ + used = scnprintf(buf_pos, buf_size, + "CTI:%s; Channels:%d; Max Trigs:%d\n", + dev_name(dev), cfg->nr_ctm_channels, cfg->nr_trig_max); + LF_ADJUST_BUF_PTRS(used); + used = scnprintf(buf_pos, buf_size, "CTM id = %d; Num Conns = %d\n", + ctidev->ctm_id, ctidev->nr_trig_con); + LF_ADJUST_BUF_PTRS(used); + + /* list each connection info */ + con_idx = 0; + 
nr_trig_max = cfg->nr_trig_max; + list_for_each_entry(con, &ctidev->trig_cons, node) { + /* connection name */ + if (con->con_dev_name != NULL) + con_name = con->con_dev_name; + else + con_name = "unknown"; + used = scnprintf(buf_pos, buf_size, "conn(%d:%s)\n", + con_idx, con_name); + LF_ADJUST_BUF_PTRS(used); + con_idx++; + + /* connection signals */ + used = scnprintf(buf_pos, buf_size, "Trig IN(%d) [ ", + con->con_in->nr_sigs); + LF_ADJUST_BUF_PTRS(used); + sig_mask = 0x1; + for (sig_idx = 0; sig_idx < nr_trig_max; sig_idx++) { + if (sig_mask & con->con_in->used_mask) { + used = scnprintf(buf_pos, buf_size, "%d ", + sig_idx); + LF_ADJUST_BUF_PTRS(used); + } + sig_mask <<= 1; + } + used = scnprintf(buf_pos, buf_size, "]\n"); + LF_ADJUST_BUF_PTRS(used); + + used = scnprintf(buf_pos, buf_size, "Trig OUT(%d) [ ", + con->con_out->nr_sigs); + LF_ADJUST_BUF_PTRS(used); + sig_mask = 0x1; + for (sig_idx = 0; sig_idx < nr_trig_max; sig_idx++) { + if (sig_mask & con->con_out->used_mask) { + used = scnprintf(buf_pos, buf_size, "%d ", + sig_idx); + LF_ADJUST_BUF_PTRS(used); + } + sig_mask <<= 1; + } + used = scnprintf(buf_pos, buf_size, "]\n"); + LF_ADJUST_BUF_PTRS(used); + } + + if (cfg->trig_out_filter) { + used = scnprintf(buf_pos, buf_size, "Trig OUT_FILTERED [ "); + LF_ADJUST_BUF_PTRS(used); + sig_mask = 0x1; + for (sig_idx = 0; sig_idx < nr_trig_max; sig_idx++) { + if (sig_mask & cfg->trig_out_filter) { + used = scnprintf(buf_pos, buf_size, "%d ", + sig_idx); + LF_ADJUST_BUF_PTRS(used); + } + sig_mask <<= 1; + } + used = scnprintf(buf_pos, buf_size, "]\n"); + LF_ADJUST_BUF_PTRS(used); + } + +lf_buf_full: + used = buf_size > 0 ? 
PAGE_SIZE - buf_size : PAGE_SIZE; + return used; +} +static DEVICE_ATTR_RO(list_features); + + +static struct attribute *coresight_cti_attrs[] = { + &dev_attr_enable.attr, + &dev_attr_list_features.attr, + &dev_attr_ctmid.attr, + NULL, +}; + +/* raw register attributes */ +static ssize_t inout_sel_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u32 val; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = (u32)drvdata->config.ctiinout_sel; + return scnprintf(buf, PAGE_SIZE, "%#x\n", val); +} + +static ssize_t inout_sel_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val > (CTIINOUTEN_MAX-1)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + drvdata->config.ctiinout_sel = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(inout_sel); + +static ssize_t inen_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + int index; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + spin_lock(&drvdata->spinlock); + index = drvdata->config.ctiinout_sel; + val = drvdata->config.ctiinen[index]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "INEN%d %#lx\n", index, val); +} + +static ssize_t inen_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + int index; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + index = config->ctiinout_sel; + config->ctiinen[index] = val; + + /* write through if enabled */ + if (CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, CTIINEN(index), val); + spin_unlock(&drvdata->spinlock); + return size; 
+} +static DEVICE_ATTR_RW(inen); + +static ssize_t outen_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + int index; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + spin_lock(&drvdata->spinlock); + index = drvdata->config.ctiinout_sel; + val = drvdata->config.ctiouten[index]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "OUTEN%d %#lx\n", index, val); +} + +static ssize_t outen_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + int index; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + index = config->ctiinout_sel; + config->ctiouten[index] = val; + + /* write through if enabled */ + if (CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, CTIOUTEN(index), val); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(outen); + + +static ssize_t gate_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return cti_reg32_show(dev, buf, &drvdata->config.ctigate, -1); +} + +static ssize_t gate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return cti_reg32_store(dev, buf, size, + &drvdata->config.ctigate, CTIGATE); +} +static DEVICE_ATTR_RW(gate); + +static ssize_t asicctl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return cti_reg32_show(dev, buf, &drvdata->config.asicctl, -1); +} + +static ssize_t asicctl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); 
+ + return cti_reg32_store(dev, buf, size, + &drvdata->config.asicctl, ASICCTL); +} +static DEVICE_ATTR_RW(asicctl); + +static ssize_t intack_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + cti_write_intack(dev, val); + return size; +} +static DEVICE_ATTR_WO(intack); + +static ssize_t appset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return cti_reg32_show(dev, buf, &drvdata->config.ctiappset, -1); +} + +static ssize_t appset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return cti_reg32_store(dev, buf, size, + &drvdata->config.ctiappset, CTIAPPSET); +} +static DEVICE_ATTR_RW(appset); + +static ssize_t appclear_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val, mask; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + + /* a 1'b1 in appclr clears down the same bit in appset*/ + mask = ~val; + config->ctiappset &= mask; + + /* write through if enabled */ + if (CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, CTIAPPCLEAR, val); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_WO(appclear); + +static ssize_t apppulse_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val, mask; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + + /* a 1'b1 in apppulse sets then clears the bit, + * effectively clears down 
the same bit in appset + */ + mask = ~val; + config->ctiappset &= mask; + + /* write through if enabled */ + if (CTI_PWR_ENA(config)) + cti_write_single_reg(drvdata, CTIAPPPULSE, val); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_WO(apppulse); + +static ssize_t itchout_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return cti_reg32_show(dev, buf, NULL, ITCHOUT); +} + +static ssize_t itchout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_reg32_store(dev, buf, size, NULL, ITCHOUT); +} +static DEVICE_ATTR_RW(itchout); + +static ssize_t ittrigout_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return cti_reg32_show(dev, buf, NULL, ITTRIGOUT); +} + +static ssize_t ittrigout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_reg32_store(dev, buf, size, NULL, ITTRIGOUT); +} +static DEVICE_ATTR_RW(ittrigout); + +static ssize_t itchinack_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_reg32_store(dev, buf, size, NULL, ITCHINACK); +} +static DEVICE_ATTR_WO(itchinack); + +static ssize_t ittriginack_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_reg32_store(dev, buf, size, NULL, ITTRIGINACK); +} +static DEVICE_ATTR_WO(ittriginack); + +static ssize_t itctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return cti_reg32_show(dev, buf, NULL, CORESIGHT_ITCTRL); +} + +static ssize_t itctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_reg32_store(dev, buf, size, NULL, CORESIGHT_ITCTRL); +} +static DEVICE_ATTR_RW(itctrl); + +coresight_cti_reg(triginstatus, CTITRIGINSTATUS); +coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS); +coresight_cti_reg(chinstatus, CTICHINSTATUS); 
+coresight_cti_reg(choutstatus, CTICHOUTSTATUS); +coresight_cti_reg(ittrigin, ITTRIGIN); +coresight_cti_reg(itchin, ITCHIN); +coresight_cti_reg(itchoutack, ITCHOUTACK); +coresight_cti_reg(ittrigoutack, ITTRIGOUTACK); + +static struct attribute *coresight_cti_regs_attrs[] = { + &dev_attr_inout_sel.attr, + &dev_attr_inen.attr, + &dev_attr_outen.attr, + &dev_attr_gate.attr, + &dev_attr_asicctl.attr, + &dev_attr_intack.attr, + &dev_attr_appset.attr, + &dev_attr_appclear.attr, + &dev_attr_apppulse.attr, + &dev_attr_triginstatus.attr, + &dev_attr_trigoutstatus.attr, + &dev_attr_chinstatus.attr, + &dev_attr_choutstatus.attr, + &dev_attr_itctrl.attr, + &dev_attr_ittrigin.attr, + &dev_attr_itchin.attr, + &dev_attr_ittrigout.attr, + &dev_attr_itchout.attr, + &dev_attr_itchoutack.attr, + &dev_attr_ittrigoutack.attr, + &dev_attr_ittriginack.attr, + &dev_attr_itchinack.attr, + NULL, +}; + +/* channel / trigger api */ +static int +cti_trig_op_parse(struct device *dev, enum cti_chan_op op, + enum cti_trig_dir dir, const char *buf, size_t size) +{ + u32 chan_idx; + u32 trig_idx; + int items, err = size; + + /* extract chan idx and trigger idx */ + items = sscanf(buf, "%d %d", &chan_idx, &trig_idx); + if (items) { + err = cti_channel_trig_op(dev, op, dir, chan_idx, trig_idx); + if (!err) + err = size; + } else + err = -EINVAL; + return err; +} + +static ssize_t trigin_attach_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_trig_op_parse(dev, CTI_CHAN_ATTACH, CTI_TRIG_IN, + buf, size); +} +static DEVICE_ATTR_WO(trigin_attach); + +static ssize_t trigin_detach_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_IN, + buf, size); +} +static DEVICE_ATTR_WO(trigin_detach); + +static ssize_t trigout_attach_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_trig_op_parse(dev, 
CTI_CHAN_ATTACH, CTI_TRIG_OUT, + buf, size); +} +static DEVICE_ATTR_WO(trigout_attach); + +static ssize_t trigout_detach_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + return cti_trig_op_parse(dev, CTI_CHAN_DETACH, CTI_TRIG_OUT, + buf, size); +} +static DEVICE_ATTR_WO(trigout_detach); + + +static ssize_t gate_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int err = 0, channel = 0, items; + + if (strcmp(buf, "all") == 0) { + err = cti_channel_gate_op(dev, CTI_GATE_CHAN_ENABLE_ALL, 0); + } else { + items = sscanf(buf, "%d", &channel); + if (!items) + return -EINVAL; + err = cti_channel_gate_op(dev, CTI_GATE_CHAN_ENABLE, 0); + } + return err ? err : size; +} +static DEVICE_ATTR_WO(gate_enable); + +static ssize_t gate_disable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int err = 0, channel = 0, items; + + if (strcmp(buf, "all") == 0) { + err = cti_channel_gate_op(dev, CTI_GATE_CHAN_DISABLE_ALL, 0); + } else { + items = sscanf(buf, "%d", &channel); + if (!items) + return -EINVAL; + err = cti_channel_gate_op(dev, CTI_GATE_CHAN_DISABLE, 0); + } + return err ? err : size; +} +static DEVICE_ATTR_WO(gate_disable); + +static int +chan_op_parse(struct device *dev, enum cti_chan_set_op op, const char *buf) +{ + int err = 0, channel = 0, items; + + items = sscanf(buf, "%d", &channel); + if (!items) + return -EINVAL; + err = cti_channel_setop(dev, op, channel); + return err; + +} + +static ssize_t chan_set_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int err = chan_op_parse(dev, CTI_CHAN_SET, buf); + + return err ? err : size; +} +static DEVICE_ATTR_WO(chan_set); + +static ssize_t chan_clear_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int err = chan_op_parse(dev, CTI_CHAN_CLR, buf); + + return err ? 
err : size; +} +static DEVICE_ATTR_WO(chan_clear); + +static ssize_t chan_pulse_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int err = chan_op_parse(dev, CTI_CHAN_PULSE, buf); + + return err ? err : size; +} +static DEVICE_ATTR_WO(chan_pulse); + +static ssize_t trig_filter_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + spin_lock(&drvdata->spinlock); + val = drvdata->config.trig_filter_enable; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%ld (%s)\n", val, + val ? "enabled" : "disabled"); +} + +static ssize_t trig_filter_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + drvdata->config.trig_filter_enable = val ? 
1 : 0; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(trig_filter_enable); + +/* clear all xtrigger / channel programming */ +static ssize_t reset_xtrigs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int i; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + + /* clear the CTI trigger / channel programming registers */ + for (i = 0; i < config->nr_trig_max; i++) { + config->ctiinen[i] = 0; + config->ctiouten[i] = 0; + } + + /* clear the other regs */ + config->ctigate = (0x1 << config->nr_ctm_channels) - 1; + config->asicctl = 0; + config->ctiappset = 0; + config->ctiinout_sel = 0; + + /* if enabled then write through */ + if (CTI_PWR_ENA(config)) + cti_write_all_hw_regs(drvdata); + + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_WO(reset_xtrigs); + +/* list attachments by channel */ +#define LX_ADJUST_BUF_PTRS(n) \ + do { buf_pos += n; buf_size -= n; \ + if (buf_size <= 1) \ + goto lx_buf_full; \ + } while (0) + +static ssize_t list_xtrigs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int used = 0, nr_channels, chan_idx, reg_idx; + /* buffer vars */ + int buf_size = PAGE_SIZE; + char *buf_pos = buf; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *cfg = &drvdata->config; + u32 chan_mask; + + nr_channels = cfg->nr_ctm_channels; + + used = scnprintf(buf_pos, buf_size, "CTI:%s; Channels:%d\n", + dev_name(dev), nr_channels); + LX_ADJUST_BUF_PTRS(used); + + /* show the IN and OUT triggers per channel */ + for (chan_idx = 0; chan_idx < nr_channels; chan_idx++) { + chan_mask = 0x1 << chan_idx; + used = scnprintf(buf_pos, buf_size, "Chan %d: IN [ ", chan_idx); + LX_ADJUST_BUF_PTRS(used); + + for (reg_idx = 0; + reg_idx < drvdata->config.nr_trig_max; + reg_idx++) { + if (chan_mask & cfg->ctiinen[reg_idx]) { + used 
= scnprintf(buf_pos, buf_size, "%d ", + reg_idx); + LX_ADJUST_BUF_PTRS(used); + } + } + used = scnprintf(buf_pos, buf_size, "]; OUT [ "); + LX_ADJUST_BUF_PTRS(used); + + for (reg_idx = 0; + reg_idx < drvdata->config.nr_trig_max; + reg_idx++) { + if (chan_mask & cfg->ctiouten[reg_idx]) { + used = scnprintf(buf_pos, buf_size, "%d ", + reg_idx); + LX_ADJUST_BUF_PTRS(used); + } + } + used = scnprintf(buf_pos, buf_size, "];\n"); + LX_ADJUST_BUF_PTRS(used); + } + + /* show the channels enabled via ctigate */ + used = scnprintf(buf_pos, buf_size, "Gate Channels Enabled: [ "); + LX_ADJUST_BUF_PTRS(used); + + if (cfg->ctigate == 0) { + used = scnprintf(buf_pos, buf_size, "None ]\n"); + LX_ADJUST_BUF_PTRS(used); + } else { + for (chan_idx = 0; chan_idx < nr_channels; chan_idx++) { + chan_mask = 0x1 << chan_idx; + if (chan_mask & cfg->ctigate) { + used = scnprintf(buf_pos, buf_size, "%d ", + chan_idx); + LX_ADJUST_BUF_PTRS(used); + } + } + used = scnprintf(buf_pos, buf_size, "]\n"); + LX_ADJUST_BUF_PTRS(used); + } + +lx_buf_full: + used = buf_size > 0 ? 
PAGE_SIZE - buf_size : PAGE_SIZE; + return used; +} +static DEVICE_ATTR_RO(list_xtrigs); + +#define LCIU_ADJUST_BUF_PTRS(n) \ + do { buf_pos += n; buf_size -= n; \ + if (buf_size <= 1) \ + goto lciu_buf_full; \ + } while (0) + +static ssize_t list_chan_inuse_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int used = 0, nr_channels, i; + /* buffer vars */ + int buf_size = PAGE_SIZE; + char *buf_pos = buf; + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct cti_config *config = &drvdata->config; + u32 inuse_bits = 0, chan_mask, chan_bit_mask; + + nr_channels = config->nr_ctm_channels; + spin_lock(&drvdata->spinlock); + for (i = 0; i < config->nr_trig_max; i++) { + inuse_bits |= config->ctiinen[i]; + inuse_bits |= config->ctiouten[i]; + } + spin_unlock(&drvdata->spinlock); + + /* print in use channels */ + chan_mask = ((u32)(0x1 << nr_channels)) - 1; + if (inuse_bits & chan_mask) { + used = scnprintf(buf_pos, buf_size, "Chan in use [ "); + LCIU_ADJUST_BUF_PTRS(used); + + for (i = 0; i < nr_channels; i++) { + chan_bit_mask = 0x1 << i; + if (chan_bit_mask & inuse_bits) { + used = scnprintf(buf_pos, buf_size, "%d ", i); + LCIU_ADJUST_BUF_PTRS(used); + } + } + + used = scnprintf(buf_pos, buf_size, "]\n"); + LCIU_ADJUST_BUF_PTRS(used); + + } else { + used = scnprintf(buf_pos, buf_size, "Chan in use [ none ]\n"); + LCIU_ADJUST_BUF_PTRS(used); + } + + /* print free channels */ + inuse_bits = ~inuse_bits; /* flip for free channels */ + if (inuse_bits & chan_mask) { + used = scnprintf(buf_pos, buf_size, "Chan free [ "); + LCIU_ADJUST_BUF_PTRS(used); + + for (i = 0; i < nr_channels; i++) { + chan_bit_mask = 0x1 << i; + if (chan_bit_mask & inuse_bits) { + used = scnprintf(buf_pos, buf_size, "%d ", i); + LCIU_ADJUST_BUF_PTRS(used); + } + } + + used = scnprintf(buf_pos, buf_size, "]\n"); + LCIU_ADJUST_BUF_PTRS(used); + } else { + used = scnprintf(buf_pos, buf_size, "Chan free [ none ]\n"); + LCIU_ADJUST_BUF_PTRS(used); + } + 
+lciu_buf_full: + used = buf_size > 0 ? PAGE_SIZE - buf_size : PAGE_SIZE; + return used; +} +static DEVICE_ATTR_RO(list_chan_inuse); + +static struct attribute *coresight_cti_channel_attrs[] = { + &dev_attr_trigin_attach.attr, + &dev_attr_trigin_detach.attr, + &dev_attr_trigout_attach.attr, + &dev_attr_trigout_detach.attr, + &dev_attr_gate_enable.attr, + &dev_attr_gate_disable.attr, + &dev_attr_chan_set.attr, + &dev_attr_chan_clear.attr, + &dev_attr_chan_pulse.attr, + &dev_attr_trig_filter_enable.attr, + &dev_attr_list_xtrigs.attr, + &dev_attr_reset_xtrigs.attr, + &dev_attr_list_chan_inuse.attr, + NULL, +}; + +/* coresight management registers */ +coresight_cti_reg(devaff0, CTIDEVAFF0); +coresight_cti_reg(devaff1, CTIDEVAFF1); +coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS); +coresight_cti_reg(devarch, CORESIGHT_DEVARCH); +coresight_cti_reg(devid, CORESIGHT_DEVID); +coresight_cti_reg(devtype, CORESIGHT_DEVTYPE); +coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0); +coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1); +coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2); +coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3); +coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4); + +static struct attribute *coresight_cti_mgmt_attrs[] = { + &dev_attr_devaff0.attr, + &dev_attr_devaff1.attr, + &dev_attr_authstatus.attr, + &dev_attr_devarch.attr, + &dev_attr_devid.attr, + &dev_attr_devtype.attr, + &dev_attr_pidr0.attr, + &dev_attr_pidr1.attr, + &dev_attr_pidr2.attr, + &dev_attr_pidr3.attr, + &dev_attr_pidr4.attr, + NULL, +}; + +static const struct attribute_group coresight_cti_group = { + .attrs = coresight_cti_attrs, +}; + +static const struct attribute_group coresight_cti_regs_group = { + .attrs = coresight_cti_regs_attrs, + .name = "regs", +}; + +static const struct attribute_group coresight_cti_channels_group = { + .attrs = coresight_cti_channel_attrs, + .name = "channels", +}; + +static const struct attribute_group coresight_cti_mgmt_group = { + .attrs = coresight_cti_mgmt_attrs, + 
.name = "mgmt", +}; + +const struct attribute_group *coresight_cti_groups[] = { + &coresight_cti_group, + &coresight_cti_regs_group, + &coresight_cti_channels_group, + &coresight_cti_mgmt_group, + NULL, +}; + +static int cti_starting_cpu(unsigned int cpu) +{ + struct cti_drvdata *drvdata = cti_cpu_drv[cpu]; + + if (!drvdata) + return 0; + spin_lock(&drvdata->spinlock); + drvdata->config.hw_powered = true; + spin_unlock(&drvdata->spinlock); + if (atomic_read(&drvdata->config.enable_req_count)) + cti_enable_hw(drvdata); + return 0; +} + +static int cti_stopping_cpu(unsigned int cpu) +{ + struct cti_drvdata *drvdata = cti_cpu_drv[cpu]; + + if (!drvdata) + return 0; + cti_disable_hw(drvdata); + spin_lock(&drvdata->spinlock); + drvdata->config.hw_powered = false; + spin_unlock(&drvdata->spinlock); + return 0; +} + +static void cti_smp_set_powered(void *info) +{ + struct cti_drvdata *drvdata = info; + + drvdata->config.hw_powered = true; +} + +/* setup power handling on a cti that has an association with a cpu */ +static int cti_setup_hp_cpu_affinity(struct cti_drvdata *drvdata) +{ + int ret = 0; + + /* CPU affinity - add to quick lookup array + * and register cpu hp callbacks first time out + */ + cpus_read_lock(); + cti_cpu_drv[drvdata->ctidev.cpu] = drvdata; + if (!cti_cpu_count) { + cti_hp_online = cpuhp_setup_state_nocalls_cpuslocked( + CPUHP_AP_ONLINE_DYN, "arm/coresightcti:online", + cti_starting_cpu, cti_stopping_cpu); + + if (cti_hp_online < 0) { + ret = cti_hp_online; + cti_cpu_drv[drvdata->ctidev.cpu] = 0; + goto cti_hp_cpu_done; + } + } + cti_cpu_count++; + + /* setup powered flag by running function of assoc cpu */ + if (smp_call_function_single(drvdata->ctidev.cpu, cti_smp_set_powered, + drvdata, 1)) { + drvdata->config.hw_powered = false; + } + +cti_hp_cpu_done: + cpus_read_unlock(); + return ret; +} + +/* back out hotplug notifications on error */ +static void cti_clear_hp_cpu_affinity(struct cti_drvdata *drvdata) +{ + if (!drvdata) + return; + + if 
(drvdata->ctidev.cpu >= 0) { + cti_cpu_drv[drvdata->ctidev.cpu] = 0; + cti_cpu_count--; + if ((cti_cpu_count == 0) && (cti_hp_online > 0)) + cpuhp_remove_state_nocalls(cti_hp_online); + } +} + +static int cti_probe(struct amba_device *adev, const struct amba_id *id) +{ + int ret = 0; + u32 devid; + void __iomem *base; + struct device *dev = &adev->dev; + struct cti_drvdata *drvdata = 0; + struct coresight_desc cti_desc; + struct coresight_platform_data *pdata = NULL; + struct resource *res = &adev->res; + struct device_node *np = adev->dev.of_node; + struct ect_node *ect_nd = 0; + + /* boilerplate code to set up the basics */ + dev_info(dev, "%s(ID = %x)\n", __func__, id->id); + if (np) { + pdata = of_get_coresight_ect_platform_data(dev, np); + if (IS_ERR(pdata)) { + dev_info(dev, "of_get_coresight_ect_platform err\n"); + ret = PTR_ERR(pdata); + goto err_out; + } + dev->platform_data = pdata; + } + /* node to keep track of CTI net */ + ect_nd = kzalloc(sizeof(struct ect_node), GFP_KERNEL); + if (!ect_nd) { + ret = -ENOMEM; + goto err_out; + } + + /* driver data*/ + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) { + ret = -ENOMEM; + dev_info(dev, "%s, mem err\n", __func__); + goto err_out; + } + + /* links between dev and drvdata*/ + drvdata->dev = dev; + dev_set_drvdata(dev, drvdata); + + /* default CTI device info */ + drvdata->ctidev.cpu = -1; + drvdata->ctidev.nr_trig_con = 0; + drvdata->ctidev.ctm_id = 0; + INIT_LIST_HEAD(&drvdata->ctidev.trig_cons); + + /* Validity for the resource is already checked by the AMBA core */ + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + dev_info(dev, "%s, remap err\n", __func__); + goto err_out; + } + + drvdata->base = base; + + spin_lock_init(&drvdata->spinlock); + + /* look at the HW DEVID register for some of the HW settings */ + devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); + drvdata->config.nr_trig_max = (int)((devid & 0xFF00) >> 8); + /* no 
current hardware should exceed this, but protect the driver + * in case of fault / out of spec hw + */ + if (drvdata->config.nr_trig_max > CTIINOUTEN_MAX) { + dev_warn_once(dev, + "Limiting HW MaxTrig value(%d) to driver max(%d)\n", + drvdata->config.nr_trig_max, CTIINOUTEN_MAX); + drvdata->config.nr_trig_max = CTIINOUTEN_MAX; + } + + drvdata->config.nr_ctm_channels = (int)((devid & 0xF0000) >> 16); + dev_info(dev, "DevID %08x, trigs=%d, chans=%d\n", devid, + drvdata->config.nr_trig_max, drvdata->config.nr_ctm_channels); + + /** additional parse the .dts for connections and signals */ + of_cti_get_hw_data(dev, np, drvdata); + + cti_set_default_config(&drvdata->config); + + /* setup cpu related CTI devices, otherwise assume powered */ + if (drvdata->ctidev.cpu >= 0) { + ret = cti_setup_hp_cpu_affinity(drvdata); + if (ret < 0) + goto err_out; + } else + drvdata->config.hw_powered = true; + + /* set up coresight component description */ + cti_desc.pdata = pdata; + cti_desc.type = CORESIGHT_DEV_TYPE_ECT; + cti_desc.subtype.ect_subtype = CORESIGHT_DEV_SUBTYPE_ECT_CTI; + cti_desc.ops = &cti_ops; + cti_desc.groups = coresight_cti_groups; + cti_desc.dev = dev; + drvdata->csdev = coresight_register(&cti_desc); + if (IS_ERR(drvdata->csdev)) { + ret = PTR_ERR(drvdata->csdev); + pr_err("%s: CS register failed\n", pdata->name); + goto err_out; + } + + /* add to list of CTI devices */ + mutex_lock(&ect_mutex); + ect_nd->cti_drv = drvdata; + list_add(&ect_nd->next, &ect_net); + mutex_unlock(&ect_mutex); + + /* set any cross references */ + cti_update_conn_xrefs(drvdata); + + /* all done - dec pm refcount */ + pm_runtime_put(&adev->dev); + dev_info(dev, "%s: initialized\n", pdata->name); + cti_count++; + dev_info(dev, "%s - ok\n", __func__); + return 0; + +err_out: + cti_clear_hp_cpu_affinity(drvdata); + dev_info(dev, "%s - err_out\n", __func__); + return ret; +} + +/* free up CTI specific resources + * - called from coresight_device_release on coresight_unregister. 
+ */ +void cti_device_release(struct device *dev) +{ + struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct ect_node *ect_item, *ect_tmp; + struct cti_trig_con *tc, *tc_tmp; + + /* free up resources associated with the cti connections */ + if (drvdata->ctidev.cpu >= 0) + cti_cpu_drv[drvdata->ctidev.cpu] = 0; + + list_for_each_entry_safe(tc, tc_tmp, + &drvdata->ctidev.trig_cons, node) { + kfree(tc->con_in); + kfree(tc->con_out); + kfree(tc->con_dev_name); + list_del(&tc->node); + kfree(tc); + } + + /* remove from the list */ + mutex_lock(&ect_mutex); + list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, next) { + if (ect_item->cti_drv == drvdata) { + list_del(&ect_item->next); + kfree(ect_item); + goto ect_list_item_removed; + } + } +ect_list_item_removed: + mutex_unlock(&ect_mutex); + + kfree(drvdata); +} +EXPORT_SYMBOL_GPL(cti_device_release); + +#define CTI_AMBA_ID(pid) \ + { \ + .id = pid, \ + .mask = 0x000fffff, \ + } + +static struct amba_cs_uci_id uci_id_cti[] = { + { + /* CTI UCI data */ + .devarch = 0x47701a14, /* devarch value for CTI v2 */ + .devarch_mask = 0xfff0ffff, + .devtype = 0x00000014, /* maj(0x4-debug) min(0x1-ECT) */ + } +}; + +#define CTI_AMBA_UCI_ID(pid) \ + { \ + .id = pid, \ + .mask = 0x000fffff, \ + .data = uci_id_cti, \ + } + + +static const struct amba_id cti_ids[] = { + CTI_AMBA_ID(0x000bb906), /* Coresight CTI (SoC 400), C-A72, C-A57 */ + CTI_AMBA_ID(0x000bb922), /* CTI - C-A8 */ + CTI_AMBA_ID(0x000bb9a8), /* CTI - C-A53 */ + CTI_AMBA_ID(0x000bb9aa), /* CTI - C-A73 */ + CTI_AMBA_UCI_ID(0x000bb9da), /* CTI - C-A35 */ + CTI_AMBA_UCI_ID(0x000bb9ed), /* Coresight CTI (SoC 600) */ + CTI_AMBA_UCI_ID(0x000bb95d), /* test UCI - ETM PID A53 */ + { 0, 0}, +}; + +static struct amba_driver cti_driver = { + .drv = { + .name = "coresight-cti", + .owner = THIS_MODULE, + .suppress_bind_attrs = true, + }, + .probe = cti_probe, + .id_table = cti_ids, +}; +builtin_amba_driver(cti_driver); diff --git 
a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h new file mode 100644 index 000000000000..09cb985ac0ea --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cti.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 Linaro Limited, All rights reserved. + * Author: Mike Leach mike.leach@linaro.org + */ + + +#ifndef _CORESIGHT_CORESIGHT_CTI_H +#define _CORESIGHT_CORESIGHT_CTI_H + +#include <asm/local.h> +#include <linux/spinlock.h> +#include "coresight-priv.h" + +/* + * Device registers + * 0x000 - 0x144: CTI programming and status + * 0xEDC - 0xEF8: CTI integration test. + * 0xF00 - 0xFFC: Coresight management registers. + */ +/* CTI programming registers */ +#define CTICONTROL 0x000 +#define CTIINTACK 0x010 +#define CTIAPPSET 0x014 +#define CTIAPPCLEAR 0x018 +#define CTIAPPPULSE 0x01C +#define CTIINEN(n) (0x020 + (4 * n)) +#define CTIOUTEN(n) (0x0A0 + (4 * n)) +#define CTITRIGINSTATUS 0x130 +#define CTITRIGOUTSTATUS 0x134 +#define CTICHINSTATUS 0x138 +#define CTICHOUTSTATUS 0x13C +#define CTIGATE 0x140 +#define ASICCTL 0x144 +/* Integration test registers */ +#define ITCHINACK 0xEDC /* WO CTI CSSoc 400 only*/ +#define ITTRIGINACK 0xEE0 /* WO CTI CSSoc 400 only*/ +#define ITCHOUT 0xEE4 /* WO RW-600 */ +#define ITTRIGOUT 0xEE8 /* WO RW-600 */ +#define ITCHOUTACK 0xEEC /* RO CTI CSSoc 400 only*/ +#define ITTRIGOUTACK 0xEF0 /* RO CTI CSSoc 400 only*/ +#define ITCHIN 0xEF4 /* RO */ +#define ITTRIGIN 0xEF8 /* RO */ +/* management registers */ +#define CTIDEVAFF0 0xFA8 +#define CTIDEVAFF1 0xFAC + + +/* CTI CSSoc 600 has a max of 32 trigger signals per direction. + * CTI CSSoc 400 has 8 IO triggers - other CTIs can be impl def. + * Max of in and out defined in the DEVID register. + * - pick up actual number used from .dts parameters if present. + */ +#define CTIINOUTEN_MAX 32 + +/** + * Group of related trigger signals + * + * @nr_sigs: number of signals in the group. 
+ * @used_mask: bitmask representing the signal indexes in the group. + * @sig_names: list of names for the signals. + */ +struct cti_trig_grp { + int nr_sigs; + u32 used_mask; + char *sig_names; +}; + +/** + * Trigger connection - connection between a CTI and other (coresight) device + * lists input and output trigger signals for the device + * + * @con_in: connected CTIIN signals for the device. + * @con_out: connected CTIOUT signals for the device. + * @con_dev: coresight device connected to the CTI, NULL if not CS device + * @con_dev_name: name of connected device (CS or CPU) + * @node: entry node in list of connections. + */ +struct cti_trig_con { + struct cti_trig_grp *con_in; + struct cti_trig_grp *con_out; + struct coresight_device *con_dev; + char *con_dev_name; + struct list_head node; +}; + +/** + * struct cti_device - description of CTI device properties. + * + * @nt_trig_con: Number of external devices connected to this device. + * @ctm_id: which CTM this device is connected to (by default it is + * assumed there is a single CTM per SoC, ID 0). + * @trig_cons: list of connections to this device. + * @cpu: CPU ID if associated with CPU, -1 otherwise. + * + */ +struct cti_device { + int nr_trig_con; + u32 ctm_id; + struct list_head trig_cons; + int cpu; +}; + +/** + * struct cti_config - configuration of the CTI device hardware + * + * hardware description from RO ID regs + * @nr_trig_max: Max number of trigger signals implemented on device. + * (max of trig_in or trig_out) + * @nr_ctm_channels: number of available CTM channels + * + * cti enable control + * @enable_req_count: CTI is enabled alongside >=1 associated devices. + * @hw_enabled: true if hw is currently enabled. + * @hw_powered: true if associated cpu powered on, or no cpu. + * + * registered triggers and filtering + * @trig_in_use: bitfield of in triggers registered as in use. + * @trig_out_use: bitfield of out triggers registered as in use. 
+ * @trig_out_filter: bitfield of out triggers that are blocked if filter + * enabled. Typically this would be dbgreq / restart on a core CTI. + * @trig_filter_enable: 1 if filtering enabled. + * + * cti software programmable regs: + * @ctiappset: CTI Software application channel set. + * @ctiinout_sel: register selector for INEN and OUTEN regs. + * @ctiinen: enable input trigger to a channel. + * @ctiouten: enable output trigger from a channel. + * @ctigate: gate channel output from CTI to CTM. + */ +struct cti_config { + /* hardware description */ + int nr_ctm_channels; + int nr_trig_max; + /* cti enable control */ + atomic_t enable_req_count; + bool hw_enabled; + bool hw_powered; + /* registered triggers and filtering */ + u32 trig_in_use; + u32 trig_out_use; + u32 trig_out_filter; + int trig_filter_enable; + /* cti cross trig programmable regs */ + u32 ctiappset; + u8 ctiinout_sel; + u32 ctiinen[CTIINOUTEN_MAX]; + u32 ctiouten[CTIINOUTEN_MAX]; + u32 ctigate; + u32 asicctl; +}; + +/** + * struct cti_drvdata - specifics for the CTI device + * @base: Memory mapped base address for this component. + * @dev: The device entity associated to this component. + * @csdev: Standard CoreSight device information. + * @ctidev: Extra information needed by the CTI/CTM framework. + * @spinlock: Control data access to one at a time. + * @config: Configuration data for this CTI device. + * + */ +struct cti_drvdata { + void __iomem *base; + struct device *dev; + struct coresight_device *csdev; + struct cti_device ctidev; + spinlock_t spinlock; + struct cti_config config; +}; + +enum cti_chan_op { + CTI_CHAN_ATTACH, + CTI_CHAN_DETACH, +}; + +enum cti_trig_dir { + CTI_TRIG_IN, + CTI_TRIG_OUT, +}; + +enum cti_chan_gate_op { + CTI_GATE_CHAN_ENABLE, + CTI_GATE_CHAN_DISABLE, + CTI_GATE_CHAN_ENABLE_ALL, + CTI_GATE_CHAN_DISABLE_ALL, +}; + +enum cti_chan_set_op { + CTI_CHAN_SET, + CTI_CHAN_CLR, + CTI_CHAN_PULSE, +}; + +#endif /* _CORESIGHT_CORESIGHT_CTI_H */