On Mon, 18 Jun 2018 at 08:06, Leo Yan leo.yan@linaro.org wrote:
On Thu, Jun 14, 2018 at 01:41:35PM -0600, Mathieu Poirier wrote:
This patch makes decoding of auxtrace buffer centered around a struct cs_etm_queue. This eliminates superfluous variables and is a precursor for work that simplifies the main decoder loop.
No change in functionality is introduced by this patch.
Signed-off-by: Mathieu Poirier mathieu.poirier@linaro.org
tools/perf/util/cs-etm-decoder/cs-etm-decoder.h | 7 ---- tools/perf/util/cs-etm.c | 52 ++++++++++++------------- 2 files changed, 26 insertions(+), 33 deletions(-)
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h index 226f5db37d34..b2b6c8b5ba95 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h @@ -17,13 +17,6 @@
struct cs_etm_decoder;
-struct cs_etm_buffer {
const unsigned char *buf;
size_t len;
u64 offset;
u64 ref_timestamp;
-};
enum cs_etm_sample_type { CS_ETM_EMPTY = 0, CS_ETM_RANGE = 1 << 0, diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 1b3360959ab1..7ed705f950a7 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -84,6 +84,8 @@ struct cs_etm_queue { size_t last_branch_pos; struct cs_etm_packet *prev_packet; struct cs_etm_packet *packet;
const unsigned char *buf;
size_t buf_len, buf_used;
};
static int cs_etm__update_queues(struct cs_etm_auxtrace *etm); @@ -610,7 +612,7 @@ static int cs_etm__inject_event(union perf_event *event,
static int -cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq) +cs_etm__get_trace(struct cs_etm_queue *etmq) { struct auxtrace_buffer *aux_buffer = etmq->buffer; struct auxtrace_buffer *old_buffer = aux_buffer; @@ -624,7 +626,7 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq) if (!aux_buffer) { if (old_buffer) auxtrace_buffer__drop_data(old_buffer);
buff->len = 0;
etmq->buf_len = 0; return 0; }
@@ -644,13 +646,11 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq) if (old_buffer) auxtrace_buffer__drop_data(old_buffer);
buff->offset = aux_buffer->offset;
buff->len = aux_buffer->size;
buff->buf = aux_buffer->data;
etmq->buf_used = 0;
etmq->buf_len = aux_buffer->size;
etmq->buf = aux_buffer->data;
buff->ref_timestamp = aux_buffer->reference;
return buff->len;
return etmq->buf_len;
}
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, @@ -1028,24 +1028,23 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
static int cs_etm__run_decoder(struct cs_etm_queue *etmq) {
struct cs_etm_buffer buffer;
size_t buffer_used, processed;
size_t processed; int err = 0; /* Go through each buffer in the queue and decode them one by one */ while (1) {
buffer_used = 0;
memset(&buffer, 0, sizeof(buffer));
err = cs_etm__get_trace(&buffer, etmq);
if (err <= 0)
return err;
/*
* We cannot assume consecutive blocks in the data file are
* contiguous, reset the decoder to force re-sync.
*/
err = cs_etm_decoder__reset(etmq->decoder);
if (err != 0)
return err;
if (!etmq->buf_len) {
err = cs_etm__get_trace(etmq);
if (err <= 0)
return err;
/*
* We cannot assume consecutive blocks in the data file
* are contiguous, reset the decoder to force re-sync.
*/
err = cs_etm_decoder__reset(etmq->decoder);
if (err != 0)
return err;
} /* Run trace decoder until buffer consumed or end of trace */ do {
@@ -1053,14 +1052,15 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq) err = cs_etm_decoder__process_data_block( etmq->decoder, etmq->offset,
&buffer.buf[buffer_used],
buffer.len - buffer_used,
&etmq->buf[etmq->buf_used],
etmq->buf_len, &processed); if (err) return err; etmq->offset += processed;
buffer_used += processed;
etmq->buf_used += processed;
etmq->buf_len -= processed; /* Process each packet in this chunk */ while (1) {
@@ -1100,7 +1100,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq) break; } }
} while (buffer.len > buffer_used);
} while (etmq->buf_len);
Seems to me, it's safer to check 'etmq->buf_len > 0'. If the buffer size isn't aligned with the last processed size, it's possible to introduce an infinite loop.
Good point - I'll see if that condition can happen and will take your advice if it is the case.
Thanks, Mathieu
if (err == 0) /* Flush any remaining branch stack entries */
-- 2.7.4
CoreSight mailing list CoreSight@lists.linaro.org https://lists.linaro.org/mailman/listinfo/coresight