On Tue, May 22, 2018 at 04:39:20PM +0800, Leo Yan wrote:
[...]
Rather than the patch I posted in my previous email, I think the new patch below is more reasonable.
In the change below, 'etmq->prev_packet' is only used to store the previous CS_ETM_RANGE packet; we don't need to save a CS_ETM_TRACE_ON packet into 'etmq->prev_packet'.
On the other hand, cs_etm__flush() can use 'etmq->period_instructions' to decide whether an instruction sample needs to be generated. If it is non-zero, an instruction sample is generated and 'etmq->period_instructions' is cleared; so the next time more CS_ETM_TRACE_ON packets show up in the trace, generating an instruction sample can be skipped because 'etmq->period_instructions' is zero.
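To illustrate the first point, here is a minimal sketch (not taken from the patch below; the exact placement at the end of cs_etm__sample() is my assumption) of how the PACKET/PREV_PACKET swap could be made conditional so that only CS_ETM_RANGE packets are saved into 'etmq->prev_packet':

	/*
	 * Sketch only: swap PACKET with PREV_PACKET only when the current
	 * packet is a CS_ETM_RANGE packet, so that a CS_ETM_TRACE_ON packet
	 * is never saved into etmq->prev_packet.
	 */
	if (etm->sample_branches || etm->synth_opts.last_branch) {
		if (etmq->packet->sample_type == CS_ETM_RANGE) {
			struct cs_etm_packet *tmp = etmq->packet;

			etmq->packet = etmq->prev_packet;
			etmq->prev_packet = tmp;
		}
	}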
What do you think about this?
Thanks,
Leo Yan
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 822ba91..dd354ad 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -495,6 +495,13 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
 static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
 {
 	/*
+	 * The packet is the start tracing packet if the end_addr is zero,
+	 * returns 0 for this case.
+	 */
+	if (!packet->end_addr)
+		return 0;
+
+	/*
 	 * The packet records the execution range with an exclusive end address
 	 *
 	 * A64 instructions are constant size, so the last executed
@@ -897,13 +904,27 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
 		etmq->period_instructions = instrs_over;
 	}
 
-	if (etm->sample_branches &&
-	    etmq->prev_packet &&
-	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
-	    etmq->prev_packet->last_instr_taken_branch) {
-		ret = cs_etm__synth_branch_sample(etmq);
-		if (ret)
-			return ret;
+	if (etm->sample_branches && etmq->prev_packet) {
+		bool generate_sample = false;
+
+		/* Generate sample for start tracing packet */
+		if (etmq->prev_packet->sample_type == 0)
+			generate_sample = true;
+
+		/* Generate sample for exception packet */
+		if (etmq->prev_packet->exc == true)
+			generate_sample = true;
+
+		/* Generate sample for normal branch packet */
+		if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
+		    etmq->prev_packet->last_instr_taken_branch)
+			generate_sample = true;
+
+		if (generate_sample) {
+			ret = cs_etm__synth_branch_sample(etmq);
+			if (ret)
+				return ret;
+		}
 	}
 
 	if (etm->sample_branches || etm->synth_opts.last_branch) {
@@ -922,11 +943,12 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
 static int cs_etm__flush(struct cs_etm_queue *etmq)
 {
 	int err = 0;
-	struct cs_etm_packet *tmp;
 
 	if (etmq->etm->synth_opts.last_branch &&
 	    etmq->prev_packet &&
-	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
+	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
+	    etmq->period_instructions) {
+
 		/*
 		 * Generate a last branch event for the branches left in the
 		 * circular buffer at the end of the trace.
@@ -940,14 +962,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
 			etmq, addr,
 			etmq->period_instructions);
 		etmq->period_instructions = 0;
-
-		/*
-		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
-		 * the next incoming packet.
-		 */
-		tmp = etmq->packet;
-		etmq->packet = etmq->prev_packet;
-		etmq->prev_packet = tmp;
 	}
 
 	return err;