[PATCH v4 28/35] perf vendor events: Update sierraforest events/metrics

From: Ian Rogers
Date: Thu Mar 27 2025 - 18:29:40 EST


Update events from v1.08 to v1.09.
Update event topics, regenerate the metrics from the TMA spreadsheet,
and make other small clean-ups. Generating the metrics via the
spreadsheet conversion adds MetricThreshold expressions that were
previously missing.
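
For reference, a hedged sketch of exercising the update once applied
(the event and metric names come from this patch; the options and
workload are illustrative only):

  # Count one of the OCR events re-homed from other.json to cache.json.
  perf stat -e OCR.DEMAND_DATA_RD.ANY_RESPONSE -a -- sleep 1

  # Compute regenerated TMA level-1 metrics; the new MetricThreshold
  # expressions determine when perf flags a metric as noteworthy.
  perf stat -M tma_backend_bound,tma_frontend_bound -a -- sleep 1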

Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
---
tools/perf/pmu-events/arch/x86/mapfile.csv | 2 +-
.../arch/x86/sierraforest/cache.json | 20 ++
.../arch/x86/sierraforest/memory.json | 20 ++
.../arch/x86/sierraforest/other.json | 48 ----
.../arch/x86/sierraforest/pipeline.json | 8 +
.../arch/x86/sierraforest/srf-metrics.json | 204 ++++++++-------
.../arch/x86/sierraforest/uncore-cache.json | 32 +++
.../arch/x86/sierraforest/uncore-memory.json | 240 ++++++++++++++++++
8 files changed, 438 insertions(+), 136 deletions(-)

diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 0c16c9b840a5..bde2f32423a1 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -29,7 +29,7 @@ GenuineIntel-6-2E,v4,nehalemex,core
GenuineIntel-6-A7,v1.04,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
GenuineIntel-6-8F,v1.25,sapphirerapids,core
-GenuineIntel-6-AF,v1.08,sierraforest,core
+GenuineIntel-6-AF,v1.09,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v59,skylake,core
GenuineIntel-6-55-[01234],v1.36,skylakex,core
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
index 072df00aff92..21671c65d6dd 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
@@ -466,6 +466,16 @@
"SampleAfterValue": "1000003",
"UMask": "0x6"
},
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
{
"BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
"Counter": "0,1,2,3,4,5,6,7",
@@ -486,6 +496,16 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
},
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
"Counter": "0,1,2,3,4,5,6,7",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
index 22d23077618e..3e2d0b565cfa 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
@@ -82,6 +82,26 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
},
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
{
"BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
"Counter": "0,1,2,3,4,5,6,7",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/other.json b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
index 4c77dac8ec78..daa16030d493 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/other.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
@@ -8,46 +8,6 @@
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
- {
- "BriefDescription": "Counts demand data reads that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x184000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x730000001",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB7",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
{
"BriefDescription": "Counts streaming stores that have any type of response.",
"Counter": "0,1,2,3,4,5,6,7",
@@ -57,13 +17,5 @@
"MSRValue": "0x10800",
"SampleAfterValue": "100003",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0x75",
- "EventName": "SERIALIZATION.C01_MS_SCB",
- "SampleAfterValue": "200003",
- "UMask": "0x4"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
index df2c7bb474a0..a934b64f66d0 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
@@ -300,6 +300,14 @@
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
+ {
+ "BriefDescription": "Counts the number of issue slots in a UMWAIT or TPAUSE instruction where no uop issues due to the instruction putting the CPU into the C0.1 activity state.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.C01_MS_SCB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. [This event is alias to TOPDOWN_BAD_SPECULATION.ALL_P]",
"Counter": "0,1,2,3,4,5,6,7",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json b/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
index 83c86afd2960..ef629e4e91ce 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/srf-metrics.json
@@ -288,15 +288,17 @@
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions",
"MetricExpr": "tma_core_bound",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_allocation_restriction",
+ "MetricThreshold": "tma_allocation_restriction > 0.1 & (tma_core_bound > 0.1 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
"MetricExpr": "TOPDOWN_BE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL1",
"PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count",
"ScaleUnit": "100%"
@@ -304,92 +306,104 @@
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL1",
- "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_detect",
- "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches",
+ "MetricThreshold": "tma_branch_detect > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
"MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS)",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
"MetricExpr": "TOPDOWN_FE_BOUND.CISC / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of cycles due to backend bound stalls that are bounded by core restrictions and not attributed to an outstanding load or stores, or resource limitation",
"MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
"MetricExpr": "TOPDOWN_FE_BOUND.DECODE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that does not require the use of microcode, classified as a fast nuke, due to memory ordering, memory disambiguation and memory renaming",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls",
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
"MetricExpr": "TOPDOWN_FE_BOUND.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL1",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
"MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_bandwidth",
+ "MetricThreshold": "tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend latency restrictions due to icache misses, itlb misses, branch detection, and resteer limitations.",
"MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_ifetch_latency",
+ "MetricThreshold": "tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
@@ -420,33 +434,28 @@
{
"BriefDescription": "Percentage of time that retirement is stalled due to a first level data TLB miss",
"MetricExpr": "100 * (LD_HEAD.DTLB_MISS_AT_RET + LD_HEAD.PGWALK_AT_RET) / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles",
- "MetricName": "tma_info_bottleneck_dtlb_miss_bound_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_bottleneck_%_dtlb_miss_bound_cycles"
},
{
"BriefDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.ALL / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Ifetch",
- "MetricName": "tma_info_bottleneck_ifetch_miss_bound_cycles",
- "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Ifetch",
+ "MetricName": "tma_info_bottleneck_%_ifetch_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that allocation and retirement is stalled by the Frontend Cluster due to an Ifetch Miss, either Icache or ITLB Miss. See Info.Ifetch_Bound"
},
{
"BriefDescription": "Percentage of time that retirement is stalled due to an L1 miss",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.ALL / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Load_Store_Miss",
- "MetricName": "tma_info_bottleneck_load_miss_bound_cycles",
- "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Load_Store_Miss",
+ "MetricName": "tma_info_bottleneck_%_load_miss_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled due to an L1 miss. See Info.Load_Miss_Bound"
},
{
"BriefDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall",
"MetricExpr": "100 * LD_HEAD.ANY_AT_RET / CPU_CLK_UNHALTED.CORE",
- "MetricGroup": "Cycles;Mem_Exec",
- "MetricName": "tma_info_bottleneck_mem_exec_bound_cycles",
- "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound",
- "ScaleUnit": "100%"
+ "MetricGroup": "Mem_Exec",
+ "MetricName": "tma_info_bottleneck_%_mem_exec_bound_cycles",
+ "PublicDescription": "Percentage of time that retirement is stalled by the Memory Cluster due to a pipeline stall. See Info.Mem_Exec_Bound"
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
@@ -501,26 +510,22 @@
{
"BriefDescription": "Percentage of time that allocation is stalled due to load buffer full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.LD_BUF / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_load_buffer_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_load_buffer_stall_cycles"
},
{
"BriefDescription": "Percentage of time that allocation is stalled due to memory reservation stations full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.RSV / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_mem_rsv_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_mem_rsv_stall_cycles"
},
{
"BriefDescription": "Percentage of time that allocation is stalled due to store buffer full",
"MetricExpr": "100 * MEM_SCHEDULER_BLOCK.ST_BUF / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_buffer_stalls_store_buffer_stall_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_buffer_stalls_%_store_buffer_stall_cycles"
},
{
"BriefDescription": "Cycles Per Instruction",
"MetricExpr": "CPU_CLK_UNHALTED.CORE / INST_RETIRED.ANY",
- "MetricName": "tma_info_core_cpi",
- "ScaleUnit": "1per_instr"
+ "MetricName": "tma_info_core_cpi"
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
@@ -541,28 +546,46 @@
{
"BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L2",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.L2_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
- "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l2hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss doesn't hit in the L2",
+ "MetricExpr": "100 * (MEM_BOUND_STALLS_IFETCH.LLC_HIT + MEM_BOUND_STALLS_IFETCH.LLC_MISS) / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l2miss"
},
{
"BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss hits in the L3",
"MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_HIT / MEM_BOUND_STALLS_IFETCH.ALL",
- "MetricName": "tma_info_ifetch_miss_bound_ifetchmissbound_with_l3hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of ifetch miss bound stalls, where the ifetch miss subsequently misses in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_IFETCH.LLC_MISS / MEM_BOUND_STALLS_IFETCH.ALL",
+ "MetricName": "tma_info_ifetch_miss_bound_%_ifetchmissbound_with_l3miss"
},
{
"BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L2",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.L2_HIT / MEM_BOUND_STALLS_LOAD.ALL",
"MetricGroup": "load_store_bound",
- "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l2hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses in the L2",
+ "MetricExpr": "100 * (MEM_BOUND_STALLS_LOAD.LLC_HIT + MEM_BOUND_STALLS_LOAD.LLC_MISS) / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l2miss"
},
{
"BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that hit the L3",
"MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_HIT / MEM_BOUND_STALLS_LOAD.ALL",
"MetricGroup": "load_store_bound",
- "MetricName": "tma_info_load_miss_bound_loadmissbound_with_l3hit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3hit"
+ },
+ {
+ "BriefDescription": "Percentage of memory bound stalls where retirement is stalled due to an L1 miss that subsequently misses the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS_LOAD.LLC_MISS / MEM_BOUND_STALLS_LOAD.ALL",
+ "MetricGroup": "load_store_bound",
+ "MetricName": "tma_info_load_miss_bound_%_loadmissbound_with_l3miss"
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a pipeline block",
@@ -600,44 +623,37 @@
{
"BriefDescription": "Percentage of total non-speculative loads with an address aliasing block",
"MetricExpr": "100 * LD_BLOCKS.ADDRESS_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_mem_exec_blocks_loads_with_adressaliasing",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_adressaliasing"
},
{
"BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
"MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricName": "tma_info_mem_exec_blocks_loads_with_storefwdblk",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_blocks_%_loads_with_storefwdblk"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a first level data cache miss",
"MetricExpr": "100 * LD_HEAD.L1_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_l1miss",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_l1miss"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to other block cases, such as pipeline conflicts, fences, etc",
"MetricExpr": "100 * LD_HEAD.OTHER_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_otherpipelineblks",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_otherpipelineblks"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a pagewalk",
"MetricExpr": "100 * LD_HEAD.PGWALK_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_pagewalk",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_pagewalk"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a second level TLB miss",
"MetricExpr": "100 * LD_HEAD.DTLB_MISS_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_stlbhit",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_stlbhit"
},
{
"BriefDescription": "Percentage of Memory Execution Bound due to a store forward address match",
"MetricExpr": "100 * LD_HEAD.ST_ADDR_AT_RET / LD_HEAD.ANY_AT_RET",
- "MetricName": "tma_info_mem_exec_bound_loadhead_with_storefwding",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_mem_exec_bound_%_loadhead_with_storefwding"
},
{
"BriefDescription": "Instructions per Load",
@@ -667,8 +683,7 @@
{
"BriefDescription": "Percentage of time that the core is stalled due to a TPAUSE or UMWAIT instruction",
"MetricExpr": "100 * SERIALIZATION.C01_MS_SCB / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricName": "tma_info_serialization_tpause_cycles",
- "ScaleUnit": "100%"
+ "MetricName": "tma_info_serialization_%_tpause_cycles"
},
{
"BriefDescription": "Average CPU Utilization",
@@ -684,17 +699,20 @@
},
{
"BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.CORE_P:k / CPU_CLK_UNHALTED.CORE",
+ "MetricExpr": "cpu@CPU_CLK_UNHALTED.CORE_P@k / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": "Summary",
"MetricName": "tma_info_system_kernel_utilization"
},
{
"BriefDescription": "PerfMon Event Multiplexing accuracy indicator",
"MetricExpr": "CPU_CLK_UNHALTED.CORE_P / CPU_CLK_UNHALTED.CORE",
- "MetricName": "tma_info_system_mux"
+ "MetricName": "tma_info_system_mux",
+ "MetricThreshold": "tma_info_system_mux > 1.1 | tma_info_system_mux < 0.9"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.CORE / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
"MetricName": "tma_info_system_turbo_utilization"
},
{
@@ -718,90 +736,102 @@
"MetricName": "tma_info_uop_mix_x87_uop_ratio"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
"MetricExpr": "TOPDOWN_FE_BOUND.ITLB_MISS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_latency_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_latency_group",
"MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_ifetch_latency > 0.15 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05 & tma_bad_speculation > 0.15",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops",
"MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops",
"MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear that requires the use of microcode (slow nuke)",
"MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05 & (tma_machine_clears > 0.05 & tma_bad_speculation > 0.15)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
"MetricExpr": "TOPDOWN_FE_BOUND.OTHER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes",
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
"MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_ifetch_bandwidth_group",
"MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05 & (tma_ifetch_bandwidth > 0.1 & tma_frontend_bound > 0.2)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls)",
"MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls)",
"MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a resource limitation",
"MetricExpr": "tma_backend_bound - tma_core_bound",
- "MetricGroup": "Slots;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2 & tma_backend_bound > 0.1",
"MetricgroupNoGroup": "TopdownL2",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that result in retirement slots",
"MetricExpr": "TOPDOWN_RETIRING.ALL_P / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL1;tma_L1_group",
+ "MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
"MetricgroupNoGroup": "TopdownL1",
"ScaleUnit": "100%"
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS)",
"MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / (6 * CPU_CLK_UNHALTED.CORE)",
- "MetricGroup": "Slots;TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1 & (tma_resource_bound > 0.2 & tma_backend_bound > 0.1)",
"ScaleUnit": "100%"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
index a779a1a73ea5..7182ca00ef8d 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-cache.json
@@ -873,6 +873,38 @@
"UMask": "0x1",
"Unit": "CHA"
},
+ {
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : E state",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : M state",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x3d",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Filter Capacity Evictions : S state",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
{
"BriefDescription": "All TOR Inserts",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
index ae9c62b32e92..c7e9dbe02eb0 100644
--- a/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/uncore-memory.json
@@ -188,6 +188,94 @@
"PublicDescription": "DRAM Clockticks",
"Unit": "IMC"
},
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 temp readings forced 2x refresh",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M_MR4_2XREF_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH0_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles MR4 MRRs was triggered/running",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M_PDC_MR4ACTIVE_CYCLES.SCH1_DIMM1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
{
"BriefDescription": "# of cycles a given rank is in Power Down Mode",
"Counter": "0,1,2,3",
@@ -286,6 +374,70 @@
"PublicDescription": "-",
"Unit": "IMC"
},
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM and throttle level is zero.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x89",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.BW_SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "MR4 temp reading is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.MR4BLKEN",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x8",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "RAPL is throttling",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RAPLBLK",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x4",
+ "Unit": "IMC"
+ },
{
"BriefDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
"Counter": "0,1,2,3",
@@ -480,6 +632,94 @@
"UMask": "0x1",
"Unit": "IMC"
},
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Critical level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M_THROTTLE_CRIT_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at High level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M_THROTTLE_HIGH_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Normal level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M_THROTTLE_LOW_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT0",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x1",
+ "Unit": "IMC"
+ },
+ {
+ "BriefDescription": "# of cycles Throttling at Mid level on specified DIMM",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M_THROTTLE_MID_CYCLES.SLOT1",
+ "Experimental": "1",
+ "PerPkg": "1",
+ "PublicDescription": "-",
+ "UMask": "0x2",
+ "Unit": "IMC"
+ },
{
"BriefDescription": "Write Pending Queue Allocations",
"Counter": "0,1,2,3",
--
2.49.0.472.ge94155a9ec-goog