2 * Intel Cache Quality-of-Service Monitoring (CQM) support.
4 * Based very, very heavily on work by Peter Zijlstra.
7 #include <linux/perf_event.h>
8 #include <linux/slab.h>
9 #include <asm/cpu_device_id.h>
10 #include "perf_event.h"
12 #define MSR_IA32_PQR_ASSOC 0x0c8f
13 #define MSR_IA32_QM_CTR 0x0c8e
14 #define MSR_IA32_QM_EVTSEL 0x0c8d
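/*
 * Quick reference (informal): MSR_IA32_PQR_ASSOC holds the RMID that
 * tags this logical CPU's cache allocations, MSR_IA32_QM_EVTSEL selects
 * an <event, RMID> pair to monitor, and MSR_IA32_QM_CTR returns the
 * monitored value for that pair (see __rmid_read()).
 */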
16 static unsigned int cqm_max_rmid = -1;
17 static unsigned int cqm_l3_scale; /* supposedly cacheline size */
19 struct intel_cqm_state {
25 static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state);
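/*
 * Informal note: the per-cpu cqm_state tracks which RMID is currently
 * programmed into MSR_IA32_PQR_ASSOC on this cpu (state->rmid), a
 * nesting count of running events using it (state->cnt), and a lock
 * serializing intel_cqm_event_start()/intel_cqm_event_stop().
 */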
28 * Protects cache_groups, cqm_rmid_free_lru and cqm_rmid_limbo_lru.
29 * Also protects event->hw.cqm_rmid.
31 * Hold either for stability, both for modification of ->hw.cqm_rmid.
33 static DEFINE_MUTEX(cache_mutex);
34 static DEFINE_RAW_SPINLOCK(cache_lock);
37 * Groups of events that have the same target(s), one RMID per group.
39 static LIST_HEAD(cache_groups);
42 * Mask of CPUs for reading CQM values. We only need one per socket.
44 static cpumask_t cqm_cpumask;
46 #define RMID_VAL_ERROR (1ULL << 63)
47 #define RMID_VAL_UNAVAIL (1ULL << 62)
49 #define QOS_L3_OCCUP_EVENT_ID (1 << 0)
51 #define QOS_EVENT_MASK QOS_L3_OCCUP_EVENT_ID
54 * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
56 * This rmid is always free and is guaranteed to have an associated
57 * near-zero occupancy value, i.e. no cachelines are tagged with this
58 * RMID, once __intel_cqm_rmid_rotate() returns.
60 static unsigned int intel_cqm_rotation_rmid;
62 #define INVALID_RMID (-1)
65 * Is @rmid valid for programming the hardware?
67 * rmid 0 is reserved by the hardware for all non-monitored tasks, which
68 * means that we should never come across an rmid with that value.
69 * Likewise, an rmid value of -1 is used to indicate "no rmid currently
70 * assigned" and is used as part of the rotation code.
72 static inline bool __rmid_valid(unsigned int rmid)
74 if (!rmid || rmid == INVALID_RMID)
80 static u64 __rmid_read(unsigned int rmid)
85 * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
86 * it just says that to increase confusion.
88 wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
89 rdmsrl(MSR_IA32_QM_CTR, val);
92 * Aside from the ERROR and UNAVAIL bits, assume this thing returns
93 * the number of cachelines tagged with @rmid.
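*
* Note: the raw value is in units of cqm_l3_scale bytes (roughly one
* cacheline each); perf tooling multiplies by the llc_occupancy.scale
* attribute, which intel_cqm_init() sets from cqm_l3_scale, to report
* bytes.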
98 enum rmid_recycle_state {
104 struct cqm_rmid_entry {
106 enum rmid_recycle_state state;
107 struct list_head list;
108 unsigned long queue_time;
112 * cqm_rmid_free_lru - A least recently used list of RMIDs.
114 * Oldest entry at the head, newest (most recently used) entry at the
115 * tail. This list is never traversed, it's only used to keep track of
116 * the lru order. That is, we only pick entries off the head or insert them at the tail.
119 * All entries on the list are 'free', and their RMIDs are not currently
120 * in use. To mark an RMID as in use, remove its entry from the lru
124 * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
126 * This list contains RMIDs that no one is currently using but that
127 * may have a non-zero occupancy value associated with them. The
128 * rotation worker moves RMIDs from the limbo list to the free list once
129 * the occupancy value drops below __intel_cqm_threshold.
131 * Both lists are protected by cache_mutex.
133 static LIST_HEAD(cqm_rmid_free_lru);
134 static LIST_HEAD(cqm_rmid_limbo_lru);
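/*
 * RMID lifecycle (informal summary): __get_rmid() takes an entry off
 * cqm_rmid_free_lru; __put_rmid() parks it on cqm_rmid_limbo_lru in
 * state RMID_YOUNG. Once it has sat there for __rmid_queue_time_ms it
 * becomes RMID_AVAILABLE, and the rotation worker either moves it back
 * to the free list or marks it RMID_DIRTY if its occupancy is still
 * above __intel_cqm_threshold.
 */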
137 * We use a simple array of pointers so that we can lookup a struct
138 * cqm_rmid_entry in O(1). This spares the callers of __get_rmid()
139 * and __put_rmid() from having to worry about dealing with struct
140 * cqm_rmid_entry - they just deal with rmids, i.e. integers.
142 * Once this array is initialized it is read-only. No locks are required to access it.
145 * All entries for all RMIDs can be looked up in this array at all times.
148 static struct cqm_rmid_entry **cqm_rmid_ptrs;
150 static inline struct cqm_rmid_entry *__rmid_entry(int rmid)
152 struct cqm_rmid_entry *entry;
154 entry = cqm_rmid_ptrs[rmid];
155 WARN_ON(entry->rmid != rmid);
161 * Returns < 0 on failure.
163 * We expect to be called with cache_mutex held.
165 static int __get_rmid(void)
167 struct cqm_rmid_entry *entry;
169 lockdep_assert_held(&cache_mutex);
171 if (list_empty(&cqm_rmid_free_lru))
174 entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
175 list_del(&entry->list);
180 static void __put_rmid(unsigned int rmid)
182 struct cqm_rmid_entry *entry;
184 lockdep_assert_held(&cache_mutex);
186 WARN_ON(!__rmid_valid(rmid));
187 entry = __rmid_entry(rmid);
189 entry->queue_time = jiffies;
190 entry->state = RMID_YOUNG;
192 list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
195 static int intel_cqm_setup_rmid_cache(void)
197 struct cqm_rmid_entry *entry;
198 unsigned int nr_rmids;
201 nr_rmids = cqm_max_rmid + 1;
202 cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
203 nr_rmids, GFP_KERNEL);
207 for (; r <= cqm_max_rmid; r++) {
208 struct cqm_rmid_entry *entry;
210 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
214 INIT_LIST_HEAD(&entry->list);
216 cqm_rmid_ptrs[r] = entry;
218 list_add_tail(&entry->list, &cqm_rmid_free_lru);
222 * RMID 0 is special and is always allocated. It's used for all
223 * tasks that are not monitored.
225 entry = __rmid_entry(0);
226 list_del(&entry->list);
228 mutex_lock(&cache_mutex);
229 intel_cqm_rotation_rmid = __get_rmid();
230 mutex_unlock(&cache_mutex);
235 kfree(cqm_rmid_ptrs[r]);
237 kfree(cqm_rmid_ptrs);
242 * Determine if @a and @b measure the same set of tasks.
244 * If @a and @b measure the same set of tasks then we want to share a single RMID.
247 static bool __match_event(struct perf_event *a, struct perf_event *b)
249 /* Per-cpu and task events don't mix */
250 if ((a->attach_state & PERF_ATTACH_TASK) !=
251 (b->attach_state & PERF_ATTACH_TASK))
254 #ifdef CONFIG_CGROUP_PERF
255 if (a->cgrp != b->cgrp)
259 /* If not a task event, we're machine wide */
260 if (!(b->attach_state & PERF_ATTACH_TASK))
264 * Events that target the same task are placed into the same cache group.
266 if (a->hw.target == b->hw.target)
270 * Are we an inherited event?
278 #ifdef CONFIG_CGROUP_PERF
279 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
281 if (event->attach_state & PERF_ATTACH_TASK)
282 return perf_cgroup_from_task(event->hw.target);
289 * Determine if @a's tasks intersect with @b's tasks
291 * There are combinations of events that we explicitly prohibit:
294 *     system-wide -> cgroup and task
295 *     cgroup      -> system-wide and task in cgroup
297 *     task        -> system-wide and task in cgroup
300 * Call this function before allocating an RMID.
302 static bool __conflict_event(struct perf_event *a, struct perf_event *b)
304 #ifdef CONFIG_CGROUP_PERF
306 * We can have any number of cgroups but only one system-wide event at a time.
309 if (a->cgrp && b->cgrp) {
310 struct perf_cgroup *ac = a->cgrp;
311 struct perf_cgroup *bc = b->cgrp;
314 * This condition should have been caught in
315 * __match_event() and we should be sharing an RMID.
317 WARN_ON_ONCE(ac == bc);
319 if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
320 cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
326 if (a->cgrp || b->cgrp) {
327 struct perf_cgroup *ac, *bc;
330 * cgroup and system-wide events are mutually exclusive
332 if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
333 (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
337 * Ensure neither event is part of the other's cgroup
339 ac = event_to_cgroup(a);
340 bc = event_to_cgroup(b);
345 * Must have cgroup and non-intersecting task events.
351 * We have cgroup and task events, and the task belongs
352 * to a cgroup. Check for overlap.
354 if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
355 cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
362 * If one of them is not a task, same story as above with cgroups.
364 if (!(a->attach_state & PERF_ATTACH_TASK) ||
365 !(b->attach_state & PERF_ATTACH_TASK))
369 * Must be non-overlapping.
379 static void __intel_cqm_event_count(void *info);
382 * Exchange the RMID of a group of events.
385 intel_cqm_xchg_rmid(struct perf_event *group, unsigned int rmid)
387 struct perf_event *event;
388 unsigned int old_rmid = group->hw.cqm_rmid;
389 struct list_head *head = &group->hw.cqm_group_entry;
391 lockdep_assert_held(&cache_mutex);
394 * If our RMID is being deallocated, perform a read now.
396 if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
397 struct rmid_read rr = {
398 .value = ATOMIC64_INIT(0),
402 on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
404 local64_set(&group->count, atomic64_read(&rr.value));
407 raw_spin_lock_irq(&cache_lock);
409 group->hw.cqm_rmid = rmid;
410 list_for_each_entry(event, head, hw.cqm_group_entry)
411 event->hw.cqm_rmid = rmid;
413 raw_spin_unlock_irq(&cache_lock);
419 * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
420 * cachelines are still tagged with RMIDs in limbo, we progressively
421 * increment the threshold until we find an RMID in limbo with <=
422 * __intel_cqm_threshold lines tagged. This is designed to mitigate the
423 * problem where cachelines tagged with an RMID are not steadily being evicted.
426 * On successful rotations we decrease the threshold back towards zero.
428 * __intel_cqm_max_threshold provides an upper bound on the threshold,
429 * and is measured in bytes because it's exposed to userland.
431 static unsigned int __intel_cqm_threshold;
432 static unsigned int __intel_cqm_max_threshold;
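/*
 * Example of the bytes <-> cachelines conversion (illustrative numbers):
 * with cqm_l3_scale == 64, a max threshold of 65536 bytes corresponds to
 * 65536 / 64 = 1024 cachelines, which is the threshold_limit used by
 * __intel_cqm_rmid_rotate() and max_recycle_threshold_store().
 */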
435 * Test whether each limbo RMID's occupancy on this cpu has dropped to __intel_cqm_threshold lines or fewer.
437 static void intel_cqm_stable(void *arg)
439 struct cqm_rmid_entry *entry;
441 list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
442 if (entry->state != RMID_AVAILABLE)
445 if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
446 entry->state = RMID_DIRTY;
451 * If we have group events waiting for an RMID that don't conflict with
452 * events already running, assign @rmid.
454 static bool intel_cqm_sched_in_event(unsigned int rmid)
456 struct perf_event *leader, *event;
458 lockdep_assert_held(&cache_mutex);
460 leader = list_first_entry(&cache_groups, struct perf_event,
461 hw.cqm_groups_entry);
464 list_for_each_entry_continue(event, &cache_groups,
465 hw.cqm_groups_entry) {
466 if (__rmid_valid(event->hw.cqm_rmid))
469 if (__conflict_event(event, leader))
472 intel_cqm_xchg_rmid(event, rmid);
480 * Initially use this constant for both the limbo queue time and the
481 * rotation timer interval, pmu::hrtimer_interval_ms.
483 * They don't need to be the same, but the two are related since if you
484 * rotate faster than you recycle RMIDs, you may run out of available RMIDs.
487 #define RMID_DEFAULT_QUEUE_TIME 250 /* ms */
489 static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
492 * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
493 * @available: number of freeable RMIDs on the limbo list
495 * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
496 * cachelines are tagged with those RMIDs. After this we can reuse them
497 * and know that the current set of active RMIDs is stable.
499 * Return %true or %false depending on whether stabilization needs to be performed again.
502 * If we return %true then @available is updated to indicate the
503 * number of RMIDs on the limbo list that have been queued for the
504 * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
505 * are above __intel_cqm_threshold.
507 static bool intel_cqm_rmid_stabilize(unsigned int *available)
509 struct cqm_rmid_entry *entry, *tmp;
511 lockdep_assert_held(&cache_mutex);
514 list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
515 unsigned long min_queue_time;
516 unsigned long now = jiffies;
519 * We hold RMIDs placed into limbo for a minimum queue
520 * time. Before the minimum queue time has elapsed we do not recycle RMIDs.
523 * The reasoning is that until a sufficient time has
524 * passed since we stopped using an RMID, any RMID
525 * placed onto the limbo list will likely still have
526 * data tagged in the cache, which means we'll probably
527 * fail to recycle it anyway.
529 * We can save ourselves an expensive IPI by skipping
530 * any RMIDs that have not been queued for the minimum queue time.
533 min_queue_time = entry->queue_time +
534 msecs_to_jiffies(__rmid_queue_time_ms);
536 if (time_after(min_queue_time, now))
539 entry->state = RMID_AVAILABLE;
544 * Fast return if none of the RMIDs on the limbo list have been
545 * sitting on the queue for the minimum queue time.
551 * Test whether an RMID is free for each package.
553 on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);
555 list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
557 * Exhausted all RMIDs that have waited min queue time.
559 if (entry->state == RMID_YOUNG)
562 if (entry->state == RMID_DIRTY)
565 list_del(&entry->list); /* remove from limbo */
568 * The rotation RMID gets priority if it's
569 * currently invalid, in which case we skip adding
570 * the RMID to the free lru.
572 if (!__rmid_valid(intel_cqm_rotation_rmid)) {
573 intel_cqm_rotation_rmid = entry->rmid;
578 * If we have groups waiting for RMIDs, hand
579 * them one now provided they don't conflict.
581 if (intel_cqm_sched_in_event(entry->rmid))
585 * Otherwise place it onto the free list.
587 list_add_tail(&entry->list, &cqm_rmid_free_lru);
591 return __rmid_valid(intel_cqm_rotation_rmid);
595 * Pick a victim group and move it to the tail of the group list.
596 * @next: The first group without an RMID
598 static void __intel_cqm_pick_and_rotate(struct perf_event *next)
600 struct perf_event *rotor;
603 lockdep_assert_held(&cache_mutex);
605 rotor = list_first_entry(&cache_groups, struct perf_event,
606 hw.cqm_groups_entry);
609 * The group at the front of the list should always have a valid
610 * RMID. If it doesn't then no groups have RMIDs assigned and we
611 * don't need to rotate the list.
616 rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
619 list_rotate_left(&cache_groups);
623 * Deallocate the RMIDs from any events that conflict with @event, and
624 * place them on the back of the group list.
626 static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
628 struct perf_event *group, *g;
631 lockdep_assert_held(&cache_mutex);
633 list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
637 rmid = group->hw.cqm_rmid;
640 * Skip events that don't have a valid RMID.
642 if (!__rmid_valid(rmid))
646 * No conflict? No problem! Leave the event alone.
648 if (!__conflict_event(group, event))
651 intel_cqm_xchg_rmid(group, INVALID_RMID);
657 * Attempt to rotate the groups and assign new RMIDs.
659 * We rotate for two reasons:
660 * 1. To handle the scheduling of conflicting events
661 * 2. To recycle RMIDs
663 * Rotating RMIDs is complicated because the hardware doesn't give us any clues.
666 * There are problems with the hardware interface; when you change the
667 * task:RMID map cachelines retain their 'old' tags, giving a skewed
668 * picture. In order to work around this, we must always keep one free
669 * RMID - intel_cqm_rotation_rmid.
671 * Rotation works by taking away an RMID from a group (the old RMID),
672 * and assigning the free RMID to another group (the new RMID). We must
673 * then wait for the old RMID to not be used (no cachelines tagged).
674 * This ensures that all cachelines are tagged with 'active' RMIDs. At
675 * this point we can start reading values for the new RMID and treat the
676 * old RMID as the free RMID for the next rotation.
678 * Return %true or %false depending on whether we did any rotating.
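*
* Rough outline of one pass (informal): rotate the victim group at the
* head of cache_groups out of its RMID, hand intel_cqm_rotation_rmid to
* the first group that needs one, deallocate conflicting groups, then
* call intel_cqm_rmid_stabilize() repeatedly, stealing more RMIDs or
* bumping __intel_cqm_threshold if nothing stabilizes.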
680 static bool __intel_cqm_rmid_rotate(void)
682 struct perf_event *group, *start = NULL;
683 unsigned int threshold_limit;
684 unsigned int nr_needed = 0;
685 unsigned int nr_available;
686 bool rotated = false;
688 mutex_lock(&cache_mutex);
692 * Fast path through this function if there are no groups and no
693 * RMIDs that need cleaning.
695 if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
698 list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
699 if (!__rmid_valid(group->hw.cqm_rmid)) {
707 * We have some event groups, but they all have RMIDs assigned
708 * and no RMIDs need cleaning.
710 if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
717 * We have more event groups without RMIDs than available RMIDs,
718 * or we have event groups that conflict with the ones currently scheduled.
721 * We force deallocate the rmid of the group at the head of
722 * cache_groups. The first event group without an RMID then gets
723 * assigned intel_cqm_rotation_rmid. This ensures we always make forward progress.
726 * Rotate the cache_groups list so the previous head is now the tail.
729 __intel_cqm_pick_and_rotate(start);
732 * If the rotation is going to succeed, reduce the threshold so
733 * that we don't needlessly reuse dirty RMIDs.
735 if (__rmid_valid(intel_cqm_rotation_rmid)) {
736 intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
737 intel_cqm_rotation_rmid = __get_rmid();
739 intel_cqm_sched_out_conflicting_events(start);
741 if (__intel_cqm_threshold)
742 __intel_cqm_threshold--;
749 * We now need to stabilize the RMID we freed above (if any) to
750 * ensure that the next time we rotate we have an RMID with zero occupancy.
753 * Alternatively, if we didn't need to perform any rotation,
754 * we'll have a bunch of RMIDs in limbo that need stabilizing.
756 threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;
758 while (intel_cqm_rmid_stabilize(&nr_available) &&
759 __intel_cqm_threshold < threshold_limit) {
760 unsigned int steal_limit;
763 * Don't spin if nobody is actively waiting for an RMID;
764 * the rotation worker will be kicked as soon as an
765 * event needs an RMID anyway.
770 /* Allow max 25% of RMIDs to be in limbo. */
771 steal_limit = (cqm_max_rmid + 1) / 4;
774 * We failed to stabilize any RMIDs so our rotation
775 * logic is now stuck. In order to make forward progress
776 * we have a few options:
778 * 1. rotate ("steal") another RMID
779 * 2. increase the threshold
782 * We do both of 1. and 2. until we hit the steal limit.
784 * The steal limit prevents all RMIDs ending up on the
785 * limbo list. This can happen if every RMID has a
786 * non-zero occupancy above threshold_limit, and the
787 * occupancy values aren't dropping fast enough.
789 * Note that there is prioritisation at work here - we'd
790 * rather increase the number of RMIDs on the limbo list
791 * than increase the threshold, because increasing the
792 * threshold skews the event data (because we reuse
793 * dirty RMIDs) - threshold bumps are a last resort.
795 if (nr_available < steal_limit)
798 __intel_cqm_threshold++;
802 mutex_unlock(&cache_mutex);
806 static void intel_cqm_rmid_rotate(struct work_struct *work);
808 static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);
810 static struct pmu intel_cqm_pmu;
812 static void intel_cqm_rmid_rotate(struct work_struct *work)
816 __intel_cqm_rmid_rotate();
818 delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
819 schedule_delayed_work(&intel_cqm_rmid_work, delay);
823 * Find a group for @event and set up its RMID.
825 * If we're part of a group, we use the group's RMID.
827 static void intel_cqm_setup_event(struct perf_event *event,
828 struct perf_event **group)
830 struct perf_event *iter;
832 bool conflict = false;
834 list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
835 rmid = iter->hw.cqm_rmid;
837 if (__match_event(iter, event)) {
838 /* All tasks in a group share an RMID */
839 event->hw.cqm_rmid = rmid;
845 * We only care about conflicts for events that are
846 * actually scheduled in (and hence have a valid RMID).
848 if (__conflict_event(iter, event) && __rmid_valid(rmid))
857 event->hw.cqm_rmid = rmid;
860 static void intel_cqm_event_read(struct perf_event *event)
867 * Task events are handled by intel_cqm_event_count().
869 if (event->cpu == -1)
872 raw_spin_lock_irqsave(&cache_lock, flags);
873 rmid = event->hw.cqm_rmid;
875 if (!__rmid_valid(rmid))
878 val = __rmid_read(rmid);
881 * Ignore this reading on error states and do not update the value.
883 if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
886 local64_set(&event->count, val);
888 raw_spin_unlock_irqrestore(&cache_lock, flags);
891 static void __intel_cqm_event_count(void *info)
893 struct rmid_read *rr = info;
896 val = __rmid_read(rr->rmid);
898 if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
901 atomic64_add(val, &rr->value);
904 static inline bool cqm_group_leader(struct perf_event *event)
906 return !list_empty(&event->hw.cqm_groups_entry);
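/*
 * Group bookkeeping (informal): each group of events sharing an RMID has
 * one leader linked into the global cache_groups list via
 * hw.cqm_groups_entry; the other members hang off the leader's
 * hw.cqm_group_entry list. A non-empty cqm_groups_entry node therefore
 * identifies the leader (see cqm_group_leader() above).
 */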
909 static u64 intel_cqm_event_count(struct perf_event *event)
912 struct rmid_read rr = {
913 .value = ATOMIC64_INIT(0),
917 * We only need to worry about task events. System-wide events
918 * are handled as usual, i.e. entirely with
919 * intel_cqm_event_read().
921 if (event->cpu != -1)
922 return __perf_event_count(event);
925 * Only the group leader gets to report values. This stops us
926 * reporting duplicate values to userspace, and gives us a clear
927 * rule for which task gets to report the values.
929 * Note that it is impossible to attribute these values to
930 * specific packages - we forfeit that ability when we create task events.
933 if (!cqm_group_leader(event))
937 * Notice that we don't perform the reading of an RMID
938 * atomically, because we can't hold a spin lock across the IPIs.
941 * Speculatively perform the read, since @event might be
942 * assigned a different (possibly invalid) RMID while we're
943 * busy performing the IPI calls. It's therefore necessary to
944 * check @event's RMID afterwards, and if it has changed,
945 * discard the result of the read.
947 rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);
949 if (!__rmid_valid(rr.rmid))
952 on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);
954 raw_spin_lock_irqsave(&cache_lock, flags);
955 if (event->hw.cqm_rmid == rr.rmid)
956 local64_set(&event->count, atomic64_read(&rr.value));
957 raw_spin_unlock_irqrestore(&cache_lock, flags);
959 return __perf_event_count(event);
962 static void intel_cqm_event_start(struct perf_event *event, int mode)
964 struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
965 u32 rmid = event->hw.cqm_rmid;
968 if (!(event->hw.cqm_state & PERF_HES_STOPPED))
971 event->hw.cqm_state &= ~PERF_HES_STOPPED;
973 raw_spin_lock_irqsave(&state->lock, flags);
976 WARN_ON_ONCE(state->rmid != rmid);
978 WARN_ON_ONCE(state->rmid);
982 * This is actually wrong, as the upper 32 bits of the MSR contain the
983 * closid which is used for configuring the Cache Allocation
984 * Technology component.
986 wrmsr(MSR_IA32_PQR_ASSOC, rmid, 0);
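/*
 * Illustrative detail: wrmsr() writes @rmid to the low 32 bits and 0 to
 * the high 32 bits, so the CLOSID field in PQR_ASSOC[63:32] is cleared
 * here; a CAT-aware version would have to preserve it.
 */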
988 raw_spin_unlock_irqrestore(&state->lock, flags);
991 static void intel_cqm_event_stop(struct perf_event *event, int mode)
993 struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
996 if (event->hw.cqm_state & PERF_HES_STOPPED)
999 event->hw.cqm_state |= PERF_HES_STOPPED;
1001 raw_spin_lock_irqsave(&state->lock, flags);
1002 intel_cqm_event_read(event);
1004 if (!--state->cnt) {
1007 * This is actually wrong, as the upper 32 bits of the
1008 * MSR contain the closid which is used for
1009 * configuring the Cache Allocation Technology component.
1012 wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
1014 WARN_ON_ONCE(!state->rmid);
1017 raw_spin_unlock_irqrestore(&state->lock, flags);
1020 static int intel_cqm_event_add(struct perf_event *event, int mode)
1022 unsigned long flags;
1025 raw_spin_lock_irqsave(&cache_lock, flags);
1027 event->hw.cqm_state = PERF_HES_STOPPED;
1028 rmid = event->hw.cqm_rmid;
1030 if (__rmid_valid(rmid) && (mode & PERF_EF_START))
1031 intel_cqm_event_start(event, mode);
1033 raw_spin_unlock_irqrestore(&cache_lock, flags);
1038 static void intel_cqm_event_del(struct perf_event *event, int mode)
1040 intel_cqm_event_stop(event, mode);
1043 static void intel_cqm_event_destroy(struct perf_event *event)
1045 struct perf_event *group_other = NULL;
1047 mutex_lock(&cache_mutex);
1050 * If there's another event in this group...
1052 if (!list_empty(&event->hw.cqm_group_entry)) {
1053 group_other = list_first_entry(&event->hw.cqm_group_entry,
1055 hw.cqm_group_entry);
1056 list_del(&event->hw.cqm_group_entry);
1060 * And we're the group leader..
1062 if (cqm_group_leader(event)) {
1064 * If there was a group_other, make that leader, otherwise
1065 * destroy the group and return the RMID.
1068 list_replace(&event->hw.cqm_groups_entry,
1069 &group_other->hw.cqm_groups_entry);
1071 unsigned int rmid = event->hw.cqm_rmid;
1073 if (__rmid_valid(rmid))
1075 list_del(&event->hw.cqm_groups_entry);
1079 mutex_unlock(&cache_mutex);
1082 static int intel_cqm_event_init(struct perf_event *event)
1084 struct perf_event *group = NULL;
1085 bool rotate = false;
1087 if (event->attr.type != intel_cqm_pmu.type)
1090 if (event->attr.config & ~QOS_EVENT_MASK)
1093 /* unsupported modes and filters */
1094 if (event->attr.exclude_user ||
1095 event->attr.exclude_kernel ||
1096 event->attr.exclude_hv ||
1097 event->attr.exclude_idle ||
1098 event->attr.exclude_host ||
1099 event->attr.exclude_guest ||
1100 event->attr.sample_period) /* no sampling */
1103 INIT_LIST_HEAD(&event->hw.cqm_group_entry);
1104 INIT_LIST_HEAD(&event->hw.cqm_groups_entry);
1106 event->destroy = intel_cqm_event_destroy;
1108 mutex_lock(&cache_mutex);
1110 /* Will also set rmid */
1111 intel_cqm_setup_event(event, &group);
1114 list_add_tail(&event->hw.cqm_group_entry,
1115 &group->hw.cqm_group_entry);
1117 list_add_tail(&event->hw.cqm_groups_entry,
1121 * All RMIDs are either in use or have recently been
1122 * used. Kick the rotation worker to clean/free some.
1124 * We only do this for the group leader, rather than for
1125 * every event in a group to save on needless work.
1127 if (!__rmid_valid(event->hw.cqm_rmid))
1131 mutex_unlock(&cache_mutex);
1134 schedule_delayed_work(&intel_cqm_rmid_work, 0);
1139 EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
1140 EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
1141 EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
1142 EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
1143 EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");
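/*
 * These attributes surface in sysfs under the "intel_cqm" PMU, so the
 * event can typically be read with something like (illustrative):
 *
 *   perf stat -a -e intel_cqm/llc_occupancy/ sleep 1
 *
 * with the reported value scaled to bytes via llc_occupancy.scale.
 */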
1145 static struct attribute *intel_cqm_events_attr[] = {
1146 EVENT_PTR(intel_cqm_llc),
1147 EVENT_PTR(intel_cqm_llc_pkg),
1148 EVENT_PTR(intel_cqm_llc_unit),
1149 EVENT_PTR(intel_cqm_llc_scale),
1150 EVENT_PTR(intel_cqm_llc_snapshot),
1154 static struct attribute_group intel_cqm_events_group = {
1156 .attrs = intel_cqm_events_attr,
1159 PMU_FORMAT_ATTR(event, "config:0-7");
1160 static struct attribute *intel_cqm_formats_attr[] = {
1161 &format_attr_event.attr,
1165 static struct attribute_group intel_cqm_format_group = {
1167 .attrs = intel_cqm_formats_attr,
1171 max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
1176 mutex_lock(&cache_mutex);
1177 rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold);
1178 mutex_unlock(&cache_mutex);
1184 max_recycle_threshold_store(struct device *dev,
1185 struct device_attribute *attr,
1186 const char *buf, size_t count)
1188 unsigned int bytes, cachelines;
1191 ret = kstrtouint(buf, 0, &bytes);
1195 mutex_lock(&cache_mutex);
1197 __intel_cqm_max_threshold = bytes;
1198 cachelines = bytes / cqm_l3_scale;
1201 * The new maximum takes effect immediately.
1203 if (__intel_cqm_threshold > cachelines)
1204 __intel_cqm_threshold = cachelines;
1206 mutex_unlock(&cache_mutex);
1211 static DEVICE_ATTR_RW(max_recycle_threshold);
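/*
 * Note: max_recycle_threshold is written in bytes (it ends up in the
 * PMU's sysfs directory, e.g. /sys/bus/event_source/devices/intel_cqm/)
 * and is converted to cachelines with cqm_l3_scale in
 * max_recycle_threshold_store() above.
 */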
1213 static struct attribute *intel_cqm_attrs[] = {
1214 &dev_attr_max_recycle_threshold.attr,
1218 static const struct attribute_group intel_cqm_group = {
1219 .attrs = intel_cqm_attrs,
1222 static const struct attribute_group *intel_cqm_attr_groups[] = {
1223 &intel_cqm_events_group,
1224 &intel_cqm_format_group,
1229 static struct pmu intel_cqm_pmu = {
1230 .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
1231 .attr_groups = intel_cqm_attr_groups,
1232 .task_ctx_nr = perf_sw_context,
1233 .event_init = intel_cqm_event_init,
1234 .add = intel_cqm_event_add,
1235 .del = intel_cqm_event_del,
1236 .start = intel_cqm_event_start,
1237 .stop = intel_cqm_event_stop,
1238 .read = intel_cqm_event_read,
1239 .count = intel_cqm_event_count,
1242 static inline void cqm_pick_event_reader(int cpu)
1244 int phys_id = topology_physical_package_id(cpu);
1247 for_each_cpu(i, &cqm_cpumask) {
1248 if (phys_id == topology_physical_package_id(i))
1249 return; /* already got reader for this socket */
1252 cpumask_set_cpu(cpu, &cqm_cpumask);
1255 static void intel_cqm_cpu_prepare(unsigned int cpu)
1257 struct intel_cqm_state *state = &per_cpu(cqm_state, cpu);
1258 struct cpuinfo_x86 *c = &cpu_data(cpu);
1260 raw_spin_lock_init(&state->lock);
1264 WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
1265 WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
1268 static void intel_cqm_cpu_exit(unsigned int cpu)
1270 int phys_id = topology_physical_package_id(cpu);
1274 * Is @cpu a designated cqm reader?
1276 if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
1279 for_each_online_cpu(i) {
1283 if (phys_id == topology_physical_package_id(i)) {
1284 cpumask_set_cpu(i, &cqm_cpumask);
1290 static int intel_cqm_cpu_notifier(struct notifier_block *nb,
1291 unsigned long action, void *hcpu)
1293 unsigned int cpu = (unsigned long)hcpu;
1295 switch (action & ~CPU_TASKS_FROZEN) {
1296 case CPU_UP_PREPARE:
1297 intel_cqm_cpu_prepare(cpu);
1299 case CPU_DOWN_PREPARE:
1300 intel_cqm_cpu_exit(cpu);
1303 cqm_pick_event_reader(cpu);
1310 static const struct x86_cpu_id intel_cqm_match[] = {
1311 { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
1315 static int __init intel_cqm_init(void)
1317 char *str, scale[20];
1320 if (!x86_match_cpu(intel_cqm_match))
1323 cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;
1326 * It's possible that not all resources support the same number
1327 * of RMIDs. Instead of making scheduling much more complicated
1328 * (where we have to match a task's RMID to a cpu that supports
1329 * that many RMIDs) just find the minimum RMIDs supported across all cpus.
1332 * Also, check that the scales match on all cpus.
1334 cpu_notifier_register_begin();
1336 for_each_online_cpu(cpu) {
1337 struct cpuinfo_x86 *c = &cpu_data(cpu);
1339 if (c->x86_cache_max_rmid < cqm_max_rmid)
1340 cqm_max_rmid = c->x86_cache_max_rmid;
1342 if (c->x86_cache_occ_scale != cqm_l3_scale) {
1343 pr_err("Multiple LLC scale values, disabling\n");
1350 * A reasonable upper limit on the max threshold is the number
1351 * of lines tagged per RMID if all RMIDs have the same number of
1352 * lines tagged in the LLC.
1354 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
1356 __intel_cqm_max_threshold =
1357 boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);
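/*
 * Worked example for the figure above: a 35MB LLC with 56 RMIDs gives
 * 35 * 1024 * 1024 / 56 = 655360 bytes per RMID, i.e. 1/56th (~1.8%)
 * of the LLC.
 */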
1359 snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
1360 str = kstrdup(scale, GFP_KERNEL);
1366 event_attr_intel_cqm_llc_scale.event_str = str;
1368 ret = intel_cqm_setup_rmid_cache();
1372 for_each_online_cpu(i) {
1373 intel_cqm_cpu_prepare(i);
1374 cqm_pick_event_reader(i);
1377 __perf_cpu_notifier(intel_cqm_cpu_notifier);
1379 ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
1381 pr_err("Intel CQM perf registration failed: %d\n", ret);
1383 pr_info("Intel CQM monitoring enabled\n");
1386 cpu_notifier_register_done();
1390 device_initcall(intel_cqm_init);