iommu/vt-d: Add trace events for cache tag interface
authorLu Baolu <baolu.lu@linux.intel.com>
Wed, 24 Apr 2024 07:16:35 +0000 (15:16 +0800)
committerJoerg Roedel <jroedel@suse.de>
Fri, 26 Apr 2024 09:57:43 +0000 (11:57 +0200)
Add trace events for cache tag assign/unassign/flush operations and trace
the events in the interfaces. These trace events will improve debugging
capabilities by providing detailed information about cache tag activity.
A sample of the traced messages is shown below [messages have been
stripped and wrapped to keep the lines short].

 cache_tag_assign: dmar9/0000:00:01.0 type iotlb did 1 pasid 9 ref 1
 cache_tag_assign: dmar9/0000:00:01.0 type devtlb did 1 pasid 9 ref 1
 cache_tag_flush_all: dmar6/0000:8a:00.0 type iotlb did 7 pasid 0 ref 1
 cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
        [0xeab00000-0xeab1afff] addr 0xeab00000 pages 0x20 mask 0x5
 cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
        [0xeab20000-0xeab31fff] addr 0xeab20000 pages 0x20 mask 0x5
 cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
        [0xeaa40000-0xeaa51fff] addr 0xeaa40000 pages 0x20 mask 0x5
 cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
        [0x98de0000-0x98de4fff] addr 0x98de0000 pages 0x8 mask 0x3
 cache_tag_flush_range: dmar1 0000:00:1b.0[0] type iotlb did 9
        [0xe9828000-0xe9828fff] addr 0xe9828000 pages 0x1 mask 0x0
 cache_tag_unassign: dmar9/0000:00:01.0 type iotlb did 1 pasid 9 ref 1
 cache_tag_unassign: dmar9/0000:00:01.0 type devtlb did 1 pasid 9 ref 1

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240416080656.60968-4-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel/cache.c
drivers/iommu/intel/trace.h

index 0539275a9d209c2da30590f679f4355aaebaa204..e8418cdd8331be56efeb5a92668f26db9100c9f8 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "iommu.h"
 #include "pasid.h"
+#include "trace.h"
 
 /* Check if an existing cache tag can be reused for a new association. */
 static bool cache_tage_match(struct cache_tag *tag, u16 domain_id,
@@ -69,11 +70,13 @@ static int cache_tag_assign(struct dmar_domain *domain, u16 did,
                        temp->users++;
                        spin_unlock_irqrestore(&domain->cache_lock, flags);
                        kfree(tag);
+                       trace_cache_tag_assign(temp);
                        return 0;
                }
        }
        list_add_tail(&tag->node, &domain->cache_tags);
        spin_unlock_irqrestore(&domain->cache_lock, flags);
+       trace_cache_tag_assign(tag);
 
        return 0;
 }
@@ -91,6 +94,7 @@ static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
        spin_lock_irqsave(&domain->cache_lock, flags);
        list_for_each_entry(tag, &domain->cache_tags, node) {
                if (cache_tage_match(tag, did, iommu, dev, pasid, type)) {
+                       trace_cache_tag_unassign(tag);
                        if (--tag->users == 0) {
                                list_del(&tag->node);
                                kfree(tag);
@@ -316,6 +320,8 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
                        quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep);
                        break;
                }
+
+               trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
        }
        spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
@@ -356,6 +362,8 @@ void cache_tag_flush_all(struct dmar_domain *domain)
                                                  IOMMU_NO_PASID, info->ats_qdep);
                        break;
                }
+
+               trace_cache_tag_flush_all(tag);
        }
        spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
@@ -404,6 +412,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
                                                         addr, mask,
                                                         DMA_TLB_PSI_FLUSH);
                }
+
+               trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
        }
        spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
index 93d96f93a89b7da82cb70bc7721589a999a3ec00..961ac1c1bc210eadd766261c0aaea83ebaeebdc6 100644 (file)
@@ -89,6 +89,103 @@ TRACE_EVENT(prq_report,
                                      __entry->dw1, __entry->dw2, __entry->dw3)
        )
 );
+
+DECLARE_EVENT_CLASS(cache_tag_log,
+       TP_PROTO(struct cache_tag *tag),
+       TP_ARGS(tag),
+       TP_STRUCT__entry(
+               __string(iommu, tag->iommu->name)
+               __string(dev, dev_name(tag->dev))
+               __field(u16, type)
+               __field(u16, domain_id)
+               __field(u32, pasid)
+               __field(u32, users)
+       ),
+       TP_fast_assign(
+               __assign_str(iommu, tag->iommu->name);
+               __assign_str(dev, dev_name(tag->dev));
+               __entry->type = tag->type;
+               __entry->domain_id = tag->domain_id;
+               __entry->pasid = tag->pasid;
+               __entry->users = tag->users;
+       ),
+       TP_printk("%s/%s type %s did %d pasid %d ref %d",
+                 __get_str(iommu), __get_str(dev),
+                 __print_symbolic(__entry->type,
+                       { CACHE_TAG_IOTLB,              "iotlb" },
+                       { CACHE_TAG_DEVTLB,             "devtlb" },
+                       { CACHE_TAG_NESTING_IOTLB,      "nesting_iotlb" },
+                       { CACHE_TAG_NESTING_DEVTLB,     "nesting_devtlb" }),
+               __entry->domain_id, __entry->pasid, __entry->users
+       )
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_assign,
+       TP_PROTO(struct cache_tag *tag),
+       TP_ARGS(tag)
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
+       TP_PROTO(struct cache_tag *tag),
+       TP_ARGS(tag)
+);
+
+DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
+       TP_PROTO(struct cache_tag *tag),
+       TP_ARGS(tag)
+);
+
+DECLARE_EVENT_CLASS(cache_tag_flush,
+       TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+                unsigned long addr, unsigned long pages, unsigned long mask),
+       TP_ARGS(tag, start, end, addr, pages, mask),
+       TP_STRUCT__entry(
+               __string(iommu, tag->iommu->name)
+               __string(dev, dev_name(tag->dev))
+               __field(u16, type)
+               __field(u16, domain_id)
+               __field(u32, pasid)
+               __field(unsigned long, start)
+               __field(unsigned long, end)
+               __field(unsigned long, addr)
+               __field(unsigned long, pages)
+               __field(unsigned long, mask)
+       ),
+       TP_fast_assign(
+               __assign_str(iommu, tag->iommu->name);
+               __assign_str(dev, dev_name(tag->dev));
+               __entry->type = tag->type;
+               __entry->domain_id = tag->domain_id;
+               __entry->pasid = tag->pasid;
+               __entry->start = start;
+               __entry->end = end;
+               __entry->addr = addr;
+               __entry->pages = pages;
+               __entry->mask = mask;
+       ),
+       TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
+                 __get_str(iommu), __get_str(dev), __entry->pasid,
+                 __print_symbolic(__entry->type,
+                       { CACHE_TAG_IOTLB,              "iotlb" },
+                       { CACHE_TAG_DEVTLB,             "devtlb" },
+                       { CACHE_TAG_NESTING_IOTLB,      "nesting_iotlb" },
+                       { CACHE_TAG_NESTING_DEVTLB,     "nesting_devtlb" }),
+               __entry->domain_id, __entry->start, __entry->end,
+               __entry->addr, __entry->pages, __entry->mask
+       )
+);
+
+DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
+       TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+                unsigned long addr, unsigned long pages, unsigned long mask),
+       TP_ARGS(tag, start, end, addr, pages, mask)
+);
+
+DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range_np,
+       TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
+                unsigned long addr, unsigned long pages, unsigned long mask),
+       TP_ARGS(tag, start, end, addr, pages, mask)
+);
 #endif /* _TRACE_INTEL_IOMMU_H */
 
 /* This part must be outside protection */