<html><head><meta name="color-scheme" content="light dark"></head><body><pre style="word-wrap: break-word; white-space: pre-wrap;">
From: Manfred Spraul &lt;manfred@colorfullife.com&gt;

With the patch applied,

	echo "size-4096 0 0 0" &gt; /proc/slabinfo

walks the objects in the full slabs of the size-4096 cache, printing to the
kernel log (via printk) the calling address of whoever allocated each object.

It is for leak detection.  Note that the patch also disables the slab
double-free check (now under "#if 0"), since slab_bufctl() entries hold
caller addresses rather than BUFCTL_FREE while an object is allocated.


DESC
mm/slab.c warning in cache_alloc_debugcheck_after
EDESC
From: Nathan Lynch &lt;nathanl@austin.ibm.com&gt;

From a ppc64 build:

   CC      mm/slab.o
mm/slab.c: In function `cache_alloc_debugcheck_after':
mm/slab.c:1976: warning: cast from pointer to integer of different size


---

 25-akpm/mm/slab.c |   40 ++++++++++++++++++++++++++++++++++++++--
 1 files changed, 38 insertions(+), 2 deletions(-)

diff -puN mm/slab.c~slab-leak-detector mm/slab.c
--- 25/mm/slab.c~slab-leak-detector	2004-03-21 00:05:15.178745600 -0800
+++ 25-akpm/mm/slab.c	2004-03-21 00:05:15.185744536 -0800
@@ -1954,6 +1954,15 @@ cache_alloc_debugcheck_after(kmem_cache_
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+	{
+		int objnr;
+		struct slab *slabp;
+
+		slabp = GET_PAGE_SLAB(virt_to_page(objp));
+
+		objnr = (objp - slabp-&gt;s_mem) / cachep-&gt;objsize;
+		slab_bufctl(slabp)[objnr] = (unsigned long)caller;
+	}
 	objp += obj_dbghead(cachep);
 	if (cachep-&gt;ctor &amp;&amp; cachep-&gt;flags &amp; SLAB_POISON) {
 		unsigned long	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2015,12 +2024,14 @@ static void free_block(kmem_cache_t *cac
 		objnr = (objp - slabp-&gt;s_mem) / cachep-&gt;objsize;
 		check_slabp(cachep, slabp);
 #if DEBUG
+#if 0
 		if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
 			printk(KERN_ERR "slab: double free detected in cache '%s', objp %p.\n",
 						cachep-&gt;name, objp);
 			BUG();
 		}
 #endif
+#endif
 		slab_bufctl(slabp)[objnr] = slabp-&gt;free;
 		slabp-&gt;free = objnr;
 		STATS_DEC_ACTIVE(cachep);
@@ -2801,6 +2812,29 @@ struct seq_operations slabinfo_op = {
 	.show	= s_show,
 };
 
+static void do_dump_slabp(kmem_cache_t *cachep)
+{
+#if DEBUG
+	struct list_head *q;
+
+	check_irq_on();
+	spin_lock_irq(&amp;cachep-&gt;spinlock);
+	list_for_each(q,&amp;cachep-&gt;lists.slabs_full) {
+		struct slab *slabp;
+		int i;
+		slabp = list_entry(q, struct slab, list);
+		for (i = 0; i &lt; cachep-&gt;num; i++) {
+			unsigned long sym = slab_bufctl(slabp)[i];
+
+			printk("obj %p/%d: %p", slabp, i, (void *)sym);
+			print_symbol(" &lt;%s&gt;", sym);
+			printk("\n");
+		}
+	}
+	spin_unlock_irq(&amp;cachep-&gt;spinlock);
+#endif
+}
+
 #define MAX_SLABINFO_WRITE 128
 /**
  * slabinfo_write - Tuning for the slab allocator
@@ -2841,9 +2875,11 @@ ssize_t slabinfo_write(struct file *file
 			    batchcount &lt; 1 ||
 			    batchcount &gt; limit ||
 			    shared &lt; 0) {
-				res = -EINVAL;
+				do_dump_slabp(cachep);
+				res = 0;
 			} else {
-				res = do_tune_cpucache(cachep, limit, batchcount, shared);
+				res = do_tune_cpucache(cachep, limit,
+							batchcount, shared);
 			}
 			break;
 		}

_
</pre></body></html>