mmu: add k_mem_free_get()

Return the amount of physical anonymous memory remaining.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
diff --git a/include/sys/mem_manage.h b/include/sys/mem_manage.h
index ee93f87..8de12e2 100644
--- a/include/sys/mem_manage.h
+++ b/include/sys/mem_manage.h
@@ -139,6 +139,19 @@
 #define K_MEM_MAP_GUARD		BIT(18)
 
 /**
+ * Return the amount of free physical memory available, in bytes
+ *
+ * The returned value will reflect how many free RAM page frames are available.
+ * If demand paging is enabled, it may still be possible to map more memory
+ * than this value indicates, since in-use page frames can be paged out to
+ * the backing store.
+ *
+ * The information reported by this function may go stale immediately if
+ * concurrent memory mappings or page-ins take place.
+ *
+ * @return Free physical RAM, in bytes
+ */
+size_t k_mem_free_get(void);
+
+/**
  * Map anonymous memory into Zephyr's address space
  *
  * This function effectively increases the data space available to Zephyr.
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 9dece53..7cf128f 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -363,6 +363,20 @@
 	return dst;
 }
 
+size_t k_mem_free_get(void)
+{
+	size_t ret;
+	k_spinlock_key_t key;
+
+	__ASSERT(page_frames_initialized, "%s called too early", __func__);
+
+	key = k_spin_lock(&z_mm_lock);
+	ret = z_free_page_count;
+	k_spin_unlock(&z_mm_lock, key);
+
+	return ret * CONFIG_MMU_PAGE_SIZE;
+}
+
 /* This may be called from arch early boot code before z_cstart() is invoked.
  * Data will be copied and BSS zeroed, but this must not rely on any
  * initialization functions being called prior to work correctly.