pcie: controller: add Type 1 bridge configuration

This adds setup of Type 1 bridge endpoints in two steps: first when the
bridge endpoint is detected, and secondly when enumerating the next endpoint.

First, the code configures the bridge's primary & secondary bus numbers and
sets 0xff as the subordinate bus number to redirect all PCIe messages to this
bus while scanning.
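
As a rough sketch (the patch itself goes through the PCIE_BUS_NUMBER_VAL()
helper and preserves the latency timer; the variable names and the zero
latency value below are purely illustrative), the Type 1 bus number register
at config offset 0x18 is composed like this:

    /* layout per the PCI-to-PCI bridge header: primary | secondary << 8 |
     * subordinate << 16 | secondary latency timer << 24
     */
    uint32_t primary_bus = PCIE_BDF_TO_BUS(bdf);
    uint32_t secondary_bus = bus_number;
    uint32_t latency_timer = 0;
    uint32_t bus_reg = primary_bus |
                       (secondary_bus << 8) |
                       (0xffU << 16) |          /* subordinate: max while scanning */
                       (latency_timer << 24);
    pcie_conf_write(bdf, PCIE_BUS_NUMBER, bus_reg);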

Then the memory & I/O base registers are programmed from the current
allocation bases.
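
For reference, a sketch of the base field encoding (standard bridge header
layout; io_alloc_base/mem_alloc_base stand for the values returned by
pcie_ctrl_region_get_allocate_base() and are illustrative names only):

    /* I/O base byte at offset 0x1c carries addr[15:12] in its upper nibble,
     * memory base half-word at offset 0x20 carries addr[31:20] in bits [15:4]
     */
    uint8_t  io_base_field  = (io_alloc_base  >> 8)  & 0xf0;   /* 4K aligned */
    uint16_t mem_base_field = (mem_alloc_base >> 16) & 0xfff0; /* 1M aligned */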

Finally (for now this is done right away), we program the subordinate bus
number to the maximum bus number under the bridge (here the same as the
secondary) and the memory & I/O limits (here lower than the bases).
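
The limit fields encode the inclusive top address of each window, so a sketch
of the encoding (again with illustrative names) looks like:

    /* limit fields encode the last address covered by the window */
    uint16_t mem_limit_field = ((mem_alloc_base - 1) >> 16) & 0xfff0; /* addr[31:20] */
    uint8_t  io_limit_field  = ((io_alloc_base  - 1) >> 8)  & 0xf0;   /* addr[15:12] */
    /* if nothing was allocated behind the bridge, limit < base and the
     * window stays disabled
     */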

This doesn't make the bridge fully usable: enumeration would work, but not
for nested bridges, and BARs wouldn't be accessible.

Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/drivers/pcie/host/controller.c b/drivers/pcie/host/controller.c
index c70d1f7..2ed1994 100644
--- a/drivers/pcie/host/controller.c
+++ b/drivers/pcie/host/controller.c
@@ -66,11 +66,6 @@
 	bdf_cfg_mem[reg] = data;
 }
 
-static void pcie_generic_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf)
-{
-	/* Not yet supported */
-}
-
 static void pcie_generic_ctrl_enumerate_bars(const struct device *ctrl_dev, pcie_bdf_t bdf,
 					     unsigned int nbars)
 {
@@ -161,24 +156,128 @@
 	}
 }
 
+static bool pcie_generic_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
+					      unsigned int bus_number)
+{
+	uint32_t class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);
+
+	/* Handle only PCI-to-PCI bridge for now */
+	if (PCIE_CONF_CLASSREV_CLASS(class) == 0x06 &&
+	    PCIE_CONF_CLASSREV_SUBCLASS(class) == 0x04) {
+		uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER);
+		uintptr_t bar_base_addr;
+
+		pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 2);
+
+		/* Configure bus number registers */
+		pcie_conf_write(bdf, PCIE_BUS_NUMBER,
+				PCIE_BUS_NUMBER_VAL(PCIE_BDF_TO_BUS(bdf),
+						    bus_number,
+						    0xff, /* set max until we finish scanning */
+						    PCIE_SECONDARY_LATENCY_TIMER(number)));
+
+		/* I/O align on 4k boundary */
+		if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
+						       KB(4), &bar_base_addr)) {
+			uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS);
+			uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER);
+
+			pcie_conf_write(bdf, PCIE_IO_SEC_STATUS,
+					PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
+							       PCIE_IO_LIMIT(io),
+							       PCIE_SEC_STATUS(io)));
+
+			pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER,
+				PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
+							     PCIE_IO_LIMIT_UPPER(io_upper)));
+
+			pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_IO, true);
+		}
+
+		/* MEM align on 1MiB boundary */
+		if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
+						       MB(1), &bar_base_addr)) {
+			uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT);
+
+			pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT,
+					PCIE_MEM_BASE_LIMIT_VAL((bar_base_addr & 0xfff00000) >> 16,
+								PCIE_MEM_LIMIT(mem)));
+
+			pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MEM, true);
+		}
+
+		/* TODO: add support for prefetchable */
+
+		pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true);
+
+		return true;
+	}
+
+	return false;
+}
+
+static void pcie_generic_ctrl_post_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
+						   unsigned int bus_number)
+{
+	uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER);
+	uintptr_t bar_base_addr;
+
+	/* Configure bus subordinate */
+	pcie_conf_write(bdf, PCIE_BUS_NUMBER,
+			PCIE_BUS_NUMBER_VAL(PCIE_BUS_PRIMARY_NUMBER(number),
+				PCIE_BUS_SECONDARY_NUMBER(number),
+				bus_number - 1,
+				PCIE_SECONDARY_LATENCY_TIMER(number)));
+
+	/* I/O align on 4k boundary */
+	if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
+					       KB(4), &bar_base_addr)) {
+		uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS);
+		uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER);
+
+		pcie_conf_write(bdf, PCIE_IO_SEC_STATUS,
+				PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
+					((bar_base_addr - 1) & 0x0000f000) >> 16,
+					PCIE_SEC_STATUS(io)));
+
+		pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER,
+				PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
+					((bar_base_addr - 1) & 0xffff0000) >> 16));
+	}
+
+	/* MEM align on 1MiB boundary */
+	if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
+					       MB(1), &bar_base_addr)) {
+		uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT);
+
+		pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT,
+				PCIE_MEM_BASE_LIMIT_VAL(PCIE_MEM_BASE(mem),
+					(bar_base_addr - 1) >> 16));
+	}
+
+	/* TODO: add support for prefetchable */
+}
+
 static void pcie_generic_ctrl_enumerate_type0(const struct device *ctrl_dev, pcie_bdf_t bdf)
 {
 	/* Setup Type0 BARs */
 	pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 6);
 }
 
-static void pcie_generic_ctrl_enumerate_endpoint(const struct device *ctrl_dev,
-						 pcie_bdf_t bdf, bool *skip_next_func)
+static bool pcie_generic_ctrl_enumerate_endpoint(const struct device *ctrl_dev,
+						 pcie_bdf_t bdf, unsigned int bus_number,
+						 bool *skip_next_func)
 {
 	bool multifunction_device = false;
 	bool layout_type_1 = false;
 	uint32_t data, class, id;
+	bool is_bridge = false;
 
 	*skip_next_func = false;
 
 	id = pcie_conf_read(bdf, PCIE_CONF_ID);
 	if (id == PCIE_ID_NONE) {
-		return;
+		return false;
 	}
 
 	class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);
@@ -204,16 +303,20 @@
 	}
 
 	if (layout_type_1) {
-		pcie_generic_ctrl_enumerate_type1(ctrl_dev, bdf);
+		is_bridge = pcie_generic_ctrl_enumerate_type1(ctrl_dev, bdf, bus_number);
 	} else {
 		pcie_generic_ctrl_enumerate_type0(ctrl_dev, bdf);
 	}
+
+	return is_bridge;
 }
 
 void pcie_generic_ctrl_enumerate(const struct device *ctrl_dev, pcie_bdf_t bdf_start)
 {
 	uint32_t data, class, id;
+	unsigned int bus_number = PCIE_BDF_TO_BUS(bdf_start) + 1;
 	bool skip_next_func = false;
+	bool is_bridge = false;
 	unsigned int dev = PCIE_BDF_TO_DEV(bdf_start),
 		     func = 0,
 		     bus = PCIE_BDF_TO_BUS(bdf_start);
@@ -223,7 +326,15 @@
 		for (; func <= PCIE_MAX_FUNC; func++) {
 			pcie_bdf_t bdf = PCIE_BDF(bus, dev, func);
 
-			pcie_generic_ctrl_enumerate_endpoint(ctrl_dev, bdf, &skip_next_func);
+			is_bridge = pcie_generic_ctrl_enumerate_endpoint(ctrl_dev, bdf,
+									  bus_number,
+									  &skip_next_func);
+
+			if (is_bridge) {
+				bus_number++;
+				pcie_generic_ctrl_post_enumerate_type1(ctrl_dev, bdf,
+							               bus_number);
+			}
 
 			if (skip_next_func) {
 				break;