target: kill struct se_subsystem_dev

Simplify the code a lot by killing the superfluous struct se_subsystem_dev.
Instead, se_device is allocated early on by the backend driver, which embeds
it in its own per-device structure, borrowing the scheme that is used for
example for inode allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c123327..7272016 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -565,21 +565,8 @@
 	struct se_dev_attrib *da,					\
 	char *page)							\
 {									\
-	struct se_device *dev;						\
-	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
-	ssize_t rb;							\
-									\
-	spin_lock(&se_dev->se_dev_lock);				\
-	dev = se_dev->se_dev_ptr;					\
-	if (!dev) {							\
-		spin_unlock(&se_dev->se_dev_lock); 			\
-		return -ENODEV;						\
-	}								\
-	rb = snprintf(page, PAGE_SIZE, "%u\n",				\
-		(u32)dev->se_sub_dev->se_dev_attrib._name);		\
-	spin_unlock(&se_dev->se_dev_lock);				\
-									\
-	return rb;							\
+	return snprintf(page, PAGE_SIZE, "%u\n",			\
+		(u32)da->da_dev->dev_attrib._name);			\
 }
 
 #define DEF_DEV_ATTRIB_STORE(_name)					\
@@ -588,26 +575,16 @@
 	const char *page,						\
 	size_t count)							\
 {									\
-	struct se_device *dev;						\
-	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
 	unsigned long val;						\
 	int ret;							\
 									\
-	spin_lock(&se_dev->se_dev_lock);				\
-	dev = se_dev->se_dev_ptr;					\
-	if (!dev) {							\
-		spin_unlock(&se_dev->se_dev_lock);			\
-		return -ENODEV;						\
-	}								\
 	ret = strict_strtoul(page, 0, &val);				\
 	if (ret < 0) {							\
-		spin_unlock(&se_dev->se_dev_lock);                      \
 		pr_err("strict_strtoul() failed with"		\
 			" ret: %d\n", ret);				\
 		return -EINVAL;						\
 	}								\
-	ret = se_dev_set_##_name(dev, (u32)val);			\
-	spin_unlock(&se_dev->se_dev_lock);				\
+	ret = se_dev_set_##_name(da->da_dev, (u32)val);			\
 									\
 	return (!ret) ? count : -EINVAL;				\
 }
@@ -764,13 +741,6 @@
 	struct t10_wwn *t10_wwn,
 	char *page)
 {
-	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
-	struct se_device *dev;
-
-	dev = se_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
 	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
 		&t10_wwn->unit_serial[0]);
 }
@@ -780,8 +750,7 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
-	struct se_device *dev;
+	struct se_device *dev = t10_wwn->t10_dev;
 	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
 
 	/*
@@ -794,7 +763,7 @@
 	 * it is doing 'the right thing' wrt a world wide unique
 	 * VPD Unit Serial Number that OS dependent multipath can depend on.
 	 */
-	if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
 		pr_err("Underlying SCSI device firmware provided VPD"
 			" Unit Serial, ignoring request\n");
 		return -EOPNOTSUPP;
@@ -811,15 +780,13 @@
 	 * (underneath the initiator side OS dependent multipath code)
 	 * could cause negative effects.
 	 */
-	dev = su_dev->se_dev_ptr;
-	if (dev) {
-		if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-			pr_err("Unable to set VPD Unit Serial while"
-				" active %d $FABRIC_MOD exports exist\n",
-				atomic_read(&dev->dev_export_obj.obj_access_count));
-			return -EINVAL;
-		}
+	if (dev->export_count) {
+		pr_err("Unable to set VPD Unit Serial while"
+			" active %d $FABRIC_MOD exports exist\n",
+			dev->export_count);
+		return -EINVAL;
 	}
+
 	/*
 	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
 	 *
@@ -828,12 +795,12 @@
 	 */
 	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
 	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
-	snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
 			"%s", strstrip(buf));
-	su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
 
 	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
-			" %s\n", su_dev->t10_wwn.unit_serial);
+			" %s\n", dev->t10_wwn.unit_serial);
 
 	return count;
 }
@@ -847,16 +814,10 @@
 	struct t10_wwn *t10_wwn,
 	char *page)
 {
-	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
-	struct se_device *dev;
 	struct t10_vpd *vpd;
 	unsigned char buf[VPD_TMP_BUF_SIZE];
 	ssize_t len = 0;
 
-	dev = se_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
 	memset(buf, 0, VPD_TMP_BUF_SIZE);
 
 	spin_lock(&t10_wwn->t10_vpd_lock);
@@ -894,16 +855,10 @@
 	struct t10_wwn *t10_wwn,					\
 	char *page)							\
 {									\
-	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;		\
-	struct se_device *dev;						\
 	struct t10_vpd *vpd;							\
 	unsigned char buf[VPD_TMP_BUF_SIZE];				\
 	ssize_t len = 0;						\
 									\
-	dev = se_dev->se_dev_ptr;					\
-	if (!dev)							\
-		return -ENODEV;						\
-									\
 	spin_lock(&t10_wwn->t10_vpd_lock);				\
 	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
 		if (vpd->association != _assoc)				\
@@ -1003,7 +958,7 @@
 
 /*  Start functions for struct config_item_type target_core_dev_pr_cit */
 
-CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
 #define SE_DEV_PR_ATTR(_name, _mode)					\
 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
 	__CONFIGFS_EATTR(_name, _mode,					\
@@ -1071,23 +1026,17 @@
 	return *len;
 }
 
-static ssize_t target_core_dev_pr_show_attr_res_holder(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
+		char *page)
 {
 	ssize_t len = 0;
 
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	switch (su_dev->t10_pr.res_type) {
+	switch (dev->t10_pr.res_type) {
 	case SPC3_PERSISTENT_RESERVATIONS:
-		target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
-				page, &len);
+		target_core_dev_pr_show_spc3_res(dev, page, &len);
 		break;
 	case SPC2_RESERVATIONS:
-		target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
-				page, &len);
+		target_core_dev_pr_show_spc2_res(dev, page, &len);
 		break;
 	case SPC_PASSTHROUGH:
 		len += sprintf(page+len, "Passthrough\n");
@@ -1102,22 +1051,13 @@
 
 SE_DEV_PR_ATTR_RO(res_holder);
 
-/*
- * res_pr_all_tgt_pts
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	struct se_device *dev;
 	struct t10_pr_registration *pr_reg;
 	ssize_t len = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -1144,20 +1084,13 @@
 
 SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
 
-/*
- * res_pr_generation
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
-	return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
+	return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
 }
 
 SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1166,10 +1099,8 @@
  * res_pr_holder_tg_port
  */
 static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	struct se_device *dev;
 	struct se_node_acl *se_nacl;
 	struct se_lun *lun;
 	struct se_portal_group *se_tpg;
@@ -1177,11 +1108,7 @@
 	struct target_core_fabric_ops *tfo;
 	ssize_t len = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -1211,12 +1138,8 @@
 
 SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
 
-/*
- * res_pr_registered_i_pts
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
 	struct target_core_fabric_ops *tfo;
 	struct t10_pr_registration *pr_reg;
@@ -1225,16 +1148,13 @@
 	ssize_t len = 0;
 	int reg_count = 0, prf_isid;
 
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
 
-	spin_lock(&su_dev->t10_pr.registration_lock);
-	list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+	spin_lock(&dev->t10_pr.registration_lock);
+	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
 			pr_reg_list) {
 
 		memset(buf, 0, 384);
@@ -1254,7 +1174,7 @@
 		len += sprintf(page+len, "%s", buf);
 		reg_count++;
 	}
-	spin_unlock(&su_dev->t10_pr.registration_lock);
+	spin_unlock(&dev->t10_pr.registration_lock);
 
 	if (!reg_count)
 		len += sprintf(page+len, "None\n");
@@ -1264,22 +1184,13 @@
 
 SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
 
-/*
- * res_pr_type
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_type(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	struct se_device *dev;
 	struct t10_pr_registration *pr_reg;
 	ssize_t len = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return len;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -1298,19 +1209,12 @@
 
 SE_DEV_PR_ATTR_RO(res_pr_type);
 
-/*
- * res_type
- */
 static ssize_t target_core_dev_pr_show_attr_res_type(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
 	ssize_t len = 0;
 
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	switch (su_dev->t10_pr.res_type) {
+	switch (dev->t10_pr.res_type) {
 	case SPC3_PERSISTENT_RESERVATIONS:
 		len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
 		break;
@@ -1330,22 +1234,14 @@
 
 SE_DEV_PR_ATTR_RO(res_type);
 
-/*
- * res_aptpl_active
- */
-
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
 	return sprintf(page, "APTPL Bit Status: %s\n",
-		(su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
 }
 
 SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1354,13 +1250,9 @@
  * res_aptpl_metadata
  */
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
-	struct se_subsystem_dev *su_dev,
-	char *page)
+		struct se_device *dev, char *page)
 {
-	if (!su_dev->se_dev_ptr)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
 	return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1392,11 +1284,10 @@
 };
 
 static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
-	struct se_subsystem_dev *su_dev,
+	struct se_device *dev,
 	const char *page,
 	size_t count)
 {
-	struct se_device *dev;
 	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
 	unsigned char *t_fabric = NULL, *t_port = NULL;
 	char *orig, *ptr, *arg_p, *opts;
@@ -1408,14 +1299,10 @@
 	u16 port_rpti = 0, tpgt = 0;
 	u8 type = 0, scope;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+	if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
-	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+	if (dev->export_count) {
 		pr_debug("Unable to process APTPL metadata while"
 			" active fabric exports exist\n");
 		return -EINVAL;
@@ -1558,7 +1445,7 @@
 		goto out;
 	}
 
-	ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
+	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
 			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
 			res_holder, all_tg_pt, type);
 out:
@@ -1573,7 +1460,7 @@
 
 SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
 
-CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
 
 static struct configfs_attribute *target_core_dev_pr_attrs[] = {
 	&target_core_dev_pr_res_holder.attr,
@@ -1605,18 +1492,14 @@
 
 static ssize_t target_core_show_dev_info(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
-	struct se_subsystem_api *t = hba->transport;
+	struct se_device *dev = p;
+	struct se_subsystem_api *t = dev->transport;
 	int bl = 0;
 	ssize_t read_bytes = 0;
 
-	if (!se_dev->se_dev_ptr)
-		return -ENODEV;
-
-	transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+	transport_dump_dev_state(dev, page, &bl);
 	read_bytes += bl;
-	read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+	read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
 	return read_bytes;
 }
 
@@ -1633,17 +1516,10 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
-	struct se_subsystem_api *t = hba->transport;
+	struct se_device *dev = p;
+	struct se_subsystem_api *t = dev->transport;
 
-	if (!se_dev->se_dev_su_ptr) {
-		pr_err("Unable to locate struct se_subsystem_dev>se"
-				"_dev_su_ptr\n");
-		return -EINVAL;
-	}
-
-	return t->set_configfs_dev_params(hba, se_dev, page, count);
+	return t->set_configfs_dev_params(dev, page, count);
 }
 
 static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -1656,12 +1532,12 @@
 
 static ssize_t target_core_show_dev_alias(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = p;
+	struct se_device *dev = p;
 
-	if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+	if (!(dev->dev_flags & DF_USING_ALIAS))
 		return 0;
 
-	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
 }
 
 static ssize_t target_core_store_dev_alias(
@@ -1669,8 +1545,8 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_device *dev = p;
+	struct se_hba *hba = dev->se_hba;
 	ssize_t read_bytes;
 
 	if (count > (SE_DEV_ALIAS_LEN-1)) {
@@ -1680,19 +1556,18 @@
 		return -EINVAL;
 	}
 
-	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
-			"%s", page);
+	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
 	if (!read_bytes)
 		return -EINVAL;
-	if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
-		se_dev->se_dev_alias[read_bytes - 1] = '\0';
+	if (dev->dev_alias[read_bytes - 1] == '\n')
+		dev->dev_alias[read_bytes - 1] = '\0';
 
-	se_dev->su_dev_flags |= SDF_USING_ALIAS;
+	dev->dev_flags |= DF_USING_ALIAS;
 
 	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
-		config_item_name(&se_dev->se_dev_group.cg_item),
-		se_dev->se_dev_alias);
+		config_item_name(&dev->dev_group.cg_item),
+		dev->dev_alias);
 
 	return read_bytes;
 }
@@ -1707,12 +1582,12 @@
 
 static ssize_t target_core_show_dev_udev_path(void *p, char *page)
 {
-	struct se_subsystem_dev *se_dev = p;
+	struct se_device *dev = p;
 
-	if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
 		return 0;
 
-	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
 }
 
 static ssize_t target_core_store_dev_udev_path(
@@ -1720,8 +1595,8 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_device *dev = p;
+	struct se_hba *hba = dev->se_hba;
 	ssize_t read_bytes;
 
 	if (count > (SE_UDEV_PATH_LEN-1)) {
@@ -1731,19 +1606,19 @@
 		return -EINVAL;
 	}
 
-	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
 			"%s", page);
 	if (!read_bytes)
 		return -EINVAL;
-	if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
-		se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+	if (dev->udev_path[read_bytes - 1] == '\n')
+		dev->udev_path[read_bytes - 1] = '\0';
 
-	se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+	dev->dev_flags |= DF_USING_UDEV_PATH;
 
 	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
-		config_item_name(&se_dev->se_dev_group.cg_item),
-		se_dev->se_dev_udev_path);
+		config_item_name(&dev->dev_group.cg_item),
+		dev->udev_path);
 
 	return read_bytes;
 }
@@ -1761,11 +1636,9 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *se_dev = p;
-	struct se_device *dev;
-	struct se_hba *hba = se_dev->se_dev_hba;
-	struct se_subsystem_api *t = hba->transport;
+	struct se_device *dev = p;
 	char *ptr;
+	int ret;
 
 	ptr = strstr(page, "1");
 	if (!ptr) {
@@ -1773,25 +1646,10 @@
 				" is \"1\"\n");
 		return -EINVAL;
 	}
-	if (se_dev->se_dev_ptr) {
-		pr_err("se_dev->se_dev_ptr already set for storage"
-				" object\n");
-		return -EEXIST;
-	}
 
-	if (t->check_configfs_dev_params(hba, se_dev) < 0)
-		return -EINVAL;
-
-	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
-	else if (!dev)
-		return -EINVAL;
-
-	se_dev->se_dev_ptr = dev;
-	pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
-		" %p\n", se_dev->se_dev_ptr);
-
+	ret = target_configure_device(dev);
+	if (ret)
+		return ret;
 	return count;
 }
 
@@ -1805,18 +1663,13 @@
 
 static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
 {
-	struct se_device *dev;
-	struct se_subsystem_dev *su_dev = p;
+	struct se_device *dev = p;
 	struct config_item *lu_ci;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	ssize_t len = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
+	if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
 		return len;
 
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
@@ -1843,22 +1696,17 @@
 	const char *page,
 	size_t count)
 {
-	struct se_device *dev;
-	struct se_subsystem_dev *su_dev = p;
-	struct se_hba *hba = su_dev->se_dev_hba;
+	struct se_device *dev = p;
+	struct se_hba *hba = dev->se_hba;
 	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	unsigned char buf[LU_GROUP_NAME_BUF];
 	int move = 0;
 
-	dev = su_dev->se_dev_ptr;
-	if (!dev)
-		return -ENODEV;
-
-	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+	if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
 		pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
 			config_item_name(&hba->hba_group.cg_item),
-			config_item_name(&su_dev->se_dev_group.cg_item));
+			config_item_name(&dev->dev_group.cg_item));
 		return -EINVAL;
 	}
 	if (count > LU_GROUP_NAME_BUF) {
@@ -1902,7 +1750,7 @@
 				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
 				" %hu\n",
 				config_item_name(&hba->hba_group.cg_item),
-				config_item_name(&su_dev->se_dev_group.cg_item),
+				config_item_name(&dev->dev_group.cg_item),
 				config_item_name(&lu_gp->lu_gp_group.cg_item),
 				lu_gp->lu_gp_id);
 
@@ -1927,7 +1775,7 @@
 		" core/alua/lu_gps/%s, ID: %hu\n",
 		(move) ? "Moving" : "Adding",
 		config_item_name(&hba->hba_group.cg_item),
-		config_item_name(&su_dev->se_dev_group.cg_item),
+		config_item_name(&dev->dev_group.cg_item),
 		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
 		lu_gp_new->lu_gp_id);
 
@@ -1955,69 +1803,44 @@
 
 static void target_core_dev_release(struct config_item *item)
 {
-	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
-				struct se_subsystem_dev, se_dev_group);
-	struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
-	struct se_subsystem_api *t = hba->transport;
-	struct config_group *dev_cg = &se_dev->se_dev_group;
+	struct config_group *dev_cg = to_config_group(item);
+	struct se_device *dev =
+		container_of(dev_cg, struct se_device, dev_group);
 
 	kfree(dev_cg->default_groups);
-	/*
-	 * This pointer will set when the storage is enabled with:
-	 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
-	 */
-	if (se_dev->se_dev_ptr) {
-		pr_debug("Target_Core_ConfigFS: Calling se_free_"
-			"virtual_device() for se_dev_ptr: %p\n",
-			se_dev->se_dev_ptr);
-
-		se_free_virtual_device(se_dev->se_dev_ptr, hba);
-	} else {
-		/*
-		 * Release struct se_subsystem_dev->se_dev_su_ptr..
-		 */
-		pr_debug("Target_Core_ConfigFS: Calling t->free_"
-			"device() for se_dev_su_ptr: %p\n",
-			se_dev->se_dev_su_ptr);
-
-		t->free_device(se_dev->se_dev_su_ptr);
-	}
-
-	pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
-			"_dev_t: %p\n", se_dev);
-	kfree(se_dev);
+	target_free_device(dev);
 }
 
 static ssize_t target_core_dev_show(struct config_item *item,
 				     struct configfs_attribute *attr,
 				     char *page)
 {
-	struct se_subsystem_dev *se_dev = container_of(
-			to_config_group(item), struct se_subsystem_dev,
-			se_dev_group);
+	struct config_group *dev_cg = to_config_group(item);
+	struct se_device *dev =
+		container_of(dev_cg, struct se_device, dev_group);
 	struct target_core_configfs_attribute *tc_attr = container_of(
 			attr, struct target_core_configfs_attribute, attr);
 
 	if (!tc_attr->show)
 		return -EINVAL;
 
-	return tc_attr->show(se_dev, page);
+	return tc_attr->show(dev, page);
 }
 
 static ssize_t target_core_dev_store(struct config_item *item,
 				      struct configfs_attribute *attr,
 				      const char *page, size_t count)
 {
-	struct se_subsystem_dev *se_dev = container_of(
-			to_config_group(item), struct se_subsystem_dev,
-			se_dev_group);
+	struct config_group *dev_cg = to_config_group(item);
+	struct se_device *dev =
+		container_of(dev_cg, struct se_device, dev_group);
 	struct target_core_configfs_attribute *tc_attr = container_of(
 			attr, struct target_core_configfs_attribute, attr);
 
 	if (!tc_attr->store)
 		return -EINVAL;
 
-	return tc_attr->store(se_dev, page, count);
+	return tc_attr->store(dev, page, count);
 }
 
 static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2107,7 +1930,6 @@
 {
 	struct se_device *dev;
 	struct se_hba *hba;
-	struct se_subsystem_dev *su_dev;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	ssize_t len = 0, cur_len;
 	unsigned char buf[LU_GROUP_NAME_BUF];
@@ -2117,12 +1939,11 @@
 	spin_lock(&lu_gp->lu_gp_lock);
 	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
 		dev = lu_gp_mem->lu_gp_mem_dev;
-		su_dev = dev->se_sub_dev;
-		hba = su_dev->se_dev_hba;
+		hba = dev->se_hba;
 
 		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
 			config_item_name(&hba->hba_group.cg_item),
-			config_item_name(&su_dev->se_dev_group.cg_item));
+			config_item_name(&dev->dev_group.cg_item));
 		cur_len++; /* Extra byte for NULL terminator */
 
 		if ((cur_len + len) > PAGE_SIZE) {
@@ -2260,7 +2081,7 @@
 	const char *page,
 	size_t count)
 {
-	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	unsigned long tmp;
 	int new_state, ret;
 
@@ -2284,7 +2105,7 @@
 		return -EINVAL;
 	}
 
-	ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+	ret = core_alua_do_port_transition(tg_pt_gp, dev,
 					NULL, NULL, new_state, 0);
 	return (!ret) ? count : -EINVAL;
 }
@@ -2620,11 +2441,10 @@
 	struct t10_alua *alua = container_of(group, struct t10_alua,
 					alua_tg_pt_gps_group);
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
-	struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
 	struct config_group *alua_tg_pt_gp_cg = NULL;
 	struct config_item *alua_tg_pt_gp_ci = NULL;
 
-	tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
 	if (!tg_pt_gp)
 		return NULL;
 
@@ -2721,10 +2541,10 @@
 	const char *name)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
-	struct se_subsystem_dev *se_dev;
 	struct se_subsystem_api *t;
 	struct config_item *hba_ci = &group->cg_item;
 	struct se_hba *hba = item_to_hba(hba_ci);
+	struct se_device *dev;
 	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
 	struct config_group *dev_stat_grp = NULL;
 	int errno = -ENOMEM, ret;
@@ -2737,120 +2557,80 @@
 	 */
 	t = hba->transport;
 
-	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
-	if (!se_dev) {
-		pr_err("Unable to allocate memory for"
-				" struct se_subsystem_dev\n");
-		goto unlock;
-	}
-	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
-	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
-	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
-	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
-	spin_lock_init(&se_dev->t10_pr.registration_lock);
-	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
-	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
-	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
-	spin_lock_init(&se_dev->se_dev_lock);
-	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
-	se_dev->t10_wwn.t10_sub_dev = se_dev;
-	se_dev->t10_alua.t10_sub_dev = se_dev;
-	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+	dev = target_alloc_device(hba, name);
+	if (!dev)
+		goto out_unlock;
 
-	se_dev->se_dev_hba = hba;
-	dev_cg = &se_dev->se_dev_group;
+	dev_cg = &dev->dev_group;
 
 	dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
 			GFP_KERNEL);
 	if (!dev_cg->default_groups)
-		goto out;
-	/*
-	 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
-	 * for ->allocate_virtdevice()
-	 *
-	 * se_dev->se_dev_ptr will be set after ->create_virtdev()
-	 * has been called successfully in the next level up in the
-	 * configfs tree for device object's struct config_group.
-	 */
-	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
-	if (!se_dev->se_dev_su_ptr) {
-		pr_err("Unable to locate subsystem dependent pointer"
-			" from allocate_virtdevice()\n");
-		goto out;
-	}
+		goto out_free_device;
 
-	config_group_init_type_name(&se_dev->se_dev_group, name,
-			&target_core_dev_cit);
-	config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+	config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
 			&target_core_dev_attrib_cit);
-	config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+	config_group_init_type_name(&dev->dev_pr_group, "pr",
 			&target_core_dev_pr_cit);
-	config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
 			&target_core_dev_wwn_cit);
-	config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
 			"alua", &target_core_alua_tg_pt_gps_cit);
-	config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
 			"statistics", &target_core_stat_cit);
 
-	dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
-	dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
-	dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
-	dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
-	dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+	dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
+	dev_cg->default_groups[1] = &dev->dev_pr_group;
+	dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
+	dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
+	dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
 	dev_cg->default_groups[5] = NULL;
 	/*
 	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
 	 */
-	tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
 	if (!tg_pt_gp)
-		goto out;
+		goto out_free_dev_cg_default_groups;
+	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
 
-	tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
 	tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 				GFP_KERNEL);
 	if (!tg_pt_gp_cg->default_groups) {
 		pr_err("Unable to allocate tg_pt_gp_cg->"
 				"default_groups\n");
-		goto out;
+		goto out_free_tg_pt_gp;
 	}
 
 	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
 			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
 	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
 	tg_pt_gp_cg->default_groups[1] = NULL;
-	se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
 	/*
 	 * Add core/$HBA/$DEV/statistics/ default groups
 	 */
-	dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+	dev_stat_grp = &dev->dev_stat_grps.stat_group;
 	dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
 				GFP_KERNEL);
 	if (!dev_stat_grp->default_groups) {
 		pr_err("Unable to allocate dev_stat_grp->default_groups\n");
-		goto out;
+		goto out_free_tg_pt_gp_cg_default_groups;
 	}
-	target_stat_setup_dev_default_groups(se_dev);
-
-	pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
-		" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+	target_stat_setup_dev_default_groups(dev);
 
 	mutex_unlock(&hba->hba_access_mutex);
-	return &se_dev->se_dev_group;
-out:
-	if (se_dev->t10_alua.default_tg_pt_gp) {
-		core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
-		se_dev->t10_alua.default_tg_pt_gp = NULL;
-	}
-	if (dev_stat_grp)
-		kfree(dev_stat_grp->default_groups);
-	if (tg_pt_gp_cg)
-		kfree(tg_pt_gp_cg->default_groups);
-	if (dev_cg)
-		kfree(dev_cg->default_groups);
-	if (se_dev->se_dev_su_ptr)
-		t->free_device(se_dev->se_dev_su_ptr);
-	kfree(se_dev);
-unlock:
+	return dev_cg;
+
+out_free_tg_pt_gp_cg_default_groups:
+	kfree(tg_pt_gp_cg->default_groups);
+out_free_tg_pt_gp:
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+out_free_dev_cg_default_groups:
+	kfree(dev_cg->default_groups);
+out_free_device:
+	target_free_device(dev);
+out_unlock:
 	mutex_unlock(&hba->hba_access_mutex);
 	return ERR_PTR(errno);
 }
@@ -2859,18 +2639,19 @@
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
-				struct se_subsystem_dev, se_dev_group);
+	struct config_group *dev_cg = to_config_group(item);
+	struct se_device *dev =
+		container_of(dev_cg, struct se_device, dev_group);
 	struct se_hba *hba;
 	struct config_item *df_item;
-	struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
+	struct config_group *tg_pt_gp_cg, *dev_stat_grp;
 	int i;
 
-	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
 
 	mutex_lock(&hba->hba_access_mutex);
 
-	dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+	dev_stat_grp = &dev->dev_stat_grps.stat_group;
 	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
 		df_item = &dev_stat_grp->default_groups[i]->cg_item;
 		dev_stat_grp->default_groups[i] = NULL;
@@ -2878,7 +2659,7 @@
 	}
 	kfree(dev_stat_grp->default_groups);
 
-	tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
 	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
 		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
 		tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2889,17 +2670,15 @@
 	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
 	 * directly from target_core_alua_tg_pt_gp_release().
 	 */
-	se_dev->t10_alua.default_tg_pt_gp = NULL;
+	dev->t10_alua.default_tg_pt_gp = NULL;
 
-	dev_cg = &se_dev->se_dev_group;
 	for (i = 0; dev_cg->default_groups[i]; i++) {
 		df_item = &dev_cg->default_groups[i]->cg_item;
 		dev_cg->default_groups[i] = NULL;
 		config_item_put(df_item);
 	}
 	/*
-	 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
-	 * from target_core_dev_item_ops->release() ->target_core_dev_release().
+	 * se_dev is released from target_core_dev_item_ops->release()
 	 */
 	config_item_put(item);
 	mutex_unlock(&hba->hba_access_mutex);
@@ -2962,13 +2741,10 @@
 		return -EINVAL;
 	}
 
-	spin_lock(&hba->device_lock);
-	if (!list_empty(&hba->hba_dev_list)) {
+	if (hba->dev_count) {
 		pr_err("Unable to set hba_mode with active devices\n");
-		spin_unlock(&hba->device_lock);
 		return -EINVAL;
 	}
-	spin_unlock(&hba->device_lock);
 
 	ret = transport->pmode_enable_hba(hba, mode_flag);
 	if (ret < 0)