diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index f2a347015768d450769a00ed37b347876698bb33..1286f202a21fddd30fac3efb7e166692efc5adab 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -702,6 +702,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
 	/* Initialize IET */
 	am65_cpsw_qos_iet_init(ndev);
 	am65_cpsw_qos_cut_thru_init(port);
+	am65_cpsw_qos_mqprio_init(port);
 
 	/* restore vlan configurations */
 	vlan_for_each(ndev, cpsw_restore_vlans, port);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 7bbc28d7227cc83ea90887efd0b75436f0ac8c61..1c518604422aee27780bc382c1a9273eafbe8028 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -19,12 +19,15 @@
 #define AM65_CPSW_REG_FREQ			0x05c
 #define AM65_CPSW_PN_REG_CTL			0x004
 #define AM65_CPSW_PN_REG_MAX_BLKS		0x008
+#define AM65_CPSW_PN_REG_TX_PRI_MAP		0x018
+#define AM65_CPSW_PN_REG_RX_PRI_MAP		0x020
 #define AM65_CPSW_PN_REG_IET_CTRL		0x040
 #define AM65_CPSW_PN_REG_IET_STATUS		0x044
 #define AM65_CPSW_PN_REG_IET_VERIFY		0x048
 #define AM65_CPSW_PN_REG_FIFO_STATUS		0x050
 #define AM65_CPSW_PN_REG_EST_CTL		0x060
 #define AM65_CPSW_PN_REG_PRI_CIR(pri)		(0x140 + 4 * (pri))
+#define AM65_CPSW_PN_REG_PRI_EIR(pri)		(0x160 + 4 * (pri))
 
 #define AM64_CPSW_PN_CUT_THRU			0x3C0
 #define AM64_CPSW_PN_SPEED			0x3C4
@@ -102,6 +105,9 @@ enum timer_act {
 
 /* number of traffic classes (fifos) per port */
 #define AM65_CPSW_PN_TC_NUM			8
+#define AM65_CPSW_PN_TX_PRI_MAP_DEF		0x76543210
+
+static int am65_cpsw_mqprio_setup(struct net_device *ndev, void *type_data);
 
 /* Fetch command count it's number of bytes in Gigabit mode or nibbles in
  * 10/100Mb mode. So, having speed and time in ns, recalculate ns to number of
@@ -888,6 +894,8 @@ int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
 	switch (type) {
 	case TC_SETUP_QDISC_TAPRIO:
 		return am65_cpsw_setup_taprio(ndev, type_data);
+	case TC_SETUP_QDISC_MQPRIO:
+		return am65_cpsw_mqprio_setup(ndev, type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -927,6 +935,7 @@ static void am65_cpsw_iet_link_up(struct net_device *ndev)
 }
 
 static void am65_cpsw_cut_thru_link_up(struct am65_cpsw_port *port);
+static void am65_cpsw_tx_pn_shaper_link_up(struct am65_cpsw_port *port);
 
 void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed, int duplex)
 {
@@ -936,6 +945,7 @@ void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed, int duplex)
 	port->qos.duplex = duplex;
 	am65_cpsw_iet_link_up(ndev);
 	am65_cpsw_cut_thru_link_up(port);
+	am65_cpsw_tx_pn_shaper_link_up(port);
 
 	if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
 		return;
@@ -1183,3 +1193,278 @@ void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
 		       host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
 	}
 }
+
+static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port) /* program per-TC CIR/EIR regs */
+{
+	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+	struct am65_cpsw_common *common = port->common;
+	struct tc_mqprio_qopt_offload *mqprio;
+	bool shaper_en;
+	u32 rate_mbps;
+	int i;
+
+	mqprio = &p_mqprio->mqprio_hw;
+	shaper_en = p_mqprio->shaper_en && !p_mqprio->shaper_susp; /* suspend overrides enable */
+
+	for (i = 0; i < mqprio->qopt.num_tc; i++) { /* CIR: committed rate = per-TC min_rate */
+		rate_mbps = 0; /* 0 disables shaping for this TC */
+		if (shaper_en) {
+			rate_mbps = mqprio->min_rate[i] * 8 / 1000000; /* bytes/s -> Mbit/s */
+			rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+							       common->bus_freq);
+		}
+
+		writel(rate_mbps,
+		       port->port_base + AM65_CPSW_PN_REG_PRI_CIR(i));
+	}
+
+	for (i = 0; i < mqprio->qopt.num_tc; i++) { /* EIR: excess rate = max_rate - min_rate */
+		rate_mbps = 0;
+		if (shaper_en && mqprio->max_rate[i]) {
+			rate_mbps = mqprio->max_rate[i] - mqprio->min_rate[i];
+			rate_mbps = rate_mbps * 8 / 1000000; /* bytes/s -> Mbit/s */
+			rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+							       common->bus_freq);
+		}
+
+		writel(rate_mbps,
+		       port->port_base + AM65_CPSW_PN_REG_PRI_EIR(i));
+	}
+}
+
+static void am65_cpsw_tx_pn_shaper_link_up(struct am65_cpsw_port *port) /* re-evaluate shaper suspend on link change */
+{
+	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+	struct am65_cpsw_common *common = port->common;
+	bool shaper_susp = false;
+
+	if (!p_mqprio->enable || !p_mqprio->shaper_en)
+		return;
+
+	if (p_mqprio->max_rate_total > port->qos.link_speed) /* committed rates can't be met on this link */
+		shaper_susp = true;
+
+	if (p_mqprio->shaper_susp == shaper_susp) /* no state change - registers already correct */
+		return;
+
+	if (shaper_susp)
+		dev_info(common->dev,
+			 "Port%u: total shaper tx rate > link speed - suspend shaper\n",
+			 port->port_id);
+	else
+		dev_info(common->dev,
+			 "Port%u: link recover - resume shaper\n",
+			 port->port_id);
+
+	p_mqprio->shaper_susp = shaper_susp;
+	am65_cpsw_tx_pn_shaper_apply(port); /* rewrite CIR/EIR for the new state */
+}
+
+void am65_cpsw_qos_mqprio_init(struct am65_cpsw_port *port) /* apply (or restore) prio->fifo maps + shapers; also run on port open */
+{
+	struct am65_cpsw_host *host = am65_common_get_host(port->common);
+	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+	struct tc_mqprio_qopt_offload *mqprio = &p_mqprio->mqprio_hw;
+	int i, fifo, rx_prio_map;
+
+	rx_prio_map = readl(host->port_base + AM65_CPSW_PN_REG_RX_PRI_MAP);
+
+	if (p_mqprio->enable) {
+		for (i = 0; i < AM65_CPSW_PN_TC_NUM; i++) {
+			fifo = mqprio->qopt.prio_tc_map[i];
+			p_mqprio->tx_prio_map |= fifo << (4 * i); /* nibble i = TX fifo for prio i */
+		}
+
+		netdev_set_num_tc(port->ndev, mqprio->qopt.num_tc);
+		for (i = 0; i < mqprio->qopt.num_tc; i++) {
+			netdev_set_tc_queue(port->ndev, i,
+					    mqprio->qopt.count[i],
+					    mqprio->qopt.offset[i]);
+			if (!i) {
+				p_mqprio->tc0_q = mqprio->qopt.offset[i];
+				rx_prio_map &= ~(0x7 << (4 * p_mqprio->tc0_q)); /* NOTE(review): host nibble indexed by TC0's queue offset - confirm pri==queue assumption */
+			}
+		}
+	} else {
+		/* restore default configuration */
+		netdev_reset_tc(port->ndev);
+		p_mqprio->tx_prio_map = AM65_CPSW_PN_TX_PRI_MAP_DEF;
+		rx_prio_map |= p_mqprio->tc0_q << (4 * p_mqprio->tc0_q); /* default: pri n -> fifo n for the cleared nibble */
+		p_mqprio->tc0_q = 0;
+	}
+
+	writel(p_mqprio->tx_prio_map,
+	       port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
+	writel(rx_prio_map,
+	       host->port_base + AM65_CPSW_PN_REG_RX_PRI_MAP);
+
+	am65_cpsw_tx_pn_shaper_apply(port);
+}
+
+static int am65_cpsw_mqprio_verify(struct am65_cpsw_port *port,
+				   struct tc_mqprio_qopt_offload *mqprio)
+{
+	int i;
+
+	for (i = 0; i < mqprio->qopt.num_tc; i++) { /* each TC's queue range must fit in real TX queues */
+		unsigned int last = mqprio->qopt.offset[i] +
+				    mqprio->qopt.count[i];
+
+		if (mqprio->qopt.offset[i] >= port->ndev->real_num_tx_queues ||
+		    !mqprio->qopt.count[i] ||
+		    last > port->ndev->real_num_tx_queues)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
+					  struct tc_mqprio_qopt_offload *mqprio,
+					  u64 *max_rate)
+{
+	struct am65_cpsw_common *common = port->common;
+	u64 min_rate_total = 0, max_rate_total = 0;
+	u32 min_rate_msk = 0, max_rate_msk = 0;
+	bool has_min_rate, has_max_rate;
+	int num_tc, i;
+
+	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
+
+	if (!has_min_rate && has_max_rate) /* EIR is programmed relative to CIR, so max alone is unsupported */
+		return -EOPNOTSUPP;
+
+	if (!has_min_rate)
+		return 0;
+
+	num_tc = mqprio->qopt.num_tc;
+
+	for (i = num_tc - 1; i >= 0; i--) { /* walk hi->lo to enforce sequential enabling */
+		u32 ch_msk;
+
+		if (mqprio->min_rate[i])
+			min_rate_msk |= BIT(i);
+		min_rate_total += mqprio->min_rate[i];
+
+		if (has_max_rate) {
+			if (mqprio->max_rate[i])
+				max_rate_msk |= BIT(i);
+			max_rate_total += mqprio->max_rate[i];
+
+			if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
+				dev_err(common->dev, "TX tc%d rate max>0 but min=0\n",
+					i);
+				return -EINVAL;
+			}
+
+			if (mqprio->max_rate[i] &&
+			    mqprio->max_rate[i] < mqprio->min_rate[i]) {
+				dev_err(common->dev, "TX tc%d rate min(%llu)>max(%llu)\n",
+					i, mqprio->min_rate[i],
+					mqprio->max_rate[i]);
+				return -EINVAL;
+			}
+		}
+
+		ch_msk = GENMASK(num_tc - 1, i);
+		if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
+			dev_err(common->dev, "TX Min rate limiting has to be enabled sequentially hi->lo tx_rate_msk%x\n",
+				min_rate_msk);
+			return -EINVAL;
+		}
+
+		if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
+			dev_err(common->dev, "TX max rate limiting has to be enabled sequentially hi->lo tx_rate_msk%x\n",
+				max_rate_msk);
+			return -EINVAL;
+		}
+	}
+	min_rate_total *= 8;		/* bytes/s -> bits/s */
+	min_rate_total /= 1000 * 1000;	/* -> Mbit/s, comparable to link_speed */
+	max_rate_total *= 8;
+	max_rate_total /= 1000 * 1000;
+
+	if (port->qos.link_speed != SPEED_UNKNOWN) {
+		if (min_rate_total > port->qos.link_speed) {
+			dev_err(common->dev, "TX rate min exceed %llu link speed %d\n",
+				min_rate_total, port->qos.link_speed);
+			return -EINVAL;
+		}
+
+		if (max_rate_total > port->qos.link_speed) {
+			dev_err(common->dev, "TX rate max exceed %llu link speed %d\n",
+				max_rate_total, port->qos.link_speed);
+			return -EINVAL;
+		}
+	}
+
+	*max_rate = max_t(u64, min_rate_total, max_rate_total);
+
+	return 0;
+}
+
+static int am65_cpsw_mqprio_setup(struct net_device *ndev, void *type_data)
+{
+	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+	struct tc_mqprio_qopt_offload *mqprio = type_data;
+	struct am65_cpsw_common *common = port->common;
+	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+	bool has_min_rate;
+	int num_tc, ret;
+	u64 max_rate = 0; /* verify_shaper() leaves it untouched without F_MIN_RATE */
+
+	if (!mqprio->qopt.hw) /* offload disable request: skip validation */
+		goto skip_check;
+
+	if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL)
+		return -EOPNOTSUPP;
+
+	num_tc = mqprio->qopt.num_tc;
+	if (num_tc > AM65_CPSW_PN_TC_NUM)
+		return -ERANGE;
+
+	if ((mqprio->flags & TC_MQPRIO_F_SHAPER) &&
+	    mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+		return -EOPNOTSUPP;
+
+	ret = am65_cpsw_mqprio_verify(port, mqprio);
+	if (ret)
+		return ret;
+
+	ret = am65_cpsw_mqprio_verify_shaper(port, mqprio, &max_rate);
+	if (ret)
+		return ret;
+
+skip_check:
+	ret = pm_runtime_get_sync(common->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(common->dev);
+		return ret;
+	}
+
+	if (mqprio->qopt.hw) {
+		memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));
+		has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+		p_mqprio->enable = 1;
+		p_mqprio->shaper_en = has_min_rate;
+		p_mqprio->shaper_susp = !has_min_rate;
+		p_mqprio->max_rate_total = max_rate;
+		p_mqprio->tx_prio_map = 0; /* rebuilt by mqprio_init() from prio_tc_map */
+	} else {
+		unsigned int tc0_q = p_mqprio->tc0_q;
+
+		memset(p_mqprio, 0, sizeof(*p_mqprio));
+		p_mqprio->mqprio_hw.qopt.num_tc = AM65_CPSW_PN_TC_NUM;
+		p_mqprio->tc0_q = tc0_q; /* keep so mqprio_init() can restore the host RX map nibble */
+	}
+
+	if (!netif_running(ndev)) /* registers are programmed on the next open */
+		goto exit_put;
+
+	am65_cpsw_qos_mqprio_init(port);
+
+exit_put:
+	pm_runtime_put(common->dev);
+	return 0;
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index d5137b044484f28331b5ae1a7b05e6020a16bb7a..1b759babecb2609e76d9d3db17150713fc89b9b4 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -7,6 +7,7 @@
 
 #include <linux/netdevice.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 struct am65_cpsw_port;
 struct am65_cpsw_common;
@@ -33,6 +34,17 @@ struct am65_cpsw_iet {
 	u32 mask;
 };
 
+struct am65_cpsw_mqprio {
+	struct tc_mqprio_qopt_offload mqprio_hw;	/* copy of the last offloaded config */
+	u64 max_rate_total;	/* Mbit/s; max of total min/max shaper rates */
+	u32 tx_prio_map;	/* TX_PRI_MAP register value, one nibble per prio */
+
+	unsigned enable:1;	/* mqprio offload active */
+	unsigned shaper_en:1;	/* min-rate (CIR) shaping requested */
+	unsigned shaper_susp:1;	/* shaping suspended: link slower than total rate */
+	unsigned tc0_q:3;	/* first TX queue of TC0 (offset[0]) */
+};
+
 struct am65_cpsw_cut_thru {
 	unsigned int rx_pri_mask;
 	unsigned int tx_pri_mask;
@@ -46,6 +58,7 @@ struct am65_cpsw_qos {
 	int link_speed;
 	int duplex;
 	struct am65_cpsw_iet iet;
+	struct am65_cpsw_mqprio mqprio;
 	struct am65_cpsw_cut_thru cut_thru;
 };
 
@@ -60,5 +73,6 @@ void am65_cpsw_qos_cut_thru_cleanup(struct am65_cpsw_port *port);
 int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
 					int queue, u32 rate_mbps);
 void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common);
+void am65_cpsw_qos_mqprio_init(struct am65_cpsw_port *port);
 
 #endif /* AM65_CPSW_QOS_H_ */