net/mlx5: Enable management PF initialization
author Shay Drory <shayd@nvidia.com>
Wed, 29 Jun 2022 08:38:21 +0000 (11:38 +0300)
committer Saeed Mahameed <saeedm@nvidia.com>
Wed, 11 Jan 2023 05:24:41 +0000 (21:24 -0800)
Enable initialization of the DPU management PF, which is a new loopback PF
designed for communication with the BMC.
For now the management PF neither supports nor requires most upper layer
protocols, so avoid them.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Eran Ben Elisha <eranbe@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
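The key piece is the new helper added to include/linux/mlx5/driver.h in the
diff below: a PF that reports a single port but no native port number is
identified as the management PF, and the callers added in dev.c and ecpf.c use
that predicate to bail out of upper-layer and peer-PF setup early. A minimal
standalone sketch of the same check, using illustrative stand-in fields in
place of the MLX5_CAP_GEN() capability reads (fake_caps and its members are
hypothetical, not the mlx5 API):

/* sketch.c - models the management-PF detection rule the patch adds.
 * fake_caps stands in for the device capability fields; it is not
 * the kernel's struct mlx5_core_dev. */
#include <stdbool.h>
#include <stdio.h>

struct fake_caps {
	unsigned int num_ports;       /* stand-in for MLX5_CAP_GEN(dev, num_ports) */
	unsigned int native_port_num; /* stand-in for MLX5_CAP_GEN(dev, native_port_num) */
};

static bool is_management_pf(const struct fake_caps *caps)
{
	/* Same predicate as mlx5_core_is_management_pf() in the diff:
	 * one port and no native port number means a loopback/BMC PF. */
	return caps->num_ports == 1 && !caps->native_port_num;
}

int main(void)
{
	struct fake_caps mgmt = { .num_ports = 1, .native_port_num = 0 };
	struct fake_caps eth  = { .num_ports = 1, .native_port_num = 1 };

	/* Callers such as mlx5_eth_supported()/mlx5_rdma_supported() return
	 * false when the predicate holds, skipping ULP initialization. */
	printf("mgmt: %d\n", is_management_pf(&mgmt)); /* prints 1: skip ULPs */
	printf("eth:  %d\n", is_management_pf(&eth));  /* prints 0: normal init */
	return 0;
}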
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
include/linux/mlx5/driver.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 0571e40c6ee5fe86b03ad244910e87c697e91b7d..5b6b0b126e52358079b3dd773360bb4baf1fb8f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -59,6 +59,9 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev)
        if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
                return false;
 
+       if (mlx5_core_is_management_pf(dev))
+               return false;
+
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;
 
@@ -198,6 +201,9 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
        if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return false;
 
+       if (mlx5_core_is_management_pf(dev))
+               return false;
+
        if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
                return false;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 464eb3a184506575b02e3f500f818b00c1bedbb0..b70e36025d922bdb885a37a3dd9c9a9d299c5c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -75,6 +75,10 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
        if (!mlx5_core_is_ecpf(dev))
                return 0;
 
+       /* Management PF doesn't have a peer PF */
+       if (mlx5_core_is_management_pf(dev))
+               return 0;
+
        return mlx5_host_pf_init(dev);
 }
 
@@ -85,6 +89,10 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
        if (!mlx5_core_is_ecpf(dev))
                return;
 
+       /* Management PF doesn't have a peer PF */
+       if (mlx5_core_is_management_pf(dev))
+               return;
+
        mlx5_host_pf_cleanup(dev);
 
        err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 0dfd5742c6fe9f8688f7bc39bb27c82923a684af..bbb6dab3b21f4b959aa05211ce0749a8d910b6f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1488,7 +1488,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
        void *hca_caps;
        int err;
 
-       if (!mlx5_core_is_ecpf(dev)) {
+       if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) {
                *max_sfs = 0;
                return 0;
        }
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 0c4f6acf59ca8d5bdc18863fce0db7c7c009401d..50a5780367fa36e26c8e649b2429010098bcb905 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1202,6 +1202,11 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
        return dev->coredev_type == MLX5_COREDEV_VF;
 }
 
+static inline bool mlx5_core_is_management_pf(const struct mlx5_core_dev *dev)
+{
+       return MLX5_CAP_GEN(dev, num_ports) == 1 && !MLX5_CAP_GEN(dev, native_port_num);
+}
+
 static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
 {
        return dev->caps.embedded_cpu;