mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-10-31 16:54:21 +00:00 
			
		
		
		
	Merge branches 'af_ib', 'cxgb4', 'misc', 'mlx5', 'ocrdma', 'qib' and 'srp' into for-next
This commit is contained in:
		
						commit
						0eba551148
					
				
					 74 changed files with 17267 additions and 358 deletions
				
			
		|  | @ -54,6 +54,13 @@ Description:	Interface for making ib_srp connect to a new target. | |||
| 		  ib_srp. Specifying a value that exceeds cmd_sg_entries is | ||||
| 		  only safe with partial memory descriptor list support enabled | ||||
| 		  (allow_ext_sg=1). | ||||
| 		* comp_vector, a number in the range 0..n-1 specifying the | ||||
| 		  MSI-X completion vector. Some HCA's allocate multiple (n) | ||||
| 		  MSI-X vectors per HCA port. If the IRQ affinity masks of | ||||
| 		  these interrupts have been configured such that each MSI-X | ||||
| 		  interrupt is handled by a different CPU then the comp_vector | ||||
| 		  parameter can be used to spread the SRP completion workload | ||||
| 		  over multiple CPU's. | ||||
| 
 | ||||
| What:		/sys/class/infiniband_srp/srp-<hca>-<port_number>/ibdev | ||||
| Date:		January 2, 2006 | ||||
|  |  | |||
							
								
								
									
										22
									
								
								MAINTAINERS
									
										
									
									
									
								
							
							
						
						
									
										22
									
								
								MAINTAINERS
									
										
									
									
									
								
							|  | @ -5365,6 +5365,28 @@ W:	http://linuxtv.org | |||
| S:	Odd Fixes | ||||
| F:	drivers/media/radio/radio-miropcm20* | ||||
| 
 | ||||
| Mellanox MLX5 core VPI driver | ||||
| M:	Eli Cohen <eli@mellanox.com> | ||||
| L:	netdev@vger.kernel.org | ||||
| L:	linux-rdma@vger.kernel.org | ||||
| W:	http://www.mellanox.com | ||||
| Q:	http://patchwork.ozlabs.org/project/netdev/list/ | ||||
| Q:	http://patchwork.kernel.org/project/linux-rdma/list/ | ||||
| T:	git://openfabrics.org/~eli/connect-ib.git | ||||
| S:	Supported | ||||
| F:	drivers/net/ethernet/mellanox/mlx5/core/ | ||||
| F:	include/linux/mlx5/ | ||||
| 
 | ||||
| Mellanox MLX5 IB driver | ||||
| M:      Eli Cohen <eli@mellanox.com> | ||||
| L:      linux-rdma@vger.kernel.org | ||||
| W:      http://www.mellanox.com | ||||
| Q:      http://patchwork.kernel.org/project/linux-rdma/list/ | ||||
| T:      git://openfabrics.org/~eli/connect-ib.git | ||||
| S:      Supported | ||||
| F:      include/linux/mlx5/ | ||||
| F:      drivers/infiniband/hw/mlx5/ | ||||
| 
 | ||||
| MODULE SUPPORT | ||||
| M:	Rusty Russell <rusty@rustcorp.com.au> | ||||
| S:	Maintained | ||||
|  |  | |||
|  | @ -50,6 +50,7 @@ source "drivers/infiniband/hw/amso1100/Kconfig" | |||
| source "drivers/infiniband/hw/cxgb3/Kconfig" | ||||
| source "drivers/infiniband/hw/cxgb4/Kconfig" | ||||
| source "drivers/infiniband/hw/mlx4/Kconfig" | ||||
| source "drivers/infiniband/hw/mlx5/Kconfig" | ||||
| source "drivers/infiniband/hw/nes/Kconfig" | ||||
| source "drivers/infiniband/hw/ocrdma/Kconfig" | ||||
| 
 | ||||
|  |  | |||
|  | @ -7,6 +7,7 @@ obj-$(CONFIG_INFINIBAND_AMSO1100)	+= hw/amso1100/ | |||
| obj-$(CONFIG_INFINIBAND_CXGB3)		+= hw/cxgb3/ | ||||
| obj-$(CONFIG_INFINIBAND_CXGB4)		+= hw/cxgb4/ | ||||
| obj-$(CONFIG_MLX4_INFINIBAND)		+= hw/mlx4/ | ||||
| obj-$(CONFIG_MLX5_INFINIBAND)		+= hw/mlx5/ | ||||
| obj-$(CONFIG_INFINIBAND_NES)		+= hw/nes/ | ||||
| obj-$(CONFIG_INFINIBAND_OCRDMA)		+= hw/ocrdma/ | ||||
| obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/ | ||||
|  |  | |||
|  | @ -545,8 +545,10 @@ static int add_port(struct ib_device *device, int port_num, | |||
| 
 | ||||
| 	p->gid_group.name  = "gids"; | ||||
| 	p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len); | ||||
| 	if (!p->gid_group.attrs) | ||||
| 	if (!p->gid_group.attrs) { | ||||
| 		ret = -ENOMEM; | ||||
| 		goto err_remove_pma; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = sysfs_create_group(&p->kobj, &p->gid_group); | ||||
| 	if (ret) | ||||
|  | @ -555,8 +557,10 @@ static int add_port(struct ib_device *device, int port_num, | |||
| 	p->pkey_group.name  = "pkeys"; | ||||
| 	p->pkey_group.attrs = alloc_group_attrs(show_port_pkey, | ||||
| 						attr.pkey_tbl_len); | ||||
| 	if (!p->pkey_group.attrs) | ||||
| 	if (!p->pkey_group.attrs) { | ||||
| 		ret = -ENOMEM; | ||||
| 		goto err_remove_gid; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = sysfs_create_group(&p->kobj, &p->pkey_group); | ||||
| 	if (ret) | ||||
|  |  | |||
|  | @ -334,7 +334,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, | |||
| 
 | ||||
| 	resp.num_comp_vectors = file->device->num_comp_vectors; | ||||
| 
 | ||||
| 	ret = get_unused_fd(); | ||||
| 	ret = get_unused_fd_flags(O_CLOEXEC); | ||||
| 	if (ret < 0) | ||||
| 		goto err_free; | ||||
| 	resp.async_fd = ret; | ||||
|  | @ -1184,7 +1184,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, | |||
| 	if (copy_from_user(&cmd, buf, sizeof cmd)) | ||||
| 		return -EFAULT; | ||||
| 
 | ||||
| 	ret = get_unused_fd(); | ||||
| 	ret = get_unused_fd_flags(O_CLOEXEC); | ||||
| 	if (ret < 0) | ||||
| 		return ret; | ||||
| 	resp.fd = ret; | ||||
|  |  | |||
|  | @ -883,7 +883,8 @@ u16 iwch_rqes_posted(struct iwch_qp *qhp) | |||
| { | ||||
| 	union t3_wr *wqe = qhp->wq.queue; | ||||
| 	u16 count = 0; | ||||
| 	while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) { | ||||
| 
 | ||||
| 	while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) { | ||||
| 		count++; | ||||
| 		wqe++; | ||||
| 	} | ||||
|  |  | |||
|  | @ -211,6 +211,7 @@ static int ehca_create_slab_caches(void) | |||
| 	if (!ctblk_cache) { | ||||
| 		ehca_gen_err("Cannot create ctblk SLAB cache."); | ||||
| 		ehca_cleanup_small_qp_cache(); | ||||
| 		ret = -ENOMEM; | ||||
| 		goto create_slab_caches6; | ||||
| 	} | ||||
| #endif | ||||
|  |  | |||
							
								
								
									
										10
									
								
								drivers/infiniband/hw/mlx5/Kconfig
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								drivers/infiniband/hw/mlx5/Kconfig
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,10 @@ | |||
| config MLX5_INFINIBAND | ||||
| 	tristate "Mellanox Connect-IB HCA support" | ||||
| 	depends on NETDEVICES && ETHERNET && PCI && X86 | ||||
| 	select NET_VENDOR_MELLANOX | ||||
| 	select MLX5_CORE | ||||
| 	---help--- | ||||
| 	  This driver provides low-level InfiniBand support for | ||||
| 	  Mellanox Connect-IB PCI Express host channel adapters (HCAs). | ||||
| 	  This is required to use InfiniBand protocols such as | ||||
| 	  IP-over-IB or SRP with these devices. | ||||
							
								
								
									
										3
									
								
								drivers/infiniband/hw/mlx5/Makefile
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								drivers/infiniband/hw/mlx5/Makefile
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,3 @@ | |||
| obj-$(CONFIG_MLX5_INFINIBAND)	+= mlx5_ib.o | ||||
| 
 | ||||
| mlx5_ib-y :=	main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o | ||||
							
								
								
									
										92
									
								
								drivers/infiniband/hw/mlx5/ah.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										92
									
								
								drivers/infiniband/hw/mlx5/ah.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,92 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include "mlx5_ib.h" | ||||
| 
 | ||||
| struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, | ||||
| 			   struct mlx5_ib_ah *ah) | ||||
| { | ||||
| 	if (ah_attr->ah_flags & IB_AH_GRH) { | ||||
| 		memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16); | ||||
| 		ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label | | ||||
| 						(1 << 30) | | ||||
| 						ah_attr->grh.sgid_index << 20); | ||||
| 		ah->av.hop_limit = ah_attr->grh.hop_limit; | ||||
| 		ah->av.tclass = ah_attr->grh.traffic_class; | ||||
| 	} | ||||
| 
 | ||||
| 	ah->av.rlid = cpu_to_be16(ah_attr->dlid); | ||||
| 	ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f; | ||||
| 	ah->av.stat_rate_sl = (ah_attr->static_rate << 4) | (ah_attr->sl & 0xf); | ||||
| 
 | ||||
| 	return &ah->ibah; | ||||
| } | ||||
| 
 | ||||
| struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | ||||
| { | ||||
| 	struct mlx5_ib_ah *ah; | ||||
| 
 | ||||
| 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC); | ||||
| 	if (!ah) | ||||
| 		return ERR_PTR(-ENOMEM); | ||||
| 
 | ||||
| 	return create_ib_ah(ah_attr, ah); /* never fails */ | ||||
| } | ||||
| 
 | ||||
| int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) | ||||
| { | ||||
| 	struct mlx5_ib_ah *ah = to_mah(ibah); | ||||
| 	u32 tmp; | ||||
| 
 | ||||
| 	memset(ah_attr, 0, sizeof(*ah_attr)); | ||||
| 
 | ||||
| 	tmp = be32_to_cpu(ah->av.grh_gid_fl); | ||||
| 	if (tmp & (1 << 30)) { | ||||
| 		ah_attr->ah_flags = IB_AH_GRH; | ||||
| 		ah_attr->grh.sgid_index = (tmp >> 20) & 0xff; | ||||
| 		ah_attr->grh.flow_label = tmp & 0xfffff; | ||||
| 		memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16); | ||||
| 		ah_attr->grh.hop_limit = ah->av.hop_limit; | ||||
| 		ah_attr->grh.traffic_class = ah->av.tclass; | ||||
| 	} | ||||
| 	ah_attr->dlid = be16_to_cpu(ah->av.rlid); | ||||
| 	ah_attr->static_rate = ah->av.stat_rate_sl >> 4; | ||||
| 	ah_attr->sl = ah->av.stat_rate_sl & 0xf; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int mlx5_ib_destroy_ah(struct ib_ah *ah) | ||||
| { | ||||
| 	kfree(to_mah(ah)); | ||||
| 	return 0; | ||||
| } | ||||
							
								
								
									
										843
									
								
								drivers/infiniband/hw/mlx5/cq.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										843
									
								
								drivers/infiniband/hw/mlx5/cq.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,843 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kref.h> | ||||
| #include <rdma/ib_umem.h> | ||||
| #include "mlx5_ib.h" | ||||
| #include "user.h" | ||||
| 
 | ||||
| static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) | ||||
| { | ||||
| 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; | ||||
| 
 | ||||
| 	ibcq->comp_handler(ibcq, ibcq->cq_context); | ||||
| } | ||||
| 
 | ||||
| static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) | ||||
| { | ||||
| 	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); | ||||
| 	struct ib_cq *ibcq = &cq->ibcq; | ||||
| 	struct ib_event event; | ||||
| 
 | ||||
| 	if (type != MLX5_EVENT_TYPE_CQ_ERROR) { | ||||
| 		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n", | ||||
| 			     type, mcq->cqn); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	if (ibcq->event_handler) { | ||||
| 		event.device     = &dev->ib_dev; | ||||
| 		event.event      = IB_EVENT_CQ_ERR; | ||||
| 		event.element.cq = ibcq; | ||||
| 		ibcq->event_handler(&event, ibcq->cq_context); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size) | ||||
| { | ||||
| 	return mlx5_buf_offset(&buf->buf, n * size); | ||||
| } | ||||
| 
 | ||||
| static void *get_cqe(struct mlx5_ib_cq *cq, int n) | ||||
| { | ||||
| 	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); | ||||
| } | ||||
| 
 | ||||
| static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) | ||||
| { | ||||
| 	void *cqe = get_cqe(cq, n & cq->ibcq.cqe); | ||||
| 	struct mlx5_cqe64 *cqe64; | ||||
| 
 | ||||
| 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; | ||||
| 	return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ | ||||
| 		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; | ||||
| } | ||||
| 
 | ||||
| static void *next_cqe_sw(struct mlx5_ib_cq *cq) | ||||
| { | ||||
| 	return get_sw_cqe(cq, cq->mcq.cons_index); | ||||
| } | ||||
| 
 | ||||
| static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) | ||||
| { | ||||
| 	switch (wq->wr_data[idx]) { | ||||
| 	case MLX5_IB_WR_UMR: | ||||
| 		return 0; | ||||
| 
 | ||||
| 	case IB_WR_LOCAL_INV: | ||||
| 		return IB_WC_LOCAL_INV; | ||||
| 
 | ||||
| 	case IB_WR_FAST_REG_MR: | ||||
| 		return IB_WC_FAST_REG_MR; | ||||
| 
 | ||||
| 	default: | ||||
| 		pr_warn("unknown completion status\n"); | ||||
| 		return 0; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, | ||||
| 			    struct mlx5_ib_wq *wq, int idx) | ||||
| { | ||||
| 	wc->wc_flags = 0; | ||||
| 	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { | ||||
| 	case MLX5_OPCODE_RDMA_WRITE_IMM: | ||||
| 		wc->wc_flags |= IB_WC_WITH_IMM; | ||||
| 	case MLX5_OPCODE_RDMA_WRITE: | ||||
| 		wc->opcode    = IB_WC_RDMA_WRITE; | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_SEND_IMM: | ||||
| 		wc->wc_flags |= IB_WC_WITH_IMM; | ||||
| 	case MLX5_OPCODE_SEND: | ||||
| 	case MLX5_OPCODE_SEND_INVAL: | ||||
| 		wc->opcode    = IB_WC_SEND; | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_RDMA_READ: | ||||
| 		wc->opcode    = IB_WC_RDMA_READ; | ||||
| 		wc->byte_len  = be32_to_cpu(cqe->byte_cnt); | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_ATOMIC_CS: | ||||
| 		wc->opcode    = IB_WC_COMP_SWAP; | ||||
| 		wc->byte_len  = 8; | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_ATOMIC_FA: | ||||
| 		wc->opcode    = IB_WC_FETCH_ADD; | ||||
| 		wc->byte_len  = 8; | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_ATOMIC_MASKED_CS: | ||||
| 		wc->opcode    = IB_WC_MASKED_COMP_SWAP; | ||||
| 		wc->byte_len  = 8; | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_ATOMIC_MASKED_FA: | ||||
| 		wc->opcode    = IB_WC_MASKED_FETCH_ADD; | ||||
| 		wc->byte_len  = 8; | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_BIND_MW: | ||||
| 		wc->opcode    = IB_WC_BIND_MW; | ||||
| 		break; | ||||
| 	case MLX5_OPCODE_UMR: | ||||
| 		wc->opcode = get_umr_comp(wq, idx); | ||||
| 		break; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_GRH_IN_BUFFER = 1, | ||||
| 	MLX5_GRH_IN_CQE	   = 2, | ||||
| }; | ||||
| 
 | ||||
| static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, | ||||
| 			     struct mlx5_ib_qp *qp) | ||||
| { | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); | ||||
| 	struct mlx5_ib_srq *srq; | ||||
| 	struct mlx5_ib_wq *wq; | ||||
| 	u16 wqe_ctr; | ||||
| 	u8 g; | ||||
| 
 | ||||
| 	if (qp->ibqp.srq || qp->ibqp.xrcd) { | ||||
| 		struct mlx5_core_srq *msrq = NULL; | ||||
| 
 | ||||
| 		if (qp->ibqp.xrcd) { | ||||
| 			msrq = mlx5_core_get_srq(&dev->mdev, | ||||
| 						 be32_to_cpu(cqe->srqn)); | ||||
| 			srq = to_mibsrq(msrq); | ||||
| 		} else { | ||||
| 			srq = to_msrq(qp->ibqp.srq); | ||||
| 		} | ||||
| 		if (srq) { | ||||
| 			wqe_ctr = be16_to_cpu(cqe->wqe_counter); | ||||
| 			wc->wr_id = srq->wrid[wqe_ctr]; | ||||
| 			mlx5_ib_free_srq_wqe(srq, wqe_ctr); | ||||
| 			if (msrq && atomic_dec_and_test(&msrq->refcount)) | ||||
| 				complete(&msrq->free); | ||||
| 		} | ||||
| 	} else { | ||||
| 		wq	  = &qp->rq; | ||||
| 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; | ||||
| 		++wq->tail; | ||||
| 	} | ||||
| 	wc->byte_len = be32_to_cpu(cqe->byte_cnt); | ||||
| 
 | ||||
| 	switch (cqe->op_own >> 4) { | ||||
| 	case MLX5_CQE_RESP_WR_IMM: | ||||
| 		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM; | ||||
| 		wc->wc_flags	= IB_WC_WITH_IMM; | ||||
| 		wc->ex.imm_data = cqe->imm_inval_pkey; | ||||
| 		break; | ||||
| 	case MLX5_CQE_RESP_SEND: | ||||
| 		wc->opcode   = IB_WC_RECV; | ||||
| 		wc->wc_flags = 0; | ||||
| 		break; | ||||
| 	case MLX5_CQE_RESP_SEND_IMM: | ||||
| 		wc->opcode	= IB_WC_RECV; | ||||
| 		wc->wc_flags	= IB_WC_WITH_IMM; | ||||
| 		wc->ex.imm_data = cqe->imm_inval_pkey; | ||||
| 		break; | ||||
| 	case MLX5_CQE_RESP_SEND_INV: | ||||
| 		wc->opcode	= IB_WC_RECV; | ||||
| 		wc->wc_flags	= IB_WC_WITH_INVALIDATE; | ||||
| 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); | ||||
| 		break; | ||||
| 	} | ||||
| 	wc->slid	   = be16_to_cpu(cqe->slid); | ||||
| 	wc->sl		   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; | ||||
| 	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; | ||||
| 	wc->dlid_path_bits = cqe->ml_path; | ||||
| 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; | ||||
| 	wc->wc_flags |= g ? IB_WC_GRH : 0; | ||||
| 	wc->pkey_index     = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; | ||||
| } | ||||
| 
 | ||||
| static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) | ||||
| { | ||||
| 	__be32 *p = (__be32 *)cqe; | ||||
| 	int i; | ||||
| 
 | ||||
| 	mlx5_ib_warn(dev, "dump error cqe\n"); | ||||
| 	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4) | ||||
| 		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]), | ||||
| 			be32_to_cpu(p[1]), be32_to_cpu(p[2]), | ||||
| 			be32_to_cpu(p[3])); | ||||
| } | ||||
| 
 | ||||
| static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, | ||||
| 				  struct mlx5_err_cqe *cqe, | ||||
| 				  struct ib_wc *wc) | ||||
| { | ||||
| 	int dump = 1; | ||||
| 
 | ||||
| 	switch (cqe->syndrome) { | ||||
| 	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR: | ||||
| 		wc->status = IB_WC_LOC_LEN_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR: | ||||
| 		wc->status = IB_WC_LOC_QP_OP_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR: | ||||
| 		wc->status = IB_WC_LOC_PROT_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR: | ||||
| 		dump = 0; | ||||
| 		wc->status = IB_WC_WR_FLUSH_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_MW_BIND_ERR: | ||||
| 		wc->status = IB_WC_MW_BIND_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_BAD_RESP_ERR: | ||||
| 		wc->status = IB_WC_BAD_RESP_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR: | ||||
| 		wc->status = IB_WC_LOC_ACCESS_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: | ||||
| 		wc->status = IB_WC_REM_INV_REQ_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR: | ||||
| 		wc->status = IB_WC_REM_ACCESS_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR: | ||||
| 		wc->status = IB_WC_REM_OP_ERR; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: | ||||
| 		wc->status = IB_WC_RETRY_EXC_ERR; | ||||
| 		dump = 0; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR: | ||||
| 		wc->status = IB_WC_RNR_RETRY_EXC_ERR; | ||||
| 		dump = 0; | ||||
| 		break; | ||||
| 	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR: | ||||
| 		wc->status = IB_WC_REM_ABORT_ERR; | ||||
| 		break; | ||||
| 	default: | ||||
| 		wc->status = IB_WC_GENERAL_ERR; | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| 	wc->vendor_err = cqe->vendor_err_synd; | ||||
| 	if (dump) | ||||
| 		dump_cqe(dev, cqe); | ||||
| } | ||||
| 
 | ||||
| static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx) | ||||
| { | ||||
| 	/* TBD: waiting decision
 | ||||
| 	*/ | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx) | ||||
| { | ||||
| 	struct mlx5_wqe_data_seg *dpseg; | ||||
| 	void *addr; | ||||
| 
 | ||||
| 	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + | ||||
| 		sizeof(struct mlx5_wqe_raddr_seg) + | ||||
| 		sizeof(struct mlx5_wqe_atomic_seg); | ||||
| 	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr); | ||||
| 	return addr; | ||||
| } | ||||
| 
 | ||||
| static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, | ||||
| 			  uint16_t idx) | ||||
| { | ||||
| 	void *addr; | ||||
| 	int byte_count; | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (!is_atomic_response(qp, idx)) | ||||
| 		return; | ||||
| 
 | ||||
| 	byte_count = be32_to_cpu(cqe64->byte_cnt); | ||||
| 	addr = mlx5_get_atomic_laddr(qp, idx); | ||||
| 
 | ||||
| 	if (byte_count == 4) { | ||||
| 		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr)); | ||||
| 	} else { | ||||
| 		for (i = 0; i < byte_count; i += 8) { | ||||
| 			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr)); | ||||
| 			addr += 8; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return; | ||||
| } | ||||
| 
 | ||||
| static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, | ||||
| 			   u16 tail, u16 head) | ||||
| { | ||||
| 	int idx; | ||||
| 
 | ||||
| 	do { | ||||
| 		idx = tail & (qp->sq.wqe_cnt - 1); | ||||
| 		handle_atomic(qp, cqe64, idx); | ||||
| 		if (idx == head) | ||||
| 			break; | ||||
| 
 | ||||
| 		tail = qp->sq.w_list[idx].next; | ||||
| 	} while (1); | ||||
| 	tail = qp->sq.w_list[idx].next; | ||||
| 	qp->sq.last_poll = tail; | ||||
| } | ||||
| 
 | ||||
| static int mlx5_poll_one(struct mlx5_ib_cq *cq, | ||||
| 			 struct mlx5_ib_qp **cur_qp, | ||||
| 			 struct ib_wc *wc) | ||||
| { | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); | ||||
| 	struct mlx5_err_cqe *err_cqe; | ||||
| 	struct mlx5_cqe64 *cqe64; | ||||
| 	struct mlx5_core_qp *mqp; | ||||
| 	struct mlx5_ib_wq *wq; | ||||
| 	uint8_t opcode; | ||||
| 	uint32_t qpn; | ||||
| 	u16 wqe_ctr; | ||||
| 	void *cqe; | ||||
| 	int idx; | ||||
| 
 | ||||
| 	cqe = next_cqe_sw(cq); | ||||
| 	if (!cqe) | ||||
| 		return -EAGAIN; | ||||
| 
 | ||||
| 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; | ||||
| 
 | ||||
| 	++cq->mcq.cons_index; | ||||
| 
 | ||||
| 	/* Make sure we read CQ entry contents after we've checked the
 | ||||
| 	 * ownership bit. | ||||
| 	 */ | ||||
| 	rmb(); | ||||
| 
 | ||||
| 	/* TBD: resize CQ */ | ||||
| 
 | ||||
| 	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; | ||||
| 	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { | ||||
| 		/* We do not have to take the QP table lock here,
 | ||||
| 		 * because CQs will be locked while QPs are removed | ||||
| 		 * from the table. | ||||
| 		 */ | ||||
| 		mqp = __mlx5_qp_lookup(&dev->mdev, qpn); | ||||
| 		if (unlikely(!mqp)) { | ||||
| 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n", | ||||
| 				     cq->mcq.cqn, qpn); | ||||
| 			return -EINVAL; | ||||
| 		} | ||||
| 
 | ||||
| 		*cur_qp = to_mibqp(mqp); | ||||
| 	} | ||||
| 
 | ||||
| 	wc->qp  = &(*cur_qp)->ibqp; | ||||
| 	opcode = cqe64->op_own >> 4; | ||||
| 	switch (opcode) { | ||||
| 	case MLX5_CQE_REQ: | ||||
| 		wq = &(*cur_qp)->sq; | ||||
| 		wqe_ctr = be16_to_cpu(cqe64->wqe_counter); | ||||
| 		idx = wqe_ctr & (wq->wqe_cnt - 1); | ||||
| 		handle_good_req(wc, cqe64, wq, idx); | ||||
| 		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); | ||||
| 		wc->wr_id = wq->wrid[idx]; | ||||
| 		wq->tail = wq->wqe_head[idx] + 1; | ||||
| 		wc->status = IB_WC_SUCCESS; | ||||
| 		break; | ||||
| 	case MLX5_CQE_RESP_WR_IMM: | ||||
| 	case MLX5_CQE_RESP_SEND: | ||||
| 	case MLX5_CQE_RESP_SEND_IMM: | ||||
| 	case MLX5_CQE_RESP_SEND_INV: | ||||
| 		handle_responder(wc, cqe64, *cur_qp); | ||||
| 		wc->status = IB_WC_SUCCESS; | ||||
| 		break; | ||||
| 	case MLX5_CQE_RESIZE_CQ: | ||||
| 		break; | ||||
| 	case MLX5_CQE_REQ_ERR: | ||||
| 	case MLX5_CQE_RESP_ERR: | ||||
| 		err_cqe = (struct mlx5_err_cqe *)cqe64; | ||||
| 		mlx5_handle_error_cqe(dev, err_cqe, wc); | ||||
| 		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n", | ||||
| 			    opcode == MLX5_CQE_REQ_ERR ? | ||||
| 			    "Requestor" : "Responder", cq->mcq.cqn); | ||||
| 		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n", | ||||
| 			    err_cqe->syndrome, err_cqe->vendor_err_synd); | ||||
| 		if (opcode == MLX5_CQE_REQ_ERR) { | ||||
| 			wq = &(*cur_qp)->sq; | ||||
| 			wqe_ctr = be16_to_cpu(cqe64->wqe_counter); | ||||
| 			idx = wqe_ctr & (wq->wqe_cnt - 1); | ||||
| 			wc->wr_id = wq->wrid[idx]; | ||||
| 			wq->tail = wq->wqe_head[idx] + 1; | ||||
| 		} else { | ||||
| 			struct mlx5_ib_srq *srq; | ||||
| 
 | ||||
| 			if ((*cur_qp)->ibqp.srq) { | ||||
| 				srq = to_msrq((*cur_qp)->ibqp.srq); | ||||
| 				wqe_ctr = be16_to_cpu(cqe64->wqe_counter); | ||||
| 				wc->wr_id = srq->wrid[wqe_ctr]; | ||||
| 				mlx5_ib_free_srq_wqe(srq, wqe_ctr); | ||||
| 			} else { | ||||
| 				wq = &(*cur_qp)->rq; | ||||
| 				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; | ||||
| 				++wq->tail; | ||||
| 			} | ||||
| 		} | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | ||||
| { | ||||
| 	struct mlx5_ib_cq *cq = to_mcq(ibcq); | ||||
| 	struct mlx5_ib_qp *cur_qp = NULL; | ||||
| 	unsigned long flags; | ||||
| 	int npolled; | ||||
| 	int err = 0; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&cq->lock, flags); | ||||
| 
 | ||||
| 	for (npolled = 0; npolled < num_entries; npolled++) { | ||||
| 		err = mlx5_poll_one(cq, &cur_qp, wc + npolled); | ||||
| 		if (err) | ||||
| 			break; | ||||
| 	} | ||||
| 
 | ||||
| 	if (npolled) | ||||
| 		mlx5_cq_set_ci(&cq->mcq); | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&cq->lock, flags); | ||||
| 
 | ||||
| 	if (err == 0 || err == -EAGAIN) | ||||
| 		return npolled; | ||||
| 	else | ||||
| 		return err; | ||||
| } | ||||
| 
 | ||||
| int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) | ||||
| { | ||||
| 	mlx5_cq_arm(&to_mcq(ibcq)->mcq, | ||||
| 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? | ||||
| 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, | ||||
| 		    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map, | ||||
| 		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock)); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, | ||||
| 			int nent, int cqe_size) | ||||
| { | ||||
| 	int err; | ||||
| 
 | ||||
| 	err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size, | ||||
| 			     PAGE_SIZE * 2, &buf->buf); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	buf->cqe_size = cqe_size; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) | ||||
| { | ||||
| 	mlx5_buf_free(&dev->mdev, &buf->buf); | ||||
| } | ||||
| 
 | ||||
| static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, | ||||
| 			  struct ib_ucontext *context, struct mlx5_ib_cq *cq, | ||||
| 			  int entries, struct mlx5_create_cq_mbox_in **cqb, | ||||
| 			  int *cqe_size, int *index, int *inlen) | ||||
| { | ||||
| 	struct mlx5_ib_create_cq ucmd; | ||||
| 	int page_shift; | ||||
| 	int npages; | ||||
| 	int ncont; | ||||
| 	int err; | ||||
| 
 | ||||
| 	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) | ||||
| 		return -EFAULT; | ||||
| 
 | ||||
| 	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	*cqe_size = ucmd.cqe_size; | ||||
| 
 | ||||
| 	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, | ||||
| 				   entries * ucmd.cqe_size, | ||||
| 				   IB_ACCESS_LOCAL_WRITE, 1); | ||||
| 	if (IS_ERR(cq->buf.umem)) { | ||||
| 		err = PTR_ERR(cq->buf.umem); | ||||
| 		return err; | ||||
| 	} | ||||
| 
 | ||||
| 	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr, | ||||
| 				  &cq->db); | ||||
| 	if (err) | ||||
| 		goto err_umem; | ||||
| 
 | ||||
| 	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, | ||||
| 			   &ncont, NULL); | ||||
| 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", | ||||
| 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); | ||||
| 
 | ||||
| 	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont; | ||||
| 	*cqb = mlx5_vzalloc(*inlen); | ||||
| 	if (!*cqb) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto err_db; | ||||
| 	} | ||||
| 	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); | ||||
| 	(*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; | ||||
| 
 | ||||
| 	*index = to_mucontext(context)->uuari.uars[0].index; | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| err_db: | ||||
| 	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); | ||||
| 
 | ||||
| err_umem: | ||||
| 	ib_umem_release(cq->buf.umem); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) | ||||
| { | ||||
| 	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); | ||||
| 	ib_umem_release(cq->buf.umem); | ||||
| } | ||||
| 
 | ||||
| static void init_cq_buf(struct mlx5_ib_cq *cq, int nent) | ||||
| { | ||||
| 	int i; | ||||
| 	void *cqe; | ||||
| 	struct mlx5_cqe64 *cqe64; | ||||
| 
 | ||||
| 	for (i = 0; i < nent; i++) { | ||||
| 		cqe = get_cqe(cq, i); | ||||
| 		cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64; | ||||
| 		cqe64->op_own = 0xf1; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
/* Allocate the kernel-side resources of a CQ: a doorbell record, the CQE
 * buffer, and the firmware create-CQ mailbox (*cqb) describing the buffer's
 * pages.
 *
 * On success *cqb points to an mlx5_vzalloc()'ed mailbox of *inlen bytes
 * (freed by the caller with mlx5_vfree()) and *index holds the UAR index to
 * encode in the CQ context.  Returns 0 or a negative errno; on failure all
 * resources allocated here have been released again.
 */
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    struct mlx5_create_cq_mbox_in **cqb,
			    int *index, int *inlen)
{
	int err;

	err = mlx5_db_alloc(&dev->mdev, &cq->db);
	if (err)
		return err;

	/* The doorbell record holds the set_ci and arm doorbells in two
	 * consecutive dwords; both start at zero.
	 */
	cq->mcq.set_ci_db  = cq->db.db;
	cq->mcq.arm_db     = cq->db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	/* hand the buffer to HW: mark every CQE invalid/HW-owned */
	init_cq_buf(cq, entries);

	*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
	*cqb = mlx5_vzalloc(*inlen);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

	/* page size exponent, biased by PAGE_SHIFT as the device expects */
	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT;
	*index = dev->mdev.priv.uuari.uars[0].index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(&dev->mdev, &cq->db);
	return err;
}
| 
 | ||||
/* Tear down a kernel-allocated CQ: free the CQE buffer and the doorbell
 * record, in reverse order of create_cq_kernel().
 */
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(&dev->mdev, &cq->db);
}
| 
 | ||||
| struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, | ||||
| 				int vector, struct ib_ucontext *context, | ||||
| 				struct ib_udata *udata) | ||||
| { | ||||
| 	struct mlx5_create_cq_mbox_in *cqb = NULL; | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(ibdev); | ||||
| 	struct mlx5_ib_cq *cq; | ||||
| 	int uninitialized_var(index); | ||||
| 	int uninitialized_var(inlen); | ||||
| 	int cqe_size; | ||||
| 	int irqn; | ||||
| 	int eqn; | ||||
| 	int err; | ||||
| 
 | ||||
| 	entries = roundup_pow_of_two(entries + 1); | ||||
| 	if (entries < 1 || entries > dev->mdev.caps.max_cqes) | ||||
| 		return ERR_PTR(-EINVAL); | ||||
| 
 | ||||
| 	cq = kzalloc(sizeof(*cq), GFP_KERNEL); | ||||
| 	if (!cq) | ||||
| 		return ERR_PTR(-ENOMEM); | ||||
| 
 | ||||
| 	cq->ibcq.cqe = entries - 1; | ||||
| 	mutex_init(&cq->resize_mutex); | ||||
| 	spin_lock_init(&cq->lock); | ||||
| 	cq->resize_buf = NULL; | ||||
| 	cq->resize_umem = NULL; | ||||
| 
 | ||||
| 	if (context) { | ||||
| 		err = create_cq_user(dev, udata, context, cq, entries, | ||||
| 				     &cqb, &cqe_size, &index, &inlen); | ||||
| 		if (err) | ||||
| 			goto err_create; | ||||
| 	} else { | ||||
| 		/* for now choose 64 bytes till we have a proper interface */ | ||||
| 		cqe_size = 64; | ||||
| 		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, | ||||
| 				       &index, &inlen); | ||||
| 		if (err) | ||||
| 			goto err_create; | ||||
| 	} | ||||
| 
 | ||||
| 	cq->cqe_size = cqe_size; | ||||
| 	cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; | ||||
| 	cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index); | ||||
| 	err = mlx5_vector2eqn(dev, vector, &eqn, &irqn); | ||||
| 	if (err) | ||||
| 		goto err_cqb; | ||||
| 
 | ||||
| 	cqb->ctx.c_eqn = cpu_to_be16(eqn); | ||||
| 	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); | ||||
| 
 | ||||
| 	err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen); | ||||
| 	if (err) | ||||
| 		goto err_cqb; | ||||
| 
 | ||||
| 	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); | ||||
| 	cq->mcq.irqn = irqn; | ||||
| 	cq->mcq.comp  = mlx5_ib_cq_comp; | ||||
| 	cq->mcq.event = mlx5_ib_cq_event; | ||||
| 
 | ||||
| 	if (context) | ||||
| 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { | ||||
| 			err = -EFAULT; | ||||
| 			goto err_cmd; | ||||
| 		} | ||||
| 
 | ||||
| 
 | ||||
| 	mlx5_vfree(cqb); | ||||
| 	return &cq->ibcq; | ||||
| 
 | ||||
| err_cmd: | ||||
| 	mlx5_core_destroy_cq(&dev->mdev, &cq->mcq); | ||||
| 
 | ||||
| err_cqb: | ||||
| 	mlx5_vfree(cqb); | ||||
| 	if (context) | ||||
| 		destroy_cq_user(cq, context); | ||||
| 	else | ||||
| 		destroy_cq_kernel(dev, cq); | ||||
| 
 | ||||
| err_create: | ||||
| 	kfree(cq); | ||||
| 
 | ||||
| 	return ERR_PTR(err); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| int mlx5_ib_destroy_cq(struct ib_cq *cq) | ||||
| { | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(cq->device); | ||||
| 	struct mlx5_ib_cq *mcq = to_mcq(cq); | ||||
| 	struct ib_ucontext *context = NULL; | ||||
| 
 | ||||
| 	if (cq->uobject) | ||||
| 		context = cq->uobject->context; | ||||
| 
 | ||||
| 	mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq); | ||||
| 	if (context) | ||||
| 		destroy_cq_user(mcq, context); | ||||
| 	else | ||||
| 		destroy_cq_kernel(dev, mcq); | ||||
| 
 | ||||
| 	kfree(mcq); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq, | ||||
| 			u32 rsn) | ||||
| { | ||||
| 	u32 lrsn; | ||||
| 
 | ||||
| 	if (srq) | ||||
| 		lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff; | ||||
| 	else | ||||
| 		lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff; | ||||
| 
 | ||||
| 	return rsn == lrsn; | ||||
| } | ||||
| 
 | ||||
/* Remove all CQEs belonging to resource @rsn (an SRQ number when @srq is
 * set, a QP number otherwise) from @cq, compacting the surviving entries.
 * SRQ WQEs referenced by discarded CQEs are returned to the SRQ free list.
 *
 * Caller must hold cq->lock; see mlx5_ib_cq_clean() for the locking wrapper.
 */
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		/* with 128-byte CQEs the 64-byte HW CQE is in the second half */
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, srq, rsn)) {
			if (srq)
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			/* the destination slot keeps its own ownership bit */
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}
| 
 | ||||
| void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) | ||||
| { | ||||
| 	if (!cq) | ||||
| 		return; | ||||
| 
 | ||||
| 	spin_lock_irq(&cq->lock); | ||||
| 	__mlx5_ib_cq_clean(cq, qpn, srq); | ||||
| 	spin_unlock_irq(&cq->lock); | ||||
| } | ||||
| 
 | ||||
/* CQ moderation is not implemented yet; report -ENOSYS to the IB core. */
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -ENOSYS;
}
| 
 | ||||
/* CQ resize is not implemented yet; report -ENOSYS to the IB core. */
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	return -ENOSYS;
}
| 
 | ||||
| int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) | ||||
| { | ||||
| 	struct mlx5_ib_cq *cq; | ||||
| 
 | ||||
| 	if (!ibcq) | ||||
| 		return 128; | ||||
| 
 | ||||
| 	cq = to_mcq(ibcq); | ||||
| 	return cq->cqe_size; | ||||
| } | ||||
							
								
								
									
										100
									
								
								drivers/infiniband/hw/mlx5/doorbell.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										100
									
								
								drivers/infiniband/hw/mlx5/doorbell.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,100 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kref.h> | ||||
| #include <linux/slab.h> | ||||
| #include <rdma/ib_umem.h> | ||||
| 
 | ||||
| #include "mlx5_ib.h" | ||||
| 
 | ||||
/* A pinned user page holding doorbell records.  One instance is shared
 * (refcounted) by all doorbells that live in the same user page.
 */
struct mlx5_ib_user_db_page {
	struct list_head	list;		/* entry in context->db_page_list */
	struct ib_umem	       *umem;		/* pinning of the user page */
	unsigned long		user_virt;	/* page-aligned user VA */
	int			refcnt;		/* doorbells using this page */
};
| 
 | ||||
/* Map the user page containing a doorbell record at user address @virt and
 * fill @db with its DMA address and owning page.
 *
 * Pages are shared: the context's db_page_list is searched first and only a
 * miss pins a new page with ib_umem_get().  Sharing is refcounted; the
 * matching mlx5_ib_db_unmap_user() drops the reference.  All list and
 * refcount manipulation is serialized by context->db_page_mutex.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db)
{
	struct mlx5_ib_user_db_page *page;
	struct ib_umem_chunk *chunk;
	int err = 0;

	mutex_lock(&context->db_page_mutex);

	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	page->user_virt = (virt & PAGE_MASK);
	page->refcnt    = 0;
	page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				      PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->db_page_list);

found:
	/* DMA address of the record = page DMA base + offset within the page */
	chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
	db->dma		= sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}
| 
 | ||||
| void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db) | ||||
| { | ||||
| 	mutex_lock(&context->db_page_mutex); | ||||
| 
 | ||||
| 	if (!--db->u.user_page->refcnt) { | ||||
| 		list_del(&db->u.user_page->list); | ||||
| 		ib_umem_release(db->u.user_page->umem); | ||||
| 		kfree(db->u.user_page); | ||||
| 	} | ||||
| 
 | ||||
| 	mutex_unlock(&context->db_page_mutex); | ||||
| } | ||||
							
								
								
									
										139
									
								
								drivers/infiniband/hw/mlx5/mad.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										139
									
								
								drivers/infiniband/hw/mlx5/mad.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,139 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include <rdma/ib_mad.h> | ||||
| #include <rdma/ib_smi.h> | ||||
| #include "mlx5_ib.h" | ||||
| 
 | ||||
/* Vendor-specific MAD management classes accepted by mlx5_ib_process_mad() */
enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};
| 
 | ||||
| int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, | ||||
| 		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh, | ||||
| 		 void *in_mad, void *response_mad) | ||||
| { | ||||
| 	u8 op_modifier = 0; | ||||
| 
 | ||||
| 	/* Key check traps can't be generated unless we have in_wc to
 | ||||
| 	 * tell us where to send the trap. | ||||
| 	 */ | ||||
| 	if (ignore_mkey || !in_wc) | ||||
| 		op_modifier |= 0x1; | ||||
| 	if (ignore_bkey || !in_wc) | ||||
| 		op_modifier |= 0x2; | ||||
| 
 | ||||
| 	return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port); | ||||
| } | ||||
| 
 | ||||
/* Handle an incoming management datagram on @port_num.
 *
 * Consumes traps arriving with a permissive source LID, filters out
 * methods/attributes the device should not see, and forwards the rest to
 * the firmware through mlx5_MAD_IFC().  Returns IB_MAD_RESULT_* bits as
 * defined by the ib_mad layer.
 */
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	/* drop traps from the permissive LID without forwarding them */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		/* subnet management: only Get/Set/TrapRepress go to the device */
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		/* perf/vendor/congestion classes: only Get/Set are supported */
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		/* any other class is silently ignored */
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
| 
 | ||||
| int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) | ||||
| { | ||||
| 	struct ib_smp *in_mad  = NULL; | ||||
| 	struct ib_smp *out_mad = NULL; | ||||
| 	int err = -ENOMEM; | ||||
| 	u16 packet_error; | ||||
| 
 | ||||
| 	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL); | ||||
| 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); | ||||
| 	if (!in_mad || !out_mad) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	init_query_mad(in_mad); | ||||
| 	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; | ||||
| 	in_mad->attr_mod = cpu_to_be32(port); | ||||
| 
 | ||||
| 	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); | ||||
| 
 | ||||
| 	packet_error = be16_to_cpu(out_mad->status); | ||||
| 
 | ||||
| 	dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ? | ||||
| 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0; | ||||
| 
 | ||||
| out: | ||||
| 	kfree(in_mad); | ||||
| 	kfree(out_mad); | ||||
| 	return err; | ||||
| } | ||||
							
								
								
									
										1504
									
								
								drivers/infiniband/hw/mlx5/main.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										1504
									
								
								drivers/infiniband/hw/mlx5/main.c
									
										
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load diff
											
										
									
								
							
							
								
								
									
										162
									
								
								drivers/infiniband/hw/mlx5/mem.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										162
									
								
								drivers/infiniband/hw/mlx5/mem.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,162 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/module.h> | ||||
| #include <rdma/ib_umem.h> | ||||
| #include "mlx5_ib.h" | ||||
| 
 | ||||
| /* @umem: umem object to scan
 | ||||
|  * @addr: ib virtual address requested by the user | ||||
|  * @count: number of PAGE_SIZE pages covered by umem | ||||
|  * @shift: page shift for the compound pages found in the region | ||||
|  * @ncont: number of compund pages | ||||
|  * @order: log2 of the number of compound pages | ||||
|  */ | ||||
| void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | ||||
| 			int *ncont, int *order) | ||||
| { | ||||
| 	struct ib_umem_chunk *chunk; | ||||
| 	unsigned long tmp; | ||||
| 	unsigned long m; | ||||
| 	int i, j, k; | ||||
| 	u64 base = 0; | ||||
| 	int p = 0; | ||||
| 	int skip; | ||||
| 	int mask; | ||||
| 	u64 len; | ||||
| 	u64 pfn; | ||||
| 
 | ||||
| 	addr = addr >> PAGE_SHIFT; | ||||
| 	tmp = (unsigned long)addr; | ||||
| 	m = find_first_bit(&tmp, sizeof(tmp)); | ||||
| 	skip = 1 << m; | ||||
| 	mask = skip - 1; | ||||
| 	i = 0; | ||||
| 	list_for_each_entry(chunk, &umem->chunk_list, list) | ||||
| 		for (j = 0; j < chunk->nmap; j++) { | ||||
| 			len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT; | ||||
| 			pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT; | ||||
| 			for (k = 0; k < len; k++) { | ||||
| 				if (!(i & mask)) { | ||||
| 					tmp = (unsigned long)pfn; | ||||
| 					m = min(m, find_first_bit(&tmp, sizeof(tmp))); | ||||
| 					skip = 1 << m; | ||||
| 					mask = skip - 1; | ||||
| 					base = pfn; | ||||
| 					p = 0; | ||||
| 				} else { | ||||
| 					if (base + p != pfn) { | ||||
| 						tmp = (unsigned long)p; | ||||
| 						m = find_first_bit(&tmp, sizeof(tmp)); | ||||
| 						skip = 1 << m; | ||||
| 						mask = skip - 1; | ||||
| 						base = pfn; | ||||
| 						p = 0; | ||||
| 					} | ||||
| 				} | ||||
| 				p++; | ||||
| 				i++; | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 	if (i) { | ||||
| 		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m); | ||||
| 
 | ||||
| 		if (order) | ||||
| 			*order = ilog2(roundup_pow_of_two(i) >> m); | ||||
| 
 | ||||
| 		*ncont = DIV_ROUND_UP(i, (1 << m)); | ||||
| 	} else { | ||||
| 		m  = 0; | ||||
| 
 | ||||
| 		if (order) | ||||
| 			*order = 0; | ||||
| 
 | ||||
| 		*ncont = 0; | ||||
| 	} | ||||
| 	*shift = PAGE_SHIFT + m; | ||||
| 	*count = i; | ||||
| } | ||||
| 
 | ||||
/* Write the DMA addresses of @umem into the @pas array of a firmware
 * mailbox, one entry per compound page of size 2^@page_shift (as computed
 * by mlx5_ib_cont_pages()).
 *
 * When @umr is set the two low bits of each address are set as well --
 * NOTE(review): presumably "MTT present" flags for UMR work requests;
 * confirm against the mlx5 PRM.
 */
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int umr)
{
	int shift = page_shift - PAGE_SHIFT;	/* PAGE_SIZE pages per compound page (log2) */
	int mask = (1 << shift) - 1;
	struct ib_umem_chunk *chunk;
	int i, j, k;
	u64 cur = 0;
	u64 base;
	int len;

	i = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; j++) {
			len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
			base = sg_dma_address(&chunk->page_list[j]);
			for (k = 0; k < len; k++) {
				/* emit one entry at the start of each compound page */
				if (!(i & mask)) {
					cur = base + (k << PAGE_SHIFT);
					if (umr)
						cur |= 3;

					pas[i >> shift] = cpu_to_be64(cur);
					mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
						    i >> shift, be64_to_cpu(pas[i >> shift]));
				}  else
					mlx5_ib_dbg(dev, "=====> 0x%llx\n",
						    base + (k << PAGE_SHIFT));
				i++;
			}
		}
}
| 
 | ||||
| int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) | ||||
| { | ||||
| 	u64 page_size; | ||||
| 	u64 page_mask; | ||||
| 	u64 off_size; | ||||
| 	u64 off_mask; | ||||
| 	u64 buf_off; | ||||
| 
 | ||||
| 	page_size = 1 << page_shift; | ||||
| 	page_mask = page_size - 1; | ||||
| 	buf_off = addr & page_mask; | ||||
| 	off_size = page_size >> 6; | ||||
| 	off_mask = off_size - 1; | ||||
| 
 | ||||
| 	if (buf_off & off_mask) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	*offset = buf_off >> ilog2(off_size); | ||||
| 	return 0; | ||||
| } | ||||
							
								
								
									
										545
									
								
								drivers/infiniband/hw/mlx5/mlx5_ib.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										545
									
								
								drivers/infiniband/hw/mlx5/mlx5_ib.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,545 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_IB_H | ||||
| #define MLX5_IB_H | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/sched.h> | ||||
| #include <rdma/ib_verbs.h> | ||||
| #include <rdma/ib_smi.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cq.h> | ||||
| #include <linux/mlx5/qp.h> | ||||
| #include <linux/mlx5/srq.h> | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
/* Driver logging helpers: prefix every message with the IB device name,
 * function, line number and the current pid.
 */
#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
| 
 | ||||
/* mmap offsets encode a command in bits above MLX5_IB_MMAP_CMD_SHIFT */
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1, /* always last */
};

/* CQE scatter settings -- NOTE(review): device-defined values, see mlx5 PRM */
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

/* flag bits controlling mlx5_MAD_IFC() behavior */
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};
| 
 | ||||
/* Per-user-context driver state: the list of pinned doorbell pages
 * (see doorbell.c) and the context's UAR allocation info.
 */
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;	/* of mlx5_ib_user_db_page */

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
};
| 
 | ||||
/* Convert a core ib_ucontext pointer to the driver's containing struct */
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
| 
 | ||||
/* Driver protection domain object */
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;	/* protection domain number */
	u32			pa_lkey;
};
| 
 | ||||
/* Use macros here so that don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

/* per-WQE bookkeeping: opcode plus the index of the next entry --
 * NOTE(review): usage lives in qp.c, not visible here */
struct wr_list {
	u16	opcode;
	u16	next;
};
| 
 | ||||
/* A work queue (send or receive) belonging to a QP */
struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16		        unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};
| 
 | ||||
/* values for mlx5_ib_qp.create_type */
enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};
| 
 | ||||
/* Driver representation of a QP: the core mlx5 QP object plus the
 * send/receive work queues, doorbell and creation/state bookkeeping.
 */
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_buf		buf;		/* kernel QP buffer */

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;		/* receive work queue */

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx5_ib_wq	sq;		/* send work queue */

	struct ib_umem	       *umem;		/* user QP buffer (NULL for kernel QPs) */
	int			buf_size;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;		/* enum mlx5_ib_qp_flags */
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;		/* blue-flame register */
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;	/* MLX5_QP_USER/KERNEL/EMPTY */
	u32			pa_lkey;
};
| 
 | ||||
/* CQ backing store: kernel buffer or pinned user memory, plus CQE size. */
struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;		/* NULL for kernel CQs */
	int			cqe_size;	/* bytes per CQE */
};
| 
 | ||||
/* Bits stored in mlx5_ib_qp::flags. */
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 1,
};
| 
 | ||||
/* Bookkeeping for a shared MR: its id and the pinned user memory. */
struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};
| 
 | ||||
/* Completion queue: core CQ object, its buffer, doorbell and the
 * state used while a resize operation is in flight.
 */
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_resize *resize_buf;	/* staging buffer during resize */
	struct ib_umem	       *resize_umem;
	int			cqe_size;
};
| 
 | ||||
/* Shared receive queue: core SRQ object, buffer (kernel or user),
 * doorbell record and the free-list head/tail used when posting.
 */
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;		/* kernel SRQ buffer */
	struct mlx5_db		db;
	u64		       *wrid;		/* wr_id per WQE slot */
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;		/* next free WQE index */
	int			tail;		/* last free WQE index */
	u16			wqe_ctr;	/* doorbell counter of posted WQEs */
	struct ib_umem	       *umem;		/* user SRQ buffer (NULL for kernel) */
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;		/* WQE signature enabled */
};
| 
 | ||||
/* XRC domain: IB object plus the firmware XRCD number. */
struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};
| 
 | ||||
/* Memory region: core MKey plus pinned pages and the DMA-mapped PAS
 * array used for UMR (user-mode re-registration) operations.
 */
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;		/* linkage on an MR-cache entry */
	int			order;		/* log2 of cached region size */
	int			umred;		/* allocated through UMR */
	__be64			*pas;		/* page address list */
	dma_addr_t		dma;		/* DMA address of pas */
	int			npages;
	struct completion	done;		/* signalled when UMR completes */
	enum ib_wc_status	status;		/* completion status of the UMR */
};
| 
 | ||||
/* Fast-registration page list with its DMA-coherent mirror. */
struct mlx5_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;	/* device-visible copy */
	dma_addr_t			map;
};
| 
 | ||||
/* Resources shared by all UMR (user-mode re-registration) operations:
 * a dedicated PD/CQ/QP pair and the MR used for posting UMR WQEs.
 */
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	struct ib_mr	*mr;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
| 
 | ||||
/* FMR mapping state — stored in mlx5_ib_fmr::state. */
enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};
| 
 | ||||
/* Fast memory region: core MKey plus the state machine and the
 * pre-built work requests used to map/unmap it.
 */
struct mlx5_ib_fmr {
	struct ib_fmr			ibfmr;
	struct mlx5_core_mr		mr;
	int				access_flags;
	int				state;		/* MLX5_FMR_* */
	/* protect fmr state
	 */
	spinlock_t			lock;
	u64				wrid;
	struct ib_send_wr		wr[2];		/* map + invalidate WRs */
	u8				page_shift;
	struct ib_fast_reg_page_list	page_list;
};
| 
 | ||||
/* One size-class of the MR cache: a list of pre-created MRs of a given
 * order, with counters exported through debugfs and work items that
 * grow/shrink the list in the background.
 */
struct mlx5_cache_ent {
	struct list_head	head;		/* cached MRs of this order */
	/* sync access to the cache entry
	 */
	spinlock_t		lock;


	struct dentry	       *dir;		/* debugfs directory */
	char                    name[4];
	u32                     order;		/* log2 of the region size */
	u32			size;		/* total MRs created for this entry */
	u32                     cur;		/* MRs currently on the free list */
	u32                     miss;		/* allocation requests that missed */
	u32			limit;		/* low watermark to refill to */

	struct dentry          *fsize;		/* debugfs files for the counters above */
	struct dentry          *fcur;
	struct dentry          *fmiss;
	struct dentry          *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;		/* immediate cache maintenance */
	struct delayed_work	dwork;		/* deferred cache maintenance */
};
| 
 | ||||
/* The whole MR cache: one entry per supported size class plus the
 * workqueue that services the per-entry maintenance work items.
 */
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;	/* set during teardown */
	struct dentry		*root;		/* debugfs root */
	unsigned long		last_add;	/* jiffies of last MR added */
};
| 
 | ||||
/* Device-global helper objects created at init time (e.g. the XRCD
 * and CQ that back SRQs of type IB_SRQT_BASIC — see mlx5_ib_create_srq()).
 */
struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
};
| 
 | ||||
/* Top-level driver device: the IB device embedding the mlx5 core
 * device, plus device-wide resources (UMR machinery, MR cache, ...).
 */
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	struct list_head		eqs_list;
	int				num_ports;
	int				num_comp_vectors;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;	/* device registered and usable */
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	spinlock_t			mr_lock;
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
};
| 
 | ||||
/* container_of() conversion helpers: each maps a core/IB object to the
 * driver structure that embeds it.  The to_mib* variants convert from
 * mlx5 core objects, the to_m* variants from IB core objects.
 */
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}
| 
 | ||||
/* Address handle: IB AH plus the hardware address vector. */
struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};
| 
 | ||||
/* Convert a generic ib_ah to the embedding mlx5 address handle. */
static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

/* Convert a mlx5 core device to the embedding IB device. */
static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
{
	return container_of(dev, struct mlx5_ib_dev, mdev);
}

/* Convert a PCI device to the owning mlx5 IB device. */
static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
{
	return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
}
| 
 | ||||
| int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, | ||||
| 			struct mlx5_db *db); | ||||
| void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db); | ||||
| void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); | ||||
| void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); | ||||
| void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); | ||||
| int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, | ||||
| 		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh, | ||||
| 		 void *in_mad, void *response_mad); | ||||
| struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, | ||||
| 			   struct mlx5_ib_ah *ah); | ||||
| struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); | ||||
| int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); | ||||
| int mlx5_ib_destroy_ah(struct ib_ah *ah); | ||||
| struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | ||||
| 				  struct ib_srq_init_attr *init_attr, | ||||
| 				  struct ib_udata *udata); | ||||
| int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||||
| 		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); | ||||
| int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); | ||||
| int mlx5_ib_destroy_srq(struct ib_srq *srq); | ||||
| int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||||
| 			  struct ib_recv_wr **bad_wr); | ||||
| struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, | ||||
| 				struct ib_qp_init_attr *init_attr, | ||||
| 				struct ib_udata *udata); | ||||
| int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||||
| 		      int attr_mask, struct ib_udata *udata); | ||||
| int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, | ||||
| 		     struct ib_qp_init_attr *qp_init_attr); | ||||
| int mlx5_ib_destroy_qp(struct ib_qp *qp); | ||||
| int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | ||||
| 		      struct ib_send_wr **bad_wr); | ||||
| int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | ||||
| 		      struct ib_recv_wr **bad_wr); | ||||
| void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n); | ||||
| struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, | ||||
| 				int vector, struct ib_ucontext *context, | ||||
| 				struct ib_udata *udata); | ||||
| int mlx5_ib_destroy_cq(struct ib_cq *cq); | ||||
| int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | ||||
| int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); | ||||
| int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); | ||||
| int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); | ||||
| struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc); | ||||
| struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | ||||
| 				  u64 virt_addr, int access_flags, | ||||
| 				  struct ib_udata *udata); | ||||
| int mlx5_ib_dereg_mr(struct ib_mr *ibmr); | ||||
| struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, | ||||
| 					int max_page_list_len); | ||||
| struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, | ||||
| 							       int page_list_len); | ||||
| void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); | ||||
| struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc, | ||||
| 				 struct ib_fmr_attr *fmr_attr); | ||||
| int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | ||||
| 		      int npages, u64 iova); | ||||
| int mlx5_ib_unmap_fmr(struct list_head *fmr_list); | ||||
| int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr); | ||||
| int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | ||||
| 			struct ib_wc *in_wc, struct ib_grh *in_grh, | ||||
| 			struct ib_mad *in_mad, struct ib_mad *out_mad); | ||||
| struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, | ||||
| 					  struct ib_ucontext *context, | ||||
| 					  struct ib_udata *udata); | ||||
| int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); | ||||
| int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn); | ||||
| int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); | ||||
| int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); | ||||
| int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, | ||||
| 		       struct ib_port_attr *props); | ||||
| int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); | ||||
| void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev); | ||||
| void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | ||||
| 			int *ncont, int *order); | ||||
| void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, | ||||
| 			  int page_shift, __be64 *pas, int umr); | ||||
| void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num); | ||||
| int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq); | ||||
| int mlx5_mr_cache_init(struct mlx5_ib_dev *dev); | ||||
| int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev); | ||||
| int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift); | ||||
| void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context); | ||||
| 
 | ||||
| static inline void init_query_mad(struct ib_smp *mad) | ||||
| { | ||||
| 	mad->base_version  = 1; | ||||
| 	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED; | ||||
| 	mad->class_version = 1; | ||||
| 	mad->method	   = IB_MGMT_METHOD_GET; | ||||
| } | ||||
| 
 | ||||
| static inline u8 convert_access(int acc) | ||||
| { | ||||
| 	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) | | ||||
| 	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) | | ||||
| 	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) | | ||||
| 	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) | | ||||
| 	       MLX5_PERM_LOCAL_READ; | ||||
| } | ||||
| 
 | ||||
| #endif /* MLX5_IB_H */ | ||||
							
								
								
									
										1007
									
								
								drivers/infiniband/hw/mlx5/mr.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										1007
									
								
								drivers/infiniband/hw/mlx5/mr.c
									
										
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load diff
											
										
									
								
							
							
								
								
									
										2524
									
								
								drivers/infiniband/hw/mlx5/qp.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										2524
									
								
								drivers/infiniband/hw/mlx5/qp.c
									
										
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load diff
											
										
									
								
							
							
								
								
									
										473
									
								
								drivers/infiniband/hw/mlx5/srq.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										473
									
								
								drivers/infiniband/hw/mlx5/srq.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,473 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/qp.h> | ||||
| #include <linux/mlx5/srq.h> | ||||
| #include <linux/slab.h> | ||||
| #include <rdma/ib_umem.h> | ||||
| 
 | ||||
| #include "mlx5_ib.h" | ||||
| #include "user.h" | ||||
| 
 | ||||
/* WQE signature on kernel SRQs — not supported currently, always 0. */
static int srq_signature;
| 
 | ||||
/* Return a pointer to WQE number @n inside the kernel SRQ buffer. */
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}
| 
 | ||||
| static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) | ||||
| { | ||||
| 	struct ib_event event; | ||||
| 	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; | ||||
| 
 | ||||
| 	if (ibsrq->event_handler) { | ||||
| 		event.device      = ibsrq->device; | ||||
| 		event.element.srq = ibsrq; | ||||
| 		switch (type) { | ||||
| 		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: | ||||
| 			event.event = IB_EVENT_SRQ_LIMIT_REACHED; | ||||
| 			break; | ||||
| 		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: | ||||
| 			event.event = IB_EVENT_SRQ_ERR; | ||||
| 			break; | ||||
| 		default: | ||||
| 			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n", | ||||
| 				type, srq->srqn); | ||||
| 			return; | ||||
| 		} | ||||
| 
 | ||||
| 		ibsrq->event_handler(&event, ibsrq->srq_context); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
/*
 * Set up a userspace-backed SRQ: copy the create parameters from
 * userspace, pin the user buffer, build the PAS (physical address)
 * list inside the create mailbox, and map the user doorbell page.
 *
 * On success *in/*inlen hold a mlx5_vzalloc()'ed mailbox the caller
 * must free after issuing the create command; on failure every
 * intermediate resource is released and a negative errno is returned.
 */
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_create_srq_mbox_in **in,
			   struct ib_udata *udata, int buf_size, int *inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}
	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	/* Pin the user buffer holding the SRQ WQEs. */
	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	/* Find the best page size covering the pinned region and the
	 * buffer's offset within the first page of that size.
	 */
	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	/* Mailbox is the fixed header plus one PAS entry per page. */
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!(*in)) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	(*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;
	/* Page offset lives in the top 6 bits of the pgoff_cqn word;
	 * the CQN part is filled in later by mlx5_ib_create_srq().
	 */
	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);

	return 0;

err_in:
	mlx5_vfree(*in);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
| 
 | ||||
/*
 * Set up a kernel-owned SRQ: allocate a doorbell record and the WQE
 * buffer, link every WQE into the initial free list, fill the create
 * mailbox's PAS array and allocate the wr_id tracking array.
 *
 * On success *in/*inlen hold a mlx5_vzalloc()'ed mailbox the caller
 * must free; on failure the goto chain unwinds every resource in
 * reverse allocation order.
 */
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_create_srq_mbox_in **in, int buf_size,
			     int *inlen)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;
	int page_shift;
	int npages;

	err = mlx5_db_alloc(&dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	*srq->db.db = 0;

	if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	/* Start with the whole ring free: head at 0, tail at the last WQE. */
	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	/* Chain every WQE to its successor to form the free list
	 * (msrq.max is a power of two, so the mask wraps the last entry).
	 */
	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, (*in)->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;	/* always 0 — signature unsupported */

	(*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;

	return 0;

err_in:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(&dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(&dev->mdev, &srq->db);
	return err;
}
| 
 | ||||
/* Tear down a user SRQ: unmap the doorbell and unpin the user buffer
 * (reverse of create_srq_user()).
 */
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}
| 
 | ||||
| 
 | ||||
/* Tear down a kernel SRQ: free the wr_id array, WQE buffer and
 * doorbell record (reverse of create_srq_kernel()).
 */
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(&dev->mdev, &srq->buf);
	mlx5_db_free(&dev->mdev, &srq->db);
}
| 
 | ||||
/*
 * Create an SRQ (verbs entry point).
 *
 * Validates the requested size, rounds the WQE count and stride up to
 * powers of two, builds the buffer via create_srq_user() or
 * create_srq_kernel(), fills in the SRQ context (XRC domain and CQ
 * for XRC SRQs, the device-global x0/c0 pair for basic SRQs) and
 * issues the firmware create command.
 *
 * Returns the new ib_srq on success or an ERR_PTR() on failure.
 */
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_create_srq_mbox_in *uninitialized_var(in);
	int uninitialized_var(inlen);
	int is_xrc;
	u32 flgs, xrcdn;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    dev->mdev.caps.max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	/* One extra WQE keeps head != tail while the ring is full. */
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	/* WQE stride: next-segment header plus the scatter list, rounded
	 * to a power of two and at least 32 bytes.
	 */
	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(int, 32, desc_size);
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
	in->ctx.state_log_sz = ilog2(srq->msrq.max);
	flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
	xrcdn = 0;
	if (is_xrc) {
		xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		/* basic SRQs are attached to the device-global XRCD/CQ */
		xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
	}

	in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));

	in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->ctx.db_record = cpu_to_be64(srq->db.dma);
	err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
	mlx5_vfree(in);	/* mailbox no longer needed regardless of outcome */
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	/* NOTE(review): srq_num is written unconditionally, i.e. also for
	 * non-XRC SRQs — verify this is intended for the ext union layout.
	 */
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	/* Report the usable depth (one slot is reserved, see above). */
	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
| 
 | ||||
| int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||||
| 		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) | ||||
| { | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); | ||||
| 	struct mlx5_ib_srq *srq = to_msrq(ibsrq); | ||||
| 	int ret; | ||||
| 
 | ||||
| 	/* We don't support resizing SRQs yet */ | ||||
| 	if (attr_mask & IB_SRQ_MAX_WR) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	if (attr_mask & IB_SRQ_LIMIT) { | ||||
| 		if (attr->srq_limit >= srq->msrq.max) | ||||
| 			return -EINVAL; | ||||
| 
 | ||||
| 		mutex_lock(&srq->mutex); | ||||
| 		ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1); | ||||
| 		mutex_unlock(&srq->mutex); | ||||
| 
 | ||||
| 		if (ret) | ||||
| 			return ret; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) | ||||
| { | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); | ||||
| 	struct mlx5_ib_srq *srq = to_msrq(ibsrq); | ||||
| 	int ret; | ||||
| 	struct mlx5_query_srq_mbox_out *out; | ||||
| 
 | ||||
| 	out = kzalloc(sizeof(*out), GFP_KERNEL); | ||||
| 	if (!out) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out); | ||||
| 	if (ret) | ||||
| 		goto out_box; | ||||
| 
 | ||||
| 	srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm); | ||||
| 	srq_attr->max_wr    = srq->msrq.max - 1; | ||||
| 	srq_attr->max_sge   = srq->msrq.max_gs; | ||||
| 
 | ||||
| out_box: | ||||
| 	kfree(out); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| int mlx5_ib_destroy_srq(struct ib_srq *srq) | ||||
| { | ||||
| 	struct mlx5_ib_dev *dev = to_mdev(srq->device); | ||||
| 	struct mlx5_ib_srq *msrq = to_msrq(srq); | ||||
| 
 | ||||
| 	mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq); | ||||
| 
 | ||||
| 	if (srq->uobject) { | ||||
| 		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); | ||||
| 		ib_umem_release(msrq->umem); | ||||
| 	} else { | ||||
| 		kfree(msrq->wrid); | ||||
| 		mlx5_buf_free(&dev->mdev, &msrq->buf); | ||||
| 		mlx5_db_free(&dev->mdev, &msrq->db); | ||||
| 	} | ||||
| 
 | ||||
| 	kfree(srq); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
/* Return WQE @wqe_index to the SRQ free list after its completion has
 * been consumed: the old tail is linked to it and it becomes the new
 * tail.
 */
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}
| 
 | ||||
/*
 * Post a chain of receive work requests to an SRQ (verbs entry point).
 *
 * Each WR consumes one WQE from the free list (head); the scatter list
 * is copied into the WQE and, if shorter than the WQE's capacity,
 * terminated with an invalid-lkey sentinel entry.  After all WRs are
 * written, a memory barrier orders the descriptors before the doorbell
 * record update that makes them visible to hardware.
 *
 * On error (*bad_wr points at the failing WR) the WRs posted so far
 * are still rung; returns 0 or a negative errno.
 */
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/* head == tail means the free list is exhausted (the ring
		 * keeps one reserved slot, see create_srq_kernel()).
		 */
		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		/* Take the head WQE and advance head along the free list. */
		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate a short scatter list with an invalid entry. */
		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
							
								
								
									
										121
									
								
								drivers/infiniband/hw/mlx5/user.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										121
									
								
								drivers/infiniband/hw/mlx5/user.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,121 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_IB_USER_H | ||||
| #define MLX5_IB_USER_H | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_QP_FLAG_SIGNATURE		= 1 << 0, | ||||
| 	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1, | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0, | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
| /* Increment this value if any changes that break userspace ABI
 | ||||
|  * compatibility are made. | ||||
|  */ | ||||
| #define MLX5_IB_UVERBS_ABI_VERSION	1 | ||||
| 
 | ||||
| /* Make sure that all structs defined in this file remain laid out so
 | ||||
|  * that they pack the same way on 32-bit and 64-bit architectures (to | ||||
|  * avoid incompatibility between 32-bit userspace and 64-bit kernels). | ||||
|  * In particular do not use pointer types -- pass pointers in __u64 | ||||
|  * instead. | ||||
|  */ | ||||
| 
 | ||||
| struct mlx5_ib_alloc_ucontext_req { | ||||
| 	__u32	total_num_uuars; | ||||
| 	__u32	num_low_latency_uuars; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_alloc_ucontext_resp { | ||||
| 	__u32	qp_tab_size; | ||||
| 	__u32	bf_reg_size; | ||||
| 	__u32	tot_uuars; | ||||
| 	__u32	cache_line_size; | ||||
| 	__u16	max_sq_desc_sz; | ||||
| 	__u16	max_rq_desc_sz; | ||||
| 	__u32	max_send_wqebb; | ||||
| 	__u32	max_recv_wr; | ||||
| 	__u32	max_srq_recv_wr; | ||||
| 	__u16	num_ports; | ||||
| 	__u16	reserved; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_alloc_pd_resp { | ||||
| 	__u32	pdn; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_create_cq { | ||||
| 	__u64	buf_addr; | ||||
| 	__u64	db_addr; | ||||
| 	__u32	cqe_size; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_create_cq_resp { | ||||
| 	__u32	cqn; | ||||
| 	__u32	reserved; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_resize_cq { | ||||
| 	__u64	buf_addr; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_create_srq { | ||||
| 	__u64	buf_addr; | ||||
| 	__u64	db_addr; | ||||
| 	__u32	flags; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_create_srq_resp { | ||||
| 	__u32	srqn; | ||||
| 	__u32	reserved; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_create_qp { | ||||
| 	__u64	buf_addr; | ||||
| 	__u64	db_addr; | ||||
| 	__u32	sq_wqe_count; | ||||
| 	__u32	rq_wqe_count; | ||||
| 	__u32	rq_wqe_shift; | ||||
| 	__u32	flags; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_ib_create_qp_resp { | ||||
| 	__u32	uuar_index; | ||||
| }; | ||||
| #endif /* MLX5_IB_USER_H */ | ||||
|  | @ -42,8 +42,6 @@ | |||
| #define OCRDMA_ROCE_DEV_VERSION "1.0.0" | ||||
| #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" | ||||
| 
 | ||||
| #define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg) | ||||
| 
 | ||||
| #define OCRDMA_MAX_AH 512 | ||||
| 
 | ||||
| #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) | ||||
|  | @ -97,7 +95,6 @@ struct ocrdma_queue_info { | |||
| 	u16 id;			/* qid, where to ring the doorbell. */ | ||||
| 	u16 head, tail; | ||||
| 	bool created; | ||||
| 	atomic_t used;		/* Number of valid elements in the queue */ | ||||
| }; | ||||
| 
 | ||||
| struct ocrdma_eq { | ||||
|  | @ -198,7 +195,6 @@ struct ocrdma_cq { | |||
| 	struct ocrdma_ucontext *ucontext; | ||||
| 	dma_addr_t pa; | ||||
| 	u32 len; | ||||
| 	atomic_t use_cnt; | ||||
| 
 | ||||
| 	/* head of all qp's sq and rq for which cqes need to be flushed
 | ||||
| 	 * by the software. | ||||
|  | @ -210,7 +206,6 @@ struct ocrdma_pd { | |||
| 	struct ib_pd ibpd; | ||||
| 	struct ocrdma_dev *dev; | ||||
| 	struct ocrdma_ucontext *uctx; | ||||
| 	atomic_t use_cnt; | ||||
| 	u32 id; | ||||
| 	int num_dpp_qp; | ||||
| 	u32 dpp_page; | ||||
|  | @ -241,16 +236,16 @@ struct ocrdma_srq { | |||
| 	struct ib_srq ibsrq; | ||||
| 	struct ocrdma_dev *dev; | ||||
| 	u8 __iomem *db; | ||||
| 	/* provide synchronization to multiple context(s) posting rqe */ | ||||
| 	spinlock_t q_lock ____cacheline_aligned; | ||||
| 
 | ||||
| 	struct ocrdma_qp_hwq_info rq; | ||||
| 	struct ocrdma_pd *pd; | ||||
| 	atomic_t use_cnt; | ||||
| 	u32 id; | ||||
| 	u64 *rqe_wr_id_tbl; | ||||
| 	u32 *idx_bit_fields; | ||||
| 	u32 bit_fields_len; | ||||
| 
 | ||||
| 	/* provide synchronization to multiple context(s) posting rqe */ | ||||
| 	spinlock_t q_lock ____cacheline_aligned; | ||||
| 
 | ||||
| 	struct ocrdma_pd *pd; | ||||
| 	u32 id; | ||||
| }; | ||||
| 
 | ||||
| struct ocrdma_qp { | ||||
|  | @ -258,8 +253,6 @@ struct ocrdma_qp { | |||
| 	struct ocrdma_dev *dev; | ||||
| 
 | ||||
| 	u8 __iomem *sq_db; | ||||
| 	/* provide synchronization to multiple context(s) posting wqe, rqe */ | ||||
| 	spinlock_t q_lock ____cacheline_aligned; | ||||
| 	struct ocrdma_qp_hwq_info sq; | ||||
| 	struct { | ||||
| 		uint64_t wrid; | ||||
|  | @ -269,6 +262,9 @@ struct ocrdma_qp { | |||
| 		uint8_t  rsvd[3]; | ||||
| 	} *wqe_wr_id_tbl; | ||||
| 	u32 max_inline_data; | ||||
| 
 | ||||
| 	/* provide synchronization to multiple context(s) posting wqe, rqe */ | ||||
| 	spinlock_t q_lock ____cacheline_aligned; | ||||
| 	struct ocrdma_cq *sq_cq; | ||||
| 	/* list maintained per CQ to flush SQ errors */ | ||||
| 	struct list_head sq_entry; | ||||
|  | @ -296,10 +292,6 @@ struct ocrdma_qp { | |||
| 	u8 *ird_q_va; | ||||
| }; | ||||
| 
 | ||||
| #define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \ | ||||
| 	(((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \ | ||||
| 		(qp->id < 64)) ? 24 : 16) | ||||
| 
 | ||||
| struct ocrdma_hw_mr { | ||||
| 	struct ocrdma_dev *dev; | ||||
| 	u32 lkey; | ||||
|  | @ -390,4 +382,43 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) | |||
| 	return container_of(ibsrq, struct ocrdma_srq, ibsrq); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp) | ||||
| { | ||||
| 	return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY && | ||||
| 		 qp->id < 64) ? 24 : 16); | ||||
| } | ||||
| 
 | ||||
| static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) | ||||
| { | ||||
| 	int cqe_valid; | ||||
| 	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; | ||||
| 	return ((cqe_valid == cq->phase) ? 1 : 0); | ||||
| } | ||||
| 
 | ||||
| static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) | ||||
| { | ||||
| 	return (le32_to_cpu(cqe->flags_status_srcqpn) & | ||||
| 		OCRDMA_CQE_QTYPE) ? 0 : 1; | ||||
| } | ||||
| 
 | ||||
| static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) | ||||
| { | ||||
| 	return (le32_to_cpu(cqe->flags_status_srcqpn) & | ||||
| 		OCRDMA_CQE_INVALIDATE) ? 1 : 0; | ||||
| } | ||||
| 
 | ||||
| static inline int is_cqe_imm(struct ocrdma_cqe *cqe) | ||||
| { | ||||
| 	return (le32_to_cpu(cqe->flags_status_srcqpn) & | ||||
| 		OCRDMA_CQE_IMM) ? 1 : 0; | ||||
| } | ||||
| 
 | ||||
| static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) | ||||
| { | ||||
| 	return (le32_to_cpu(cqe->flags_status_srcqpn) & | ||||
| 		OCRDMA_CQE_WRITE_IMM) ? 1 : 0; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| #endif | ||||
|  |  | |||
|  | @ -128,7 +128,6 @@ static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev) | |||
| static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev) | ||||
| { | ||||
| 	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); | ||||
| 	atomic_inc(&dev->mq.sq.used); | ||||
| } | ||||
| 
 | ||||
| static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev) | ||||
|  | @ -564,32 +563,19 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev, | |||
| 	memset(cmd, 0, sizeof(*cmd)); | ||||
| 	num_pages = PAGES_4K_SPANNED(mq->va, mq->size); | ||||
| 
 | ||||
| 	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { | ||||
| 		ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ, | ||||
| 				OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||||
| 		cmd->v0.pages = num_pages; | ||||
| 		cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; | ||||
| 		cmd->v0.async_cqid_valid = (cq->id << 1); | ||||
| 		cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << | ||||
| 					     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); | ||||
| 		cmd->v0.cqid_ringsize |= | ||||
| 			(cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT); | ||||
| 		cmd->v0.valid = OCRDMA_CREATE_MQ_VALID; | ||||
| 		pa = &cmd->v0.pa[0]; | ||||
| 	} else { | ||||
| 		ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT, | ||||
| 				OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||||
| 		cmd->req.rsvd_version = 1; | ||||
| 		cmd->v1.cqid_pages = num_pages; | ||||
| 		cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); | ||||
| 		cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; | ||||
| 		cmd->v1.async_event_bitmap = Bit(20); | ||||
| 		cmd->v1.async_cqid_ringsize = cq->id; | ||||
| 		cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << | ||||
| 					     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); | ||||
| 		cmd->v1.valid = OCRDMA_CREATE_MQ_VALID; | ||||
| 		pa = &cmd->v1.pa[0]; | ||||
| 	} | ||||
| 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT, | ||||
| 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); | ||||
| 	cmd->req.rsvd_version = 1; | ||||
| 	cmd->cqid_pages = num_pages; | ||||
| 	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); | ||||
| 	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID; | ||||
| 	cmd->async_event_bitmap = Bit(20); | ||||
| 	cmd->async_cqid_ringsize = cq->id; | ||||
| 	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) << | ||||
| 				OCRDMA_CREATE_MQ_RING_SIZE_SHIFT); | ||||
| 	cmd->valid = OCRDMA_CREATE_MQ_VALID; | ||||
| 	pa = &cmd->pa[0]; | ||||
| 
 | ||||
| 	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K); | ||||
| 	status = be_roce_mcc_cmd(dev->nic_info.netdev, | ||||
| 				 cmd, sizeof(*cmd), NULL, NULL); | ||||
|  | @ -745,7 +731,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, | |||
| 		qp_event = 0; | ||||
| 		srq_event = 0; | ||||
| 		dev_event = 0; | ||||
| 		ocrdma_err("%s() unknown type=0x%x\n", __func__, type); | ||||
| 		pr_err("%s() unknown type=0x%x\n", __func__, type); | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
|  | @ -775,8 +761,8 @@ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) | |||
| 	if (evt_code == OCRDMA_ASYNC_EVE_CODE) | ||||
| 		ocrdma_dispatch_ibevent(dev, cqe); | ||||
| 	else | ||||
| 		ocrdma_err("%s(%d) invalid evt code=0x%x\n", | ||||
| 			   __func__, dev->id, evt_code); | ||||
| 		pr_err("%s(%d) invalid evt code=0x%x\n", __func__, | ||||
| 		       dev->id, evt_code); | ||||
| } | ||||
| 
 | ||||
| static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) | ||||
|  | @ -790,8 +776,8 @@ static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe) | |||
| 		dev->mqe_ctx.cmd_done = true; | ||||
| 		wake_up(&dev->mqe_ctx.cmd_wait); | ||||
| 	} else | ||||
| 		ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n", | ||||
| 			   __func__, cqe->tag_lo, dev->mqe_ctx.tag); | ||||
| 		pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n", | ||||
| 		       __func__, cqe->tag_lo, dev->mqe_ctx.tag); | ||||
| } | ||||
| 
 | ||||
| static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) | ||||
|  | @ -810,7 +796,7 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) | |||
| 		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK) | ||||
| 			ocrdma_process_mcqe(dev, cqe); | ||||
| 		else | ||||
| 			ocrdma_err("%s() cqe->compl is not set.\n", __func__); | ||||
| 			pr_err("%s() cqe->compl is not set.\n", __func__); | ||||
| 		memset(cqe, 0, sizeof(struct ocrdma_mcqe)); | ||||
| 		ocrdma_mcq_inc_tail(dev); | ||||
| 	} | ||||
|  | @ -869,7 +855,7 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx) | |||
| 
 | ||||
| 	cq = dev->cq_tbl[cq_idx]; | ||||
| 	if (cq == NULL) { | ||||
| 		ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx); | ||||
| 		pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx); | ||||
| 		return; | ||||
| 	} | ||||
| 	spin_lock_irqsave(&cq->cq_lock, flags); | ||||
|  | @ -971,7 +957,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) | |||
| 	rsp = ocrdma_get_mqe_rsp(dev); | ||||
| 	ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe))); | ||||
| 	if (cqe_status || ext_status) { | ||||
| 		ocrdma_err | ||||
| 		pr_err | ||||
| 		    ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n", | ||||
| 		     __func__, | ||||
| 		     (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> | ||||
|  | @ -1353,8 +1339,8 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, | |||
| 	if (dpp_cq) | ||||
| 		return -EINVAL; | ||||
| 	if (entries > dev->attr.max_cqe) { | ||||
| 		ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n", | ||||
| 			   __func__, dev->id, dev->attr.max_cqe, entries); | ||||
| 		pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n", | ||||
| 		       __func__, dev->id, dev->attr.max_cqe, entries); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)) | ||||
|  | @ -1621,7 +1607,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev, | |||
| 	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid, | ||||
| 				   cur_pbl_cnt, hwmr->pbe_size, last); | ||||
| 	if (status) { | ||||
| 		ocrdma_err("%s() status=%d\n", __func__, status); | ||||
| 		pr_err("%s() status=%d\n", __func__, status); | ||||
| 		return status; | ||||
| 	} | ||||
| 	/* if there is no more pbls to register then exit. */ | ||||
|  | @ -1644,7 +1630,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev, | |||
| 			break; | ||||
| 	} | ||||
| 	if (status) | ||||
| 		ocrdma_err("%s() err. status=%d\n", __func__, status); | ||||
| 		pr_err("%s() err. status=%d\n", __func__, status); | ||||
| 
 | ||||
| 	return status; | ||||
| } | ||||
|  | @ -1841,8 +1827,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, | |||
| 	status = ocrdma_build_q_conf(&max_wqe_allocated, | ||||
| 		dev->attr.wqe_size, &hw_pages, &hw_page_size); | ||||
| 	if (status) { | ||||
| 		ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__, | ||||
| 			   max_wqe_allocated); | ||||
| 		pr_err("%s() req. max_send_wr=0x%x\n", __func__, | ||||
| 		       max_wqe_allocated); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	qp->sq.max_cnt = max_wqe_allocated; | ||||
|  | @ -1891,8 +1877,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, | |||
| 	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size, | ||||
| 				     &hw_pages, &hw_page_size); | ||||
| 	if (status) { | ||||
| 		ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__, | ||||
| 			   attrs->cap.max_recv_wr + 1); | ||||
| 		pr_err("%s() req. max_recv_wr=0x%x\n", __func__, | ||||
| 		       attrs->cap.max_recv_wr + 1); | ||||
| 		return status; | ||||
| 	} | ||||
| 	qp->rq.max_cnt = max_rqe_allocated; | ||||
|  | @ -1900,7 +1886,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, | |||
| 
 | ||||
| 	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); | ||||
| 	if (!qp->rq.va) | ||||
| 		return status; | ||||
| 		return -ENOMEM; | ||||
| 	memset(qp->rq.va, 0, len); | ||||
| 	qp->rq.pa = pa; | ||||
| 	qp->rq.len = len; | ||||
|  | @ -2087,10 +2073,10 @@ mbx_err: | |||
| 	if (qp->rq.va) | ||||
| 		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); | ||||
| rq_err: | ||||
| 	ocrdma_err("%s(%d) rq_err\n", __func__, dev->id); | ||||
| 	pr_err("%s(%d) rq_err\n", __func__, dev->id); | ||||
| 	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); | ||||
| sq_err: | ||||
| 	ocrdma_err("%s(%d) sq_err\n", __func__, dev->id); | ||||
| 	pr_err("%s(%d) sq_err\n", __func__, dev->id); | ||||
| 	kfree(cmd); | ||||
| 	return status; | ||||
| } | ||||
|  | @ -2127,7 +2113,7 @@ int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid, | |||
| 	else if (rdma_link_local_addr(&in6)) | ||||
| 		rdma_get_ll_mac(&in6, mac_addr); | ||||
| 	else { | ||||
| 		ocrdma_err("%s() fail to resolve mac_addr.\n", __func__); | ||||
| 		pr_err("%s() fail to resolve mac_addr.\n", __func__); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	return 0; | ||||
|  | @ -2362,8 +2348,8 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq, | |||
| 				dev->attr.rqe_size, | ||||
| 				&hw_pages, &hw_page_size); | ||||
| 	if (status) { | ||||
| 		ocrdma_err("%s() req. max_wr=0x%x\n", __func__, | ||||
| 			   srq_attr->attr.max_wr); | ||||
| 		pr_err("%s() req. max_wr=0x%x\n", __func__, | ||||
| 		       srq_attr->attr.max_wr); | ||||
| 		status = -EINVAL; | ||||
| 		goto ret; | ||||
| 	} | ||||
|  | @ -2614,7 +2600,7 @@ mq_err: | |||
| 	ocrdma_destroy_qp_eqs(dev); | ||||
| qpeq_err: | ||||
| 	ocrdma_destroy_eq(dev, &dev->meq); | ||||
| 	ocrdma_err("%s() status=%d\n", __func__, status); | ||||
| 	pr_err("%s() status=%d\n", __func__, status); | ||||
| 	return status; | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -378,7 +378,7 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) | |||
| 	spin_lock_init(&dev->flush_q_lock); | ||||
| 	return 0; | ||||
| alloc_err: | ||||
| 	ocrdma_err("%s(%d) error.\n", __func__, dev->id); | ||||
| 	pr_err("%s(%d) error.\n", __func__, dev->id); | ||||
| 	return -ENOMEM; | ||||
| } | ||||
| 
 | ||||
|  | @ -396,7 +396,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) | |||
| 
 | ||||
| 	dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); | ||||
| 	if (!dev) { | ||||
| 		ocrdma_err("Unable to allocate ib device\n"); | ||||
| 		pr_err("Unable to allocate ib device\n"); | ||||
| 		return NULL; | ||||
| 	} | ||||
| 	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL); | ||||
|  | @ -437,7 +437,7 @@ init_err: | |||
| idr_err: | ||||
| 	kfree(dev->mbx_cmd); | ||||
| 	ib_dealloc_device(&dev->ibdev); | ||||
| 	ocrdma_err("%s() leaving. ret=%d\n", __func__, status); | ||||
| 	pr_err("%s() leaving. ret=%d\n", __func__, status); | ||||
| 	return NULL; | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -608,16 +608,8 @@ enum { | |||
| 	OCRDMA_CREATE_MQ_ASYNC_CQ_VALID		= Bit(0) | ||||
| }; | ||||
| 
 | ||||
| struct ocrdma_create_mq_v0 { | ||||
| 	u32 pages; | ||||
| 	u32 cqid_ringsize; | ||||
| 	u32 valid; | ||||
| 	u32 async_cqid_valid; | ||||
| 	u32 rsvd; | ||||
| 	struct ocrdma_pa pa[8]; | ||||
| } __packed; | ||||
| 
 | ||||
| struct ocrdma_create_mq_v1 { | ||||
| struct ocrdma_create_mq_req { | ||||
| 	struct ocrdma_mbx_hdr req; | ||||
| 	u32 cqid_pages; | ||||
| 	u32 async_event_bitmap; | ||||
| 	u32 async_cqid_ringsize; | ||||
|  | @ -627,14 +619,6 @@ struct ocrdma_create_mq_v1 { | |||
| 	struct ocrdma_pa pa[8]; | ||||
| } __packed; | ||||
| 
 | ||||
| struct ocrdma_create_mq_req { | ||||
| 	struct ocrdma_mbx_hdr req; | ||||
| 	union { | ||||
| 		struct ocrdma_create_mq_v0 v0; | ||||
| 		struct ocrdma_create_mq_v1 v1; | ||||
| 	}; | ||||
| } __packed; | ||||
| 
 | ||||
| struct ocrdma_create_mq_rsp { | ||||
| 	struct ocrdma_mbx_rsp rsp; | ||||
| 	u32 id; | ||||
|  | @ -1550,21 +1534,6 @@ struct ocrdma_cqe { | |||
| 	u32 flags_status_srcqpn;	/* w3 */ | ||||
| } __packed; | ||||
| 
 | ||||
| #define is_cqe_valid(cq, cqe) \ | ||||
| 	(((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID)\ | ||||
| 	== cq->phase) ? 1 : 0) | ||||
| #define is_cqe_for_sq(cqe) \ | ||||
| 	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 0 : 1) | ||||
| #define is_cqe_for_rq(cqe) \ | ||||
| 	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 1 : 0) | ||||
| #define is_cqe_invalidated(cqe) \ | ||||
| 	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_INVALIDATE) ? \ | ||||
| 	1 : 0) | ||||
| #define is_cqe_imm(cqe) \ | ||||
| 	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_IMM) ? 1 : 0) | ||||
| #define is_cqe_wr_imm(cqe) \ | ||||
| 	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_WRITE_IMM) ? 1 : 0) | ||||
| 
 | ||||
| struct ocrdma_sge { | ||||
| 	u32 addr_hi; | ||||
| 	u32 addr_lo; | ||||
|  |  | |||
|  | @ -114,8 +114,8 @@ int ocrdma_query_port(struct ib_device *ibdev, | |||
| 
 | ||||
| 	dev = get_ocrdma_dev(ibdev); | ||||
| 	if (port > 1) { | ||||
| 		ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, | ||||
| 			   dev->id, port); | ||||
| 		pr_err("%s(%d) invalid_port=0x%x\n", __func__, | ||||
| 		       dev->id, port); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	netdev = dev->nic_info.netdev; | ||||
|  | @ -155,8 +155,7 @@ int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, | |||
| 
 | ||||
| 	dev = get_ocrdma_dev(ibdev); | ||||
| 	if (port > 1) { | ||||
| 		ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__, | ||||
| 			   dev->id, port); | ||||
| 		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	return 0; | ||||
|  | @ -398,7 +397,6 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev, | |||
| 		kfree(pd); | ||||
| 		return ERR_PTR(status); | ||||
| 	} | ||||
| 	atomic_set(&pd->use_cnt, 0); | ||||
| 
 | ||||
| 	if (udata && context) { | ||||
| 		status = ocrdma_copy_pd_uresp(pd, context, udata); | ||||
|  | @ -419,12 +417,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd) | |||
| 	int status; | ||||
| 	u64 usr_db; | ||||
| 
 | ||||
| 	if (atomic_read(&pd->use_cnt)) { | ||||
| 		ocrdma_err("%s(%d) pd=0x%x is in use.\n", | ||||
| 			   __func__, dev->id, pd->id); | ||||
| 		status = -EFAULT; | ||||
| 		goto dealloc_err; | ||||
| 	} | ||||
| 	status = ocrdma_mbx_dealloc_pd(dev, pd); | ||||
| 	if (pd->uctx) { | ||||
| 		u64 dpp_db = dev->nic_info.dpp_unmapped_addr + | ||||
|  | @ -436,7 +428,6 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd) | |||
| 		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size); | ||||
| 	} | ||||
| 	kfree(pd); | ||||
| dealloc_err: | ||||
| 	return status; | ||||
| } | ||||
| 
 | ||||
|  | @ -450,8 +441,8 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd, | |||
| 	struct ocrdma_dev *dev = pd->dev; | ||||
| 
 | ||||
| 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) { | ||||
| 		ocrdma_err("%s(%d) leaving err, invalid access rights\n", | ||||
| 			   __func__, dev->id); | ||||
| 		pr_err("%s(%d) leaving err, invalid access rights\n", | ||||
| 		       __func__, dev->id); | ||||
| 		return ERR_PTR(-EINVAL); | ||||
| 	} | ||||
| 
 | ||||
|  | @ -474,7 +465,6 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd, | |||
| 		return ERR_PTR(-ENOMEM); | ||||
| 	} | ||||
| 	mr->pd = pd; | ||||
| 	atomic_inc(&pd->use_cnt); | ||||
| 	mr->ibmr.lkey = mr->hwmr.lkey; | ||||
| 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) | ||||
| 		mr->ibmr.rkey = mr->hwmr.lkey; | ||||
|  | @ -664,7 +654,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, | |||
| 	if (status) | ||||
| 		goto mbx_err; | ||||
| 	mr->pd = pd; | ||||
| 	atomic_inc(&pd->use_cnt); | ||||
| 	mr->ibmr.lkey = mr->hwmr.lkey; | ||||
| 	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd) | ||||
| 		mr->ibmr.rkey = mr->hwmr.lkey; | ||||
|  | @ -689,7 +678,6 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) | |||
| 	if (mr->hwmr.fr_mr == 0) | ||||
| 		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); | ||||
| 
 | ||||
| 	atomic_dec(&mr->pd->use_cnt); | ||||
| 	/* it could be user registered memory. */ | ||||
| 	if (mr->umem) | ||||
| 		ib_umem_release(mr->umem); | ||||
|  | @ -714,8 +702,8 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata, | |||
| 	uresp.phase_change = cq->phase_change ? 1 : 0; | ||||
| 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | ||||
| 	if (status) { | ||||
| 		ocrdma_err("%s(%d) copy error cqid=0x%x.\n", | ||||
| 			   __func__, cq->dev->id, cq->id); | ||||
| 		pr_err("%s(%d) copy error cqid=0x%x.\n", | ||||
| 		       __func__, cq->dev->id, cq->id); | ||||
| 		goto err; | ||||
| 	} | ||||
| 	uctx = get_ocrdma_ucontext(ib_ctx); | ||||
|  | @ -752,7 +740,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, | |||
| 
 | ||||
| 	spin_lock_init(&cq->cq_lock); | ||||
| 	spin_lock_init(&cq->comp_handler_lock); | ||||
| 	atomic_set(&cq->use_cnt, 0); | ||||
| 	INIT_LIST_HEAD(&cq->sq_head); | ||||
| 	INIT_LIST_HEAD(&cq->rq_head); | ||||
| 	cq->dev = dev; | ||||
|  | @ -799,9 +786,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq) | |||
| 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); | ||||
| 	struct ocrdma_dev *dev = cq->dev; | ||||
| 
 | ||||
| 	if (atomic_read(&cq->use_cnt)) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	status = ocrdma_mbx_destroy_cq(dev, cq); | ||||
| 
 | ||||
| 	if (cq->ucontext) { | ||||
|  | @ -837,57 +821,56 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, | |||
| 	if (attrs->qp_type != IB_QPT_GSI && | ||||
| 	    attrs->qp_type != IB_QPT_RC && | ||||
| 	    attrs->qp_type != IB_QPT_UD) { | ||||
| 		ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n", | ||||
| 			   __func__, dev->id, attrs->qp_type); | ||||
| 		pr_err("%s(%d) unsupported qp type=0x%x requested\n", | ||||
| 		       __func__, dev->id, attrs->qp_type); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	if (attrs->cap.max_send_wr > dev->attr.max_wqe) { | ||||
| 		ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n", | ||||
| 			   __func__, dev->id, attrs->cap.max_send_wr); | ||||
| 		ocrdma_err("%s(%d) supported send_wr=0x%x\n", | ||||
| 			   __func__, dev->id, dev->attr.max_wqe); | ||||
| 		pr_err("%s(%d) unsupported send_wr=0x%x requested\n", | ||||
| 		       __func__, dev->id, attrs->cap.max_send_wr); | ||||
| 		pr_err("%s(%d) supported send_wr=0x%x\n", | ||||
| 		       __func__, dev->id, dev->attr.max_wqe); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { | ||||
| 		ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n", | ||||
| 			   __func__, dev->id, attrs->cap.max_recv_wr); | ||||
| 		ocrdma_err("%s(%d) supported recv_wr=0x%x\n", | ||||
| 			   __func__, dev->id, dev->attr.max_rqe); | ||||
| 		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n", | ||||
| 		       __func__, dev->id, attrs->cap.max_recv_wr); | ||||
| 		pr_err("%s(%d) supported recv_wr=0x%x\n", | ||||
| 		       __func__, dev->id, dev->attr.max_rqe); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) { | ||||
| 		ocrdma_err("%s(%d) unsupported inline data size=0x%x" | ||||
| 			   " requested\n", __func__, dev->id, | ||||
| 			   attrs->cap.max_inline_data); | ||||
| 		ocrdma_err("%s(%d) supported inline data size=0x%x\n", | ||||
| 			   __func__, dev->id, dev->attr.max_inline_data); | ||||
| 		pr_err("%s(%d) unsupported inline data size=0x%x requested\n", | ||||
| 		       __func__, dev->id, attrs->cap.max_inline_data); | ||||
| 		pr_err("%s(%d) supported inline data size=0x%x\n", | ||||
| 		       __func__, dev->id, dev->attr.max_inline_data); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) { | ||||
| 		ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n", | ||||
| 			   __func__, dev->id, attrs->cap.max_send_sge); | ||||
| 		ocrdma_err("%s(%d) supported send_sge=0x%x\n", | ||||
| 			   __func__, dev->id, dev->attr.max_send_sge); | ||||
| 		pr_err("%s(%d) unsupported send_sge=0x%x requested\n", | ||||
| 		       __func__, dev->id, attrs->cap.max_send_sge); | ||||
| 		pr_err("%s(%d) supported send_sge=0x%x\n", | ||||
| 		       __func__, dev->id, dev->attr.max_send_sge); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) { | ||||
| 		ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n", | ||||
| 			   __func__, dev->id, attrs->cap.max_recv_sge); | ||||
| 		ocrdma_err("%s(%d) supported recv_sge=0x%x\n", | ||||
| 			   __func__, dev->id, dev->attr.max_recv_sge); | ||||
| 		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n", | ||||
| 		       __func__, dev->id, attrs->cap.max_recv_sge); | ||||
| 		pr_err("%s(%d) supported recv_sge=0x%x\n", | ||||
| 		       __func__, dev->id, dev->attr.max_recv_sge); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	/* unprivileged user space cannot create special QP */ | ||||
| 	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { | ||||
| 		ocrdma_err | ||||
| 		pr_err | ||||
| 		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n", | ||||
| 		     __func__, dev->id, attrs->qp_type); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	/* allow creating only one GSI type of QP */ | ||||
| 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) { | ||||
| 		ocrdma_err("%s(%d) GSI special QPs already created.\n", | ||||
| 			   __func__, dev->id); | ||||
| 		pr_err("%s(%d) GSI special QPs already created.\n", | ||||
| 		       __func__, dev->id); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	/* verify consumer QPs are not trying to use GSI QP's CQ */ | ||||
|  | @ -896,8 +879,8 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev, | |||
| 		    (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) || | ||||
| 		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) || | ||||
| 		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) { | ||||
| 			ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n", | ||||
| 				   __func__, dev->id); | ||||
| 			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n", | ||||
| 			       __func__, dev->id); | ||||
| 			return -EINVAL; | ||||
| 		} | ||||
| 	} | ||||
|  | @ -949,7 +932,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, | |||
| 	} | ||||
| 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); | ||||
| 	if (status) { | ||||
| 		ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id); | ||||
| 		pr_err("%s(%d) user copy error.\n", __func__, dev->id); | ||||
| 		goto err; | ||||
| 	} | ||||
| 	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0], | ||||
|  | @ -1023,15 +1006,6 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, | |||
| 	qp->state = OCRDMA_QPS_RST; | ||||
| } | ||||
| 
 | ||||
| static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd) | ||||
| { | ||||
| 	atomic_inc(&pd->use_cnt); | ||||
| 	atomic_inc(&qp->sq_cq->use_cnt); | ||||
| 	atomic_inc(&qp->rq_cq->use_cnt); | ||||
| 	if (qp->srq) | ||||
| 		atomic_inc(&qp->srq->use_cnt); | ||||
| 	qp->ibqp.qp_num = qp->id; | ||||
| } | ||||
| 
 | ||||
| static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, | ||||
| 				   struct ib_qp_init_attr *attrs) | ||||
|  | @ -1099,7 +1073,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, | |||
| 			goto cpy_err; | ||||
| 	} | ||||
| 	ocrdma_store_gsi_qp_cq(dev, attrs); | ||||
| 	ocrdma_set_qp_use_cnt(qp, pd); | ||||
| 	qp->ibqp.qp_num = qp->id; | ||||
| 	mutex_unlock(&dev->dev_lock); | ||||
| 	return &qp->ibqp; | ||||
| 
 | ||||
|  | @ -1112,7 +1086,7 @@ mbx_err: | |||
| 	kfree(qp->wqe_wr_id_tbl); | ||||
| 	kfree(qp->rqe_wr_id_tbl); | ||||
| 	kfree(qp); | ||||
| 	ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status); | ||||
| 	pr_err("%s(%d) error=%d\n", __func__, dev->id, status); | ||||
| gen_err: | ||||
| 	return ERR_PTR(status); | ||||
| } | ||||
|  | @ -1162,10 +1136,10 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 	spin_unlock_irqrestore(&qp->q_lock, flags); | ||||
| 
 | ||||
| 	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) { | ||||
| 		ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for " | ||||
| 			   "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", | ||||
| 			   __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, | ||||
| 			   old_qps, new_qps); | ||||
| 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n" | ||||
| 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", | ||||
| 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, | ||||
| 		       old_qps, new_qps); | ||||
| 		goto param_err; | ||||
| 	} | ||||
| 
 | ||||
|  | @ -1475,11 +1449,6 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) | |||
| 
 | ||||
| 	ocrdma_del_flush_qp(qp); | ||||
| 
 | ||||
| 	atomic_dec(&qp->pd->use_cnt); | ||||
| 	atomic_dec(&qp->sq_cq->use_cnt); | ||||
| 	atomic_dec(&qp->rq_cq->use_cnt); | ||||
| 	if (qp->srq) | ||||
| 		atomic_dec(&qp->srq->use_cnt); | ||||
| 	kfree(qp->wqe_wr_id_tbl); | ||||
| 	kfree(qp->rqe_wr_id_tbl); | ||||
| 	kfree(qp); | ||||
|  | @ -1565,14 +1534,12 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, | |||
| 			goto arm_err; | ||||
| 	} | ||||
| 
 | ||||
| 	atomic_set(&srq->use_cnt, 0); | ||||
| 	if (udata) { | ||||
| 		status = ocrdma_copy_srq_uresp(srq, udata); | ||||
| 		if (status) | ||||
| 			goto arm_err; | ||||
| 	} | ||||
| 
 | ||||
| 	atomic_inc(&pd->use_cnt); | ||||
| 	return &srq->ibsrq; | ||||
| 
 | ||||
| arm_err: | ||||
|  | @ -1618,18 +1585,12 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq) | |||
| 
 | ||||
| 	srq = get_ocrdma_srq(ibsrq); | ||||
| 	dev = srq->dev; | ||||
| 	if (atomic_read(&srq->use_cnt)) { | ||||
| 		ocrdma_err("%s(%d) err, srq=0x%x in use\n", | ||||
| 			   __func__, dev->id, srq->id); | ||||
| 		return -EAGAIN; | ||||
| 	} | ||||
| 
 | ||||
| 	status = ocrdma_mbx_destroy_srq(dev, srq); | ||||
| 
 | ||||
| 	if (srq->pd->uctx) | ||||
| 		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len); | ||||
| 
 | ||||
| 	atomic_dec(&srq->pd->use_cnt); | ||||
| 	kfree(srq->idx_bit_fields); | ||||
| 	kfree(srq->rqe_wr_id_tbl); | ||||
| 	kfree(srq); | ||||
|  | @ -1677,9 +1638,9 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, | |||
| { | ||||
| 	if (wr->send_flags & IB_SEND_INLINE) { | ||||
| 		if (wr->sg_list[0].length > qp->max_inline_data) { | ||||
| 			ocrdma_err("%s() supported_len=0x%x," | ||||
| 				" unspported len req=0x%x\n", __func__, | ||||
| 				qp->max_inline_data, wr->sg_list[0].length); | ||||
| 			pr_err("%s() supported_len=0x%x,\n" | ||||
| 			       " unspported len req=0x%x\n", __func__, | ||||
| 			       qp->max_inline_data, wr->sg_list[0].length); | ||||
| 			return -EINVAL; | ||||
| 		} | ||||
| 		memcpy(sge, | ||||
|  | @ -1773,12 +1734,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 	spin_lock_irqsave(&qp->q_lock, flags); | ||||
| 	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { | ||||
| 		spin_unlock_irqrestore(&qp->q_lock, flags); | ||||
| 		*bad_wr = wr; | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	while (wr) { | ||||
| 		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || | ||||
| 		    wr->num_sge > qp->sq.max_sges) { | ||||
| 			*bad_wr = wr; | ||||
| 			status = -ENOMEM; | ||||
| 			break; | ||||
| 		} | ||||
|  | @ -1856,7 +1819,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 
 | ||||
| static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) | ||||
| { | ||||
| 	u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp)); | ||||
| 	u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp)); | ||||
| 
 | ||||
| 	iowrite32(val, qp->rq_db); | ||||
| } | ||||
|  | @ -2094,8 +2057,8 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, | |||
| 		break; | ||||
| 	default: | ||||
| 		ibwc->status = IB_WC_GENERAL_ERR; | ||||
| 		ocrdma_err("%s() invalid opcode received = 0x%x\n", | ||||
| 			   __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); | ||||
| 		pr_err("%s() invalid opcode received = 0x%x\n", | ||||
| 		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); | ||||
| 		break; | ||||
| 	}; | ||||
| } | ||||
|  |  | |||
|  | @ -5,3 +5,11 @@ config INFINIBAND_QIB | |||
| 	This is a low-level driver for Intel PCIe QLE InfiniBand host | ||||
| 	channel adapters.  This driver does not support the Intel | ||||
| 	HyperTransport card (model QHT7140). | ||||
| 
 | ||||
| config INFINIBAND_QIB_DCA | ||||
| 	bool "QIB DCA support" | ||||
| 	depends on INFINIBAND_QIB && DCA && SMP && GENERIC_HARDIRQS && !(INFINIBAND_QIB=y && DCA=m) | ||||
| 	default y | ||||
| 	---help--- | ||||
| 	Setting this enables DCA support on some Intel chip sets | ||||
| 	with the iba7322 HCA. | ||||
|  |  | |||
|  | @ -13,3 +13,4 @@ ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o | |||
| 
 | ||||
| ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o | ||||
| ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o | ||||
| ib_qib-$(CONFIG_DEBUG_FS) += qib_debugfs.o | ||||
|  |  | |||
|  | @ -1,7 +1,7 @@ | |||
| #ifndef _QIB_KERNEL_H | ||||
| #define _QIB_KERNEL_H | ||||
| /*
 | ||||
|  * Copyright (c) 2012 Intel Corporation.  All rights reserved. | ||||
|  * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved. | ||||
|  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||||
|  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||||
|  * | ||||
|  | @ -51,6 +51,7 @@ | |||
| #include <linux/completion.h> | ||||
| #include <linux/kref.h> | ||||
| #include <linux/sched.h> | ||||
| #include <linux/kthread.h> | ||||
| 
 | ||||
| #include "qib_common.h" | ||||
| #include "qib_verbs.h" | ||||
|  | @ -114,6 +115,11 @@ struct qib_eep_log_mask { | |||
| /*
 | ||||
|  * Below contains all data related to a single context (formerly called port). | ||||
|  */ | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| struct qib_opcode_stats_perctx; | ||||
| #endif | ||||
| 
 | ||||
| struct qib_ctxtdata { | ||||
| 	void **rcvegrbuf; | ||||
| 	dma_addr_t *rcvegrbuf_phys; | ||||
|  | @ -154,6 +160,8 @@ struct qib_ctxtdata { | |||
| 	 */ | ||||
| 	/* instead of calculating it */ | ||||
| 	unsigned ctxt; | ||||
| 	/* local node of context */ | ||||
| 	int node_id; | ||||
| 	/* non-zero if ctxt is being shared. */ | ||||
| 	u16 subctxt_cnt; | ||||
| 	/* non-zero if ctxt is being shared. */ | ||||
|  | @ -222,12 +230,15 @@ struct qib_ctxtdata { | |||
| 	u8 redirect_seq_cnt; | ||||
| 	/* ctxt rcvhdrq head offset */ | ||||
| 	u32 head; | ||||
| 	u32 pkt_count; | ||||
| 	/* lookaside fields */ | ||||
| 	struct qib_qp *lookaside_qp; | ||||
| 	u32 lookaside_qpn; | ||||
| 	/* QPs waiting for context processing */ | ||||
| 	struct list_head qp_wait_list; | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	/* verbs stats per CTX */ | ||||
| 	struct qib_opcode_stats_perctx *opstats; | ||||
| #endif | ||||
| }; | ||||
| 
 | ||||
| struct qib_sge_state; | ||||
|  | @ -428,9 +439,19 @@ struct qib_verbs_txreq { | |||
| #define ACTIVITY_TIMER 5 | ||||
| 
 | ||||
| #define MAX_NAME_SIZE 64 | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| struct qib_irq_notify; | ||||
| #endif | ||||
| 
 | ||||
| struct qib_msix_entry { | ||||
| 	struct msix_entry msix; | ||||
| 	void *arg; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	int dca; | ||||
| 	int rcv; | ||||
| 	struct qib_irq_notify *notifier; | ||||
| #endif | ||||
| 	char name[MAX_NAME_SIZE]; | ||||
| 	cpumask_var_t mask; | ||||
| }; | ||||
|  | @ -828,6 +849,9 @@ struct qib_devdata { | |||
| 		struct qib_ctxtdata *); | ||||
| 	void (*f_writescratch)(struct qib_devdata *, u32); | ||||
| 	int (*f_tempsense_rd)(struct qib_devdata *, int regnum); | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	int (*f_notify_dca)(struct qib_devdata *, unsigned long event); | ||||
| #endif | ||||
| 
 | ||||
| 	char *boardname; /* human readable board info */ | ||||
| 
 | ||||
|  | @ -1075,6 +1099,10 @@ struct qib_devdata { | |||
| 	u16 psxmitwait_check_rate; | ||||
| 	/* high volume overflow errors defered to tasklet */ | ||||
| 	struct tasklet_struct error_tasklet; | ||||
| 	/* per device cq worker */ | ||||
| 	struct kthread_worker *worker; | ||||
| 
 | ||||
| 	int assigned_node_id; /* NUMA node closest to HCA */ | ||||
| }; | ||||
| 
 | ||||
| /* hol_state values */ | ||||
|  | @ -1154,7 +1182,7 @@ int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *); | |||
| int qib_setup_eagerbufs(struct qib_ctxtdata *); | ||||
| void qib_set_ctxtcnt(struct qib_devdata *); | ||||
| int qib_create_ctxts(struct qib_devdata *dd); | ||||
| struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32); | ||||
| struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int); | ||||
| void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); | ||||
| void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); | ||||
| 
 | ||||
|  | @ -1445,6 +1473,7 @@ extern unsigned qib_n_krcv_queues; | |||
| extern unsigned qib_sdma_fetch_arb; | ||||
| extern unsigned qib_compat_ddr_negotiate; | ||||
| extern int qib_special_trigger; | ||||
| extern unsigned qib_numa_aware; | ||||
| 
 | ||||
| extern struct mutex qib_mutex; | ||||
| 
 | ||||
|  | @ -1474,27 +1503,23 @@ extern struct mutex qib_mutex; | |||
|  * first to avoid possible serial port delays from printk. | ||||
|  */ | ||||
| #define qib_early_err(dev, fmt, ...) \ | ||||
| 	do { \ | ||||
| 		dev_err(dev, fmt, ##__VA_ARGS__); \ | ||||
| 	} while (0) | ||||
| 	dev_err(dev, fmt, ##__VA_ARGS__) | ||||
| 
 | ||||
| #define qib_dev_err(dd, fmt, ...) \ | ||||
| 	do { \ | ||||
| 		dev_err(&(dd)->pcidev->dev, "%s: " fmt, \ | ||||
| 			qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \ | ||||
| 	} while (0) | ||||
| 	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \ | ||||
| 		qib_get_unit_name((dd)->unit), ##__VA_ARGS__) | ||||
| 
 | ||||
| #define qib_dev_warn(dd, fmt, ...) \ | ||||
| 	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \ | ||||
| 		qib_get_unit_name((dd)->unit), ##__VA_ARGS__) | ||||
| 
 | ||||
| #define qib_dev_porterr(dd, port, fmt, ...) \ | ||||
| 	do { \ | ||||
| 		dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ | ||||
| 			qib_get_unit_name((dd)->unit), (dd)->unit, (port), \ | ||||
| 			##__VA_ARGS__); \ | ||||
| 	} while (0) | ||||
| 	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ | ||||
| 		qib_get_unit_name((dd)->unit), (dd)->unit, (port), \ | ||||
| 		##__VA_ARGS__) | ||||
| 
 | ||||
| #define qib_devinfo(pcidev, fmt, ...) \ | ||||
| 	do { \ | ||||
| 		dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \ | ||||
| 	} while (0) | ||||
| 	dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__) | ||||
| 
 | ||||
| /*
 | ||||
|  * this is used for formatting hw error messages... | ||||
|  |  | |||
|  | @ -279,7 +279,7 @@ struct qib_base_info { | |||
|  * may not be implemented; the user code must deal with this if it | ||||
|  * cares, or it must abort after initialization reports the difference. | ||||
|  */ | ||||
| #define QIB_USER_SWMINOR 11 | ||||
| #define QIB_USER_SWMINOR 12 | ||||
| 
 | ||||
| #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,4 +1,5 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013 Intel Corporation.  All rights reserved. | ||||
|  * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved. | ||||
|  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||||
|  * | ||||
|  | @ -34,8 +35,10 @@ | |||
| #include <linux/err.h> | ||||
| #include <linux/slab.h> | ||||
| #include <linux/vmalloc.h> | ||||
| #include <linux/kthread.h> | ||||
| 
 | ||||
| #include "qib_verbs.h" | ||||
| #include "qib.h" | ||||
| 
 | ||||
| /**
 | ||||
|  * qib_cq_enter - add a new entry to the completion queue | ||||
|  | @ -102,13 +105,18 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) | |||
| 	if (cq->notify == IB_CQ_NEXT_COMP || | ||||
| 	    (cq->notify == IB_CQ_SOLICITED && | ||||
| 	     (solicited || entry->status != IB_WC_SUCCESS))) { | ||||
| 		cq->notify = IB_CQ_NONE; | ||||
| 		cq->triggered++; | ||||
| 		struct kthread_worker *worker; | ||||
| 		/*
 | ||||
| 		 * This will cause send_complete() to be called in | ||||
| 		 * another thread. | ||||
| 		 */ | ||||
| 		queue_work(qib_cq_wq, &cq->comptask); | ||||
| 		smp_rmb(); | ||||
| 		worker = cq->dd->worker; | ||||
| 		if (likely(worker)) { | ||||
| 			cq->notify = IB_CQ_NONE; | ||||
| 			cq->triggered++; | ||||
| 			queue_kthread_work(worker, &cq->comptask); | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&cq->lock, flags); | ||||
|  | @ -163,7 +171,7 @@ bail: | |||
| 	return npolled; | ||||
| } | ||||
| 
 | ||||
| static void send_complete(struct work_struct *work) | ||||
| static void send_complete(struct kthread_work *work) | ||||
| { | ||||
| 	struct qib_cq *cq = container_of(work, struct qib_cq, comptask); | ||||
| 
 | ||||
|  | @ -287,11 +295,12 @@ struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, | |||
| 	 * The number of entries should be >= the number requested or return | ||||
| 	 * an error. | ||||
| 	 */ | ||||
| 	cq->dd = dd_from_dev(dev); | ||||
| 	cq->ibcq.cqe = entries; | ||||
| 	cq->notify = IB_CQ_NONE; | ||||
| 	cq->triggered = 0; | ||||
| 	spin_lock_init(&cq->lock); | ||||
| 	INIT_WORK(&cq->comptask, send_complete); | ||||
| 	init_kthread_work(&cq->comptask, send_complete); | ||||
| 	wc->head = 0; | ||||
| 	wc->tail = 0; | ||||
| 	cq->queue = wc; | ||||
|  | @ -323,7 +332,7 @@ int qib_destroy_cq(struct ib_cq *ibcq) | |||
| 	struct qib_ibdev *dev = to_idev(ibcq->device); | ||||
| 	struct qib_cq *cq = to_icq(ibcq); | ||||
| 
 | ||||
| 	flush_work(&cq->comptask); | ||||
| 	flush_kthread_work(&cq->comptask); | ||||
| 	spin_lock(&dev->n_cqs_lock); | ||||
| 	dev->n_cqs_allocated--; | ||||
| 	spin_unlock(&dev->n_cqs_lock); | ||||
|  | @ -483,3 +492,49 @@ bail_free: | |||
| bail: | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| int qib_cq_init(struct qib_devdata *dd) | ||||
| { | ||||
| 	int ret = 0; | ||||
| 	int cpu; | ||||
| 	struct task_struct *task; | ||||
| 
 | ||||
| 	if (dd->worker) | ||||
| 		return 0; | ||||
| 	dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL); | ||||
| 	if (!dd->worker) | ||||
| 		return -ENOMEM; | ||||
| 	init_kthread_worker(dd->worker); | ||||
| 	task = kthread_create_on_node( | ||||
| 		kthread_worker_fn, | ||||
| 		dd->worker, | ||||
| 		dd->assigned_node_id, | ||||
| 		"qib_cq%d", dd->unit); | ||||
| 	if (IS_ERR(task)) | ||||
| 		goto task_fail; | ||||
| 	cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id)); | ||||
| 	kthread_bind(task, cpu); | ||||
| 	wake_up_process(task); | ||||
| out: | ||||
| 	return ret; | ||||
| task_fail: | ||||
| 	ret = PTR_ERR(task); | ||||
| 	kfree(dd->worker); | ||||
| 	dd->worker = NULL; | ||||
| 	goto out; | ||||
| } | ||||
| 
 | ||||
| void qib_cq_exit(struct qib_devdata *dd) | ||||
| { | ||||
| 	struct kthread_worker *worker; | ||||
| 
 | ||||
| 	worker = dd->worker; | ||||
| 	if (!worker) | ||||
| 		return; | ||||
| 	/* blocks future queuing from send_complete() */ | ||||
| 	dd->worker = NULL; | ||||
| 	smp_wmb(); | ||||
| 	flush_kthread_worker(worker); | ||||
| 	kthread_stop(worker->task); | ||||
| 	kfree(worker); | ||||
| } | ||||
|  |  | |||
							
								
								
									
										283
									
								
								drivers/infiniband/hw/qib/qib_debugfs.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										283
									
								
								drivers/infiniband/hw/qib/qib_debugfs.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,283 @@ | |||
| #ifdef CONFIG_DEBUG_FS | ||||
| /*
 | ||||
|  * Copyright (c) 2013 Intel Corporation.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| #include <linux/debugfs.h> | ||||
| #include <linux/seq_file.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/export.h> | ||||
| 
 | ||||
| #include "qib.h" | ||||
| #include "qib_verbs.h" | ||||
| #include "qib_debugfs.h" | ||||
| 
 | ||||
| static struct dentry *qib_dbg_root; | ||||
| 
 | ||||
| #define DEBUGFS_FILE(name) \ | ||||
| static const struct seq_operations _##name##_seq_ops = { \ | ||||
| 	.start = _##name##_seq_start, \ | ||||
| 	.next  = _##name##_seq_next, \ | ||||
| 	.stop  = _##name##_seq_stop, \ | ||||
| 	.show  = _##name##_seq_show \ | ||||
| }; \ | ||||
| static int _##name##_open(struct inode *inode, struct file *s) \ | ||||
| { \ | ||||
| 	struct seq_file *seq; \ | ||||
| 	int ret; \ | ||||
| 	ret =  seq_open(s, &_##name##_seq_ops); \ | ||||
| 	if (ret) \ | ||||
| 		return ret; \ | ||||
| 	seq = s->private_data; \ | ||||
| 	seq->private = inode->i_private; \ | ||||
| 	return 0; \ | ||||
| } \ | ||||
| static const struct file_operations _##name##_file_ops = { \ | ||||
| 	.owner   = THIS_MODULE, \ | ||||
| 	.open    = _##name##_open, \ | ||||
| 	.read    = seq_read, \ | ||||
| 	.llseek  = seq_lseek, \ | ||||
| 	.release = seq_release \ | ||||
| }; | ||||
| 
 | ||||
| #define DEBUGFS_FILE_CREATE(name) \ | ||||
| do { \ | ||||
| 	struct dentry *ent; \ | ||||
| 	ent = debugfs_create_file(#name , 0400, ibd->qib_ibdev_dbg, \ | ||||
| 		ibd, &_##name##_file_ops); \ | ||||
| 	if (!ent) \ | ||||
| 		pr_warn("create of " #name " failed\n"); \ | ||||
| } while (0) | ||||
| 
 | ||||
| static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos) | ||||
| { | ||||
| 	struct qib_opcode_stats_perctx *opstats; | ||||
| 
 | ||||
| 	if (*pos >= ARRAY_SIZE(opstats->stats)) | ||||
| 		return NULL; | ||||
| 	return pos; | ||||
| } | ||||
| 
 | ||||
| static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||||
| { | ||||
| 	struct qib_opcode_stats_perctx *opstats; | ||||
| 
 | ||||
| 	++*pos; | ||||
| 	if (*pos >= ARRAY_SIZE(opstats->stats)) | ||||
| 		return NULL; | ||||
| 	return pos; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| static void _opcode_stats_seq_stop(struct seq_file *s, void *v) | ||||
| { | ||||
| 	/* nothing allocated */ | ||||
| } | ||||
| 
 | ||||
| static int _opcode_stats_seq_show(struct seq_file *s, void *v) | ||||
| { | ||||
| 	loff_t *spos = v; | ||||
| 	loff_t i = *spos, j; | ||||
| 	u64 n_packets = 0, n_bytes = 0; | ||||
| 	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; | ||||
| 	struct qib_devdata *dd = dd_from_dev(ibd); | ||||
| 
 | ||||
| 	for (j = 0; j < dd->first_user_ctxt; j++) { | ||||
| 		if (!dd->rcd[j]) | ||||
| 			continue; | ||||
| 		n_packets += dd->rcd[j]->opstats->stats[i].n_packets; | ||||
| 		n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes; | ||||
| 	} | ||||
| 	if (!n_packets && !n_bytes) | ||||
| 		return SEQ_SKIP; | ||||
| 	seq_printf(s, "%02llx %llu/%llu\n", i, | ||||
| 		(unsigned long long) n_packets, | ||||
| 		(unsigned long long) n_bytes); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| DEBUGFS_FILE(opcode_stats) | ||||
| 
 | ||||
| static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos) | ||||
| { | ||||
| 	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; | ||||
| 	struct qib_devdata *dd = dd_from_dev(ibd); | ||||
| 
 | ||||
| 	if (!*pos) | ||||
| 		return SEQ_START_TOKEN; | ||||
| 	if (*pos >= dd->first_user_ctxt) | ||||
| 		return NULL; | ||||
| 	return pos; | ||||
| } | ||||
| 
 | ||||
| static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||||
| { | ||||
| 	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; | ||||
| 	struct qib_devdata *dd = dd_from_dev(ibd); | ||||
| 
 | ||||
| 	if (v == SEQ_START_TOKEN) | ||||
| 		return pos; | ||||
| 
 | ||||
| 	++*pos; | ||||
| 	if (*pos >= dd->first_user_ctxt) | ||||
| 		return NULL; | ||||
| 	return pos; | ||||
| } | ||||
| 
 | ||||
| static void _ctx_stats_seq_stop(struct seq_file *s, void *v) | ||||
| { | ||||
| 	/* nothing allocated */ | ||||
| } | ||||
| 
 | ||||
| static int _ctx_stats_seq_show(struct seq_file *s, void *v) | ||||
| { | ||||
| 	loff_t *spos; | ||||
| 	loff_t i, j; | ||||
| 	u64 n_packets = 0; | ||||
| 	struct qib_ibdev *ibd = (struct qib_ibdev *)s->private; | ||||
| 	struct qib_devdata *dd = dd_from_dev(ibd); | ||||
| 
 | ||||
| 	if (v == SEQ_START_TOKEN) { | ||||
| 		seq_puts(s, "Ctx:npkts\n"); | ||||
| 		return 0; | ||||
| 	} | ||||
| 
 | ||||
| 	spos = v; | ||||
| 	i = *spos; | ||||
| 
 | ||||
| 	if (!dd->rcd[i]) | ||||
| 		return SEQ_SKIP; | ||||
| 
 | ||||
| 	for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++) | ||||
| 		n_packets += dd->rcd[i]->opstats->stats[j].n_packets; | ||||
| 
 | ||||
| 	if (!n_packets) | ||||
| 		return SEQ_SKIP; | ||||
| 
 | ||||
| 	seq_printf(s, "  %llu:%llu\n", i, n_packets); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| DEBUGFS_FILE(ctx_stats) | ||||
| 
 | ||||
| static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) | ||||
| { | ||||
| 	struct qib_qp_iter *iter; | ||||
| 	loff_t n = *pos; | ||||
| 
 | ||||
| 	iter = qib_qp_iter_init(s->private); | ||||
| 	if (!iter) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	while (n--) { | ||||
| 		if (qib_qp_iter_next(iter)) { | ||||
| 			kfree(iter); | ||||
| 			return NULL; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return iter; | ||||
| } | ||||
| 
 | ||||
| static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | ||||
| 				   loff_t *pos) | ||||
| { | ||||
| 	struct qib_qp_iter *iter = iter_ptr; | ||||
| 
 | ||||
| 	(*pos)++; | ||||
| 
 | ||||
| 	if (qib_qp_iter_next(iter)) { | ||||
| 		kfree(iter); | ||||
| 		return NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	return iter; | ||||
| } | ||||
| 
 | ||||
| static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) | ||||
| { | ||||
| 	/* nothing for now */ | ||||
| } | ||||
| 
 | ||||
| static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) | ||||
| { | ||||
| 	struct qib_qp_iter *iter = iter_ptr; | ||||
| 
 | ||||
| 	if (!iter) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	qib_qp_iter_print(s, iter); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| DEBUGFS_FILE(qp_stats) | ||||
| 
 | ||||
| void qib_dbg_ibdev_init(struct qib_ibdev *ibd) | ||||
| { | ||||
| 	char name[10]; | ||||
| 
 | ||||
| 	snprintf(name, sizeof(name), "qib%d", dd_from_dev(ibd)->unit); | ||||
| 	ibd->qib_ibdev_dbg = debugfs_create_dir(name, qib_dbg_root); | ||||
| 	if (!ibd->qib_ibdev_dbg) { | ||||
| 		pr_warn("create of %s failed\n", name); | ||||
| 		return; | ||||
| 	} | ||||
| 	DEBUGFS_FILE_CREATE(opcode_stats); | ||||
| 	DEBUGFS_FILE_CREATE(ctx_stats); | ||||
| 	DEBUGFS_FILE_CREATE(qp_stats); | ||||
| 	return; | ||||
| } | ||||
| 
 | ||||
| void qib_dbg_ibdev_exit(struct qib_ibdev *ibd) | ||||
| { | ||||
| 	if (!qib_dbg_root) | ||||
| 		goto out; | ||||
| 	debugfs_remove_recursive(ibd->qib_ibdev_dbg); | ||||
| out: | ||||
| 	ibd->qib_ibdev_dbg = NULL; | ||||
| } | ||||
| 
 | ||||
| void qib_dbg_init(void) | ||||
| { | ||||
| 	qib_dbg_root = debugfs_create_dir(QIB_DRV_NAME, NULL); | ||||
| 	if (!qib_dbg_root) | ||||
| 		pr_warn("init of debugfs failed\n"); | ||||
| } | ||||
| 
 | ||||
| void qib_dbg_exit(void) | ||||
| { | ||||
| 	debugfs_remove_recursive(qib_dbg_root); | ||||
| 	qib_dbg_root = NULL; | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
							
								
								
									
										45
									
								
								drivers/infiniband/hw/qib/qib_debugfs.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										45
									
								
								drivers/infiniband/hw/qib/qib_debugfs.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,45 @@ | |||
| #ifndef _QIB_DEBUGFS_H | ||||
| #define _QIB_DEBUGFS_H | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| /*
 | ||||
|  * Copyright (c) 2013 Intel Corporation.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| struct qib_ibdev; | ||||
| void qib_dbg_ibdev_init(struct qib_ibdev *ibd); | ||||
| void qib_dbg_ibdev_exit(struct qib_ibdev *ibd); | ||||
| void qib_dbg_init(void); | ||||
| void qib_dbg_exit(void); | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
| #endif                          /* _QIB_DEBUGFS_H */ | ||||
|  | @ -558,7 +558,6 @@ move_along: | |||
| 	} | ||||
| 
 | ||||
| 	rcd->head = l; | ||||
| 	rcd->pkt_count += i; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Iterate over all QPs waiting to respond. | ||||
|  |  | |||
|  | @ -1,5 +1,5 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2012 Intel Corporation. All rights reserved. | ||||
|  * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. | ||||
|  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||||
|  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||||
|  * | ||||
|  | @ -1155,6 +1155,49 @@ static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt) | |||
| 	return pollflag; | ||||
| } | ||||
| 
 | ||||
| static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) | ||||
| { | ||||
| 	struct qib_filedata *fd = fp->private_data; | ||||
| 	const unsigned int weight = cpumask_weight(¤t->cpus_allowed); | ||||
| 	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus); | ||||
| 	int local_cpu; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If process has NOT already set it's affinity, select and | ||||
| 	 * reserve a processor for it on the local NUMA node. | ||||
| 	 */ | ||||
| 	if ((weight >= qib_cpulist_count) && | ||||
| 		(cpumask_weight(local_mask) <= qib_cpulist_count)) { | ||||
| 		for_each_cpu(local_cpu, local_mask) | ||||
| 			if (!test_and_set_bit(local_cpu, qib_cpulist)) { | ||||
| 				fd->rec_cpu_num = local_cpu; | ||||
| 				return; | ||||
| 			} | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * If process has NOT already set it's affinity, select and | ||||
| 	 * reserve a processor for it, as a rendevous for all | ||||
| 	 * users of the driver.  If they don't actually later | ||||
| 	 * set affinity to this cpu, or set it to some other cpu, | ||||
| 	 * it just means that sooner or later we don't recommend | ||||
| 	 * a cpu, and let the scheduler do it's best. | ||||
| 	 */ | ||||
| 	if (weight >= qib_cpulist_count) { | ||||
| 		int cpu; | ||||
| 		cpu = find_first_zero_bit(qib_cpulist, | ||||
| 					  qib_cpulist_count); | ||||
| 		if (cpu == qib_cpulist_count) | ||||
| 			qib_dev_err(dd, | ||||
| 			"no cpus avail for affinity PID %u\n", | ||||
| 			current->pid); | ||||
| 		else { | ||||
| 			__set_bit(cpu, qib_cpulist); | ||||
| 			fd->rec_cpu_num = cpu; | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Check that userland and driver are compatible for subcontexts. | ||||
|  */ | ||||
|  | @ -1259,12 +1302,20 @@ bail: | |||
| static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, | ||||
| 		      struct file *fp, const struct qib_user_info *uinfo) | ||||
| { | ||||
| 	struct qib_filedata *fd = fp->private_data; | ||||
| 	struct qib_devdata *dd = ppd->dd; | ||||
| 	struct qib_ctxtdata *rcd; | ||||
| 	void *ptmp = NULL; | ||||
| 	int ret; | ||||
| 	int numa_id; | ||||
| 
 | ||||
| 	rcd = qib_create_ctxtdata(ppd, ctxt); | ||||
| 	assign_ctxt_affinity(fp, dd); | ||||
| 
 | ||||
| 	numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ? | ||||
| 		cpu_to_node(fd->rec_cpu_num) : | ||||
| 		numa_node_id()) : dd->assigned_node_id; | ||||
| 
 | ||||
| 	rcd = qib_create_ctxtdata(ppd, ctxt, numa_id); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Allocate memory for use in qib_tid_update() at open to | ||||
|  | @ -1296,6 +1347,9 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, | |||
| 	goto bail; | ||||
| 
 | ||||
| bailerr: | ||||
| 	if (fd->rec_cpu_num != -1) | ||||
| 		__clear_bit(fd->rec_cpu_num, qib_cpulist); | ||||
| 
 | ||||
| 	dd->rcd[ctxt] = NULL; | ||||
| 	kfree(rcd); | ||||
| 	kfree(ptmp); | ||||
|  | @ -1485,6 +1539,57 @@ static int qib_open(struct inode *in, struct file *fp) | |||
| 	return fp->private_data ? 0 : -ENOMEM; | ||||
| } | ||||
| 
 | ||||
| static int find_hca(unsigned int cpu, int *unit) | ||||
| { | ||||
| 	int ret = 0, devmax, npresent, nup, ndev; | ||||
| 
 | ||||
| 	*unit = -1; | ||||
| 
 | ||||
| 	devmax = qib_count_units(&npresent, &nup); | ||||
| 	if (!npresent) { | ||||
| 		ret = -ENXIO; | ||||
| 		goto done; | ||||
| 	} | ||||
| 	if (!nup) { | ||||
| 		ret = -ENETDOWN; | ||||
| 		goto done; | ||||
| 	} | ||||
| 	for (ndev = 0; ndev < devmax; ndev++) { | ||||
| 		struct qib_devdata *dd = qib_lookup(ndev); | ||||
| 		if (dd) { | ||||
| 			if (pcibus_to_node(dd->pcidev->bus) < 0) { | ||||
| 				ret = -EINVAL; | ||||
| 				goto done; | ||||
| 			} | ||||
| 			if (cpu_to_node(cpu) == | ||||
| 				pcibus_to_node(dd->pcidev->bus)) { | ||||
| 				*unit = ndev; | ||||
| 				goto done; | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| done: | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int do_qib_user_sdma_queue_create(struct file *fp) | ||||
| { | ||||
| 	struct qib_filedata *fd = fp->private_data; | ||||
| 	struct qib_ctxtdata *rcd = fd->rcd; | ||||
| 	struct qib_devdata *dd = rcd->dd; | ||||
| 
 | ||||
| 	if (dd->flags & QIB_HAS_SEND_DMA) | ||||
| 
 | ||||
| 		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, | ||||
| 						    dd->unit, | ||||
| 						    rcd->ctxt, | ||||
| 						    fd->subctxt); | ||||
| 		if (!fd->pq) | ||||
| 			return -ENOMEM; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Get ctxt early, so can set affinity prior to memory allocation. | ||||
|  */ | ||||
|  | @ -1517,61 +1622,36 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) | |||
| 	if (qib_compatible_subctxts(swmajor, swminor) && | ||||
| 	    uinfo->spu_subctxt_cnt) { | ||||
| 		ret = find_shared_ctxt(fp, uinfo); | ||||
| 		if (ret) { | ||||
| 			if (ret > 0) | ||||
| 				ret = 0; | ||||
| 			goto done_chk_sdma; | ||||
| 		if (ret > 0) { | ||||
| 			ret = do_qib_user_sdma_queue_create(fp); | ||||
| 			if (!ret) | ||||
| 				assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd); | ||||
| 			goto done_ok; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE; | ||||
| 	if (i_minor) | ||||
| 		ret = find_free_ctxt(i_minor - 1, fp, uinfo); | ||||
| 	else | ||||
| 	else { | ||||
| 		int unit; | ||||
| 		const unsigned int cpu = cpumask_first(¤t->cpus_allowed); | ||||
| 		const unsigned int weight = | ||||
| 			cpumask_weight(¤t->cpus_allowed); | ||||
| 
 | ||||
| 		if (weight == 1 && !test_bit(cpu, qib_cpulist)) | ||||
| 			if (!find_hca(cpu, &unit) && unit >= 0) | ||||
| 				if (!find_free_ctxt(unit, fp, uinfo)) { | ||||
| 					ret = 0; | ||||
| 					goto done_chk_sdma; | ||||
| 				} | ||||
| 		ret = get_a_ctxt(fp, uinfo, alg); | ||||
| 
 | ||||
| done_chk_sdma: | ||||
| 	if (!ret) { | ||||
| 		struct qib_filedata *fd = fp->private_data; | ||||
| 		const struct qib_ctxtdata *rcd = fd->rcd; | ||||
| 		const struct qib_devdata *dd = rcd->dd; | ||||
| 		unsigned int weight; | ||||
| 
 | ||||
| 		if (dd->flags & QIB_HAS_SEND_DMA) { | ||||
| 			fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, | ||||
| 							    dd->unit, | ||||
| 							    rcd->ctxt, | ||||
| 							    fd->subctxt); | ||||
| 			if (!fd->pq) | ||||
| 				ret = -ENOMEM; | ||||
| 		} | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * If process has NOT already set it's affinity, select and | ||||
| 		 * reserve a processor for it, as a rendezvous for all | ||||
| 		 * users of the driver.  If they don't actually later | ||||
| 		 * set affinity to this cpu, or set it to some other cpu, | ||||
| 		 * it just means that sooner or later we don't recommend | ||||
| 		 * a cpu, and let the scheduler do it's best. | ||||
| 		 */ | ||||
| 		weight = cpumask_weight(tsk_cpus_allowed(current)); | ||||
| 		if (!ret && weight >= qib_cpulist_count) { | ||||
| 			int cpu; | ||||
| 			cpu = find_first_zero_bit(qib_cpulist, | ||||
| 						  qib_cpulist_count); | ||||
| 			if (cpu != qib_cpulist_count) { | ||||
| 				__set_bit(cpu, qib_cpulist); | ||||
| 				fd->rec_cpu_num = cpu; | ||||
| 			} | ||||
| 		} else if (weight == 1 && | ||||
| 			test_bit(cpumask_first(tsk_cpus_allowed(current)), | ||||
| 				 qib_cpulist)) | ||||
| 			qib_devinfo(dd->pcidev, | ||||
| 				"%s PID %u affinity set to cpu %d; already allocated\n", | ||||
| 				current->comm, current->pid, | ||||
| 				cpumask_first(tsk_cpus_allowed(current))); | ||||
| 	} | ||||
| 
 | ||||
| done_chk_sdma: | ||||
| 	if (!ret) | ||||
| 		ret = do_qib_user_sdma_queue_create(fp); | ||||
| done_ok: | ||||
| 	mutex_unlock(&qib_mutex); | ||||
| 
 | ||||
| done: | ||||
|  |  | |||
|  | @ -3464,6 +3464,13 @@ static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum) | |||
| 	return -ENXIO; | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event) | ||||
| { | ||||
| 	return 0; | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /* Dummy function, as 6120 boards never disable EEPROM Write */ | ||||
| static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen) | ||||
| { | ||||
|  | @ -3539,6 +3546,9 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, | |||
| 	dd->f_xgxs_reset        = qib_6120_xgxs_reset; | ||||
| 	dd->f_writescratch      = writescratch; | ||||
| 	dd->f_tempsense_rd	= qib_6120_tempsense_rd; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	dd->f_notify_dca = qib_6120_notify_dca; | ||||
| #endif | ||||
| 	/*
 | ||||
| 	 * Do remaining pcie setup and save pcie values in dd. | ||||
| 	 * Any error printing is already done by the init code. | ||||
|  |  | |||
|  | @ -4513,6 +4513,13 @@ bail: | |||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event) | ||||
| { | ||||
| 	return 0; | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /* Dummy function, as 7220 boards never disable EEPROM Write */ | ||||
| static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen) | ||||
| { | ||||
|  | @ -4587,6 +4594,9 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, | |||
| 	dd->f_xgxs_reset        = qib_7220_xgxs_reset; | ||||
| 	dd->f_writescratch      = writescratch; | ||||
| 	dd->f_tempsense_rd	= qib_7220_tempsense_rd; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	dd->f_notify_dca = qib_7220_notify_dca; | ||||
| #endif | ||||
| 	/*
 | ||||
| 	 * Do remaining pcie setup and save pcie values in dd. | ||||
| 	 * Any error printing is already done by the init code. | ||||
|  |  | |||
|  | @ -44,6 +44,9 @@ | |||
| #include <linux/module.h> | ||||
| #include <rdma/ib_verbs.h> | ||||
| #include <rdma/ib_smi.h> | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| #include <linux/dca.h> | ||||
| #endif | ||||
| 
 | ||||
| #include "qib.h" | ||||
| #include "qib_7322_regs.h" | ||||
|  | @ -519,6 +522,14 @@ static const u8 qib_7322_physportstate[0x20] = { | |||
| 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN | ||||
| }; | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| struct qib_irq_notify { | ||||
| 	int rcv; | ||||
| 	void *arg; | ||||
| 	struct irq_affinity_notify notify; | ||||
| }; | ||||
| #endif | ||||
| 
 | ||||
| struct qib_chip_specific { | ||||
| 	u64 __iomem *cregbase; | ||||
| 	u64 *cntrs; | ||||
|  | @ -546,6 +557,12 @@ struct qib_chip_specific { | |||
| 	u32 lastbuf_for_pio; | ||||
| 	u32 stay_in_freeze; | ||||
| 	u32 recovery_ports_initted; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	u32 dca_ctrl; | ||||
| 	int rhdr_cpu[18]; | ||||
| 	int sdma_cpu[2]; | ||||
| 	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */ | ||||
| #endif | ||||
| 	struct qib_msix_entry *msix_entries; | ||||
| 	unsigned long *sendchkenable; | ||||
| 	unsigned long *sendgrhchk; | ||||
|  | @ -573,7 +590,7 @@ struct vendor_txdds_ent { | |||
| static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); | ||||
| 
 | ||||
| #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ | ||||
| #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ | ||||
| #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */ | ||||
| #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */ | ||||
| #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ | ||||
| 
 | ||||
|  | @ -642,28 +659,76 @@ static struct { | |||
| 	irq_handler_t handler; | ||||
| 	int lsb; | ||||
| 	int port; /* 0 if not port-specific, else port # */ | ||||
| 	int dca; | ||||
| } irq_table[] = { | ||||
| 	{ "", qib_7322intr, -1, 0 }, | ||||
| 	{ "", qib_7322intr, -1, 0, 0 }, | ||||
| 	{ " (buf avail)", qib_7322bufavail, | ||||
| 		SYM_LSB(IntStatus, SendBufAvail), 0 }, | ||||
| 		SYM_LSB(IntStatus, SendBufAvail), 0, 0}, | ||||
| 	{ " (sdma 0)", sdma_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaInt_0), 1 }, | ||||
| 		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 }, | ||||
| 	{ " (sdma 1)", sdma_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaInt_1), 2 }, | ||||
| 		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 }, | ||||
| 	{ " (sdmaI 0)", sdma_idle_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 }, | ||||
| 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1}, | ||||
| 	{ " (sdmaI 1)", sdma_idle_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 }, | ||||
| 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1}, | ||||
| 	{ " (sdmaP 0)", sdma_progress_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 }, | ||||
| 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 }, | ||||
| 	{ " (sdmaP 1)", sdma_progress_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 }, | ||||
| 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 }, | ||||
| 	{ " (sdmaC 0)", sdma_cleanup_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 }, | ||||
| 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 }, | ||||
| 	{ " (sdmaC 1)", sdma_cleanup_intr, | ||||
| 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, | ||||
| 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0}, | ||||
| }; | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 
 | ||||
| static const struct dca_reg_map { | ||||
| 	int     shadow_inx; | ||||
| 	int     lsb; | ||||
| 	u64     mask; | ||||
| 	u16     regno; | ||||
| } dca_rcvhdr_reg_map[] = { | ||||
| 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) }, | ||||
| 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) }, | ||||
| 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) }, | ||||
| 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) }, | ||||
| 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) }, | ||||
| 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) }, | ||||
| 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) }, | ||||
| 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) }, | ||||
| 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) }, | ||||
| 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) }, | ||||
| 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) }, | ||||
| 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) }, | ||||
| 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) }, | ||||
| 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) }, | ||||
| 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) }, | ||||
| 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) }, | ||||
| 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) }, | ||||
| 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH), | ||||
| 	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) }, | ||||
| }; | ||||
| #endif | ||||
| 
 | ||||
| /* ibcctrl bits */ | ||||
| #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 | ||||
| /* cycle through TS1/TS2 till OK */ | ||||
|  | @ -686,6 +751,13 @@ static void write_7322_init_portregs(struct qib_pportdata *); | |||
| static void setup_7322_link_recovery(struct qib_pportdata *, u32); | ||||
| static void check_7322_rxe_status(struct qib_pportdata *); | ||||
| static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *); | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| static void qib_setup_dca(struct qib_devdata *dd); | ||||
| static void setup_dca_notifier(struct qib_devdata *dd, | ||||
| 			       struct qib_msix_entry *m); | ||||
| static void reset_dca_notifier(struct qib_devdata *dd, | ||||
| 			       struct qib_msix_entry *m); | ||||
| #endif | ||||
| 
 | ||||
| /**
 | ||||
|  * qib_read_ureg32 - read 32-bit virtualized per-context register | ||||
|  | @ -2558,6 +2630,162 @@ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) | |||
| 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 
 | ||||
| static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event) | ||||
| { | ||||
| 	switch (event) { | ||||
| 	case DCA_PROVIDER_ADD: | ||||
| 		if (dd->flags & QIB_DCA_ENABLED) | ||||
| 			break; | ||||
| 		if (!dca_add_requester(&dd->pcidev->dev)) { | ||||
| 			qib_devinfo(dd->pcidev, "DCA enabled\n"); | ||||
| 			dd->flags |= QIB_DCA_ENABLED; | ||||
| 			qib_setup_dca(dd); | ||||
| 		} | ||||
| 		break; | ||||
| 	case DCA_PROVIDER_REMOVE: | ||||
| 		if (dd->flags & QIB_DCA_ENABLED) { | ||||
| 			dca_remove_requester(&dd->pcidev->dev); | ||||
| 			dd->flags &= ~QIB_DCA_ENABLED; | ||||
| 			dd->cspec->dca_ctrl = 0; | ||||
| 			qib_write_kreg(dd, KREG_IDX(DCACtrlA), | ||||
| 				dd->cspec->dca_ctrl); | ||||
| 		} | ||||
| 		break; | ||||
| 	} | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu) | ||||
| { | ||||
| 	struct qib_devdata *dd = rcd->dd; | ||||
| 	struct qib_chip_specific *cspec = dd->cspec; | ||||
| 
 | ||||
| 	if (!(dd->flags & QIB_DCA_ENABLED)) | ||||
| 		return; | ||||
| 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { | ||||
| 		const struct dca_reg_map *rmp; | ||||
| 
 | ||||
| 		cspec->rhdr_cpu[rcd->ctxt] = cpu; | ||||
| 		rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; | ||||
| 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask; | ||||
| 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= | ||||
| 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; | ||||
| 		qib_devinfo(dd->pcidev, | ||||
| 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu, | ||||
| 			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | ||||
| 		qib_write_kreg(dd, rmp->regno, | ||||
| 			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | ||||
| 		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable); | ||||
| 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu) | ||||
| { | ||||
| 	struct qib_devdata *dd = ppd->dd; | ||||
| 	struct qib_chip_specific *cspec = dd->cspec; | ||||
| 	unsigned pidx = ppd->port - 1; | ||||
| 
 | ||||
| 	if (!(dd->flags & QIB_DCA_ENABLED)) | ||||
| 		return; | ||||
| 	if (cspec->sdma_cpu[pidx] != cpu) { | ||||
| 		cspec->sdma_cpu[pidx] = cpu; | ||||
| 		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ? | ||||
| 			SYM_MASK(DCACtrlF, SendDma1DCAOPH) : | ||||
| 			SYM_MASK(DCACtrlF, SendDma0DCAOPH)); | ||||
| 		cspec->dca_rcvhdr_ctrl[4] |= | ||||
| 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << | ||||
| 				(ppd->hw_pidx ? | ||||
| 					SYM_LSB(DCACtrlF, SendDma1DCAOPH) : | ||||
| 					SYM_LSB(DCACtrlF, SendDma0DCAOPH)); | ||||
| 		qib_devinfo(dd->pcidev, | ||||
| 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu, | ||||
| 			(long long) cspec->dca_rcvhdr_ctrl[4]); | ||||
| 		qib_write_kreg(dd, KREG_IDX(DCACtrlF), | ||||
| 			       cspec->dca_rcvhdr_ctrl[4]); | ||||
| 		cspec->dca_ctrl |= ppd->hw_pidx ? | ||||
| 			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) : | ||||
| 			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable); | ||||
| 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void qib_setup_dca(struct qib_devdata *dd) | ||||
| { | ||||
| 	struct qib_chip_specific *cspec = dd->cspec; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++) | ||||
| 		cspec->rhdr_cpu[i] = -1; | ||||
| 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | ||||
| 		cspec->sdma_cpu[i] = -1; | ||||
| 	cspec->dca_rcvhdr_ctrl[0] = | ||||
| 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt)); | ||||
| 	cspec->dca_rcvhdr_ctrl[1] = | ||||
| 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt)); | ||||
| 	cspec->dca_rcvhdr_ctrl[2] = | ||||
| 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt)); | ||||
| 	cspec->dca_rcvhdr_ctrl[3] = | ||||
| 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt)); | ||||
| 	cspec->dca_rcvhdr_ctrl[4] = | ||||
| 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) | | ||||
| 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt)); | ||||
| 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | ||||
| 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, | ||||
| 			       cspec->dca_rcvhdr_ctrl[i]); | ||||
| 	for (i = 0; i < cspec->num_msix_entries; i++) | ||||
| 		setup_dca_notifier(dd, &cspec->msix_entries[i]); | ||||
| } | ||||
| 
 | ||||
| static void qib_irq_notifier_notify(struct irq_affinity_notify *notify, | ||||
| 			     const cpumask_t *mask) | ||||
| { | ||||
| 	struct qib_irq_notify *n = | ||||
| 		container_of(notify, struct qib_irq_notify, notify); | ||||
| 	int cpu = cpumask_first(mask); | ||||
| 
 | ||||
| 	if (n->rcv) { | ||||
| 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; | ||||
| 		qib_update_rhdrq_dca(rcd, cpu); | ||||
| 	} else { | ||||
| 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; | ||||
| 		qib_update_sdma_dca(ppd, cpu); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void qib_irq_notifier_release(struct kref *ref) | ||||
| { | ||||
| 	struct qib_irq_notify *n = | ||||
| 		container_of(ref, struct qib_irq_notify, notify.kref); | ||||
| 	struct qib_devdata *dd; | ||||
| 
 | ||||
| 	if (n->rcv) { | ||||
| 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; | ||||
| 		dd = rcd->dd; | ||||
| 	} else { | ||||
| 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; | ||||
| 		dd = ppd->dd; | ||||
| 	} | ||||
| 	qib_devinfo(dd->pcidev, | ||||
| 		"release on HCA notify 0x%p n 0x%p\n", ref, n); | ||||
| 	kfree(n); | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * Disable MSIx interrupt if enabled, call generic MSIx code | ||||
|  * to cleanup, and clear pending MSIx interrupts. | ||||
|  | @ -2575,6 +2803,9 @@ static void qib_7322_nomsix(struct qib_devdata *dd) | |||
| 
 | ||||
| 		dd->cspec->num_msix_entries = 0; | ||||
| 		for (i = 0; i < n; i++) { | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 			reset_dca_notifier(dd, &dd->cspec->msix_entries[i]); | ||||
| #endif | ||||
| 			irq_set_affinity_hint( | ||||
| 			  dd->cspec->msix_entries[i].msix.vector, NULL); | ||||
| 			free_cpumask_var(dd->cspec->msix_entries[i].mask); | ||||
|  | @ -2602,6 +2833,15 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd) | |||
| { | ||||
| 	int i; | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	if (dd->flags & QIB_DCA_ENABLED) { | ||||
| 		dca_remove_requester(&dd->pcidev->dev); | ||||
| 		dd->flags &= ~QIB_DCA_ENABLED; | ||||
| 		dd->cspec->dca_ctrl = 0; | ||||
| 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); | ||||
| 	} | ||||
| #endif | ||||
| 
 | ||||
| 	qib_7322_free_irq(dd); | ||||
| 	kfree(dd->cspec->cntrs); | ||||
| 	kfree(dd->cspec->sendchkenable); | ||||
|  | @ -3068,6 +3308,53 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data) | |||
| 	return IRQ_HANDLED; | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 
 | ||||
| static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) | ||||
| { | ||||
| 	if (!m->dca) | ||||
| 		return; | ||||
| 	qib_devinfo(dd->pcidev, | ||||
| 		"Disabling notifier on HCA %d irq %d\n", | ||||
| 		dd->unit, | ||||
| 		m->msix.vector); | ||||
| 	irq_set_affinity_notifier( | ||||
| 		m->msix.vector, | ||||
| 		NULL); | ||||
| 	m->notifier = NULL; | ||||
| } | ||||
| 
 | ||||
| static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m) | ||||
| { | ||||
| 	struct qib_irq_notify *n; | ||||
| 
 | ||||
| 	if (!m->dca) | ||||
| 		return; | ||||
| 	n = kzalloc(sizeof(*n), GFP_KERNEL); | ||||
| 	if (n) { | ||||
| 		int ret; | ||||
| 
 | ||||
| 		m->notifier = n; | ||||
| 		n->notify.irq = m->msix.vector; | ||||
| 		n->notify.notify = qib_irq_notifier_notify; | ||||
| 		n->notify.release = qib_irq_notifier_release; | ||||
| 		n->arg = m->arg; | ||||
| 		n->rcv = m->rcv; | ||||
| 		qib_devinfo(dd->pcidev, | ||||
| 			"set notifier irq %d rcv %d notify %p\n", | ||||
| 			n->notify.irq, n->rcv, &n->notify); | ||||
| 		ret = irq_set_affinity_notifier( | ||||
| 				n->notify.irq, | ||||
| 				&n->notify); | ||||
| 		if (ret) { | ||||
| 			m->notifier = NULL; | ||||
| 			kfree(n); | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * Set up our chip-specific interrupt handler. | ||||
|  * The interrupt type has already been setup, so | ||||
|  | @ -3149,6 +3436,9 @@ try_intx: | |||
| 		void *arg; | ||||
| 		u64 val; | ||||
| 		int lsb, reg, sh; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 		int dca = 0; | ||||
| #endif | ||||
| 
 | ||||
| 		dd->cspec->msix_entries[msixnum]. | ||||
| 			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1] | ||||
|  | @ -3161,6 +3451,9 @@ try_intx: | |||
| 				arg = dd->pport + irq_table[i].port - 1; | ||||
| 			} else | ||||
| 				arg = dd; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 			dca = irq_table[i].dca; | ||||
| #endif | ||||
| 			lsb = irq_table[i].lsb; | ||||
| 			handler = irq_table[i].handler; | ||||
| 			snprintf(dd->cspec->msix_entries[msixnum].name, | ||||
|  | @ -3178,6 +3471,9 @@ try_intx: | |||
| 				continue; | ||||
| 			if (qib_krcvq01_no_msi && ctxt < 2) | ||||
| 				continue; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 			dca = 1; | ||||
| #endif | ||||
| 			lsb = QIB_I_RCVAVAIL_LSB + ctxt; | ||||
| 			handler = qib_7322pintr; | ||||
| 			snprintf(dd->cspec->msix_entries[msixnum].name, | ||||
|  | @ -3203,6 +3499,11 @@ try_intx: | |||
| 			goto try_intx; | ||||
| 		} | ||||
| 		dd->cspec->msix_entries[msixnum].arg = arg; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 		dd->cspec->msix_entries[msixnum].dca = dca; | ||||
| 		dd->cspec->msix_entries[msixnum].rcv = | ||||
| 			handler == qib_7322pintr; | ||||
| #endif | ||||
| 		if (lsb >= 0) { | ||||
| 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; | ||||
| 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * | ||||
|  | @ -6885,6 +7186,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | |||
| 	dd->f_sdma_init_early   = qib_7322_sdma_init_early; | ||||
| 	dd->f_writescratch      = writescratch; | ||||
| 	dd->f_tempsense_rd	= qib_7322_tempsense_rd; | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	dd->f_notify_dca	= qib_7322_notify_dca; | ||||
| #endif | ||||
| 	/*
 | ||||
| 	 * Do remaining PCIe setup and save PCIe values in dd. | ||||
| 	 * Any error printing is already done by the init code. | ||||
|  | @ -6921,7 +7225,7 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | |||
| 		actual_cnt -= dd->num_pports; | ||||
| 
 | ||||
| 	tabsize = actual_cnt; | ||||
| 	dd->cspec->msix_entries = kmalloc(tabsize * | ||||
| 	dd->cspec->msix_entries = kzalloc(tabsize * | ||||
| 			sizeof(struct qib_msix_entry), GFP_KERNEL); | ||||
| 	if (!dd->cspec->msix_entries) { | ||||
| 		qib_dev_err(dd, "No memory for MSIx table\n"); | ||||
|  | @ -6941,7 +7245,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | |||
| 
 | ||||
| 	/* clear diagctrl register, in case diags were running and crashed */ | ||||
| 	qib_write_kreg(dd, kr_hwdiagctrl, 0); | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	if (!dca_add_requester(&pdev->dev)) { | ||||
| 		qib_devinfo(dd->pcidev, "DCA enabled\n"); | ||||
| 		dd->flags |= QIB_DCA_ENABLED; | ||||
| 		qib_setup_dca(dd); | ||||
| 	} | ||||
| #endif | ||||
| 	goto bail; | ||||
| 
 | ||||
| bail_cleanup: | ||||
|  | @ -7156,15 +7466,20 @@ static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { | |||
| 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 11 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */ | ||||
| }; | ||||
| 
 | ||||
| static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { | ||||
|  | @ -7173,15 +7488,20 @@ static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { | |||
| 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0, 13 },	/* QME7342 backplane settings */ | ||||
| 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */ | ||||
| }; | ||||
| 
 | ||||
| static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { | ||||
|  | @ -7190,15 +7510,20 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { | |||
| 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 1, 12, 10 },	/* QME7342 backplane setting */ | ||||
| 	{  0, 1, 12, 11 },	/* QME7342 backplane setting */ | ||||
| 	{  0, 1, 12, 12 },	/* QME7342 backplane setting */ | ||||
| 	{  0, 1, 12, 14 },	/* QME7342 backplane setting */ | ||||
| 	{  0, 1, 12,  6 },	/* QME7342 backplane setting */ | ||||
| 	{  0, 1, 12,  7 },	/* QME7342 backplane setting */ | ||||
| 	{  0, 1, 12,  8 },	/* QME7342 backplane setting */ | ||||
| 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */ | ||||
| 	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */ | ||||
| 	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */ | ||||
| 	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */ | ||||
| }; | ||||
| 
 | ||||
| static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = { | ||||
|  |  | |||
|  | @ -39,10 +39,17 @@ | |||
| #include <linux/idr.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/printk.h> | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| #include <linux/dca.h> | ||||
| #endif | ||||
| 
 | ||||
| #include "qib.h" | ||||
| #include "qib_common.h" | ||||
| #include "qib_mad.h" | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| #include "qib_debugfs.h" | ||||
| #include "qib_verbs.h" | ||||
| #endif | ||||
| 
 | ||||
| #undef pr_fmt | ||||
| #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt | ||||
|  | @ -64,6 +71,11 @@ ushort qib_cfgctxts; | |||
| module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO); | ||||
| MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use"); | ||||
| 
 | ||||
| unsigned qib_numa_aware; | ||||
| module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO); | ||||
| MODULE_PARM_DESC(numa_aware, | ||||
| 	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process"); | ||||
| 
 | ||||
| /*
 | ||||
|  * If set, do not write to any regs if avoidable, hack to allow | ||||
|  * check for deranged default register values. | ||||
|  | @ -89,8 +101,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ | |||
| module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); | ||||
| MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); | ||||
| 
 | ||||
| struct workqueue_struct *qib_cq_wq; | ||||
| 
 | ||||
| static void verify_interrupt(unsigned long); | ||||
| 
 | ||||
| static struct idr qib_unit_table; | ||||
|  | @ -121,6 +131,11 @@ int qib_create_ctxts(struct qib_devdata *dd) | |||
| { | ||||
| 	unsigned i; | ||||
| 	int ret; | ||||
| 	int local_node_id = pcibus_to_node(dd->pcidev->bus); | ||||
| 
 | ||||
| 	if (local_node_id < 0) | ||||
| 		local_node_id = numa_node_id(); | ||||
| 	dd->assigned_node_id = local_node_id; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Allocate full ctxtcnt array, rather than just cfgctxts, because | ||||
|  | @ -143,7 +158,8 @@ int qib_create_ctxts(struct qib_devdata *dd) | |||
| 			continue; | ||||
| 
 | ||||
| 		ppd = dd->pport + (i % dd->num_pports); | ||||
| 		rcd = qib_create_ctxtdata(ppd, i); | ||||
| 
 | ||||
| 		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id); | ||||
| 		if (!rcd) { | ||||
| 			qib_dev_err(dd, | ||||
| 				"Unable to allocate ctxtdata for Kernel ctxt, failing\n"); | ||||
|  | @ -161,20 +177,33 @@ done: | |||
| /*
 | ||||
|  * Common code for user and kernel context setup. | ||||
|  */ | ||||
| struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) | ||||
| struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, | ||||
| 	int node_id) | ||||
| { | ||||
| 	struct qib_devdata *dd = ppd->dd; | ||||
| 	struct qib_ctxtdata *rcd; | ||||
| 
 | ||||
| 	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); | ||||
| 	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id); | ||||
| 	if (rcd) { | ||||
| 		INIT_LIST_HEAD(&rcd->qp_wait_list); | ||||
| 		rcd->node_id = node_id; | ||||
| 		rcd->ppd = ppd; | ||||
| 		rcd->dd = dd; | ||||
| 		rcd->cnt = 1; | ||||
| 		rcd->ctxt = ctxt; | ||||
| 		dd->rcd[ctxt] = rcd; | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ | ||||
| 			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats), | ||||
| 				GFP_KERNEL, node_id); | ||||
| 			if (!rcd->opstats) { | ||||
| 				kfree(rcd); | ||||
| 				qib_dev_err(dd, | ||||
| 					"Unable to allocate per ctxt stats buffer\n"); | ||||
| 				return NULL; | ||||
| 			} | ||||
| 		} | ||||
| #endif | ||||
| 		dd->f_init_ctxt(rcd); | ||||
| 
 | ||||
| 		/*
 | ||||
|  | @ -429,6 +458,7 @@ static int loadtime_init(struct qib_devdata *dd) | |||
| 	dd->intrchk_timer.function = verify_interrupt; | ||||
| 	dd->intrchk_timer.data = (unsigned long) dd; | ||||
| 
 | ||||
| 	ret = qib_cq_init(dd); | ||||
| done: | ||||
| 	return ret; | ||||
| } | ||||
|  | @ -944,6 +974,10 @@ void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | |||
| 	vfree(rcd->subctxt_uregbase); | ||||
| 	vfree(rcd->subctxt_rcvegrbuf); | ||||
| 	vfree(rcd->subctxt_rcvhdr_base); | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	kfree(rcd->opstats); | ||||
| 	rcd->opstats = NULL; | ||||
| #endif | ||||
| 	kfree(rcd); | ||||
| } | ||||
| 
 | ||||
|  | @ -1033,7 +1067,6 @@ done: | |||
| 	dd->f_set_armlaunch(dd, 1); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| void qib_free_devdata(struct qib_devdata *dd) | ||||
| { | ||||
| 	unsigned long flags; | ||||
|  | @ -1043,6 +1076,9 @@ void qib_free_devdata(struct qib_devdata *dd) | |||
| 	list_del(&dd->list); | ||||
| 	spin_unlock_irqrestore(&qib_devs_lock, flags); | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	qib_dbg_ibdev_exit(&dd->verbs_dev); | ||||
| #endif | ||||
| 	ib_dealloc_device(&dd->verbs_dev.ibdev); | ||||
| } | ||||
| 
 | ||||
|  | @ -1066,6 +1102,10 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) | |||
| 		goto bail; | ||||
| 	} | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	qib_dbg_ibdev_init(&dd->verbs_dev); | ||||
| #endif | ||||
| 
 | ||||
| 	idr_preload(GFP_KERNEL); | ||||
| 	spin_lock_irqsave(&qib_devs_lock, flags); | ||||
| 
 | ||||
|  | @ -1081,6 +1121,9 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) | |||
| 	if (ret < 0) { | ||||
| 		qib_early_err(&pdev->dev, | ||||
| 			      "Could not allocate unit ID: error %d\n", -ret); | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 		qib_dbg_ibdev_exit(&dd->verbs_dev); | ||||
| #endif | ||||
| 		ib_dealloc_device(&dd->verbs_dev.ibdev); | ||||
| 		dd = ERR_PTR(ret); | ||||
| 		goto bail; | ||||
|  | @ -1158,6 +1201,35 @@ struct pci_driver qib_driver = { | |||
| 	.err_handler = &qib_pci_err_handler, | ||||
| }; | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 
 | ||||
| static int qib_notify_dca(struct notifier_block *, unsigned long, void *); | ||||
| static struct notifier_block dca_notifier = { | ||||
| 	.notifier_call  = qib_notify_dca, | ||||
| 	.next           = NULL, | ||||
| 	.priority       = 0 | ||||
| }; | ||||
| 
 | ||||
| static int qib_notify_dca_device(struct device *device, void *data) | ||||
| { | ||||
| 	struct qib_devdata *dd = dev_get_drvdata(device); | ||||
| 	unsigned long event = *(unsigned long *)data; | ||||
| 
 | ||||
| 	return dd->f_notify_dca(dd, event); | ||||
| } | ||||
| 
 | ||||
| static int qib_notify_dca(struct notifier_block *nb, unsigned long event, | ||||
| 					  void *p) | ||||
| { | ||||
| 	int rval; | ||||
| 
 | ||||
| 	rval = driver_for_each_device(&qib_driver.driver, NULL, | ||||
| 				      &event, qib_notify_dca_device); | ||||
| 	return rval ? NOTIFY_BAD : NOTIFY_DONE; | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * Do all the generic driver unit- and chip-independent memory | ||||
|  * allocation and initialization. | ||||
|  | @ -1170,22 +1242,22 @@ static int __init qlogic_ib_init(void) | |||
| 	if (ret) | ||||
| 		goto bail; | ||||
| 
 | ||||
| 	qib_cq_wq = create_singlethread_workqueue("qib_cq"); | ||||
| 	if (!qib_cq_wq) { | ||||
| 		ret = -ENOMEM; | ||||
| 		goto bail_dev; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * These must be called before the driver is registered with | ||||
| 	 * the PCI subsystem. | ||||
| 	 */ | ||||
| 	idr_init(&qib_unit_table); | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	dca_register_notify(&dca_notifier); | ||||
| #endif | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	qib_dbg_init(); | ||||
| #endif | ||||
| 	ret = pci_register_driver(&qib_driver); | ||||
| 	if (ret < 0) { | ||||
| 		pr_err("Unable to register driver: error %d\n", -ret); | ||||
| 		goto bail_unit; | ||||
| 		goto bail_dev; | ||||
| 	} | ||||
| 
 | ||||
| 	/* not fatal if it doesn't work */ | ||||
|  | @ -1193,10 +1265,14 @@ static int __init qlogic_ib_init(void) | |||
| 		pr_err("Unable to register ipathfs\n"); | ||||
| 	goto bail; /* all OK */ | ||||
| 
 | ||||
| bail_unit: | ||||
| 	idr_destroy(&qib_unit_table); | ||||
| 	destroy_workqueue(qib_cq_wq); | ||||
| bail_dev: | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	dca_unregister_notify(&dca_notifier); | ||||
| #endif | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	qib_dbg_exit(); | ||||
| #endif | ||||
| 	idr_destroy(&qib_unit_table); | ||||
| 	qib_dev_cleanup(); | ||||
| bail: | ||||
| 	return ret; | ||||
|  | @ -1217,9 +1293,13 @@ static void __exit qlogic_ib_cleanup(void) | |||
| 			"Unable to cleanup counter filesystem: error %d\n", | ||||
| 			-ret); | ||||
| 
 | ||||
| #ifdef CONFIG_INFINIBAND_QIB_DCA | ||||
| 	dca_unregister_notify(&dca_notifier); | ||||
| #endif | ||||
| 	pci_unregister_driver(&qib_driver); | ||||
| 
 | ||||
| 	destroy_workqueue(qib_cq_wq); | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	qib_dbg_exit(); | ||||
| #endif | ||||
| 
 | ||||
| 	qib_cpulist_count = 0; | ||||
| 	kfree(qib_cpulist); | ||||
|  | @ -1311,6 +1391,7 @@ static void cleanup_device_data(struct qib_devdata *dd) | |||
| 	} | ||||
| 	kfree(tmp); | ||||
| 	kfree(dd->boardname); | ||||
| 	qib_cq_exit(dd); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | @ -1483,6 +1564,7 @@ static void qib_remove_one(struct pci_dev *pdev) | |||
| int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | ||||
| { | ||||
| 	unsigned amt; | ||||
| 	int old_node_id; | ||||
| 
 | ||||
| 	if (!rcd->rcvhdrq) { | ||||
| 		dma_addr_t phys_hdrqtail; | ||||
|  | @ -1492,9 +1574,13 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | |||
| 			    sizeof(u32), PAGE_SIZE); | ||||
| 		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? | ||||
| 			GFP_USER : GFP_KERNEL; | ||||
| 
 | ||||
| 		old_node_id = dev_to_node(&dd->pcidev->dev); | ||||
| 		set_dev_node(&dd->pcidev->dev, rcd->node_id); | ||||
| 		rcd->rcvhdrq = dma_alloc_coherent( | ||||
| 			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, | ||||
| 			gfp_flags | __GFP_COMP); | ||||
| 		set_dev_node(&dd->pcidev->dev, old_node_id); | ||||
| 
 | ||||
| 		if (!rcd->rcvhdrq) { | ||||
| 			qib_dev_err(dd, | ||||
|  | @ -1510,9 +1596,11 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | |||
| 		} | ||||
| 
 | ||||
| 		if (!(dd->flags & QIB_NODMA_RTAIL)) { | ||||
| 			set_dev_node(&dd->pcidev->dev, rcd->node_id); | ||||
| 			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent( | ||||
| 				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, | ||||
| 				gfp_flags); | ||||
| 			set_dev_node(&dd->pcidev->dev, old_node_id); | ||||
| 			if (!rcd->rcvhdrtail_kvaddr) | ||||
| 				goto bail_free; | ||||
| 			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; | ||||
|  | @ -1556,6 +1644,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) | |||
| 	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; | ||||
| 	size_t size; | ||||
| 	gfp_t gfp_flags; | ||||
| 	int old_node_id; | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * GFP_USER, but without GFP_FS, so buffer cache can be | ||||
|  | @ -1574,25 +1663,29 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) | |||
| 	size = rcd->rcvegrbuf_size; | ||||
| 	if (!rcd->rcvegrbuf) { | ||||
| 		rcd->rcvegrbuf = | ||||
| 			kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]), | ||||
| 				GFP_KERNEL); | ||||
| 			kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]), | ||||
| 				GFP_KERNEL, rcd->node_id); | ||||
| 		if (!rcd->rcvegrbuf) | ||||
| 			goto bail; | ||||
| 	} | ||||
| 	if (!rcd->rcvegrbuf_phys) { | ||||
| 		rcd->rcvegrbuf_phys = | ||||
| 			kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]), | ||||
| 				GFP_KERNEL); | ||||
| 			kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]), | ||||
| 				GFP_KERNEL, rcd->node_id); | ||||
| 		if (!rcd->rcvegrbuf_phys) | ||||
| 			goto bail_rcvegrbuf; | ||||
| 	} | ||||
| 	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { | ||||
| 		if (rcd->rcvegrbuf[e]) | ||||
| 			continue; | ||||
| 
 | ||||
| 		old_node_id = dev_to_node(&dd->pcidev->dev); | ||||
| 		set_dev_node(&dd->pcidev->dev, rcd->node_id); | ||||
| 		rcd->rcvegrbuf[e] = | ||||
| 			dma_alloc_coherent(&dd->pcidev->dev, size, | ||||
| 					   &rcd->rcvegrbuf_phys[e], | ||||
| 					   gfp_flags); | ||||
| 		set_dev_node(&dd->pcidev->dev, old_node_id); | ||||
| 		if (!rcd->rcvegrbuf[e]) | ||||
| 			goto bail_rcvegrbuf_phys; | ||||
| 	} | ||||
|  |  | |||
|  | @ -1,5 +1,5 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2012 Intel Corporation.  All rights reserved. | ||||
|  * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved. | ||||
|  * Copyright (c) 2006 - 2012 QLogic Corporation.  * All rights reserved. | ||||
|  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||||
|  * | ||||
|  | @ -35,6 +35,9 @@ | |||
| #include <linux/err.h> | ||||
| #include <linux/vmalloc.h> | ||||
| #include <linux/jhash.h> | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| #include <linux/seq_file.h> | ||||
| #endif | ||||
| 
 | ||||
| #include "qib.h" | ||||
| 
 | ||||
|  | @ -222,8 +225,8 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
| 	unsigned long flags; | ||||
| 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num); | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev->qpt_lock, flags); | ||||
| 	atomic_inc(&qp->refcount); | ||||
| 	spin_lock_irqsave(&dev->qpt_lock, flags); | ||||
| 
 | ||||
| 	if (qp->ibqp.qp_num == 0) | ||||
| 		rcu_assign_pointer(ibp->qp0, qp); | ||||
|  | @ -235,7 +238,6 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
| 	} | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&dev->qpt_lock, flags); | ||||
| 	synchronize_rcu(); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | @ -247,36 +249,39 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
| 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||||
| 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num); | ||||
| 	unsigned long flags; | ||||
| 	int removed = 1; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&dev->qpt_lock, flags); | ||||
| 
 | ||||
| 	if (rcu_dereference_protected(ibp->qp0, | ||||
| 			lockdep_is_held(&dev->qpt_lock)) == qp) { | ||||
| 		atomic_dec(&qp->refcount); | ||||
| 		rcu_assign_pointer(ibp->qp0, NULL); | ||||
| 	} else if (rcu_dereference_protected(ibp->qp1, | ||||
| 			lockdep_is_held(&dev->qpt_lock)) == qp) { | ||||
| 		atomic_dec(&qp->refcount); | ||||
| 		rcu_assign_pointer(ibp->qp1, NULL); | ||||
| 	} else { | ||||
| 		struct qib_qp *q; | ||||
| 		struct qib_qp __rcu **qpp; | ||||
| 
 | ||||
| 		removed = 0; | ||||
| 		qpp = &dev->qp_table[n]; | ||||
| 		for (; (q = rcu_dereference_protected(*qpp, | ||||
| 				lockdep_is_held(&dev->qpt_lock))) != NULL; | ||||
| 				qpp = &q->next) | ||||
| 			if (q == qp) { | ||||
| 				atomic_dec(&qp->refcount); | ||||
| 				rcu_assign_pointer(*qpp, | ||||
| 					rcu_dereference_protected(qp->next, | ||||
| 					 lockdep_is_held(&dev->qpt_lock))); | ||||
| 				removed = 1; | ||||
| 				break; | ||||
| 			} | ||||
| 	} | ||||
| 
 | ||||
| 	spin_unlock_irqrestore(&dev->qpt_lock, flags); | ||||
| 	synchronize_rcu(); | ||||
| 	if (removed) { | ||||
| 		synchronize_rcu(); | ||||
| 		atomic_dec(&qp->refcount); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  | @ -334,26 +339,25 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) | |||
| { | ||||
| 	struct qib_qp *qp = NULL; | ||||
| 
 | ||||
| 	rcu_read_lock(); | ||||
| 	if (unlikely(qpn <= 1)) { | ||||
| 		rcu_read_lock(); | ||||
| 		if (qpn == 0) | ||||
| 			qp = rcu_dereference(ibp->qp0); | ||||
| 		else | ||||
| 			qp = rcu_dereference(ibp->qp1); | ||||
| 		if (qp) | ||||
| 			atomic_inc(&qp->refcount); | ||||
| 	} else { | ||||
| 		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; | ||||
| 		unsigned n = qpn_hash(dev, qpn); | ||||
| 
 | ||||
| 		rcu_read_lock(); | ||||
| 		for (qp = rcu_dereference(dev->qp_table[n]); qp; | ||||
| 			qp = rcu_dereference(qp->next)) | ||||
| 			if (qp->ibqp.qp_num == qpn) | ||||
| 			if (qp->ibqp.qp_num == qpn) { | ||||
| 				atomic_inc(&qp->refcount); | ||||
| 				break; | ||||
| 			} | ||||
| 	} | ||||
| 	if (qp) | ||||
| 		if (unlikely(!atomic_inc_not_zero(&qp->refcount))) | ||||
| 			qp = NULL; | ||||
| 
 | ||||
| 	rcu_read_unlock(); | ||||
| 	return qp; | ||||
| } | ||||
|  | @ -1286,3 +1290,94 @@ void qib_get_credit(struct qib_qp *qp, u32 aeth) | |||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 
 | ||||
| struct qib_qp_iter { | ||||
| 	struct qib_ibdev *dev; | ||||
| 	struct qib_qp *qp; | ||||
| 	int n; | ||||
| }; | ||||
| 
 | ||||
| struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev) | ||||
| { | ||||
| 	struct qib_qp_iter *iter; | ||||
| 
 | ||||
| 	iter = kzalloc(sizeof(*iter), GFP_KERNEL); | ||||
| 	if (!iter) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	iter->dev = dev; | ||||
| 	if (qib_qp_iter_next(iter)) { | ||||
| 		kfree(iter); | ||||
| 		return NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	return iter; | ||||
| } | ||||
| 
 | ||||
| int qib_qp_iter_next(struct qib_qp_iter *iter) | ||||
| { | ||||
| 	struct qib_ibdev *dev = iter->dev; | ||||
| 	int n = iter->n; | ||||
| 	int ret = 1; | ||||
| 	struct qib_qp *pqp = iter->qp; | ||||
| 	struct qib_qp *qp; | ||||
| 
 | ||||
| 	rcu_read_lock(); | ||||
| 	for (; n < dev->qp_table_size; n++) { | ||||
| 		if (pqp) | ||||
| 			qp = rcu_dereference(pqp->next); | ||||
| 		else | ||||
| 			qp = rcu_dereference(dev->qp_table[n]); | ||||
| 		pqp = qp; | ||||
| 		if (qp) { | ||||
| 			if (iter->qp) | ||||
| 				atomic_dec(&iter->qp->refcount); | ||||
| 			atomic_inc(&qp->refcount); | ||||
| 			rcu_read_unlock(); | ||||
| 			iter->qp = qp; | ||||
| 			iter->n = n; | ||||
| 			return 0; | ||||
| 		} | ||||
| 	} | ||||
| 	rcu_read_unlock(); | ||||
| 	if (iter->qp) | ||||
| 		atomic_dec(&iter->qp->refcount); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static const char * const qp_type_str[] = { | ||||
| 	"SMI", "GSI", "RC", "UC", "UD", | ||||
| }; | ||||
| 
 | ||||
| void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter) | ||||
| { | ||||
| 	struct qib_swqe *wqe; | ||||
| 	struct qib_qp *qp = iter->qp; | ||||
| 
 | ||||
| 	wqe = get_swqe_ptr(qp, qp->s_last); | ||||
| 	seq_printf(s, | ||||
| 		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n", | ||||
| 		   iter->n, | ||||
| 		   qp->ibqp.qp_num, | ||||
| 		   qp_type_str[qp->ibqp.qp_type], | ||||
| 		   qp->state, | ||||
| 		   wqe->wr.opcode, | ||||
| 		   qp->s_hdrwords, | ||||
| 		   qp->s_flags, | ||||
| 		   atomic_read(&qp->s_dma_busy), | ||||
| 		   !list_empty(&qp->iowait), | ||||
| 		   qp->timeout, | ||||
| 		   wqe->ssn, | ||||
| 		   qp->s_lsn, | ||||
| 		   qp->s_last_psn, | ||||
| 		   qp->s_psn, qp->s_next_psn, | ||||
| 		   qp->s_sending_psn, qp->s_sending_hpsn, | ||||
| 		   qp->s_last, qp->s_acked, qp->s_cur, | ||||
| 		   qp->s_tail, qp->s_head, qp->s_size, | ||||
| 		   qp->remote_qpn, | ||||
| 		   qp->remote_ah_attr.dlid); | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
|  |  | |||
|  | @ -645,9 +645,11 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) | |||
| 	} else | ||||
| 		goto drop; | ||||
| 
 | ||||
| 	opcode = be32_to_cpu(ohdr->bth[0]) >> 24; | ||||
| 	ibp->opstats[opcode & 0x7f].n_bytes += tlen; | ||||
| 	ibp->opstats[opcode & 0x7f].n_packets++; | ||||
| 	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f; | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	rcd->opstats->stats[opcode].n_bytes += tlen; | ||||
| 	rcd->opstats->stats[opcode].n_packets++; | ||||
| #endif | ||||
| 
 | ||||
| 	/* Get the destination QP number. */ | ||||
| 	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; | ||||
|  |  | |||
|  | @ -1,5 +1,5 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2012 Intel Corporation.  All rights reserved. | ||||
|  * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved. | ||||
|  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | ||||
|  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||||
|  * | ||||
|  | @ -41,6 +41,7 @@ | |||
| #include <linux/interrupt.h> | ||||
| #include <linux/kref.h> | ||||
| #include <linux/workqueue.h> | ||||
| #include <linux/kthread.h> | ||||
| #include <linux/completion.h> | ||||
| #include <rdma/ib_pack.h> | ||||
| #include <rdma/ib_user_verbs.h> | ||||
|  | @ -267,7 +268,8 @@ struct qib_cq_wc { | |||
|  */ | ||||
| struct qib_cq { | ||||
| 	struct ib_cq ibcq; | ||||
| 	struct work_struct comptask; | ||||
| 	struct kthread_work comptask; | ||||
| 	struct qib_devdata *dd; | ||||
| 	spinlock_t lock; /* protect changes in this struct */ | ||||
| 	u8 notify; | ||||
| 	u8 triggered; | ||||
|  | @ -658,6 +660,10 @@ struct qib_opcode_stats { | |||
| 	u64 n_bytes;            /* total number of bytes */ | ||||
| }; | ||||
| 
 | ||||
| struct qib_opcode_stats_perctx { | ||||
| 	struct qib_opcode_stats stats[128]; | ||||
| }; | ||||
| 
 | ||||
| struct qib_ibport { | ||||
| 	struct qib_qp __rcu *qp0; | ||||
| 	struct qib_qp __rcu *qp1; | ||||
|  | @ -724,7 +730,6 @@ struct qib_ibport { | |||
| 	u8 vl_high_limit; | ||||
| 	u8 sl_to_vl[16]; | ||||
| 
 | ||||
| 	struct qib_opcode_stats opstats[128]; | ||||
| }; | ||||
| 
 | ||||
| 
 | ||||
|  | @ -768,6 +773,10 @@ struct qib_ibdev { | |||
| 	spinlock_t n_srqs_lock; | ||||
| 	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ | ||||
| 	spinlock_t n_mcast_grps_lock; | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 	/* per HCA debugfs */ | ||||
| 	struct dentry *qib_ibdev_dbg; | ||||
| #endif | ||||
| }; | ||||
| 
 | ||||
| struct qib_verbs_counters { | ||||
|  | @ -832,8 +841,6 @@ static inline int qib_send_ok(struct qib_qp *qp) | |||
| 		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); | ||||
| } | ||||
| 
 | ||||
| extern struct workqueue_struct *qib_cq_wq; | ||||
| 
 | ||||
| /*
 | ||||
|  * This must be called with s_lock held. | ||||
|  */ | ||||
|  | @ -910,6 +917,18 @@ void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt); | |||
| 
 | ||||
| void qib_free_qpn_table(struct qib_qpn_table *qpt); | ||||
| 
 | ||||
| #ifdef CONFIG_DEBUG_FS | ||||
| 
 | ||||
| struct qib_qp_iter; | ||||
| 
 | ||||
| struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev); | ||||
| 
 | ||||
| int qib_qp_iter_next(struct qib_qp_iter *iter); | ||||
| 
 | ||||
| void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter); | ||||
| 
 | ||||
| #endif | ||||
| 
 | ||||
| void qib_get_credit(struct qib_qp *qp, u32 aeth); | ||||
| 
 | ||||
| unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult); | ||||
|  | @ -972,6 +991,10 @@ int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | |||
| 
 | ||||
| int qib_destroy_srq(struct ib_srq *ibsrq); | ||||
| 
 | ||||
| int qib_cq_init(struct qib_devdata *dd); | ||||
| 
 | ||||
| void qib_cq_exit(struct qib_devdata *dd); | ||||
| 
 | ||||
| void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig); | ||||
| 
 | ||||
| int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); | ||||
|  |  | |||
|  | @ -53,8 +53,8 @@ | |||
| 
 | ||||
| #define DRV_NAME	"ib_srp" | ||||
| #define PFX		DRV_NAME ": " | ||||
| #define DRV_VERSION	"0.2" | ||||
| #define DRV_RELDATE	"November 1, 2005" | ||||
| #define DRV_VERSION	"1.0" | ||||
| #define DRV_RELDATE	"July 1, 2013" | ||||
| 
 | ||||
| MODULE_AUTHOR("Roland Dreier"); | ||||
| MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " | ||||
|  | @ -231,14 +231,16 @@ static int srp_create_target_ib(struct srp_target_port *target) | |||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, | ||||
| 			       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0); | ||||
| 			       srp_recv_completion, NULL, target, SRP_RQ_SIZE, | ||||
| 			       target->comp_vector); | ||||
| 	if (IS_ERR(recv_cq)) { | ||||
| 		ret = PTR_ERR(recv_cq); | ||||
| 		goto err; | ||||
| 	} | ||||
| 
 | ||||
| 	send_cq = ib_create_cq(target->srp_host->srp_dev->dev, | ||||
| 			       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0); | ||||
| 			       srp_send_completion, NULL, target, SRP_SQ_SIZE, | ||||
| 			       target->comp_vector); | ||||
| 	if (IS_ERR(send_cq)) { | ||||
| 		ret = PTR_ERR(send_cq); | ||||
| 		goto err_recv_cq; | ||||
|  | @ -542,11 +544,11 @@ static void srp_remove_work(struct work_struct *work) | |||
| 
 | ||||
| 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); | ||||
| 
 | ||||
| 	srp_remove_target(target); | ||||
| 
 | ||||
| 	spin_lock(&target->srp_host->target_lock); | ||||
| 	list_del(&target->list); | ||||
| 	spin_unlock(&target->srp_host->target_lock); | ||||
| 
 | ||||
| 	srp_remove_target(target); | ||||
| } | ||||
| 
 | ||||
| static void srp_rport_delete(struct srp_rport *rport) | ||||
|  | @ -1744,18 +1746,25 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
| { | ||||
| 	struct srp_target_port *target = host_to_target(scmnd->device->host); | ||||
| 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); | ||||
| 
 | ||||
| 	if (!req || !srp_claim_req(target, req, scmnd)) | ||||
| 		return FAILED; | ||||
| 	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, | ||||
| 			  SRP_TSK_ABORT_TASK); | ||||
| 	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, | ||||
| 			      SRP_TSK_ABORT_TASK) == 0 || | ||||
| 	    target->transport_offline) | ||||
| 		ret = SUCCESS; | ||||
| 	else if (target->transport_offline) | ||||
| 		ret = FAST_IO_FAIL; | ||||
| 	else | ||||
| 		ret = FAILED; | ||||
| 	srp_free_req(target, req, scmnd, 0); | ||||
| 	scmnd->result = DID_ABORT << 16; | ||||
| 	scmnd->scsi_done(scmnd); | ||||
| 
 | ||||
| 	return SUCCESS; | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| static int srp_reset_device(struct scsi_cmnd *scmnd) | ||||
|  | @ -1891,6 +1900,14 @@ static ssize_t show_local_ib_device(struct device *dev, | |||
| 	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); | ||||
| } | ||||
| 
 | ||||
| static ssize_t show_comp_vector(struct device *dev, | ||||
| 				struct device_attribute *attr, char *buf) | ||||
| { | ||||
| 	struct srp_target_port *target = host_to_target(class_to_shost(dev)); | ||||
| 
 | ||||
| 	return sprintf(buf, "%d\n", target->comp_vector); | ||||
| } | ||||
| 
 | ||||
| static ssize_t show_cmd_sg_entries(struct device *dev, | ||||
| 				   struct device_attribute *attr, char *buf) | ||||
| { | ||||
|  | @ -1917,6 +1934,7 @@ static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL); | |||
| static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL); | ||||
| static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL); | ||||
| static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); | ||||
| static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL); | ||||
| static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL); | ||||
| static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL); | ||||
| 
 | ||||
|  | @ -1931,6 +1949,7 @@ static struct device_attribute *srp_host_attrs[] = { | |||
| 	&dev_attr_zero_req_lim, | ||||
| 	&dev_attr_local_ib_port, | ||||
| 	&dev_attr_local_ib_device, | ||||
| 	&dev_attr_comp_vector, | ||||
| 	&dev_attr_cmd_sg_entries, | ||||
| 	&dev_attr_allow_ext_sg, | ||||
| 	NULL | ||||
|  | @ -1946,6 +1965,7 @@ static struct scsi_host_template srp_template = { | |||
| 	.eh_abort_handler		= srp_abort, | ||||
| 	.eh_device_reset_handler	= srp_reset_device, | ||||
| 	.eh_host_reset_handler		= srp_reset_host, | ||||
| 	.skip_settle_delay		= true, | ||||
| 	.sg_tablesize			= SRP_DEF_SG_TABLESIZE, | ||||
| 	.can_queue			= SRP_CMD_SQ_SIZE, | ||||
| 	.this_id			= -1, | ||||
|  | @ -2001,6 +2021,36 @@ static struct class srp_class = { | |||
| 	.dev_release = srp_release_dev | ||||
| }; | ||||
| 
 | ||||
| /**
 | ||||
|  * srp_conn_unique() - check whether the connection to a target is unique | ||||
|  */ | ||||
| static bool srp_conn_unique(struct srp_host *host, | ||||
| 			    struct srp_target_port *target) | ||||
| { | ||||
| 	struct srp_target_port *t; | ||||
| 	bool ret = false; | ||||
| 
 | ||||
| 	if (target->state == SRP_TARGET_REMOVED) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	ret = true; | ||||
| 
 | ||||
| 	spin_lock(&host->target_lock); | ||||
| 	list_for_each_entry(t, &host->target_list, list) { | ||||
| 		if (t != target && | ||||
| 		    target->id_ext == t->id_ext && | ||||
| 		    target->ioc_guid == t->ioc_guid && | ||||
| 		    target->initiator_ext == t->initiator_ext) { | ||||
| 			ret = false; | ||||
| 			break; | ||||
| 		} | ||||
| 	} | ||||
| 	spin_unlock(&host->target_lock); | ||||
| 
 | ||||
| out: | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Target ports are added by writing | ||||
|  * | ||||
|  | @ -2023,6 +2073,7 @@ enum { | |||
| 	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9, | ||||
| 	SRP_OPT_ALLOW_EXT_SG	= 1 << 10, | ||||
| 	SRP_OPT_SG_TABLESIZE	= 1 << 11, | ||||
| 	SRP_OPT_COMP_VECTOR	= 1 << 12, | ||||
| 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	| | ||||
| 				   SRP_OPT_IOC_GUID	| | ||||
| 				   SRP_OPT_DGID		| | ||||
|  | @ -2043,6 +2094,7 @@ static const match_table_t srp_opt_tokens = { | |||
| 	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	}, | ||||
| 	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	}, | ||||
| 	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	}, | ||||
| 	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	}, | ||||
| 	{ SRP_OPT_ERR,			NULL 			} | ||||
| }; | ||||
| 
 | ||||
|  | @ -2198,6 +2250,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) | |||
| 			target->sg_tablesize = token; | ||||
| 			break; | ||||
| 
 | ||||
| 		case SRP_OPT_COMP_VECTOR: | ||||
| 			if (match_int(args, &token) || token < 0) { | ||||
| 				pr_warn("bad comp_vector parameter '%s'\n", p); | ||||
| 				goto out; | ||||
| 			} | ||||
| 			target->comp_vector = token; | ||||
| 			break; | ||||
| 
 | ||||
| 		default: | ||||
| 			pr_warn("unknown parameter or missing value '%s' in target creation request\n", | ||||
| 				p); | ||||
|  | @ -2257,6 +2317,16 @@ static ssize_t srp_create_target(struct device *dev, | |||
| 	if (ret) | ||||
| 		goto err; | ||||
| 
 | ||||
| 	if (!srp_conn_unique(target->srp_host, target)) { | ||||
| 		shost_printk(KERN_INFO, target->scsi_host, | ||||
| 			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", | ||||
| 			     be64_to_cpu(target->id_ext), | ||||
| 			     be64_to_cpu(target->ioc_guid), | ||||
| 			     be64_to_cpu(target->initiator_ext)); | ||||
| 		ret = -EEXIST; | ||||
| 		goto err; | ||||
| 	} | ||||
| 
 | ||||
| 	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg && | ||||
| 				target->cmd_sg_cnt < target->sg_tablesize) { | ||||
| 		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); | ||||
|  | @ -2507,6 +2577,8 @@ static void srp_remove_one(struct ib_device *device) | |||
| 	struct srp_target_port *target; | ||||
| 
 | ||||
| 	srp_dev = ib_get_client_data(device, &srp_client); | ||||
| 	if (!srp_dev) | ||||
| 		return; | ||||
| 
 | ||||
| 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { | ||||
| 		device_unregister(&host->dev); | ||||
|  |  | |||
|  | @ -156,6 +156,7 @@ struct srp_target_port { | |||
| 	char			target_name[32]; | ||||
| 	unsigned int		scsi_id; | ||||
| 	unsigned int		sg_tablesize; | ||||
| 	int			comp_vector; | ||||
| 
 | ||||
| 	struct ib_sa_path_rec	path; | ||||
| 	__be16			orig_dgid[8]; | ||||
|  |  | |||
|  | @ -19,5 +19,6 @@ config NET_VENDOR_MELLANOX | |||
| if NET_VENDOR_MELLANOX | ||||
| 
 | ||||
| source "drivers/net/ethernet/mellanox/mlx4/Kconfig" | ||||
| source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig" | ||||
| 
 | ||||
| endif # NET_VENDOR_MELLANOX | ||||
|  |  | |||
|  | @ -3,3 +3,4 @@ | |||
| #
 | ||||
| 
 | ||||
| obj-$(CONFIG_MLX4_CORE) += mlx4/ | ||||
| obj-$(CONFIG_MLX5_CORE) += mlx5/core/ | ||||
|  |  | |||
							
								
								
									
										18
									
								
								drivers/net/ethernet/mellanox/mlx5/core/Kconfig
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										18
									
								
								drivers/net/ethernet/mellanox/mlx5/core/Kconfig
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,18 @@ | |||
| # | ||||
| # Mellanox driver configuration | ||||
| # | ||||
| 
 | ||||
| config MLX5_CORE | ||||
| 	tristate | ||||
| 	depends on PCI && X86 | ||||
| 	default n | ||||
| 
 | ||||
| config MLX5_DEBUG | ||||
| 	bool "Verbose debugging output" if (MLX5_CORE && EXPERT) | ||||
| 	depends on MLX5_CORE | ||||
| 	default y | ||||
| 	---help--- | ||||
| 	  This option causes debugging code to be compiled into the | ||||
| 	  mlx5_core driver.  The output can be turned on via the | ||||
| 	  debug_mask module parameter (which can also be set after | ||||
| 	  the driver is loaded through sysfs). | ||||
							
								
								
									
										5
									
								
								drivers/net/ethernet/mellanox/mlx5/core/Makefile
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								drivers/net/ethernet/mellanox/mlx5/core/Makefile
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,5 @@ | |||
| obj-$(CONFIG_MLX5_CORE)		+= mlx5_core.o | ||||
| 
 | ||||
| mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 | ||||
| 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
 | ||||
| 		mad.o | ||||
							
								
								
									
										238
									
								
								drivers/net/ethernet/mellanox/mlx5/core/alloc.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										238
									
								
								drivers/net/ethernet/mellanox/mlx5/core/alloc.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,238 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/errno.h> | ||||
| #include <linux/slab.h> | ||||
| #include <linux/mm.h> | ||||
| #include <linux/export.h> | ||||
| #include <linux/bitmap.h> | ||||
| #include <linux/dma-mapping.h> | ||||
| #include <linux/vmalloc.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| 
 | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| /* Handling for queue buffers -- we allocate a bunch of memory and
 | ||||
|  * register it in a memory region at HCA virtual address 0.  If the | ||||
|  * requested size is > max_direct, we split the allocation into | ||||
|  * multiple pages, so we don't require too much contiguous memory. | ||||
|  */ | ||||
| 
 | ||||
| int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, | ||||
| 		   struct mlx5_buf *buf) | ||||
| { | ||||
| 	dma_addr_t t; | ||||
| 
 | ||||
| 	buf->size = size; | ||||
| 	if (size <= max_direct) { | ||||
| 		buf->nbufs        = 1; | ||||
| 		buf->npages       = 1; | ||||
| 		buf->page_shift   = get_order(size) + PAGE_SHIFT; | ||||
| 		buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev, | ||||
| 							size, &t, GFP_KERNEL); | ||||
| 		if (!buf->direct.buf) | ||||
| 			return -ENOMEM; | ||||
| 
 | ||||
| 		buf->direct.map = t; | ||||
| 
 | ||||
| 		while (t & ((1 << buf->page_shift) - 1)) { | ||||
| 			--buf->page_shift; | ||||
| 			buf->npages *= 2; | ||||
| 		} | ||||
| 	} else { | ||||
| 		int i; | ||||
| 
 | ||||
| 		buf->direct.buf  = NULL; | ||||
| 		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE; | ||||
| 		buf->npages      = buf->nbufs; | ||||
| 		buf->page_shift  = PAGE_SHIFT; | ||||
| 		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list), | ||||
| 					   GFP_KERNEL); | ||||
| 		if (!buf->page_list) | ||||
| 			return -ENOMEM; | ||||
| 
 | ||||
| 		for (i = 0; i < buf->nbufs; i++) { | ||||
| 			buf->page_list[i].buf = | ||||
| 				dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, | ||||
| 						    &t, GFP_KERNEL); | ||||
| 			if (!buf->page_list[i].buf) | ||||
| 				goto err_free; | ||||
| 
 | ||||
| 			buf->page_list[i].map = t; | ||||
| 		} | ||||
| 
 | ||||
| 		if (BITS_PER_LONG == 64) { | ||||
| 			struct page **pages; | ||||
| 			pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); | ||||
| 			if (!pages) | ||||
| 				goto err_free; | ||||
| 			for (i = 0; i < buf->nbufs; i++) | ||||
| 				pages[i] = virt_to_page(buf->page_list[i].buf); | ||||
| 			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | ||||
| 			kfree(pages); | ||||
| 			if (!buf->direct.buf) | ||||
| 				goto err_free; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| err_free: | ||||
| 	mlx5_buf_free(dev, buf); | ||||
| 
 | ||||
| 	return -ENOMEM; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_buf_alloc); | ||||
| 
 | ||||
| void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) | ||||
| { | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (buf->nbufs == 1) | ||||
| 		dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, | ||||
| 				  buf->direct.map); | ||||
| 	else { | ||||
| 		if (BITS_PER_LONG == 64 && buf->direct.buf) | ||||
| 			vunmap(buf->direct.buf); | ||||
| 
 | ||||
| 		for (i = 0; i < buf->nbufs; i++) | ||||
| 			if (buf->page_list[i].buf) | ||||
| 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | ||||
| 						  buf->page_list[i].buf, | ||||
| 						  buf->page_list[i].map); | ||||
| 		kfree(buf->page_list); | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_buf_free); | ||||
| 
 | ||||
| static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) | ||||
| { | ||||
| 	struct mlx5_db_pgdir *pgdir; | ||||
| 
 | ||||
| 	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); | ||||
| 	if (!pgdir) | ||||
| 		return NULL; | ||||
| 
 | ||||
| 	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); | ||||
| 	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, | ||||
| 					    &pgdir->db_dma, GFP_KERNEL); | ||||
| 	if (!pgdir->db_page) { | ||||
| 		kfree(pgdir); | ||||
| 		return NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	return pgdir; | ||||
| } | ||||
| 
 | ||||
| static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, | ||||
| 				    struct mlx5_db *db) | ||||
| { | ||||
| 	int offset; | ||||
| 	int i; | ||||
| 
 | ||||
| 	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); | ||||
| 	if (i >= MLX5_DB_PER_PAGE) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	__clear_bit(i, pgdir->bitmap); | ||||
| 
 | ||||
| 	db->u.pgdir = pgdir; | ||||
| 	db->index   = i; | ||||
| 	offset = db->index * L1_CACHE_BYTES; | ||||
| 	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page); | ||||
| 	db->dma     = pgdir->db_dma  + offset; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) | ||||
| { | ||||
| 	struct mlx5_db_pgdir *pgdir; | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	mutex_lock(&dev->priv.pgdir_mutex); | ||||
| 
 | ||||
| 	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list) | ||||
| 		if (!mlx5_alloc_db_from_pgdir(pgdir, db)) | ||||
| 			goto out; | ||||
| 
 | ||||
| 	pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev)); | ||||
| 	if (!pgdir) { | ||||
| 		ret = -ENOMEM; | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	list_add(&pgdir->list, &dev->priv.pgdir_list); | ||||
| 
 | ||||
| 	/* This should never fail -- we just allocated an empty page: */ | ||||
| 	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db)); | ||||
| 
 | ||||
| out: | ||||
| 	mutex_unlock(&dev->priv.pgdir_mutex); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_db_alloc); | ||||
| 
 | ||||
| void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) | ||||
| { | ||||
| 	mutex_lock(&dev->priv.pgdir_mutex); | ||||
| 
 | ||||
| 	__set_bit(db->index, db->u.pgdir->bitmap); | ||||
| 
 | ||||
| 	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { | ||||
| 		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | ||||
| 				  db->u.pgdir->db_page, db->u.pgdir->db_dma); | ||||
| 		list_del(&db->u.pgdir->list); | ||||
| 		kfree(db->u.pgdir); | ||||
| 	} | ||||
| 
 | ||||
| 	mutex_unlock(&dev->priv.pgdir_mutex); | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_db_free); | ||||
| 
 | ||||
| 
 | ||||
| void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) | ||||
| { | ||||
| 	u64 addr; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < buf->npages; i++) { | ||||
| 		if (buf->nbufs == 1) | ||||
| 			addr = buf->direct.map + (i << buf->page_shift); | ||||
| 		else | ||||
| 			addr = buf->page_list[i].map; | ||||
| 
 | ||||
| 		pas[i] = cpu_to_be64(addr); | ||||
| 	} | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_fill_page_array); | ||||
							
								
								
									
										1515
									
								
								drivers/net/ethernet/mellanox/mlx5/core/cmd.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										1515
									
								
								drivers/net/ethernet/mellanox/mlx5/core/cmd.c
									
										
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load diff
											
										
									
								
							
							
								
								
									
										224
									
								
								drivers/net/ethernet/mellanox/mlx5/core/cq.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										224
									
								
								drivers/net/ethernet/mellanox/mlx5/core/cq.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,224 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/hardirq.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include <rdma/ib_verbs.h> | ||||
| #include <linux/mlx5/cq.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) | ||||
| { | ||||
| 	struct mlx5_core_cq *cq; | ||||
| 	struct mlx5_cq_table *table = &dev->priv.cq_table; | ||||
| 
 | ||||
| 	spin_lock(&table->lock); | ||||
| 	cq = radix_tree_lookup(&table->tree, cqn); | ||||
| 	if (likely(cq)) | ||||
| 		atomic_inc(&cq->refcount); | ||||
| 	spin_unlock(&table->lock); | ||||
| 
 | ||||
| 	if (!cq) { | ||||
| 		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	++cq->arm_sn; | ||||
| 
 | ||||
| 	cq->comp(cq); | ||||
| 
 | ||||
| 	if (atomic_dec_and_test(&cq->refcount)) | ||||
| 		complete(&cq->free); | ||||
| } | ||||
| 
 | ||||
| void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) | ||||
| { | ||||
| 	struct mlx5_cq_table *table = &dev->priv.cq_table; | ||||
| 	struct mlx5_core_cq *cq; | ||||
| 
 | ||||
| 	spin_lock(&table->lock); | ||||
| 
 | ||||
| 	cq = radix_tree_lookup(&table->tree, cqn); | ||||
| 	if (cq) | ||||
| 		atomic_inc(&cq->refcount); | ||||
| 
 | ||||
| 	spin_unlock(&table->lock); | ||||
| 
 | ||||
| 	if (!cq) { | ||||
| 		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	cq->event(cq, event_type); | ||||
| 
 | ||||
| 	if (atomic_dec_and_test(&cq->refcount)) | ||||
| 		complete(&cq->free); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | ||||
| 			struct mlx5_create_cq_mbox_in *in, int inlen) | ||||
| { | ||||
| 	int err; | ||||
| 	struct mlx5_cq_table *table = &dev->priv.cq_table; | ||||
| 	struct mlx5_create_cq_mbox_out out; | ||||
| 	struct mlx5_destroy_cq_mbox_in din; | ||||
| 	struct mlx5_destroy_cq_mbox_out dout; | ||||
| 
 | ||||
| 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; | ||||
| 	cq->cons_index = 0; | ||||
| 	cq->arm_sn     = 0; | ||||
| 	atomic_set(&cq->refcount, 1); | ||||
| 	init_completion(&cq->free); | ||||
| 
 | ||||
| 	spin_lock_irq(&table->lock); | ||||
| 	err = radix_tree_insert(&table->tree, cq->cqn, cq); | ||||
| 	spin_unlock_irq(&table->lock); | ||||
| 	if (err) | ||||
| 		goto err_cmd; | ||||
| 
 | ||||
| 	cq->pid = current->pid; | ||||
| 	err = mlx5_debug_cq_add(dev, cq); | ||||
| 	if (err) | ||||
| 		mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", | ||||
| 			      cq->cqn); | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| err_cmd: | ||||
| 	memset(&din, 0, sizeof(din)); | ||||
| 	memset(&dout, 0, sizeof(dout)); | ||||
| 	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); | ||||
| 	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_create_cq); | ||||
| 
 | ||||
| int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) | ||||
| { | ||||
| 	struct mlx5_cq_table *table = &dev->priv.cq_table; | ||||
| 	struct mlx5_destroy_cq_mbox_in in; | ||||
| 	struct mlx5_destroy_cq_mbox_out out; | ||||
| 	struct mlx5_core_cq *tmp; | ||||
| 	int err; | ||||
| 
 | ||||
| 	spin_lock_irq(&table->lock); | ||||
| 	tmp = radix_tree_delete(&table->tree, cq->cqn); | ||||
| 	spin_unlock_irq(&table->lock); | ||||
| 	if (!tmp) { | ||||
| 		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 	if (tmp != cq) { | ||||
| 		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); | ||||
| 	in.cqn = cpu_to_be32(cq->cqn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	synchronize_irq(cq->irqn); | ||||
| 
 | ||||
| 	mlx5_debug_cq_remove(dev, cq); | ||||
| 	if (atomic_dec_and_test(&cq->refcount)) | ||||
| 		complete(&cq->free); | ||||
| 	wait_for_completion(&cq->free); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_destroy_cq); | ||||
| 
 | ||||
| int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | ||||
| 		       struct mlx5_query_cq_mbox_out *out) | ||||
| { | ||||
| 	struct mlx5_query_cq_mbox_in in; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(out, 0, sizeof(*out)); | ||||
| 
 | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); | ||||
| 	in.cqn = cpu_to_be32(cq->cqn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out->hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out->hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_query_cq); | ||||
| 
 | ||||
| 
 | ||||
| int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | ||||
| 			int type, struct mlx5_cq_modify_params *params) | ||||
| { | ||||
| 	return -ENOSYS; | ||||
| } | ||||
| 
 | ||||
| int mlx5_init_cq_table(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_cq_table *table = &dev->priv.cq_table; | ||||
| 	int err; | ||||
| 
 | ||||
| 	spin_lock_init(&table->lock); | ||||
| 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); | ||||
| 	err = mlx5_cq_debugfs_init(dev); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	mlx5_cq_debugfs_cleanup(dev); | ||||
| } | ||||
							
								
								
									
										587
									
								
								drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										587
									
								
								drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,587 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/module.h> | ||||
| #include <linux/debugfs.h> | ||||
| #include <linux/mlx5/qp.h> | ||||
| #include <linux/mlx5/cq.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| enum { | ||||
| 	QP_PID, | ||||
| 	QP_STATE, | ||||
| 	QP_XPORT, | ||||
| 	QP_MTU, | ||||
| 	QP_N_RECV, | ||||
| 	QP_RECV_SZ, | ||||
| 	QP_N_SEND, | ||||
| 	QP_LOG_PG_SZ, | ||||
| 	QP_RQPN, | ||||
| }; | ||||
| 
 | ||||
| static char *qp_fields[] = { | ||||
| 	[QP_PID]	= "pid", | ||||
| 	[QP_STATE]	= "state", | ||||
| 	[QP_XPORT]	= "transport", | ||||
| 	[QP_MTU]	= "mtu", | ||||
| 	[QP_N_RECV]	= "num_recv", | ||||
| 	[QP_RECV_SZ]	= "rcv_wqe_sz", | ||||
| 	[QP_N_SEND]	= "num_send", | ||||
| 	[QP_LOG_PG_SZ]	= "log2_page_sz", | ||||
| 	[QP_RQPN]	= "remote_qpn", | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	EQ_NUM_EQES, | ||||
| 	EQ_INTR, | ||||
| 	EQ_LOG_PG_SZ, | ||||
| }; | ||||
| 
 | ||||
| static char *eq_fields[] = { | ||||
| 	[EQ_NUM_EQES]	= "num_eqes", | ||||
| 	[EQ_INTR]	= "intr", | ||||
| 	[EQ_LOG_PG_SZ]	= "log_page_size", | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	CQ_PID, | ||||
| 	CQ_NUM_CQES, | ||||
| 	CQ_LOG_PG_SZ, | ||||
| }; | ||||
| 
 | ||||
| static char *cq_fields[] = { | ||||
| 	[CQ_PID]	= "pid", | ||||
| 	[CQ_NUM_CQES]	= "num_cqes", | ||||
| 	[CQ_LOG_PG_SZ]	= "log_page_size", | ||||
| }; | ||||
| 
 | ||||
| struct dentry *mlx5_debugfs_root; | ||||
| EXPORT_SYMBOL(mlx5_debugfs_root); | ||||
| 
 | ||||
| void mlx5_register_debugfs(void) | ||||
| { | ||||
| 	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL); | ||||
| 	if (IS_ERR_OR_NULL(mlx5_debugfs_root)) | ||||
| 		mlx5_debugfs_root = NULL; | ||||
| } | ||||
| 
 | ||||
| void mlx5_unregister_debugfs(void) | ||||
| { | ||||
| 	debugfs_remove(mlx5_debugfs_root); | ||||
| } | ||||
| 
 | ||||
| int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	atomic_set(&dev->num_qps, 0); | ||||
| 
 | ||||
| 	dev->priv.qp_debugfs = debugfs_create_dir("QPs",  dev->priv.dbg_root); | ||||
| 	if (!dev->priv.qp_debugfs) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return; | ||||
| 
 | ||||
| 	debugfs_remove_recursive(dev->priv.qp_debugfs); | ||||
| } | ||||
| 
 | ||||
| int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	dev->priv.eq_debugfs = debugfs_create_dir("EQs",  dev->priv.dbg_root); | ||||
| 	if (!dev->priv.eq_debugfs) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return; | ||||
| 
 | ||||
| 	debugfs_remove_recursive(dev->priv.eq_debugfs); | ||||
| } | ||||
| 
 | ||||
| static ssize_t average_read(struct file *filp, char __user *buf, size_t count, | ||||
| 			    loff_t *pos) | ||||
| { | ||||
| 	struct mlx5_cmd_stats *stats; | ||||
| 	u64 field = 0; | ||||
| 	int ret; | ||||
| 	int err; | ||||
| 	char tbuf[22]; | ||||
| 
 | ||||
| 	if (*pos) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	stats = filp->private_data; | ||||
| 	spin_lock(&stats->lock); | ||||
| 	if (stats->n) | ||||
| 		field = stats->sum / stats->n; | ||||
| 	spin_unlock(&stats->lock); | ||||
| 	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); | ||||
| 	if (ret > 0) { | ||||
| 		err = copy_to_user(buf, tbuf, ret); | ||||
| 		if (err) | ||||
| 			return err; | ||||
| 	} | ||||
| 
 | ||||
| 	*pos += ret; | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| static ssize_t average_write(struct file *filp, const char __user *buf, | ||||
| 			     size_t count, loff_t *pos) | ||||
| { | ||||
| 	struct mlx5_cmd_stats *stats; | ||||
| 
 | ||||
| 	stats = filp->private_data; | ||||
| 	spin_lock(&stats->lock); | ||||
| 	stats->sum = 0; | ||||
| 	stats->n = 0; | ||||
| 	spin_unlock(&stats->lock); | ||||
| 
 | ||||
| 	*pos += count; | ||||
| 
 | ||||
| 	return count; | ||||
| } | ||||
| 
 | ||||
| static const struct file_operations stats_fops = { | ||||
| 	.owner	= THIS_MODULE, | ||||
| 	.open	= simple_open, | ||||
| 	.read	= average_read, | ||||
| 	.write	= average_write, | ||||
| }; | ||||
| 
 | ||||
| int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_cmd_stats *stats; | ||||
| 	struct dentry **cmd; | ||||
| 	const char *namep; | ||||
| 	int err; | ||||
| 	int i; | ||||
| 
 | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	cmd = &dev->priv.cmdif_debugfs; | ||||
| 	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root); | ||||
| 	if (!*cmd) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { | ||||
| 		stats = &dev->cmd.stats[i]; | ||||
| 		namep = mlx5_command_str(i); | ||||
| 		if (strcmp(namep, "unknown command opcode")) { | ||||
| 			stats->root = debugfs_create_dir(namep, *cmd); | ||||
| 			if (!stats->root) { | ||||
| 				mlx5_core_warn(dev, "failed adding command %d\n", | ||||
| 					       i); | ||||
| 				err = -ENOMEM; | ||||
| 				goto out; | ||||
| 			} | ||||
| 
 | ||||
| 			stats->avg = debugfs_create_file("average", 0400, | ||||
| 							 stats->root, stats, | ||||
| 							 &stats_fops); | ||||
| 			if (!stats->avg) { | ||||
| 				mlx5_core_warn(dev, "failed creating debugfs file\n"); | ||||
| 				err = -ENOMEM; | ||||
| 				goto out; | ||||
| 			} | ||||
| 
 | ||||
| 			stats->count = debugfs_create_u64("n", 0400, | ||||
| 							  stats->root, | ||||
| 							  &stats->n); | ||||
| 			if (!stats->count) { | ||||
| 				mlx5_core_warn(dev, "failed creating debugfs file\n"); | ||||
| 				err = -ENOMEM; | ||||
| 				goto out; | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| out: | ||||
| 	debugfs_remove_recursive(dev->priv.cmdif_debugfs); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return; | ||||
| 
 | ||||
| 	debugfs_remove_recursive(dev->priv.cmdif_debugfs); | ||||
| } | ||||
| 
 | ||||
| int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	dev->priv.cq_debugfs = debugfs_create_dir("CQs",  dev->priv.dbg_root); | ||||
| 	if (!dev->priv.cq_debugfs) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return; | ||||
| 
 | ||||
| 	debugfs_remove_recursive(dev->priv.cq_debugfs); | ||||
| } | ||||
| 
 | ||||
| static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, | ||||
| 			 int index) | ||||
| { | ||||
| 	struct mlx5_query_qp_mbox_out *out; | ||||
| 	struct mlx5_qp_context *ctx; | ||||
| 	u64 param = 0; | ||||
| 	int err; | ||||
| 	int no_sq; | ||||
| 
 | ||||
| 	out = kzalloc(sizeof(*out), GFP_KERNEL); | ||||
| 	if (!out) | ||||
| 		return param; | ||||
| 
 | ||||
| 	err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); | ||||
| 	if (err) { | ||||
| 		mlx5_core_warn(dev, "failed to query qp\n"); | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	ctx = &out->ctx; | ||||
| 	switch (index) { | ||||
| 	case QP_PID: | ||||
| 		param = qp->pid; | ||||
| 		break; | ||||
| 	case QP_STATE: | ||||
| 		param = be32_to_cpu(ctx->flags) >> 28; | ||||
| 		break; | ||||
| 	case QP_XPORT: | ||||
| 		param = (be32_to_cpu(ctx->flags) >> 16) & 0xff; | ||||
| 		break; | ||||
| 	case QP_MTU: | ||||
| 		param = ctx->mtu_msgmax >> 5; | ||||
| 		break; | ||||
| 	case QP_N_RECV: | ||||
| 		param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); | ||||
| 		break; | ||||
| 	case QP_RECV_SZ: | ||||
| 		param = 1 << ((ctx->rq_size_stride & 7) + 4); | ||||
| 		break; | ||||
| 	case QP_N_SEND: | ||||
| 		no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15; | ||||
| 		if (!no_sq) | ||||
| 			param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11); | ||||
| 		else | ||||
| 			param = 0; | ||||
| 		break; | ||||
| 	case QP_LOG_PG_SZ: | ||||
| 		param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f; | ||||
| 		param += 12; | ||||
| 		break; | ||||
| 	case QP_RQPN: | ||||
| 		param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff; | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	kfree(out); | ||||
| 	return param; | ||||
| } | ||||
| 
 | ||||
| static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, | ||||
| 			 int index) | ||||
| { | ||||
| 	struct mlx5_query_eq_mbox_out *out; | ||||
| 	struct mlx5_eq_context *ctx; | ||||
| 	u64 param = 0; | ||||
| 	int err; | ||||
| 
 | ||||
| 	out = kzalloc(sizeof(*out), GFP_KERNEL); | ||||
| 	if (!out) | ||||
| 		return param; | ||||
| 
 | ||||
| 	ctx = &out->ctx; | ||||
| 
 | ||||
| 	err = mlx5_core_eq_query(dev, eq, out, sizeof(*out)); | ||||
| 	if (err) { | ||||
| 		mlx5_core_warn(dev, "failed to query eq\n"); | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	switch (index) { | ||||
| 	case EQ_NUM_EQES: | ||||
| 		param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); | ||||
| 		break; | ||||
| 	case EQ_INTR: | ||||
| 		param = ctx->intr; | ||||
| 		break; | ||||
| 	case EQ_LOG_PG_SZ: | ||||
| 		param = (ctx->log_page_size & 0x1f) + 12; | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	kfree(out); | ||||
| 	return param; | ||||
| } | ||||
| 
 | ||||
| static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, | ||||
| 			 int index) | ||||
| { | ||||
| 	struct mlx5_query_cq_mbox_out *out; | ||||
| 	struct mlx5_cq_context *ctx; | ||||
| 	u64 param = 0; | ||||
| 	int err; | ||||
| 
 | ||||
| 	out = kzalloc(sizeof(*out), GFP_KERNEL); | ||||
| 	if (!out) | ||||
| 		return param; | ||||
| 
 | ||||
| 	ctx = &out->ctx; | ||||
| 
 | ||||
| 	err = mlx5_core_query_cq(dev, cq, out); | ||||
| 	if (err) { | ||||
| 		mlx5_core_warn(dev, "failed to query cq\n"); | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	switch (index) { | ||||
| 	case CQ_PID: | ||||
| 		param = cq->pid; | ||||
| 		break; | ||||
| 	case CQ_NUM_CQES: | ||||
| 		param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); | ||||
| 		break; | ||||
| 	case CQ_LOG_PG_SZ: | ||||
| 		param = (ctx->log_pg_sz & 0x1f) + 12; | ||||
| 		break; | ||||
| 	} | ||||
| 
 | ||||
| out: | ||||
| 	kfree(out); | ||||
| 	return param; | ||||
| } | ||||
| 
 | ||||
| static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, | ||||
| 			loff_t *pos) | ||||
| { | ||||
| 	struct mlx5_field_desc *desc; | ||||
| 	struct mlx5_rsc_debug *d; | ||||
| 	char tbuf[18]; | ||||
| 	u64 field; | ||||
| 	int ret; | ||||
| 	int err; | ||||
| 
 | ||||
| 	if (*pos) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	desc = filp->private_data; | ||||
| 	d = (void *)(desc - desc->i) - sizeof(*d); | ||||
| 	switch (d->type) { | ||||
| 	case MLX5_DBG_RSC_QP: | ||||
| 		field = qp_read_field(d->dev, d->object, desc->i); | ||||
| 		break; | ||||
| 
 | ||||
| 	case MLX5_DBG_RSC_EQ: | ||||
| 		field = eq_read_field(d->dev, d->object, desc->i); | ||||
| 		break; | ||||
| 
 | ||||
| 	case MLX5_DBG_RSC_CQ: | ||||
| 		field = cq_read_field(d->dev, d->object, desc->i); | ||||
| 		break; | ||||
| 
 | ||||
| 	default: | ||||
| 		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| 
 | ||||
| 	ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); | ||||
| 	if (ret > 0) { | ||||
| 		err = copy_to_user(buf, tbuf, ret); | ||||
| 		if (err) | ||||
| 			return err; | ||||
| 	} | ||||
| 
 | ||||
| 	*pos += ret; | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
/* File operations for every per-field debugfs file created by
 * add_res_tree(); all files are read-only and share dbg_read().
 */
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};
| 
 | ||||
/* Create a debugfs subtree for one resource (QP/EQ/CQ).
 *
 * A directory named "0x<rsn>" is created under @root, containing one
 * read-only file per entry of @field.  On success *@dbg holds the
 * allocated bookkeeping structure (freed by rem_res_tree()); on failure
 * everything created so far is torn down and an errno is returned.
 *
 * @type:  resource type tag consumed by dbg_read()
 * @rsn:   resource number (qpn/eqn/cqn), used as the directory name
 * @data:  opaque resource pointer handed back to the read helpers
 */
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int err;
	int i;

	/* fields[] is a flexible tail array: one descriptor per file */
	d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn,  root);
	if (!d->root) {
		err = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < nfile; i++) {
		/* dbg_read() uses .i to walk back to this struct */
		d->fields[i].i = i;
		d->fields[i].dent = debugfs_create_file(field[i], 0400,
							d->root, &d->fields[i],
							&fops);
		if (!d->fields[i].dent) {
			err = -ENOMEM;
			goto out_rem;
		}
	}
	*dbg = d;

	return 0;
out_rem:
	/* removes the directory and any files already created under it */
	debugfs_remove_recursive(d->root);

out_free:
	kfree(d);
	return err;
}
| 
 | ||||
/* Tear down a subtree built by add_res_tree(): removing the root
 * directory recursively removes the per-field files beneath it.
 */
static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}
| 
 | ||||
| int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) | ||||
| { | ||||
| 	int err; | ||||
| 
 | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, | ||||
| 			   &qp->dbg, qp->qpn, qp_fields, | ||||
| 			   ARRAY_SIZE(qp_fields), qp); | ||||
| 	if (err) | ||||
| 		qp->dbg = NULL; | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (qp->dbg) | ||||
| 		rem_res_tree(qp->dbg); | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | ||||
| { | ||||
| 	int err; | ||||
| 
 | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, | ||||
| 			   &eq->dbg, eq->eqn, eq_fields, | ||||
| 			   ARRAY_SIZE(eq_fields), eq); | ||||
| 	if (err) | ||||
| 		eq->dbg = NULL; | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (eq->dbg) | ||||
| 		rem_res_tree(eq->dbg); | ||||
| } | ||||
| 
 | ||||
| int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) | ||||
| { | ||||
| 	int err; | ||||
| 
 | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, | ||||
| 			   &cq->dbg, cq->cqn, cq_fields, | ||||
| 			   ARRAY_SIZE(cq_fields), cq); | ||||
| 	if (err) | ||||
| 		cq->dbg = NULL; | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) | ||||
| { | ||||
| 	if (!mlx5_debugfs_root) | ||||
| 		return; | ||||
| 
 | ||||
| 	if (cq->dbg) | ||||
| 		rem_res_tree(cq->dbg); | ||||
| } | ||||
							
								
								
									
										521
									
								
								drivers/net/ethernet/mellanox/mlx5/core/eq.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										521
									
								
								drivers/net/ethernet/mellanox/mlx5/core/eq.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,521 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/interrupt.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
enum {
	/* hardware EQ entry size, in bytes */
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	/* owner bit value written into every fresh EQE (see init_eq_buf) */
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

/* hardware EQ states (firmware-defined encodings) */
enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	/* extra EQEs allocated so the consumer index can lag (see mlx5_eq_int) */
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
};

enum {
	/* offset of the EQ doorbell register within the UAR page */
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

/* event bitmask subscribed on the async EQ: one bit per event type */
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

/* NOTE(review): neither struct below is referenced in this file —
 * presumably they mirror firmware mailbox layouts; confirm before removal.
 */
struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};
| 
 | ||||
| static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) | ||||
| { | ||||
| 	struct mlx5_destroy_eq_mbox_in in; | ||||
| 	struct mlx5_destroy_eq_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ); | ||||
| 	in.eqn = eqn; | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (!err) | ||||
| 		goto ex; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| ex: | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) | ||||
| { | ||||
| 	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); | ||||
| } | ||||
| 
 | ||||
/* Return the EQE at the current consumer index if it is owned by
 * software, or NULL if the hardware still owns it.
 *
 * Ownership alternates every full pass over the (power-of-two sized)
 * queue: the EQE's owner bit must differ from the "pass parity" bit of
 * cons_index (bit log2(nent)) for the entry to be valid.
 */
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
| 
 | ||||
| static const char *eqe_type_str(u8 type) | ||||
| { | ||||
| 	switch (type) { | ||||
| 	case MLX5_EVENT_TYPE_COMP: | ||||
| 		return "MLX5_EVENT_TYPE_COMP"; | ||||
| 	case MLX5_EVENT_TYPE_PATH_MIG: | ||||
| 		return "MLX5_EVENT_TYPE_PATH_MIG"; | ||||
| 	case MLX5_EVENT_TYPE_COMM_EST: | ||||
| 		return "MLX5_EVENT_TYPE_COMM_EST"; | ||||
| 	case MLX5_EVENT_TYPE_SQ_DRAINED: | ||||
| 		return "MLX5_EVENT_TYPE_SQ_DRAINED"; | ||||
| 	case MLX5_EVENT_TYPE_SRQ_LAST_WQE: | ||||
| 		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; | ||||
| 	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: | ||||
| 		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; | ||||
| 	case MLX5_EVENT_TYPE_CQ_ERROR: | ||||
| 		return "MLX5_EVENT_TYPE_CQ_ERROR"; | ||||
| 	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: | ||||
| 		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; | ||||
| 	case MLX5_EVENT_TYPE_PATH_MIG_FAILED: | ||||
| 		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; | ||||
| 	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: | ||||
| 		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; | ||||
| 	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: | ||||
| 		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; | ||||
| 	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: | ||||
| 		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; | ||||
| 	case MLX5_EVENT_TYPE_INTERNAL_ERROR: | ||||
| 		return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; | ||||
| 	case MLX5_EVENT_TYPE_PORT_CHANGE: | ||||
| 		return "MLX5_EVENT_TYPE_PORT_CHANGE"; | ||||
| 	case MLX5_EVENT_TYPE_GPIO_EVENT: | ||||
| 		return "MLX5_EVENT_TYPE_GPIO_EVENT"; | ||||
| 	case MLX5_EVENT_TYPE_REMOTE_CONFIG: | ||||
| 		return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; | ||||
| 	case MLX5_EVENT_TYPE_DB_BF_CONGESTION: | ||||
| 		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; | ||||
| 	case MLX5_EVENT_TYPE_STALL_EVENT: | ||||
| 		return "MLX5_EVENT_TYPE_STALL_EVENT"; | ||||
| 	case MLX5_EVENT_TYPE_CMD: | ||||
| 		return "MLX5_EVENT_TYPE_CMD"; | ||||
| 	case MLX5_EVENT_TYPE_PAGE_REQUEST: | ||||
| 		return "MLX5_EVENT_TYPE_PAGE_REQUEST"; | ||||
| 	default: | ||||
| 		return "Unrecognized event"; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static enum mlx5_dev_event port_subtype_event(u8 subtype) | ||||
| { | ||||
| 	switch (subtype) { | ||||
| 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN: | ||||
| 		return MLX5_DEV_EVENT_PORT_DOWN; | ||||
| 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: | ||||
| 		return MLX5_DEV_EVENT_PORT_UP; | ||||
| 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: | ||||
| 		return MLX5_DEV_EVENT_PORT_INITIALIZED; | ||||
| 	case MLX5_PORT_CHANGE_SUBTYPE_LID: | ||||
| 		return MLX5_DEV_EVENT_LID_CHANGE; | ||||
| 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY: | ||||
| 		return MLX5_DEV_EVENT_PKEY_CHANGE; | ||||
| 	case MLX5_PORT_CHANGE_SUBTYPE_GUID: | ||||
| 		return MLX5_DEV_EVENT_GUID_CHANGE; | ||||
| 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: | ||||
| 		return MLX5_DEV_EVENT_CLIENT_REREG; | ||||
| 	} | ||||
| 	return -1; | ||||
| } | ||||
| 
 | ||||
/* Publish the EQ consumer index to the hardware doorbell.
 *
 * @arm: nonzero rings the "arm" doorbell (re-enables interrupt
 *       generation); zero only updates the CI (doorbell two registers
 *       further on).
 */
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	/* doorbell layout: CI in bits 23:0, EQ number in bits 31:24 */
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
| 
 | ||||
| static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | ||||
| { | ||||
| 	struct mlx5_eqe *eqe; | ||||
| 	int eqes_found = 0; | ||||
| 	int set_ci = 0; | ||||
| 	u32 cqn; | ||||
| 	u32 srqn; | ||||
| 	u8 port; | ||||
| 
 | ||||
| 	while ((eqe = next_eqe_sw(eq))) { | ||||
| 		/*
 | ||||
| 		 * Make sure we read EQ entry contents after we've | ||||
| 		 * checked the ownership bit. | ||||
| 		 */ | ||||
| 		rmb(); | ||||
| 
 | ||||
| 		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); | ||||
| 		switch (eqe->type) { | ||||
| 		case MLX5_EVENT_TYPE_COMP: | ||||
| 			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; | ||||
| 			mlx5_cq_completion(dev, cqn); | ||||
| 			break; | ||||
| 
 | ||||
| 		case MLX5_EVENT_TYPE_PATH_MIG: | ||||
| 		case MLX5_EVENT_TYPE_COMM_EST: | ||||
| 		case MLX5_EVENT_TYPE_SQ_DRAINED: | ||||
| 		case MLX5_EVENT_TYPE_SRQ_LAST_WQE: | ||||
| 		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: | ||||
| 		case MLX5_EVENT_TYPE_PATH_MIG_FAILED: | ||||
| 		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: | ||||
| 		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: | ||||
| 			mlx5_core_dbg(dev, "event %s(%d) arrived\n", | ||||
| 				      eqe_type_str(eqe->type), eqe->type); | ||||
| 			mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff, | ||||
| 				      eqe->type); | ||||
| 			break; | ||||
| 
 | ||||
| 		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: | ||||
| 		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: | ||||
| 			srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; | ||||
| 			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", | ||||
| 				      eqe_type_str(eqe->type), eqe->type, srqn); | ||||
| 			mlx5_srq_event(dev, srqn, eqe->type); | ||||
| 			break; | ||||
| 
 | ||||
| 		case MLX5_EVENT_TYPE_CMD: | ||||
| 			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); | ||||
| 			break; | ||||
| 
 | ||||
| 		case MLX5_EVENT_TYPE_PORT_CHANGE: | ||||
| 			port = (eqe->data.port.port >> 4) & 0xf; | ||||
| 			switch (eqe->sub_type) { | ||||
| 			case MLX5_PORT_CHANGE_SUBTYPE_DOWN: | ||||
| 			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: | ||||
| 			case MLX5_PORT_CHANGE_SUBTYPE_LID: | ||||
| 			case MLX5_PORT_CHANGE_SUBTYPE_PKEY: | ||||
| 			case MLX5_PORT_CHANGE_SUBTYPE_GUID: | ||||
| 			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: | ||||
| 			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: | ||||
| 				dev->event(dev, port_subtype_event(eqe->sub_type), &port); | ||||
| 				break; | ||||
| 			default: | ||||
| 				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", | ||||
| 					       port, eqe->sub_type); | ||||
| 			} | ||||
| 			break; | ||||
| 		case MLX5_EVENT_TYPE_CQ_ERROR: | ||||
| 			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; | ||||
| 			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", | ||||
| 				       cqn, eqe->data.cq_err.syndrome); | ||||
| 			mlx5_cq_event(dev, cqn, eqe->type); | ||||
| 			break; | ||||
| 
 | ||||
| 		case MLX5_EVENT_TYPE_PAGE_REQUEST: | ||||
| 			{ | ||||
| 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); | ||||
| 				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); | ||||
| 
 | ||||
| 				mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); | ||||
| 				mlx5_core_req_pages_handler(dev, func_id, npages); | ||||
| 			} | ||||
| 			break; | ||||
| 
 | ||||
| 
 | ||||
| 		default: | ||||
| 			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); | ||||
| 			break; | ||||
| 		} | ||||
| 
 | ||||
| 		++eq->cons_index; | ||||
| 		eqes_found = 1; | ||||
| 		++set_ci; | ||||
| 
 | ||||
| 		/* The HCA will think the queue has overflowed if we
 | ||||
| 		 * don't tell it we've been processing events.  We | ||||
| 		 * create our EQs with MLX5_NUM_SPARE_EQE extra | ||||
| 		 * entries, so we must update our consumer index at | ||||
| 		 * least that often. | ||||
| 		 */ | ||||
| 		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { | ||||
| 			eq_update_ci(eq, 0); | ||||
| 			set_ci = 0; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	eq_update_ci(eq, 1); | ||||
| 
 | ||||
| 	return eqes_found; | ||||
| } | ||||
| 
 | ||||
| static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) | ||||
| { | ||||
| 	struct mlx5_eq *eq = eq_ptr; | ||||
| 	struct mlx5_core_dev *dev = eq->dev; | ||||
| 
 | ||||
| 	mlx5_eq_int(dev, eq); | ||||
| 
 | ||||
| 	/* MSI-X vectors always belong to us */ | ||||
| 	return IRQ_HANDLED; | ||||
| } | ||||
| 
 | ||||
| static void init_eq_buf(struct mlx5_eq *eq) | ||||
| { | ||||
| 	struct mlx5_eqe *eqe; | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i = 0; i < eq->nent; i++) { | ||||
| 		eqe = get_eqe(eq, i); | ||||
| 		eqe->owner = MLX5_EQE_OWNER_INIT_VAL; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
/* Allocate, create in firmware, and wire up one event queue.
 *
 * @vecidx: MSI-X vector index the EQ is bound to (also stored in eq->irqn)
 * @nent:   requested number of entries; rounded up to a power of two
 *          after adding MLX5_NUM_SPARE_EQE slack
 * @mask:   bitmask of event types delivered to this EQ
 * @name:   IRQ name shown in /proc/interrupts
 * @uar:    UAR page providing the doorbell
 *
 * On failure everything acquired so far is unwound in reverse order.
 * Returns 0 on success or a negative errno / translated firmware status.
 */
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_create_eq_mbox_in *in;
	struct mlx5_create_eq_mbox_out out;
	int err;
	int inlen;

	/* spare entries let mlx5_eq_int() defer CI updates without overflow */
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
			     &eq->buf);
	if (err)
		return err;

	/* hand the whole ring to hardware ownership */
	init_eq_buf(eq);

	/* mailbox carries one PAS (physical address) entry per buffer page */
	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}
	memset(&out, 0, sizeof(out));

	mlx5_fill_page_array(&eq->buf, in->pas);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
	in->ctx.intr = vecidx;
	/* device expects log2 page size relative to 4K */
	in->ctx.log_page_size = PAGE_SHIFT - 12;
	in->events_mask = cpu_to_be64(mask);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		goto err_in;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto err_in;
	}

	eq->eqn = out.eq_number;
	err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  name, eq);
	if (err)
		goto err_eq;

	/* NOTE: irqn stores the vector *index*, not the Linux IRQ number;
	 * mlx5_destroy_unmap_eq() indexes msix_arr with it.
	 */
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	mlx5_vfree(in);
	return 0;

err_irq:
	free_irq(table->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	mlx5_vfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
| 
 | ||||
/* Tear down an EQ created by mlx5_create_map_eq(): remove its debugfs
 * entries, release the IRQ, destroy the firmware object and free the
 * buffer.  The buffer is freed even if the firmware destroy fails.
 */
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	mlx5_debug_eq_remove(dev, eq);
	/* eq->irqn is the MSI-X vector index set at create time */
	free_irq(table->msix_arr[eq->irqn].vector, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
| 
 | ||||
| int mlx5_eq_init(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	int err; | ||||
| 
 | ||||
| 	spin_lock_init(&dev->priv.eq_table.lock); | ||||
| 
 | ||||
| 	err = mlx5_eq_debugfs_init(dev); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
/* Counterpart of mlx5_eq_init(): tear down the EQ debugfs directory. */
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}
| 
 | ||||
/* Bring up the three built-in EQs in dependency order:
 * command EQ first (then switch the command interface to event mode),
 * then the async EQ, then the page-request EQ.  Failures unwind in
 * reverse, restoring polling mode for commands.
 */
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	/* from here on, commands complete via the cmd EQ, not polling */
	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	/* NOTE(review): this mask uses "1 <<" while the others use "1ull <<";
	 * harmless only while MLX5_EVENT_TYPE_PAGE_REQUEST < 32 — confirm.
	 */
	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 dev->caps.max_vf + 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}
| 
 | ||||
/* Tear down the built-in EQs in reverse creation order.  The command
 * interface is switched back to polling before the cmd EQ goes away;
 * if destroying the cmd EQ fails, event mode is restored since the EQ
 * is still alive.
 */
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}
| 
 | ||||
| int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, | ||||
| 		       struct mlx5_query_eq_mbox_out *out, int outlen) | ||||
| { | ||||
| 	struct mlx5_query_eq_mbox_in in; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(out, 0, outlen); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ); | ||||
| 	in.eqn = eq->eqn; | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out->hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out->hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_core_eq_query); | ||||
							
								
								
									
										185
									
								
								drivers/net/ethernet/mellanox/mlx5/core/fw.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										185
									
								
								drivers/net/ethernet/mellanox/mlx5/core/fw.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,185 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include <linux/module.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_cmd_query_adapter_mbox_out *out; | ||||
| 	struct mlx5_cmd_query_adapter_mbox_in in; | ||||
| 	int err; | ||||
| 
 | ||||
| 	out = kzalloc(sizeof(*out), GFP_KERNEL); | ||||
| 	if (!out) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); | ||||
| 	if (err) | ||||
| 		goto out_out; | ||||
| 
 | ||||
| 	if (out->hdr.status) { | ||||
| 		err = mlx5_cmd_status_to_err(&out->hdr); | ||||
| 		goto out_out; | ||||
| 	} | ||||
| 
 | ||||
| 	memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); | ||||
| 
 | ||||
| out_out: | ||||
| 	kfree(out); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
/* Run QUERY_HCA_CAP (opmod 1 = current capabilities) and unpack the
 * firmware capability fields into @caps, then fetch the reserved lkey
 * via QUERY_SPECIAL_CONTEXTS.
 *
 * Returns 0 on success, -EINVAL if the device reports more ports than
 * the driver supports, or a negative errno / translated firmware status.
 */
int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
			   struct mlx5_caps *caps)
{
	struct mlx5_cmd_query_hca_cap_mbox_out *out;
	struct mlx5_cmd_query_hca_cap_mbox_in in;
	struct mlx5_query_special_ctxs_mbox_out ctx_out;
	struct mlx5_query_special_ctxs_mbox_in ctx_in;
	int err;
	u16 t16;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	memset(&in, 0, sizeof(in));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
	in.hdr.opmod  = cpu_to_be16(0x1);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
	if (err)
		goto out_out;

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_out;
	}

	/* most limits are encoded as log2 in masked bit fields */
	caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
	caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
	caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
	caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
	caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
	caps->flags = be64_to_cpu(out->hca_cap.flags);
	caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
	caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
	caps->num_ports = out->hca_cap.num_ports & 0xf;
	caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
	if (caps->num_ports > MLX5_MAX_PORTS) {
		mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
			      caps->num_ports, MLX5_MAX_PORTS);
		err = -EINVAL;
		goto out_out;
	}
	caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
	caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
	caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
	caps->log_max_mcg = out->hca_cap.log_max_mcg;
	caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
	/* bit 15 of bf_log_bf_reg_size flags BlueFlame support; low bits
	 * carry log2 of the BF register size
	 */
	t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
	if (t16 & 0x8000) {
		caps->bf_reg_size = 1 << (t16 & 0x1f);
		caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
	} else {
		caps->bf_reg_size = 0;
		caps->bf_regs_per_page = 0;
	}
	caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1);

	memset(&ctx_in, 0, sizeof(ctx_in));
	memset(&ctx_out, 0, sizeof(ctx_out));
	ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in),
				 &ctx_out, sizeof(ctx_out));
	if (err)
		goto out_out;

	if (ctx_out.hdr.status)
		err = mlx5_cmd_status_to_err(&ctx_out.hdr);

	/* NOTE(review): reserved_lkey is assigned even when the status
	 * above indicates failure (value is then zero-initialized data);
	 * confirm whether callers tolerate this.
	 */
	caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey);

out_out:
	kfree(out);

	return err;
}
| 
 | ||||
| int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_cmd_init_hca_mbox_in in; | ||||
| 	struct mlx5_cmd_init_hca_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_cmd_teardown_hca_mbox_in in; | ||||
| 	struct mlx5_cmd_teardown_hca_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
							
								
								
									
										227
									
								
								drivers/net/ethernet/mellanox/mlx5/core/health.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										227
									
								
								drivers/net/ethernet/mellanox/mlx5/core/health.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,227 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/random.h> | ||||
| #include <linux/vmalloc.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
/* Health poll tuning: base interval between samples of the firmware
 * health counter, and how many consecutive unchanged samples it takes
 * before the device is declared compromised (see poll_health()).
 */
enum {
	MLX5_HEALTH_POLL_INTERVAL	= 2 * HZ,
	MAX_MISSES			= 3,
};
| 
 | ||||
/* Health syndrome codes reported by firmware in the health buffer;
 * decoded to human-readable strings by hsynd_str().
 */
enum {
	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
};
| 
 | ||||
/* Protects health_list and reg_handler; taken from both the poll timer
 * callback and process context, hence the _irq variants at call sites. */
static DEFINE_SPINLOCK(health_lock);

/* Devices found unhealthy, queued for the health_care() work item. */
static LIST_HEAD(health_list);
static struct work_struct health_work;

/* Single, optional callback invoked when a device turns unhealthy. */
static health_handler_t reg_handler;
| int mlx5_register_health_report_handler(health_handler_t handler) | ||||
| { | ||||
| 	spin_lock_irq(&health_lock); | ||||
| 	if (reg_handler) { | ||||
| 		spin_unlock_irq(&health_lock); | ||||
| 		return -EEXIST; | ||||
| 	} | ||||
| 	reg_handler = handler; | ||||
| 	spin_unlock_irq(&health_lock); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_register_health_report_handler); | ||||
| 
 | ||||
/* Remove the installed health-report callback, if any.  Safe to call
 * even when no handler is registered. */
void mlx5_unregister_health_report_handler(void)
{
	spin_lock_irq(&health_lock);
	reg_handler = NULL;
	spin_unlock_irq(&health_lock);
}
EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
| 
 | ||||
| static void health_care(struct work_struct *work) | ||||
| { | ||||
| 	struct mlx5_core_health *health, *n; | ||||
| 	struct mlx5_core_dev *dev; | ||||
| 	struct mlx5_priv *priv; | ||||
| 	LIST_HEAD(tlist); | ||||
| 
 | ||||
| 	spin_lock_irq(&health_lock); | ||||
| 	list_splice_init(&health_list, &tlist); | ||||
| 
 | ||||
| 	spin_unlock_irq(&health_lock); | ||||
| 
 | ||||
| 	list_for_each_entry_safe(health, n, &tlist, list) { | ||||
| 		priv = container_of(health, struct mlx5_priv, health); | ||||
| 		dev = container_of(priv, struct mlx5_core_dev, priv); | ||||
| 		mlx5_core_warn(dev, "handling bad device here\n"); | ||||
| 		spin_lock_irq(&health_lock); | ||||
| 		if (reg_handler) | ||||
| 			reg_handler(dev->pdev, health->health, | ||||
| 				    sizeof(health->health)); | ||||
| 
 | ||||
| 		list_del_init(&health->list); | ||||
| 		spin_unlock_irq(&health_lock); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static const char *hsynd_str(u8 synd) | ||||
| { | ||||
| 	switch (synd) { | ||||
| 	case MLX5_HEALTH_SYNDR_FW_ERR: | ||||
| 		return "firmware internal error"; | ||||
| 	case MLX5_HEALTH_SYNDR_IRISC_ERR: | ||||
| 		return "irisc not responding"; | ||||
| 	case MLX5_HEALTH_SYNDR_CRC_ERR: | ||||
| 		return "firmware CRC error"; | ||||
| 	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: | ||||
| 		return "ICM fetch PCI error"; | ||||
| 	case MLX5_HEALTH_SYNDR_HW_FTL_ERR: | ||||
| 		return "HW fatal error\n"; | ||||
| 	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: | ||||
| 		return "async EQ buffer overrun"; | ||||
| 	case MLX5_HEALTH_SYNDR_EQ_ERR: | ||||
| 		return "EQ error"; | ||||
| 	case MLX5_HEALTH_SYNDR_FFSER_ERR: | ||||
| 		return "FFSER error"; | ||||
| 	default: | ||||
| 		return "unrecognized error"; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| static u16 read_be16(__be16 __iomem *p) | ||||
| { | ||||
| 	return swab16(readl((__force u16 __iomem *) p)); | ||||
| } | ||||
| 
 | ||||
/* Read a big-endian 32-bit value from device memory and return it in
 * host order (byte-swap of the raw 32-bit MMIO read).
 * NOTE(review): the unconditional swab32() presumably assumes a
 * little-endian host; ioread32be() would be endian-agnostic — confirm
 * before relying on this on big-endian machines.
 */
static u32 read_be32(__be32 __iomem *p)
{
	return swab32(readl((__force u32 __iomem *) p));
}
| 
 | ||||
/* Dump the device health buffer to the kernel log: firmware assert
 * details, firmware/hardware identification and the decoded health
 * syndrome.  Called when poll_health() detects a stalled device.
 */
static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct health_buffer __iomem *h = health->health;
	int i;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));

	pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
	pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra));
	pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver));
	pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id));
	pr_info("irisc_index %d\n", readb(&h->irisc_index));
	pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
	pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync));
}
| 
 | ||||
| static void poll_health(unsigned long data) | ||||
| { | ||||
| 	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; | ||||
| 	struct mlx5_core_health *health = &dev->priv.health; | ||||
| 	unsigned long next; | ||||
| 	u32 count; | ||||
| 
 | ||||
| 	count = ioread32be(health->health_counter); | ||||
| 	if (count == health->prev) | ||||
| 		++health->miss_counter; | ||||
| 	else | ||||
| 		health->miss_counter = 0; | ||||
| 
 | ||||
| 	health->prev = count; | ||||
| 	if (health->miss_counter == MAX_MISSES) { | ||||
| 		mlx5_core_err(dev, "device's health compromised\n"); | ||||
| 		print_health_info(dev); | ||||
| 		spin_lock_irq(&health_lock); | ||||
| 		list_add_tail(&health->list, &health_list); | ||||
| 		spin_unlock_irq(&health_lock); | ||||
| 
 | ||||
| 		queue_work(mlx5_core_wq, &health_work); | ||||
| 	} else { | ||||
| 		get_random_bytes(&next, sizeof(next)); | ||||
| 		next %= HZ; | ||||
| 		next += jiffies + MLX5_HEALTH_POLL_INTERVAL; | ||||
| 		mod_timer(&health->timer, next); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| void mlx5_start_health_poll(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_core_health *health = &dev->priv.health; | ||||
| 
 | ||||
| 	INIT_LIST_HEAD(&health->list); | ||||
| 	init_timer(&health->timer); | ||||
| 	health->health = &dev->iseg->health; | ||||
| 	health->health_counter = &dev->iseg->health_counter; | ||||
| 
 | ||||
| 	health->timer.data = (unsigned long)dev; | ||||
| 	health->timer.function = poll_health; | ||||
| 	health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); | ||||
| 	add_timer(&health->timer); | ||||
| } | ||||
| 
 | ||||
/* Stop health polling for @dev and make sure it is no longer queued for
 * the health worker.
 */
void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	/* After this returns the timer callback cannot be running. */
	del_timer_sync(&health->timer);

	/* The timer may have queued this device on health_list just before
	 * being stopped; unlink it so health_care() won't touch a device
	 * that is going away. */
	spin_lock_irq(&health_lock);
	if (!list_empty(&health->list))
		list_del_init(&health->list);
	spin_unlock_irq(&health_lock);
}
| 
 | ||||
/* Module-exit counterpart of mlx5_health_init().  Nothing to tear down
 * today; kept as a placeholder for symmetry. */
void mlx5_health_cleanup(void)
{
}
| 
 | ||||
/* One-time module initialization: prepare the deferred work item that
 * reports unhealthy devices (see health_care()). */
void  __init mlx5_health_init(void)
{
	INIT_WORK(&health_work, health_care);
}
							
								
								
									
										78
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mad.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										78
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mad.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,78 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, | ||||
| 		      u16 opmod, int port) | ||||
| { | ||||
| 	struct mlx5_mad_ifc_mbox_in *in = NULL; | ||||
| 	struct mlx5_mad_ifc_mbox_out *out = NULL; | ||||
| 	int err; | ||||
| 
 | ||||
| 	in = kzalloc(sizeof(*in), GFP_KERNEL); | ||||
| 	if (!in) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	out = kzalloc(sizeof(*out), GFP_KERNEL); | ||||
| 	if (!out) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC); | ||||
| 	in->hdr.opmod = cpu_to_be16(opmod); | ||||
| 	in->port = port; | ||||
| 
 | ||||
| 	memcpy(in->data, inb, sizeof(in->data)); | ||||
| 
 | ||||
| 	err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out)); | ||||
| 	if (err) | ||||
| 		goto out; | ||||
| 
 | ||||
| 	if (out->hdr.status) { | ||||
| 		err = mlx5_cmd_status_to_err(&out->hdr); | ||||
| 		goto out; | ||||
| 	} | ||||
| 
 | ||||
| 	memcpy(outb, out->data, sizeof(out->data)); | ||||
| 
 | ||||
| out: | ||||
| 	kfree(out); | ||||
| 	kfree(in); | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc); | ||||
							
								
								
									
										475
									
								
								drivers/net/ethernet/mellanox/mlx5/core/main.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										475
									
								
								drivers/net/ethernet/mellanox/mlx5/core/main.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,475 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <asm-generic/kmap_types.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/init.h> | ||||
| #include <linux/errno.h> | ||||
| #include <linux/pci.h> | ||||
| #include <linux/dma-mapping.h> | ||||
| #include <linux/slab.h> | ||||
| #include <linux/io-mapping.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cq.h> | ||||
| #include <linux/mlx5/qp.h> | ||||
| #include <linux/mlx5/srq.h> | ||||
| #include <linux/debugfs.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| #define DRIVER_NAME "mlx5_core" | ||||
| #define DRIVER_VERSION "1.0" | ||||
| #define DRIVER_RELDATE	"June 2013" | ||||
| 
 | ||||
| MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); | ||||
| MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); | ||||
| MODULE_LICENSE("Dual BSD/GPL"); | ||||
| MODULE_VERSION(DRIVER_VERSION); | ||||
| 
 | ||||
/* Runtime-tunable debug mask, exposed as module parameter "debug_mask". */
int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

/* Single-threaded work queue shared by the driver (e.g. health_work). */
struct workqueue_struct *mlx5_core_wq;
| 
 | ||||
| static int set_dma_caps(struct pci_dev *pdev) | ||||
| { | ||||
| 	int err; | ||||
| 
 | ||||
| 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||||
| 	if (err) { | ||||
| 		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); | ||||
| 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||||
| 		if (err) { | ||||
| 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); | ||||
| 			return err; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||||
| 	if (err) { | ||||
| 		dev_warn(&pdev->dev, | ||||
| 			 "Warning: couldn't set 64-bit consistent PCI DMA mask.\n"); | ||||
| 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||||
| 		if (err) { | ||||
| 			dev_err(&pdev->dev, | ||||
| 				"Can't set consistent PCI DMA mask, aborting.\n"); | ||||
| 			return err; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static int request_bar(struct pci_dev *pdev) | ||||
| { | ||||
| 	int err = 0; | ||||
| 
 | ||||
| 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||||
| 		dev_err(&pdev->dev, "Missing registers BAR, aborting.\n"); | ||||
| 		return -ENODEV; | ||||
| 	} | ||||
| 
 | ||||
| 	err = pci_request_regions(pdev, DRIVER_NAME); | ||||
| 	if (err) | ||||
| 		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
/* Counterpart of request_bar(): release the claimed PCI regions. */
static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
| 
 | ||||
| static int mlx5_enable_msix(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_eq_table *table = &dev->priv.eq_table; | ||||
| 	int num_eqs = 1 << dev->caps.log_max_eq; | ||||
| 	int nvec; | ||||
| 	int err; | ||||
| 	int i; | ||||
| 
 | ||||
| 	nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; | ||||
| 	nvec = min_t(int, nvec, num_eqs); | ||||
| 	if (nvec <= MLX5_EQ_VEC_COMP_BASE) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); | ||||
| 	if (!table->msix_arr) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	for (i = 0; i < nvec; i++) | ||||
| 		table->msix_arr[i].entry = i; | ||||
| 
 | ||||
| retry: | ||||
| 	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; | ||||
| 	err = pci_enable_msix(dev->pdev, table->msix_arr, nvec); | ||||
| 	if (err <= 0) { | ||||
| 		return err; | ||||
| 	} else if (err > 2) { | ||||
| 		nvec = err; | ||||
| 		goto retry; | ||||
| 	} | ||||
| 
 | ||||
| 	mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
/* Undo mlx5_enable_msix(): disable MSI-X and free the entry array. */
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;

	pci_disable_msix(dev->pdev);
	kfree(table->msix_arr);
}
| 
 | ||||
/* 16-byte layout of the HOST_ENDIANNESS access register: 'he' carries
 * the host endianness selector, the remainder is reserved.
 * NOTE(review): "endianess" is a misspelling kept because the name is
 * used elsewhere in this file (set_hca_ctrl()). */
struct mlx5_reg_host_endianess {
	u8	he;
	u8      rsvd[15];
};
| 
 | ||||
/* Query the device's HCA capabilities, apply the adjustments requested
 * by the active profile (command-interface checksum, max QP count) plus
 * the kernel's UAR page size, and write them back with SET_HCA_CAP.
 * Returns 0 on success or a negative errno.
 */
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
	struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
	struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
	struct mlx5_cmd_set_hca_cap_mbox_out set_out;
	struct mlx5_profile *prof = dev->profile;
	u64 flags;
	int csum = 1;	/* checksum stays enabled unless the profile says otherwise */
	int err;

	/* The large mailboxes are heap-allocated; the small ones live on
	 * the stack. */
	memset(&query_ctx, 0, sizeof(query_ctx));
	query_out = kzalloc(sizeof(*query_out), GFP_KERNEL);
	if (!query_out)
		return -ENOMEM;

	set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
	if (!set_ctx) {
		err = -ENOMEM;
		goto query_ex;
	}

	/* NOTE(review): opmod 0x1 presumably selects the "current" (vs.
	 * maximum) capabilities - confirm against the command reference. */
	query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
	query_ctx.hdr.opmod  = cpu_to_be16(0x1);
	err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
				 query_out, sizeof(*query_out));
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err(&query_out->hdr);
	if (err) {
		mlx5_core_warn(dev, "query hca cap failed, %d\n", err);
		goto query_ex;
	}

	/* Start from what the device reported, then tweak. */
	memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
	       sizeof(set_ctx->hca_cap));

	if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
		csum = !!prof->cmdif_csum;
		flags = be64_to_cpu(set_ctx->hca_cap.flags);
		if (csum)
			flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
		else
			flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;

		set_ctx->hca_cap.flags = cpu_to_be64(flags);
	}

	if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
		set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;

	memset(&set_out, 0, sizeof(set_out));
	/* UAR page size is encoded relative to 4K (2^12) pages. */
	set_ctx->hca_cap.uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
	set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
	err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
				 &set_out, sizeof(set_out));
	if (err) {
		mlx5_core_warn(dev, "set hca cap failed, %d\n", err);
		goto query_ex;
	}

	err = mlx5_cmd_status_to_err(&set_out.hdr);
	if (err)
		goto query_ex;

	/* Remember locally that command checksumming is off. */
	if (!csum)
		dev->cmd.checksum_disabled = 1;

query_ex:
	kfree(query_out);
	kfree(set_ctx);

	return err;
}
| 
 | ||||
| static int set_hca_ctrl(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_reg_host_endianess he_in; | ||||
| 	struct mlx5_reg_host_endianess he_out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&he_in, 0, sizeof(he_in)); | ||||
| 	he_in.he = MLX5_SET_HOST_ENDIANNESS; | ||||
| 	err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in), | ||||
| 					&he_out, sizeof(he_out), | ||||
| 					MLX5_REG_HOST_ENDIANNESS, 0, 1); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
/* Bring up an mlx5 device end to end: PCI enable and BAR mapping,
 * command interface, firmware initialization (endianness, caps, boot
 * pages, INIT_HCA), MSI-X, event queues, UARs and the CQ/QP/SRQ tables.
 * Every acquired resource is unwound in reverse order by the error
 * ladder at the bottom; keep the ladder in sync with the setup order.
 * Returns 0 on success or a negative errno.
 */
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->pdev = pdev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	/* strncpy() does not guarantee NUL termination. */
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	/* --- PCI bring-up --- */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	/* Map the initialization segment at the start of BAR 0. */
	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}
	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	/* --- Command interface and firmware initialization --- */
	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto err_unmap;
	}

	mlx5_pagealloc_init(dev);
	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto err_pagealloc_cleanup;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto err_pagealloc_cleanup;
	}

	/* Give firmware the pages it needs before INIT_HCA. */
	err = mlx5_satisfy_startup_pages(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate startup pages\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto err_reclaim_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_cmd_query_adapter(dev);
	if (err) {
		dev_err(&pdev->dev, "query adapter failed\n");
		goto err_stop_poll;
	}

	/* --- Interrupts, EQs, UARs --- */
	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	/* --- Resource tables --- */
	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);

	return 0;

	/* Error unwind: strict reverse order of the setup above. */
err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	mlx5_cmd_teardown_hca(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_reclaim_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

err_unmap:
	iounmap(dev->iseg);

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);

err_disable:
	pci_disable_device(dev->pdev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
EXPORT_SYMBOL(mlx5_dev_init);
| 
 | ||||
/* Tear down a device brought up by mlx5_dev_init(), releasing resources
 * in the reverse order they were acquired (mirrors the init error
 * ladder).
 */
void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	mlx5_cmd_teardown_hca(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	pci_disable_device(dev->pdev);
	debugfs_remove(priv->dbg_root);
}
EXPORT_SYMBOL(mlx5_dev_cleanup);
| 
 | ||||
| static int __init init(void) | ||||
| { | ||||
| 	int err; | ||||
| 
 | ||||
| 	mlx5_register_debugfs(); | ||||
| 	mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq"); | ||||
| 	if (!mlx5_core_wq) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto err_debug; | ||||
| 	} | ||||
| 	mlx5_health_init(); | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| 	mlx5_health_cleanup(); | ||||
| err_debug: | ||||
| 	mlx5_unregister_debugfs(); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
/* Module exit: tear down in reverse order of init(). */
static void __exit cleanup(void)
{
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
	mlx5_unregister_debugfs();
}
| 
 | ||||
| module_init(init); | ||||
| module_exit(cleanup); | ||||
							
								
								
									
										106
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mcg.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										106
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mcg.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,106 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include <rdma/ib_verbs.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
/* Mailbox layouts for the ATTACH_TO_MCG / DETACH_FROM_MCG firmware
 * commands.  Field order and sizes define the wire format consumed by
 * firmware and must not be changed.
 */
struct mlx5_attach_mcg_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;	/* QP number to attach (big endian) */
	__be32			rsvd;
	u8			gid[16];	/* multicast GID, raw 128-bit */
};

struct mlx5_attach_mcg_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvf[8];	/* reserved (name "rsvf" kept as-is) */
};

struct mlx5_detach_mcg_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			qpn;	/* QP number to detach (big endian) */
	__be32			rsvd;
	u8			gid[16];	/* multicast GID, raw 128-bit */
};

struct mlx5_detach_mcg_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvf[8];	/* reserved (name "rsvf" kept as-is) */
};
| 
 | ||||
| int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) | ||||
| { | ||||
| 	struct mlx5_attach_mcg_mbox_in in; | ||||
| 	struct mlx5_attach_mcg_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG); | ||||
| 	memcpy(in.gid, mgid, sizeof(*mgid)); | ||||
| 	in.qpn = cpu_to_be32(qpn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_attach_mcg); | ||||
| 
 | ||||
| int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) | ||||
| { | ||||
| 	struct mlx5_detach_mcg_mbox_in in; | ||||
| 	struct mlx5_detach_mcg_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); | ||||
| 	memcpy(in.gid, mgid, sizeof(*mgid)); | ||||
| 	in.qpn = cpu_to_be32(qpn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_detach_mcg); | ||||
							
								
								
									
										73
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										73
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,73 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
#ifndef __MLX5_CORE_H__
#define __MLX5_CORE_H__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Debug bitmask consulted by mlx5_core_dbg_mask().
 * NOTE(review): definition not visible in this chunk — presumably set via
 * a module parameter in main.c; confirm before documenting further.
 */
extern int mlx5_core_debug_mask;

/* Per-device debug print: prefixes every message with the device name,
 * function, line and current pid.
 */
#define mlx5_core_dbg(dev, format, arg...)				       \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,   \
	 current->pid, ##arg)

/* Like mlx5_core_dbg(), but only prints when one of the bits in @mask is
 * set in mlx5_core_debug_mask.
 */
#define mlx5_core_dbg_mask(dev, mask, format, arg...)			       \
do {									       \
	if ((mask) & mlx5_core_debug_mask)				       \
		pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name,       \
			 __func__, __LINE__, current->pid, ##arg);	       \
} while (0)

/* Per-device error print with the same prefix as mlx5_core_dbg(). */
#define mlx5_core_err(dev, format, arg...) \
pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,     \
	current->pid, ##arg)

/* Per-device warning print with the same prefix as mlx5_core_dbg(). */
#define mlx5_core_warn(dev, format, arg...) \
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,    \
	current->pid, ##arg)

/* Bit positions usable with mlx5_core_dbg_mask() for command tracing. */
enum {
	MLX5_CMD_DATA, /* print command payload only */
	MLX5_CMD_TIME, /* print command execution time */
};


/* HCA bring-up/teardown helpers implemented in the command layer. */
int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
			   struct mlx5_caps *caps);
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);

#endif /* __MLX5_CORE_H__ */
							
								
								
									
										136
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mr.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										136
									
								
								drivers/net/ethernet/mellanox/mlx5/core/mr.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,136 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | ||||
| 			  struct mlx5_create_mkey_mbox_in *in, int inlen) | ||||
| { | ||||
| 	struct mlx5_create_mkey_mbox_out out; | ||||
| 	int err; | ||||
| 	u8 key; | ||||
| 
 | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	spin_lock(&dev->priv.mkey_lock); | ||||
| 	key = dev->priv.mkey_key++; | ||||
| 	spin_unlock(&dev->priv.mkey_lock); | ||||
| 	in->seg.qpn_mkey7_0 |= cpu_to_be32(key); | ||||
| 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); | ||||
| 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | ||||
| 	if (err) { | ||||
| 		mlx5_core_dbg(dev, "cmd exec faile %d\n", err); | ||||
| 		return err; | ||||
| 	} | ||||
| 
 | ||||
| 	if (out.hdr.status) { | ||||
| 		mlx5_core_dbg(dev, "status %d\n", out.hdr.status); | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 	} | ||||
| 
 | ||||
| 	mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; | ||||
| 	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_create_mkey); | ||||
| 
 | ||||
| int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) | ||||
| { | ||||
| 	struct mlx5_destroy_mkey_mbox_in in; | ||||
| 	struct mlx5_destroy_mkey_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 
 | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); | ||||
| 	in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_destroy_mkey); | ||||
| 
 | ||||
| int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | ||||
| 			 struct mlx5_query_mkey_mbox_out *out, int outlen) | ||||
| { | ||||
| 	struct mlx5_destroy_mkey_mbox_in in; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(out, 0, outlen); | ||||
| 
 | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); | ||||
| 	in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out->hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out->hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_query_mkey); | ||||
| 
 | ||||
| int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, | ||||
| 			     u32 *mkey) | ||||
| { | ||||
| 	struct mlx5_query_special_ctxs_mbox_in in; | ||||
| 	struct mlx5_query_special_ctxs_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 
 | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	*mkey = be32_to_cpu(out.dump_fill_mkey); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); | ||||
							
								
								
									
										435
									
								
								drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										435
									
								
								drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,435 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <asm-generic/kmap_types.h> | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
/* Opmod values for the MANAGE_PAGES command. */
enum {
	MLX5_PAGES_CANT_GIVE	= 0,	/* notify FW we failed to supply pages */
	MLX5_PAGES_GIVE		= 1,	/* supply pages to firmware */
	MLX5_PAGES_TAKE		= 2	/* reclaim pages from firmware */
};

/* Deferred page request queued from the event handler onto priv.pg_wq.
 * npages > 0: give that many pages; npages < 0: reclaim -npages pages.
 */
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u32	func_id;
	s16	npages;
	struct work_struct work;
};

/* One page handed to firmware, tracked in the priv.page_root rbtree,
 * keyed by its DMA address.
 */
struct fw_page {
	struct rb_node	rb_node;
	u64		addr;		/* DMA address (rbtree key) */
	struct page	*page;
	u16		func_id;	/* owning function id */
};

/* Mailbox layouts below define the firmware wire format — do not
 * reorder or resize fields.
 */
struct mlx5_query_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_query_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	u8			reserved[2];
	__be16			func_id;
	__be16			init_pages;	/* pages needed at init time */
	__be16			num_pages;	/* pages currently requested */
};

struct mlx5_manage_pages_inbox {
	struct mlx5_inbox_hdr	hdr;
	__be16			rsvd0;
	__be16			func_id;
	__be16			rsvd1;
	__be16			num_entries;	/* count of pas[] entries */
	u8			rsvd2[16];
	__be64			pas[0];		/* page DMA addresses */
};

struct mlx5_manage_pages_outbox {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			num_entries;	/* pages actually returned */
	u8			rsvd1[20];
	__be64			pas[0];		/* reclaimed page DMA addresses */
};
| 
 | ||||
/* Track a page given to firmware: insert (addr -> page, func_id) into
 * the priv.page_root rbtree.
 *
 * Note the comparison direction is mirrored relative to the usual rbtree
 * example (smaller keys go right); this is consistent with the search in
 * remove_page(), so lookups work — keep the two in sync.
 *
 * Returns 0 on success, -EEXIST if @addr is already tracked, -ENOMEM on
 * allocation failure.
 */
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;

	/* Descend to the insertion point, remembering the parent link. */
	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);

	return 0;
}
| 
 | ||||
/* Stop tracking the page whose DMA address is @addr: erase its node from
 * priv.page_root, free the tracking struct, and return the struct page
 * so the caller can unmap and free it.  Returns NULL if @addr is not
 * tracked.
 *
 * The comparison direction matches insert_page()'s mirrored ordering —
 * keep the two in sync.
 */
static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			rb_erase(&tfp->rb_node, root);
			result = tfp->page;
			kfree(tfp);
			break;
		}
	}

	return result;
}
| 
 | ||||
| static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | ||||
| 				s16 *pages, s16 *init_pages) | ||||
| { | ||||
| 	struct mlx5_query_pages_inbox	in; | ||||
| 	struct mlx5_query_pages_outbox	out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	if (pages) | ||||
| 		*pages = be16_to_cpu(out.num_pages); | ||||
| 	if (init_pages) | ||||
| 		*init_pages = be16_to_cpu(out.init_pages); | ||||
| 	*func_id = be16_to_cpu(out.func_id); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, | ||||
| 		      int notify_fail) | ||||
| { | ||||
| 	struct mlx5_manage_pages_inbox *in; | ||||
| 	struct mlx5_manage_pages_outbox out; | ||||
| 	struct page *page; | ||||
| 	int inlen; | ||||
| 	u64 addr; | ||||
| 	int err; | ||||
| 	int i; | ||||
| 
 | ||||
| 	inlen = sizeof(*in) + npages * sizeof(in->pas[0]); | ||||
| 	in = mlx5_vzalloc(inlen); | ||||
| 	if (!in) { | ||||
| 		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); | ||||
| 		return -ENOMEM; | ||||
| 	} | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 
 | ||||
| 	for (i = 0; i < npages; i++) { | ||||
| 		page = alloc_page(GFP_HIGHUSER); | ||||
| 		if (!page) { | ||||
| 			err = -ENOMEM; | ||||
| 			mlx5_core_warn(dev, "failed to allocate page\n"); | ||||
| 			goto out_alloc; | ||||
| 		} | ||||
| 		addr = dma_map_page(&dev->pdev->dev, page, 0, | ||||
| 				    PAGE_SIZE, DMA_BIDIRECTIONAL); | ||||
| 		if (dma_mapping_error(&dev->pdev->dev, addr)) { | ||||
| 			mlx5_core_warn(dev, "failed dma mapping page\n"); | ||||
| 			__free_page(page); | ||||
| 			err = -ENOMEM; | ||||
| 			goto out_alloc; | ||||
| 		} | ||||
| 		err = insert_page(dev, addr, page, func_id); | ||||
| 		if (err) { | ||||
| 			mlx5_core_err(dev, "failed to track allocated page\n"); | ||||
| 			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||||
| 			__free_page(page); | ||||
| 			err = -ENOMEM; | ||||
| 			goto out_alloc; | ||||
| 		} | ||||
| 		in->pas[i] = cpu_to_be64(addr); | ||||
| 	} | ||||
| 
 | ||||
| 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | ||||
| 	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); | ||||
| 	in->func_id = cpu_to_be16(func_id); | ||||
| 	in->num_entries = cpu_to_be16(npages); | ||||
| 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | ||||
| 	mlx5_core_dbg(dev, "err %d\n", err); | ||||
| 	if (err) { | ||||
| 		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); | ||||
| 		goto out_alloc; | ||||
| 	} | ||||
| 	dev->priv.fw_pages += npages; | ||||
| 
 | ||||
| 	if (out.hdr.status) { | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 		if (err) { | ||||
| 			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); | ||||
| 			goto out_alloc; | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	mlx5_core_dbg(dev, "err %d\n", err); | ||||
| 
 | ||||
| 	goto out_free; | ||||
| 
 | ||||
| out_alloc: | ||||
| 	if (notify_fail) { | ||||
| 		memset(in, 0, inlen); | ||||
| 		memset(&out, 0, sizeof(out)); | ||||
| 		in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | ||||
| 		in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); | ||||
| 		if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out))) | ||||
| 			mlx5_core_warn(dev, "\n"); | ||||
| 	} | ||||
| 	for (i--; i >= 0; i--) { | ||||
| 		addr = be64_to_cpu(in->pas[i]); | ||||
| 		page = remove_page(dev, addr); | ||||
| 		if (!page) { | ||||
| 			mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n", | ||||
| 				      addr); | ||||
| 			continue; | ||||
| 		} | ||||
| 		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||||
| 		__free_page(page); | ||||
| 	} | ||||
| 
 | ||||
| out_free: | ||||
| 	mlx5_vfree(in); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | ||||
| 			 int *nclaimed) | ||||
| { | ||||
| 	struct mlx5_manage_pages_inbox   in; | ||||
| 	struct mlx5_manage_pages_outbox *out; | ||||
| 	struct page *page; | ||||
| 	int num_claimed; | ||||
| 	int outlen; | ||||
| 	u64 addr; | ||||
| 	int err; | ||||
| 	int i; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	outlen = sizeof(*out) + npages * sizeof(out->pas[0]); | ||||
| 	out = mlx5_vzalloc(outlen); | ||||
| 	if (!out) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | ||||
| 	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); | ||||
| 	in.func_id = cpu_to_be16(func_id); | ||||
| 	in.num_entries = cpu_to_be16(npages); | ||||
| 	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | ||||
| 	if (err) { | ||||
| 		mlx5_core_err(dev, "failed recliaming pages\n"); | ||||
| 		goto out_free; | ||||
| 	} | ||||
| 	dev->priv.fw_pages -= npages; | ||||
| 
 | ||||
| 	if (out->hdr.status) { | ||||
| 		err = mlx5_cmd_status_to_err(&out->hdr); | ||||
| 		goto out_free; | ||||
| 	} | ||||
| 
 | ||||
| 	num_claimed = be16_to_cpu(out->num_entries); | ||||
| 	if (nclaimed) | ||||
| 		*nclaimed = num_claimed; | ||||
| 
 | ||||
| 	for (i = 0; i < num_claimed; i++) { | ||||
| 		addr = be64_to_cpu(out->pas[i]); | ||||
| 		page = remove_page(dev, addr); | ||||
| 		if (!page) { | ||||
| 			mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr); | ||||
| 		} else { | ||||
| 			dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||||
| 			__free_page(page); | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| out_free: | ||||
| 	mlx5_vfree(out); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static void pages_work_handler(struct work_struct *work) | ||||
| { | ||||
| 	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); | ||||
| 	struct mlx5_core_dev *dev = req->dev; | ||||
| 	int err = 0; | ||||
| 
 | ||||
| 	if (req->npages < 0) | ||||
| 		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); | ||||
| 	else if (req->npages > 0) | ||||
| 		err = give_pages(dev, req->func_id, req->npages, 1); | ||||
| 
 | ||||
| 	if (err) | ||||
| 		mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? | ||||
| 			       "reclaim" : "give", err); | ||||
| 
 | ||||
| 	kfree(req); | ||||
| } | ||||
| 
 | ||||
| void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | ||||
| 				 s16 npages) | ||||
| { | ||||
| 	struct mlx5_pages_req *req; | ||||
| 
 | ||||
| 	req = kzalloc(sizeof(*req), GFP_ATOMIC); | ||||
| 	if (!req) { | ||||
| 		mlx5_core_warn(dev, "failed to allocate pages request\n"); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	req->dev = dev; | ||||
| 	req->func_id = func_id; | ||||
| 	req->npages = npages; | ||||
| 	INIT_WORK(&req->work, pages_work_handler); | ||||
| 	queue_work(dev->priv.pg_wq, &req->work); | ||||
| } | ||||
| 
 | ||||
| int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	s16 uninitialized_var(init_pages); | ||||
| 	u16 uninitialized_var(func_id); | ||||
| 	int err; | ||||
| 
 | ||||
| 	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id); | ||||
| 
 | ||||
| 	return give_pages(dev, func_id, init_pages, 0); | ||||
| } | ||||
| 
 | ||||
| static int optimal_reclaimed_pages(void) | ||||
| { | ||||
| 	struct mlx5_cmd_prot_block *block; | ||||
| 	struct mlx5_cmd_layout *lay; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	ret = (sizeof(lay->in) + sizeof(block->data) - | ||||
| 	       sizeof(struct mlx5_manage_pages_outbox)) / 8; | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	unsigned long end = jiffies + msecs_to_jiffies(5000); | ||||
| 	struct fw_page *fwp; | ||||
| 	struct rb_node *p; | ||||
| 	int err; | ||||
| 
 | ||||
| 	do { | ||||
| 		p = rb_first(&dev->priv.page_root); | ||||
| 		if (p) { | ||||
| 			fwp = rb_entry(p, struct fw_page, rb_node); | ||||
| 			err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); | ||||
| 			if (err) { | ||||
| 				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); | ||||
| 				return err; | ||||
| 			} | ||||
| 		} | ||||
| 		if (time_after(jiffies, end)) { | ||||
| 			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); | ||||
| 			break; | ||||
| 		} | ||||
| 	} while (p); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
/* Initialize the firmware-page tracking tree to empty. */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
}
| 
 | ||||
/* Counterpart of mlx5_pagealloc_init(); intentionally empty — pages are
 * expected to have been reclaimed via mlx5_reclaim_startup_pages() by the
 * time this runs.
 */
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}
| 
 | ||||
| int mlx5_pagealloc_start(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); | ||||
| 	if (!dev->priv.pg_wq) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
/* Flush and destroy the page-request workqueue created by
 * mlx5_pagealloc_start(); destroy_workqueue() drains pending work first.
 */
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}
							
								
								
									
										101
									
								
								drivers/net/ethernet/mellanox/mlx5/core/pd.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										101
									
								
								drivers/net/ethernet/mellanox/mlx5/core/pd.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,101 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
/* Mailbox layouts for the ALLOC_PD / DEALLOC_PD firmware commands.
 * Field order and sizes define the wire format — do not change.
 */
struct mlx5_alloc_pd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_alloc_pd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			pdn;	/* allocated PD number (low 24 bits used) */
	u8			rsvd[4];
};

struct mlx5_dealloc_pd_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			pdn;	/* PD number to free */
	u8			rsvd[4];
};

struct mlx5_dealloc_pd_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
| 
 | ||||
| int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) | ||||
| { | ||||
| 	struct mlx5_alloc_pd_mbox_in	in; | ||||
| 	struct mlx5_alloc_pd_mbox_out	out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	*pdn = be32_to_cpu(out.pdn) & 0xffffff; | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_alloc_pd); | ||||
| 
 | ||||
| int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) | ||||
| { | ||||
| 	struct mlx5_dealloc_pd_mbox_in	in; | ||||
| 	struct mlx5_dealloc_pd_mbox_out	out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD); | ||||
| 	in.pdn = cpu_to_be32(pdn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_dealloc_pd); | ||||
							
								
								
									
										104
									
								
								drivers/net/ethernet/mellanox/mlx5/core/port.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										104
									
								
								drivers/net/ethernet/mellanox/mlx5/core/port.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,104 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, | ||||
| 			 int size_in, void *data_out, int size_out, | ||||
| 			 u16 reg_num, int arg, int write) | ||||
| { | ||||
| 	struct mlx5_access_reg_mbox_in *in = NULL; | ||||
| 	struct mlx5_access_reg_mbox_out *out = NULL; | ||||
| 	int err = -ENOMEM; | ||||
| 
 | ||||
| 	in = mlx5_vzalloc(sizeof(*in) + size_in); | ||||
| 	if (!in) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	out = mlx5_vzalloc(sizeof(*out) + size_out); | ||||
| 	if (!out) | ||||
| 		goto ex1; | ||||
| 
 | ||||
| 	memcpy(in->data, data_in, size_in); | ||||
| 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG); | ||||
| 	in->hdr.opmod = cpu_to_be16(!write); | ||||
| 	in->arg = cpu_to_be32(arg); | ||||
| 	in->register_id = cpu_to_be16(reg_num); | ||||
| 	err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, | ||||
| 			    sizeof(out) + size_out); | ||||
| 	if (err) | ||||
| 		goto ex2; | ||||
| 
 | ||||
| 	if (out->hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out->hdr); | ||||
| 
 | ||||
| 	if (!err) | ||||
| 		memcpy(data_out, out->data, size_out); | ||||
| 
 | ||||
| ex2: | ||||
| 	mlx5_vfree(out); | ||||
| ex1: | ||||
| 	mlx5_vfree(in); | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_core_access_reg); | ||||
| 
 | ||||
| 
 | ||||
/* Layout of the PCAP (port capabilities) register payload. */
struct mlx5_reg_pcap {
	u8			rsvd0;
	u8			port_num;	/* physical port to operate on */
	u8			rsvd1[2];
	__be32			caps_127_96;	/* capability bits 127..96 */
	__be32			caps_95_64;
	__be32			caps_63_32;
	__be32			caps_31_0;
};
| 
 | ||||
| int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps) | ||||
| { | ||||
| 	struct mlx5_reg_pcap in; | ||||
| 	struct mlx5_reg_pcap out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	in.caps_127_96 = cpu_to_be32(caps); | ||||
| 	in.port_num = port_num; | ||||
| 
 | ||||
| 	err = mlx5_core_access_reg(dev, &in, sizeof(in), &out, | ||||
| 				   sizeof(out), MLX5_REG_PCAP, 0, 1); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_set_port_caps); | ||||
							
								
								
									
										301
									
								
								drivers/net/ethernet/mellanox/mlx5/core/qp.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										301
									
								
								drivers/net/ethernet/mellanox/mlx5/core/qp.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,301 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| 
 | ||||
| #include <linux/gfp.h> | ||||
| #include <linux/export.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include <linux/mlx5/qp.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| 
 | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type) | ||||
| { | ||||
| 	struct mlx5_qp_table *table = &dev->priv.qp_table; | ||||
| 	struct mlx5_core_qp *qp; | ||||
| 
 | ||||
| 	spin_lock(&table->lock); | ||||
| 
 | ||||
| 	qp = radix_tree_lookup(&table->tree, qpn); | ||||
| 	if (qp) | ||||
| 		atomic_inc(&qp->refcount); | ||||
| 
 | ||||
| 	spin_unlock(&table->lock); | ||||
| 
 | ||||
| 	if (!qp) { | ||||
| 		mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	qp->event(qp, event_type); | ||||
| 
 | ||||
| 	if (atomic_dec_and_test(&qp->refcount)) | ||||
| 		complete(&qp->free); | ||||
| } | ||||
| 
 | ||||
| int mlx5_core_create_qp(struct mlx5_core_dev *dev, | ||||
| 			struct mlx5_core_qp *qp, | ||||
| 			struct mlx5_create_qp_mbox_in *in, | ||||
| 			int inlen) | ||||
| { | ||||
| 	struct mlx5_qp_table *table = &dev->priv.qp_table; | ||||
| 	struct mlx5_create_qp_mbox_out out; | ||||
| 	struct mlx5_destroy_qp_mbox_in din; | ||||
| 	struct mlx5_destroy_qp_mbox_out dout; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&dout, 0, sizeof(dout)); | ||||
| 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); | ||||
| 
 | ||||
| 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | ||||
| 	if (err) { | ||||
| 		mlx5_core_warn(dev, "ret %d", err); | ||||
| 		return err; | ||||
| 	} | ||||
| 
 | ||||
| 	if (out.hdr.status) { | ||||
| 		pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps)); | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 	} | ||||
| 
 | ||||
| 	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; | ||||
| 	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); | ||||
| 
 | ||||
| 	spin_lock_irq(&table->lock); | ||||
| 	err = radix_tree_insert(&table->tree, qp->qpn, qp); | ||||
| 	spin_unlock_irq(&table->lock); | ||||
| 	if (err) { | ||||
| 		mlx5_core_warn(dev, "err %d", err); | ||||
| 		goto err_cmd; | ||||
| 	} | ||||
| 
 | ||||
| 	err = mlx5_debug_qp_add(dev, qp); | ||||
| 	if (err) | ||||
| 		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", | ||||
| 			      qp->qpn); | ||||
| 
 | ||||
| 	qp->pid = current->pid; | ||||
| 	atomic_set(&qp->refcount, 1); | ||||
| 	atomic_inc(&dev->num_qps); | ||||
| 	init_completion(&qp->free); | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| err_cmd: | ||||
| 	memset(&din, 0, sizeof(din)); | ||||
| 	memset(&dout, 0, sizeof(dout)); | ||||
| 	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); | ||||
| 	din.qpn = cpu_to_be32(qp->qpn); | ||||
| 	mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_core_create_qp); | ||||
| 
 | ||||
/**
 * mlx5_core_destroy_qp - stop tracking a QP and destroy it in firmware
 * @dev:	mlx5 core device
 * @qp:		QP previously created with mlx5_core_create_qp()
 *
 * Teardown order matters: the QP is first removed from debugfs and
 * the radix tree so no new event handlers can find it, then the
 * creation reference is dropped and we wait for in-flight handlers
 * (which hold their own references) to finish, and only then is
 * DESTROY_QP issued.  Returns 0 or a negative error code.
 */
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	struct mlx5_destroy_qp_mbox_in in;
	struct mlx5_destroy_qp_mbox_out out;
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;
	int err;

	mlx5_debug_qp_remove(dev, qp);

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, qp->qpn);
	spin_unlock_irqrestore(&table->lock, flags);

	/* Drop the reference taken at creation; wait until every
	 * concurrent mlx5_qp_event() reference is gone. */
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
	in.qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
| 
 | ||||
/**
 * mlx5_core_qp_modify - transition a QP between states
 * @dev:	mlx5 core device
 * @cur_state:	current QP state
 * @new_state:	requested QP state
 * @in:		modify mailbox filled by the caller (opcode/qpn set here)
 * @sqd_event:	currently unused by this implementation
 * @qp:		the QP to modify
 *
 * The opcode for the firmware command depends on the (from, to) state
 * pair; unsupported transitions (zero table entries) yield -EINVAL.
 * Returns 0 or a negative error code.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp)
{
	/* optab[cur][new] is the firmware opcode for that transition;
	 * 0 marks an illegal transition. */
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
			[MLX5_QP_STATE_SQD]	= MLX5_CMD_OP_RTS2SQD_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQD2RTS_QP,
			[MLX5_QP_STATE_SQD]	= MLX5_CMD_OP_SQD2SQD_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		}
	};

	struct mlx5_modify_qp_mbox_out out;
	int err = 0;
	u16 op;

	/* Bounds-check before indexing; a zero entry means the
	 * transition is not supported. */
	if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
	    !optab[cur_state][new_state])
		return -EINVAL;

	memset(&out, 0, sizeof(out));
	op = optab[cur_state][new_state];
	in->hdr.opcode = cpu_to_be16(op);
	in->qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
| 
 | ||||
| void mlx5_init_qp_table(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_qp_table *table = &dev->priv.qp_table; | ||||
| 
 | ||||
| 	spin_lock_init(&table->lock); | ||||
| 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); | ||||
| 	mlx5_qp_debugfs_init(dev); | ||||
| } | ||||
| 
 | ||||
/* Tear down QP-table debugfs entries; the radix tree itself needs no
 * explicit cleanup here. */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
| 
 | ||||
| int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, | ||||
| 		       struct mlx5_query_qp_mbox_out *out, int outlen) | ||||
| { | ||||
| 	struct mlx5_query_qp_mbox_in in; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(out, 0, outlen); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); | ||||
| 	in.qpn = cpu_to_be32(qp->qpn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out->hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out->hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_core_qp_query); | ||||
| 
 | ||||
| int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) | ||||
| { | ||||
| 	struct mlx5_alloc_xrcd_mbox_in in; | ||||
| 	struct mlx5_alloc_xrcd_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 	else | ||||
| 		*xrcdn = be32_to_cpu(out.xrcdn); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); | ||||
| 
 | ||||
| int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) | ||||
| { | ||||
| 	struct mlx5_dealloc_xrcd_mbox_in in; | ||||
| 	struct mlx5_dealloc_xrcd_mbox_out out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); | ||||
| 	in.xrcdn = cpu_to_be32(xrcdn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); | ||||
							
								
								
									
										223
									
								
								drivers/net/ethernet/mellanox/mlx5/core/srq.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										223
									
								
								drivers/net/ethernet/mellanox/mlx5/core/srq.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,223 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include <linux/mlx5/srq.h> | ||||
| #include <rdma/ib_verbs.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
| void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) | ||||
| { | ||||
| 	struct mlx5_srq_table *table = &dev->priv.srq_table; | ||||
| 	struct mlx5_core_srq *srq; | ||||
| 
 | ||||
| 	spin_lock(&table->lock); | ||||
| 
 | ||||
| 	srq = radix_tree_lookup(&table->tree, srqn); | ||||
| 	if (srq) | ||||
| 		atomic_inc(&srq->refcount); | ||||
| 
 | ||||
| 	spin_unlock(&table->lock); | ||||
| 
 | ||||
| 	if (!srq) { | ||||
| 		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn); | ||||
| 		return; | ||||
| 	} | ||||
| 
 | ||||
| 	srq->event(srq, event_type); | ||||
| 
 | ||||
| 	if (atomic_dec_and_test(&srq->refcount)) | ||||
| 		complete(&srq->free); | ||||
| } | ||||
| 
 | ||||
| struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) | ||||
| { | ||||
| 	struct mlx5_srq_table *table = &dev->priv.srq_table; | ||||
| 	struct mlx5_core_srq *srq; | ||||
| 
 | ||||
| 	spin_lock(&table->lock); | ||||
| 
 | ||||
| 	srq = radix_tree_lookup(&table->tree, srqn); | ||||
| 	if (srq) | ||||
| 		atomic_inc(&srq->refcount); | ||||
| 
 | ||||
| 	spin_unlock(&table->lock); | ||||
| 
 | ||||
| 	return srq; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_get_srq); | ||||
| 
 | ||||
| int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | ||||
| 			 struct mlx5_create_srq_mbox_in *in, int inlen) | ||||
| { | ||||
| 	struct mlx5_create_srq_mbox_out out; | ||||
| 	struct mlx5_srq_table *table = &dev->priv.srq_table; | ||||
| 	struct mlx5_destroy_srq_mbox_in din; | ||||
| 	struct mlx5_destroy_srq_mbox_out dout; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); | ||||
| 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; | ||||
| 
 | ||||
| 	atomic_set(&srq->refcount, 1); | ||||
| 	init_completion(&srq->free); | ||||
| 
 | ||||
| 	spin_lock_irq(&table->lock); | ||||
| 	err = radix_tree_insert(&table->tree, srq->srqn, srq); | ||||
| 	spin_unlock_irq(&table->lock); | ||||
| 	if (err) { | ||||
| 		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); | ||||
| 		goto err_cmd; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| err_cmd: | ||||
| 	memset(&din, 0, sizeof(din)); | ||||
| 	memset(&dout, 0, sizeof(dout)); | ||||
| 	din.srqn = cpu_to_be32(srq->srqn); | ||||
| 	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); | ||||
| 	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_create_srq); | ||||
| 
 | ||||
/**
 * mlx5_core_destroy_srq - stop tracking an SRQ and destroy it in firmware
 * @dev:	mlx5 core device
 * @srq:	SRQ previously created with mlx5_core_create_srq()
 *
 * Removes the SRQ from the radix tree first so no new event handlers
 * can find it, issues DESTROY_SRQ, then drops the creation reference
 * and waits for any in-flight handlers before returning.  Returns 0
 * or a negative error code.
 */
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_destroy_srq_mbox_in in;
	struct mlx5_destroy_srq_mbox_out out;
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	/* Sanity check: the slot must have held exactly this SRQ. */
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
	in.srqn = cpu_to_be32(srq->srqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	/* Drop the creation reference and wait for concurrent
	 * mlx5_srq_event() users to finish. */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);
| 
 | ||||
| int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | ||||
| 			struct mlx5_query_srq_mbox_out *out) | ||||
| { | ||||
| 	struct mlx5_query_srq_mbox_in in; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(out, 0, sizeof(*out)); | ||||
| 
 | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); | ||||
| 	in.srqn = cpu_to_be32(srq->srqn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out->hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out->hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_query_srq); | ||||
| 
 | ||||
| int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | ||||
| 		      u16 lwm, int is_srq) | ||||
| { | ||||
| 	struct mlx5_arm_srq_mbox_in	in; | ||||
| 	struct mlx5_arm_srq_mbox_out	out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 
 | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); | ||||
| 	in.hdr.opmod = cpu_to_be16(!!is_srq); | ||||
| 	in.srqn = cpu_to_be32(srq->srqn); | ||||
| 	in.lwm = cpu_to_be16(lwm); | ||||
| 
 | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		return err; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		return mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_core_arm_srq); | ||||
| 
 | ||||
| void mlx5_init_srq_table(struct mlx5_core_dev *dev) | ||||
| { | ||||
| 	struct mlx5_srq_table *table = &dev->priv.srq_table; | ||||
| 
 | ||||
| 	spin_lock_init(&table->lock); | ||||
| 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); | ||||
| } | ||||
| 
 | ||||
/* No per-device SRQ-table state needs explicit teardown. */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}
							
								
								
									
										223
									
								
								drivers/net/ethernet/mellanox/mlx5/core/uar.c
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										223
									
								
								drivers/net/ethernet/mellanox/mlx5/core/uar.c
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,223 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/module.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| #include <linux/mlx5/cmd.h> | ||||
| #include "mlx5_core.h" | ||||
| 
 | ||||
enum {
	NUM_DRIVER_UARS		= 4,	/* UAR pages reserved for the driver */
	NUM_LOW_LAT_UUARS	= 4,	/* micro-UARs reserved for low latency */
};
| 
 | ||||
| 
 | ||||
/* ALLOC_UAR command input mailbox (header only). */
struct mlx5_alloc_uar_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};
| 
 | ||||
/* ALLOC_UAR command output mailbox. */
struct mlx5_alloc_uar_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			uarn;	/* new UAR index; low 24 bits valid */
	u8			rsvd[4];
};
| 
 | ||||
/* DEALLOC_UAR command input mailbox. */
struct mlx5_free_uar_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			uarn;	/* UAR index to free */
	u8			rsvd[4];
};
| 
 | ||||
/* DEALLOC_UAR command output mailbox (status only). */
struct mlx5_free_uar_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
| 
 | ||||
| int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) | ||||
| { | ||||
| 	struct mlx5_alloc_uar_mbox_in	in; | ||||
| 	struct mlx5_alloc_uar_mbox_out	out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	memset(&out, 0, sizeof(out)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		goto ex; | ||||
| 
 | ||||
| 	if (out.hdr.status) { | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 		goto ex; | ||||
| 	} | ||||
| 
 | ||||
| 	*uarn = be32_to_cpu(out.uarn) & 0xffffff; | ||||
| 
 | ||||
| ex: | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_cmd_alloc_uar); | ||||
| 
 | ||||
| int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) | ||||
| { | ||||
| 	struct mlx5_free_uar_mbox_in	in; | ||||
| 	struct mlx5_free_uar_mbox_out	out; | ||||
| 	int err; | ||||
| 
 | ||||
| 	memset(&in, 0, sizeof(in)); | ||||
| 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); | ||||
| 	in.uarn = cpu_to_be32(uarn); | ||||
| 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||||
| 	if (err) | ||||
| 		goto ex; | ||||
| 
 | ||||
| 	if (out.hdr.status) | ||||
| 		err = mlx5_cmd_status_to_err(&out.hdr); | ||||
| 
 | ||||
| ex: | ||||
| 	return err; | ||||
| } | ||||
| EXPORT_SYMBOL(mlx5_cmd_free_uar); | ||||
| 
 | ||||
| static int need_uuar_lock(int uuarn) | ||||
| { | ||||
| 	int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; | ||||
| 
 | ||||
| 	if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) | ||||
| { | ||||
| 	int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; | ||||
| 	struct mlx5_bf *bf; | ||||
| 	phys_addr_t addr; | ||||
| 	int err; | ||||
| 	int i; | ||||
| 
 | ||||
| 	uuari->num_uars = NUM_DRIVER_UARS; | ||||
| 	uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; | ||||
| 
 | ||||
| 	mutex_init(&uuari->lock); | ||||
| 	uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); | ||||
| 	if (!uuari->uars) | ||||
| 		return -ENOMEM; | ||||
| 
 | ||||
| 	uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); | ||||
| 	if (!uuari->bfs) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto out_uars; | ||||
| 	} | ||||
| 
 | ||||
| 	uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), | ||||
| 				GFP_KERNEL); | ||||
| 	if (!uuari->bitmap) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto out_bfs; | ||||
| 	} | ||||
| 
 | ||||
| 	uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); | ||||
| 	if (!uuari->count) { | ||||
| 		err = -ENOMEM; | ||||
| 		goto out_bitmap; | ||||
| 	} | ||||
| 
 | ||||
| 	for (i = 0; i < uuari->num_uars; i++) { | ||||
| 		err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); | ||||
| 		if (err) | ||||
| 			goto out_count; | ||||
| 
 | ||||
| 		addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); | ||||
| 		uuari->uars[i].map = ioremap(addr, PAGE_SIZE); | ||||
| 		if (!uuari->uars[i].map) { | ||||
| 			mlx5_cmd_free_uar(dev, uuari->uars[i].index); | ||||
| 			goto out_count; | ||||
| 		} | ||||
| 		mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", | ||||
| 			      uuari->uars[i].index, uuari->uars[i].map); | ||||
| 	} | ||||
| 
 | ||||
| 	for (i = 0; i < tot_uuars; i++) { | ||||
| 		bf = &uuari->bfs[i]; | ||||
| 
 | ||||
| 		bf->buf_size = dev->caps.bf_reg_size / 2; | ||||
| 		bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; | ||||
| 		bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; | ||||
| 		bf->reg = NULL; /* Add WC support */ | ||||
| 		bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size + | ||||
| 			MLX5_BF_OFFSET; | ||||
| 		bf->need_lock = need_uuar_lock(i); | ||||
| 		spin_lock_init(&bf->lock); | ||||
| 		spin_lock_init(&bf->lock32); | ||||
| 		bf->uuarn = i; | ||||
| 	} | ||||
| 
 | ||||
| 	return 0; | ||||
| 
 | ||||
| out_count: | ||||
| 	for (i--; i >= 0; i--) { | ||||
| 		iounmap(uuari->uars[i].map); | ||||
| 		mlx5_cmd_free_uar(dev, uuari->uars[i].index); | ||||
| 	} | ||||
| 	kfree(uuari->count); | ||||
| 
 | ||||
| out_bitmap: | ||||
| 	kfree(uuari->bitmap); | ||||
| 
 | ||||
| out_bfs: | ||||
| 	kfree(uuari->bfs); | ||||
| 
 | ||||
| out_uars: | ||||
| 	kfree(uuari->uars); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) | ||||
| { | ||||
| 	int i = uuari->num_uars; | ||||
| 
 | ||||
| 	for (i--; i >= 0; i--) { | ||||
| 		iounmap(uuari->uars[i].map); | ||||
| 		mlx5_cmd_free_uar(dev, uuari->uars[i].index); | ||||
| 	} | ||||
| 
 | ||||
| 	kfree(uuari->count); | ||||
| 	kfree(uuari->bitmap); | ||||
| 	kfree(uuari->bfs); | ||||
| 	kfree(uuari->uars); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
							
								
								
									
										51
									
								
								include/linux/mlx5/cmd.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										51
									
								
								include/linux/mlx5/cmd.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,51 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_CMD_H | ||||
| #define MLX5_CMD_H | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| 
 | ||||
/* Payload layout of the MANAGE_PAGES command: a 64-bit page pointer
 * followed by the entry count and owning function id.
 * NOTE(review): field meanings inferred from names only -- confirm
 * against the command implementation / PRM. */
struct manage_pages_layout {
	u64	ptr;
	u32	reserved;
	u16	num_entries;
	u16	func_id;
};


/* Immediate output of the ALLOC_UAR command: three reserved dwords
 * followed by the allocated UAR number. */
struct mlx5_cmd_alloc_uar_imm_out {
	u32	rsvd[3];
	u32	uarn;
};
| 
 | ||||
| #endif /* MLX5_CMD_H */ | ||||
							
								
								
									
										165
									
								
								include/linux/mlx5/cq.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										165
									
								
								include/linux/mlx5/cq.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,165 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_CORE_CQ_H | ||||
| #define MLX5_CORE_CQ_H | ||||
| 
 | ||||
| #include <rdma/ib_verbs.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| 
 | ||||
| 
 | ||||
/*
 * Driver-side state for one completion queue.
 *
 * set_ci_db and arm_db point into the CQ doorbell record and are
 * written by mlx5_cq_set_ci() and mlx5_cq_arm() below.  Only the low
 * 24 bits of cons_index and the low 2 bits of arm_sn are published to
 * the device (see those helpers).
 */
struct mlx5_core_cq {
	u32			cqn;		/* CQ number */
	int			cqe_sz;		/* CQE size in bytes (64/128, see cqe_sz_to_mlx_sz()) */
	__be32		       *set_ci_db;	/* consumer-index doorbell record entry */
	__be32		       *arm_db;		/* arm doorbell record entry */
	atomic_t		refcount;
	struct completion	free;		/* presumably completed on last ref drop -- TODO confirm */
	unsigned		vector;		/* completion vector */
	int			irqn;
	void (*comp)		(struct mlx5_core_cq *);	/* completion callback */
	void (*event)		(struct mlx5_core_cq *, enum mlx5_event);	/* async event callback */
	struct mlx5_uar	       *uar;		/* UAR used to ring the CQ doorbell */
	u32			cons_index;	/* software consumer index */
	unsigned		arm_sn;		/* arm sequence number (2 bits used) */
	struct mlx5_rsc_debug	*dbg;		/* debug bookkeeping -- presumably debugfs; confirm */
	int			pid;
};
| 
 | ||||
| 
 | ||||
/* Error syndromes reported in mlx5_err_cqe.syndrome. */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};

/* CQE types/opcodes and the CQE owner bit mask. */
enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 0xff, /* TBD */
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
};
| 
 | ||||
/* MODIFY_CQ operation selector.  NOTE(review): "RESEIZE" is a typo for
 * "RESIZE", but the identifier is part of this public header interface,
 * so it is left unchanged here. */
enum {
	MLX5_CQ_MODIFY_RESEIZE = 0,
	MLX5_CQ_MODIFY_MODER = 1,
	MLX5_CQ_MODIFY_MAPPING = 2,
};

/* Argument block for mlx5_core_modify_cq(); @type selects which member
 * of @params applies.  moder and mapping are empty placeholders
 * (empty structs are a GNU C extension). */
struct mlx5_cq_modify_params {
	int	type;
	union {
		struct {
			u32	page_offset;
			u8	log_cq_size;
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};
| 
 | ||||
| enum { | ||||
| 	CQE_SIZE_64 = 0, | ||||
| 	CQE_SIZE_128 = 1, | ||||
| }; | ||||
| 
 | ||||
| static inline int cqe_sz_to_mlx_sz(u8 size) | ||||
| { | ||||
| 	return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128; | ||||
| } | ||||
| 
 | ||||
| static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) | ||||
| { | ||||
| 	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); | ||||
| } | ||||
| 
 | ||||
/* Arm commands for mlx5_cq_arm(), encoded at bit 24, above the 24-bit
 * consumer index in the doorbell word: notify on any CQE vs. notify on
 * solicited CQEs only. */
enum {
	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,
	MLX5_CQ_DB_REQ_NOT		= 0 << 24
};
| 
 | ||||
/*
 * mlx5_cq_arm - request a completion notification on @cq.
 * @cq:            CQ to arm
 * @cmd:           MLX5_CQ_DB_REQ_NOT or MLX5_CQ_DB_REQ_NOT_SOL
 * @uar_page:      mapped UAR page the doorbell is rung through
 * @doorbell_lock: lock handed to mlx5_write64() for the MMIO write
 *
 * Combines the 2-bit arm sequence number (bits 31:28), @cmd and the
 * low 24 bits of the consumer index into one word, stores it in the
 * doorbell record in host memory, and only then rings the device
 * doorbell over MMIO.
 */
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       spinlock_t *doorbell_lock)
{
	__be32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;
	ci = cq->cons_index & 0xffffff;

	/* Update the arm entry of the doorbell record first. */
	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/* Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	/* Doorbell payload: the same arm word plus the CQ number. */
	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}
| 
 | ||||
/* CQ table setup/teardown for a device. */
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
/* Firmware commands: create, destroy, query and modify a CQ. */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_create_cq_mbox_in *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       struct mlx5_query_cq_mbox_out *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			int type, struct mlx5_cq_modify_params *params);
/* Debug bookkeeping hooks -- presumably debugfs; confirm at call sites. */
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 | ||||
| #endif /* MLX5_CORE_CQ_H */ | ||||
							
								
								
									
										893
									
								
								include/linux/mlx5/device.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										893
									
								
								include/linux/mlx5/device.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,893 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_DEVICE_H | ||||
| #define MLX5_DEVICE_H | ||||
| 
 | ||||
| #include <linux/types.h> | ||||
| #include <rdma/ib_verbs.h> | ||||
| 
 | ||||
/* Host-endianness flag value (0 for little endian, 0x80 for big
 * endian).  NOTE(review): the consuming field is not visible in this
 * header -- confirm usage at the call sites. */
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif
| 
 | ||||
/* Command interface geometry. */
enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

/* CQ hardware states. */
enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET	= 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

/* Memory-key access permission bits. */
enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

/* PCIe control bits (mkey_seg.pcie_control). */
enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

/* Memory-key translation access modes. */
enum {
	MLX5_ACCESS_MODE_PA	= 0,
	MLX5_ACCESS_MODE_MTT	= 1,
	MLX5_ACCESS_MODE_KLM	= 2
};

/* Memory-key flag bits.  NOTE(review): 1 << 31 left-shifts into the
 * sign bit of int, which is undefined behavior; 1u << 31 would be the
 * safe spelling. */
enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
	MLX5_MKEY_LEN64		= 1 << 31,
};

enum {
	MLX5_EN_RD	= (u64)1,
	MLX5_EN_WR	= (u64)2
};

/* UAR/BlueFlame geometry (used by the uuar allocator). */
enum {
	MLX5_BF_REGS_PER_PAGE	= 4,
	MLX5_MAX_UAR_PAGES	= 1 << 8,
	MLX5_MAX_UUARS		= MLX5_MAX_UAR_PAGES * MLX5_BF_REGS_PER_PAGE,
};

/* Field-select mask bits for memory-key modification via UMR. */
enum {
	MLX5_MKEY_MASK_LEN		= 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
	MLX5_MKEY_MASK_PD		= 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
	MLX5_MKEY_MASK_KEY		= 1ull << 13,
	MLX5_MKEY_MASK_QPN		= 1ull << 14,
	MLX5_MKEY_MASK_LR		= 1ull << 17,
	MLX5_MKEY_MASK_LW		= 1ull << 18,
	MLX5_MKEY_MASK_RR		= 1ull << 19,
	MLX5_MKEY_MASK_RW		= 1ull << 20,
	MLX5_MKEY_MASK_A		= 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
	MLX5_MKEY_MASK_FREE		= 1ull << 29,
};
 | ||||
/* Event types reported in mlx5_eqe.type. */
enum mlx5_event {
	MLX5_EVENT_TYPE_COMP		   = 0x0,

	/* QP/SRQ affiliated events. */
	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,

	/* Error events. */
	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,

	/* Device-wide events. */
	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,

	/* Command interface / page-request events. */
	MLX5_EVENT_TYPE_CMD		   = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,
};

/* Sub-types for MLX5_EVENT_TYPE_PORT_CHANGE. */
enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
};
| 
 | ||||
/* Device capability bits for the 64-bit mlx5_hca_cap.flags field. */
enum {
	MLX5_DEV_CAP_FLAG_RC		= 1LL <<  0,
	MLX5_DEV_CAP_FLAG_UC		= 1LL <<  1,
	MLX5_DEV_CAP_FLAG_UD		= 1LL <<  2,
	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
	MLX5_DEV_CAP_FLAG_SRQ		= 1LL <<  6,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
	MLX5_DEV_CAP_FLAG_RESIZE_SRQ	= 1LL << 32,
	MLX5_DEV_CAP_FLAG_REMOTE_FENCE	= 1LL << 38,
	MLX5_DEV_CAP_FLAG_TLP_HINTS	= 1LL << 39,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 41,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 1LL << 46,
};

/* Send WQE opcodes, receive-side opcodes and special CQE opcodes. */
enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_BIND_MW		= 0x18,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX5_RECV_OPCODE_SEND		= 0x01,
	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX5_CQE_OPCODE_ERROR		= 0x1e,
	MLX5_CQE_OPCODE_RESIZE		= 0x16,

	/* Signature/protection (PSV) opcodes. */
	MLX5_OPCODE_SET_PSV		= 0x20,
	MLX5_OPCODE_GET_PSV		= 0x21,
	MLX5_OPCODE_CHECK_PSV		= 0x22,
	MLX5_OPCODE_RGET_PSV		= 0x26,
	MLX5_OPCODE_RCHECK_PSV		= 0x27,

	MLX5_OPCODE_UMR			= 0x25,

};

/* SET_PORT selectors.  NOTE(review): meanings inferred from names --
 * confirm against the command implementation. */
enum {
	MLX5_SET_PORT_RESET_QKEY	= 0,
	MLX5_SET_PORT_GUID0		= 16,
	MLX5_SET_PORT_NODE_GUID		= 17,
	MLX5_SET_PORT_SYS_GUID		= 18,
	MLX5_SET_PORT_GID_TABLE		= 19,
	MLX5_SET_PORT_PKEY_TABLE	= 20,
};

enum {
	MLX5_MAX_PAGE_SHIFT		= 31
};
| 
 | ||||
/* Common header at the start of every command input mailbox. */
struct mlx5_inbox_hdr {
	__be16		opcode;
	u8		rsvd[4];
	__be16		opmod;
};

/* Common header at the start of every command output mailbox;
 * status/syndrome report the command result. */
struct mlx5_outbox_hdr {
	u8		status;
	u8		rsvd[3];
	__be32		syndrome;
};

/* QUERY_ADAPTER command mailboxes (vendor-specific data, PSID). */
struct mlx5_cmd_query_adapter_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_cmd_query_adapter_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[24];
	u8			intapin;
	u8			rsvd1[13];
	__be16			vsd_vendor_id;
	u8			vsd[208];
	u8			vsd_psid[16];
};
| 
 | ||||
/*
 * HCA capabilities as exchanged with firmware via QUERY_HCA_CAP /
 * SET_HCA_CAP (see the mailbox structs below).  Field offsets mirror
 * the firmware layout: reserved (rsvd*) fields are padding and must
 * not be removed or reordered.
 */
struct mlx5_hca_cap {
	u8	rsvd1[16];
	u8	log_max_srq_sz;
	u8	log_max_qp_sz;
	u8	rsvd2;
	u8	log_max_qp;
	u8	log_max_strq_sz;
	u8	log_max_srqs;
	u8	rsvd4[2];
	u8	rsvd5;
	u8	log_max_cq_sz;
	u8	rsvd6;
	u8	log_max_cq;
	u8	log_max_eq_sz;
	u8	log_max_mkey;
	u8	rsvd7;
	u8	log_max_eq;
	u8	max_indirection;
	u8	log_max_mrw_sz;
	u8	log_max_bsf_list_sz;
	u8	log_max_klm_list_sz;
	u8	rsvd_8_0;
	u8	log_max_ra_req_dc;
	u8	rsvd_8_1;
	u8	log_max_ra_res_dc;
	u8	rsvd9;
	u8	log_max_ra_req_qp;
	u8	rsvd10;
	u8	log_max_ra_res_qp;
	u8	rsvd11[4];
	__be16	max_qp_count;
	__be16	rsvd12;
	u8	rsvd13;
	u8	local_ca_ack_delay;
	u8	rsvd14;
	u8	num_ports;
	u8	log_max_msg;
	u8	rsvd15[3];
	__be16	stat_rate_support;
	u8	rsvd16[2];
	__be64	flags;		/* MLX5_DEV_CAP_FLAG_* bits */
	u8	rsvd17;
	u8	uar_sz;
	u8	rsvd18;
	u8	log_pg_sz;
	/* BlueFlame enable + log BF register size packed in one word --
	 * NOTE(review): exact bit split not visible here; confirm. */
	__be16	bf_log_bf_reg_size;
	u8	rsvd19[4];
	__be16	max_desc_sz_sq;
	u8	rsvd20[2];
	__be16	max_desc_sz_rq;
	u8	rsvd21[2];
	__be16	max_desc_sz_sq_dc;
	u8	rsvd22[4];
	__be16	max_qp_mcg;
	u8	rsvd23;
	u8	log_max_mcg;
	u8	rsvd24;
	u8	log_max_pd;
	u8	rsvd25;
	u8	log_max_xrcd;
	u8	rsvd26[40];
	__be32  uar_page_sz;
	u8	rsvd27[28];
	/* "msx" is presumably a typo for "max"; identifier kept as-is. */
	u8	log_msx_atomic_size_qp;
	u8	rsvd28[2];
	u8	log_msx_atomic_size_dc;
	u8	rsvd29[76];
};
| 
 | ||||
| 
 | ||||
/* QUERY_HCA_CAP command mailboxes. */
struct mlx5_cmd_query_hca_cap_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};


struct mlx5_cmd_query_hca_cap_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
	struct mlx5_hca_cap     hca_cap;
};


/* SET_HCA_CAP command mailboxes. */
struct mlx5_cmd_set_hca_cap_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
	struct mlx5_hca_cap     hca_cap;
};


struct mlx5_cmd_set_hca_cap_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};


/* INIT_HCA / TEARDOWN_HCA command mailboxes. */
struct mlx5_cmd_init_hca_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			profile;
	u8			rsvd1[4];
};

struct mlx5_cmd_init_hca_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

struct mlx5_cmd_teardown_hca_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[2];
	__be16			profile;
	u8			rsvd1[4];
};

struct mlx5_cmd_teardown_hca_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
| 
 | ||||
/* Layout of one command-queue entry: immediate in/out dwords, pointers
 * to chained data blocks, and token/signature/ownership bookkeeping.
 * NOTE(review): field semantics inferred from names -- confirm against
 * the command interface implementation. */
struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;
	u8		sig;
	u8		rsvd1;
	u8		status_own;
};


/* Health buffer embedded in the init segment; firmware reports assert
 * and error details here. */
struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rsvd2;
	u8		irisc_index;
	u8		synd;
	__be16		ext_sync;
};

/* Device initialization segment register layout (firmware revision,
 * command-queue setup/doorbell, health buffer, IEEE1588 clock,
 * clear-INTx).  Offsets are fixed by hardware; rsvd arrays pad them.
 * The uuar allocator above maps UAR pages relative to this segment's
 * base (dev->iseg_base). */
struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[121];
	struct health_buffer	health;
	__be32			rsvd2[884];
	__be32			health_counter;
	__be32			rsvd3[1023];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};
| 
 | ||||
/* Per-event payloads carried inside an EQE (selected by mlx5_eqe.type;
 * see union ev_data below). */

/* Completion event: number of the CQ that has new CQEs. */
struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

/* QP/SRQ affiliated event: the QP or SRQ number. */
struct mlx5_eqe_qp_srq {
	__be32	reserved[6];
	__be32	qp_srq_n;
};

/* CQ error event: CQ number plus error syndrome. */
struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

/* Placeholder payload (no data). */
struct mlx5_eqe_dropped_packet {
};

/* Port state change event: affected port number. */
struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

/* Command-completion event: bit vector of completed command slots. */
struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

/* Firmware page request: how many pages func_id needs (or returns). */
struct mlx5_eqe_page_req {
	u8		rsvd0[2];
	__be16		func_id;
	u8		rsvd1[2];
	__be16		num_pages;
	__be32		rsvd2[5];
};

/* 28-byte event payload, interpreted according to mlx5_eqe.type. */
union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_dropped_packet	dp;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
} __packed;

/* Event queue entry as written by hardware; __packed because the
 * layout is a wire format.  The owner byte is presumably the EQE
 * ownership toggle -- confirm against the EQ polling code. */
struct mlx5_eqe {
	u8		rsvd0;
	u8		type;		/* enum mlx5_event */
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;
| 
 | ||||
/* One 512-byte data block of a chained command scatter list, with a
 * pointer to the next block and integrity/token fields. */
struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

/* Error CQE layout: syndrome values are the MLX5_CQE_SYNDROME_*
 * constants defined in cq.h. */
struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

/* 64-byte completion queue entry.  op_own carries the CQE opcode and
 * the ownership bit -- NOTE(review): exact bit split is not visible in
 * this header; confirm against the CQ polling code. */
struct mlx5_cqe64 {
	u8		rsvd0[17];
	u8		ml_path;
	u8		rsvd20[4];
	__be16		slid;
	__be32		flags_rqpn;
	u8		rsvd28[4];
	__be32		srqn;
	__be32		imm_inval_pkey;
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be64		timestamp;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};
| 
 | ||||
/* SRQ WQE link segment: index of the next free WQE in the SRQ. */
struct mlx5_wqe_srq_next_seg {
	u8			rsvd0[2];
	__be16			next_wqe_index;
	u8			signature;
	u8			rsvd1[11];
};

/* First 64 bytes of a 128-byte CQE: either a GRH or inline data. */
union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

/* 128-byte CQE: extension half followed by the standard 64-byte CQE. */
struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};
| 
 | ||||
/* SRQ context as exchanged with firmware (CREATE/QUERY_SRQ). */
struct mlx5_srq_ctx {
	u8			state_log_sz;
	u8			rsvd0[3];
	__be32			flags_xrcd;
	__be32			pgoff_cqn;
	u8			rsvd1[4];
	u8			log_pg_sz;
	u8			rsvd2[7];
	__be32			pd;
	__be16			lwm;		/* limit watermark, see ARM_SRQ below */
	__be16			wqe_cnt;
	u8			rsvd3[8];
	__be64			db_record;
};

/* CREATE_SRQ mailboxes.  pas[0] is a zero-length trailing array of
 * page addresses (pre-C99 flexible-array idiom). */
struct mlx5_create_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_srqn;
	u8			rsvd0[4];
	struct mlx5_srq_ctx	ctx;
	u8			rsvd1[208];
	__be64			pas[0];
};

struct mlx5_create_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			srqn;
	u8			rsvd[4];
};

/* DESTROY_SRQ mailboxes. */
struct mlx5_destroy_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			srqn;
	u8			rsvd[4];
};

struct mlx5_destroy_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

/* QUERY_SRQ mailboxes. */
struct mlx5_query_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			srqn;
	u8			rsvd0[4];
};

struct mlx5_query_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
	struct mlx5_srq_ctx	ctx;
	u8			rsvd1[32];
	__be64			pas[0];
};

/* ARM_SRQ mailboxes: set the SRQ limit watermark. */
struct mlx5_arm_srq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			srqn;
	__be16			rsvd;
	__be16			lwm;
};

struct mlx5_arm_srq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};
| 
 | ||||
/* CQ context as exchanged with firmware (CREATE/QUERY_CQ). */
struct mlx5_cq_context {
	u8			status;
	u8			cqe_sz_flags;	/* CQE size encoding (see cqe_sz_to_mlx_sz) + flags */
	u8			st;		/* CQ state (MLX5_CQ_STATE_*) */
	u8			rsvd3;
	u8			rsvd4[6];
	__be16			page_offset;
	__be32			log_sz_usr_page;
	__be16			cq_period;	/* moderation period */
	__be16			cq_max_count;	/* moderation count */
	__be16			rsvd20;
	__be16			c_eqn;		/* EQ this CQ reports to */
	u8			log_pg_sz;
	u8			rsvd25[7];
	__be32			last_notified_index;
	__be32			solicit_producer_index;
	__be32			consumer_counter;
	__be32			producer_counter;
	u8			rsvd48[8];
	__be64			db_record_addr;
};

/* CREATE_CQ mailboxes; pas[0] is the trailing page-address list. */
struct mlx5_create_cq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_cqn;
	u8			rsvdx[4];
	struct mlx5_cq_context	ctx;
	u8			rsvd6[192];
	__be64			pas[0];
};

struct mlx5_create_cq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			cqn;
	u8			rsvd0[4];
};

/* DESTROY_CQ mailboxes. */
struct mlx5_destroy_cq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			cqn;
	u8			rsvd0[4];
};

struct mlx5_destroy_cq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
};

/* QUERY_CQ mailboxes. */
struct mlx5_query_cq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			cqn;
	u8			rsvd0[4];
};

struct mlx5_query_cq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[8];
	struct mlx5_cq_context	ctx;
	u8			rsvd6[16];
	__be64			pas[0];
};
| 
 | ||||
/* EQ context as exchanged with firmware.  NOTE(review): "page_pffset"
 * and "produser_counter" are typos (offset/producer) kept for source
 * compatibility. */
struct mlx5_eq_context {
	u8			status;
	u8			ec_oi;
	u8			st;
	u8			rsvd2[7];
	__be16			page_pffset;
	__be32			log_sz_usr_page;
	u8			rsvd3[7];
	u8			intr;
	u8			log_page_size;
	u8			rsvd4[15];
	__be32			consumer_counter;
	__be32			produser_counter;
	u8			rsvd5[16];
};

/* CREATE_EQ mailboxes: context, event mask and trailing page list. */
struct mlx5_create_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[3];
	u8			input_eqn;
	u8			rsvd1[4];
	struct mlx5_eq_context	ctx;
	u8			rsvd2[8];
	__be64			events_mask;
	u8			rsvd3[176];
	__be64			pas[0];
};

struct mlx5_create_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd0[3];
	u8			eq_number;
	u8			rsvd1[4];
};

/* DESTROY_EQ mailboxes. */
struct mlx5_destroy_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[3];
	u8			eqn;
	u8			rsvd1[4];
};

struct mlx5_destroy_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

/* MAP_EQ mailboxes: (un)map an event mask onto an EQ. */
struct mlx5_map_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be64			mask;
	u8			mu;
	u8			rsvd0[2];
	u8			eqn;
	u8			rsvd1[24];
};

struct mlx5_map_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

/* QUERY_EQ mailboxes. */
struct mlx5_query_eq_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd0[3];
	u8			eqn;
	u8			rsvd1[4];
};

struct mlx5_query_eq_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
	struct mlx5_eq_context	ctx;
};
| 
 | ||||
/* Memory key (MKey) segment as consumed by CREATE_MKEY and by UMR
 * work requests.  Layout is device ABI; do not reorder.
 */
struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that
	 * do not have translation
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;	/* bound QPN plus the low 8 bits (variant part) of the mkey, per the field name */
	u8		rsvd1[4];
	__be32		flags_pd;	/* access flags combined with the protection domain */
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;	/* translation-table size; presumably in 16-byte octwords -- confirm */
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};
| 
 | ||||
/* QUERY_SPECIAL_CONTEXTS: no input payload. */
struct mlx5_query_special_ctxs_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	u8			rsvd[8];
};

/* QUERY_SPECIAL_CONTEXTS output: well-known keys provided by firmware. */
struct mlx5_query_special_ctxs_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			dump_fill_mkey;
	__be32			reserved_lkey;
};

/* CREATE_MKEY input: mkey segment plus translation pages (PAS). */
struct mlx5_create_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			input_mkey_index;	/* requested mkey index */
	u8			rsvd0[4];
	struct mlx5_mkey_seg	seg;
	u8			rsvd1[16];
	__be32			xlat_oct_act_size;
	__be32			bsf_coto_act_size;	/* NOTE(review): likely a typo for "bsf_octo_act_size" (cf. bsfs_octo_size); renaming would break users */
	u8			rsvd2[168];
	__be64			pas[0];		/* page address list for the translation table */
};

/* CREATE_MKEY output: the mkey assigned by firmware. */
struct mlx5_create_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			mkey;
	u8			rsvd[4];
};

/* DESTROY_MKEY input: the mkey to destroy. */
struct mlx5_destroy_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			mkey;
	u8			rsvd[4];
};

struct mlx5_destroy_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
};

/* QUERY_MKEY input: the mkey to query. */
struct mlx5_query_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			mkey;
};

struct mlx5_query_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be64			pas[0];
};

/* Modify-mkey command: mkey plus new translation pages. */
struct mlx5_modify_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be32			mkey;
	__be64			pas[0];
};

struct mlx5_modify_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
};

/* Dump-fill mkey query: no input payload, returns the mkey. */
struct mlx5_dump_mkey_mbox_in {
	struct mlx5_inbox_hdr	hdr;
};

struct mlx5_dump_mkey_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	__be32			mkey;
};
| 
 | ||||
/* MAD_IFC input: tunnel a 256-byte MAD to the given port. */
struct mlx5_mad_ifc_mbox_in {
	struct mlx5_inbox_hdr	hdr;
	__be16			remote_lid;
	u8			rsvd0;
	u8			port;		/* 1-based physical port number */
	u8			rsvd1[4];
	u8			data[256];	/* the MAD itself */
};

/* MAD_IFC output: the 256-byte MAD response. */
struct mlx5_mad_ifc_mbox_out {
	struct mlx5_outbox_hdr	hdr;
	u8			rsvd[8];
	u8			data[256];
};

/* ACCESS_REG input: register id, argument, and variable-length payload. */
struct mlx5_access_reg_mbox_in {
	struct mlx5_inbox_hdr		hdr;
	u8				rsvd0[2];
	__be16				register_id;	/* one of the MLX5_REG_* ids */
	__be32				arg;
	__be32				data[0];
};

struct mlx5_access_reg_mbox_out {
	struct mlx5_outbox_hdr		hdr;
	u8				rsvd[8];
	__be32				data[0];
};

/* Attribute id for the extended port info MAD. */
#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
};
| 
 | ||||
| #endif /* MLX5_DEVICE_H */ | ||||
							
								
								
									
										79
									
								
								include/linux/mlx5/doorbell.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										79
									
								
								include/linux/mlx5/doorbell.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,79 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
#ifndef MLX5_DOORBELL_H
#define MLX5_DOORBELL_H

#define MLX5_BF_OFFSET	      0x800
#define MLX5_CQ_DOORBELL      0x20

#if BITS_PER_LONG == 64
/* Assume that we can just write a 64-bit doorbell atomically.  s390
 * actually doesn't have writeq() but S/390 systems don't even have
 * PCI so we won't worry about it.
 */

/* No locking is needed on 64-bit; the lock macros compile to nothing. */
#define MLX5_DECLARE_DOORBELL_LOCK(name)
#define MLX5_INIT_DOORBELL_LOCK(ptr)    do { } while (0)
#define MLX5_GET_DOORBELL_LOCK(ptr)      (NULL)

/*
 * Ring a doorbell with a single atomic 64-bit MMIO store.  val[] is
 * already laid out in device (big-endian) order, hence the __raw_
 * accessor which performs no byte swapping.  doorbell_lock is unused
 * here; it exists only so both configurations share one signature.
 */
static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
				spinlock_t *doorbell_lock)
{
	__raw_writeq(*(u64 *)val, dest);
}

#else

/* Just fall back to a spinlock to protect the doorbell if
 * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
 * MMIO writes.
 */

#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
#define MLX5_INIT_DOORBELL_LOCK(ptr)     spin_lock_init(ptr)
#define MLX5_GET_DOORBELL_LOCK(ptr)      (ptr)

/*
 * Ring a doorbell as two 32-bit stores serialized by doorbell_lock so
 * the device never sees a torn (half-written) value.  Interrupts are
 * disabled while holding the lock so an IRQ handler on this CPU
 * cannot interleave its own doorbell write.
 */
static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
				spinlock_t *doorbell_lock)
{
	unsigned long flags;

	spin_lock_irqsave(doorbell_lock, flags);
	__raw_writel((__force u32) val[0], dest);
	__raw_writel((__force u32) val[1], dest + 4);
	spin_unlock_irqrestore(doorbell_lock, flags);
}

#endif

#endif /* MLX5_DOORBELL_H */
							
								
								
									
										769
									
								
								include/linux/mlx5/driver.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										769
									
								
								include/linux/mlx5/driver.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,769 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_DRIVER_H | ||||
| #define MLX5_DRIVER_H | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/completion.h> | ||||
| #include <linux/pci.h> | ||||
| #include <linux/spinlock_types.h> | ||||
| #include <linux/semaphore.h> | ||||
| #include <linux/vmalloc.h> | ||||
| #include <linux/radix-tree.h> | ||||
| #include <linux/mlx5/device.h> | ||||
| #include <linux/mlx5/doorbell.h> | ||||
| 
 | ||||
enum {
	MLX5_BOARD_ID_LEN = 64,		/* size of mlx5_core_dev.board_id */
	MLX5_MAX_NAME_LEN = 16,		/* size of mlx5_priv.name */
};

enum {
	/* Commands must always complete; this very generous timeout
	 * (7200 * 1000 ms == two hours) is for the sake of bringup.
	 * NOTE(review): the previous comment said "one minute", which
	 * contradicted the value -- text fixed, value unchanged.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 7200 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,	/* size of mlx5_cmd.wq_name */
};

/* Command-queue entry ownership handshake values. */
enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

/* Special QP types. */
enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,
};

/* Fixed MSI-X vector assignment: pages, command and async EQs get
 * dedicated vectors; completion EQs start at COMP_BASE.
 */
enum {
	MLX5_EQ_VEC_PAGES	 = 0,
	MLX5_EQ_VEC_CMD		 = 1,
	MLX5_EQ_VEC_ASYNC	 = 2,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_EQ_NAME	= 20	/* size of mlx5_eq.name */
};

/* Atomic operand-size capability/mode encodings (value in bits 16+). */
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
| 
 | ||||
/* Firmware command opcodes, grouped by object class (0x1xx device,
 * 0x2xx mkey, 0x3xx EQ, 0x4xx CQ, 0x5xx QP, 0x6xx PSV, 0x7xx SRQ,
 * 0x8xx PD/UAR/MCG/XRCD).
 */
enum {
	MLX5_CMD_OP_QUERY_HCA_CAP		= 0x100,
	MLX5_CMD_OP_QUERY_ADAPTER		= 0x101,
	MLX5_CMD_OP_INIT_HCA			= 0x102,
	MLX5_CMD_OP_TEARDOWN_HCA		= 0x103,
	MLX5_CMD_OP_QUERY_PAGES			= 0x107,
	MLX5_CMD_OP_MANAGE_PAGES		= 0x108,
	MLX5_CMD_OP_SET_HCA_CAP			= 0x109,

	MLX5_CMD_OP_CREATE_MKEY			= 0x200,
	MLX5_CMD_OP_QUERY_MKEY			= 0x201,
	MLX5_CMD_OP_DESTROY_MKEY		= 0x202,
	MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS	= 0x203,

	MLX5_CMD_OP_CREATE_EQ			= 0x301,
	MLX5_CMD_OP_DESTROY_EQ			= 0x302,
	MLX5_CMD_OP_QUERY_EQ			= 0x303,

	MLX5_CMD_OP_CREATE_CQ			= 0x400,
	MLX5_CMD_OP_DESTROY_CQ			= 0x401,
	MLX5_CMD_OP_QUERY_CQ			= 0x402,
	MLX5_CMD_OP_MODIFY_CQ			= 0x403,

	MLX5_CMD_OP_CREATE_QP			= 0x500,
	MLX5_CMD_OP_DESTROY_QP			= 0x501,
	MLX5_CMD_OP_RST2INIT_QP			= 0x502,
	MLX5_CMD_OP_INIT2RTR_QP			= 0x503,
	MLX5_CMD_OP_RTR2RTS_QP			= 0x504,
	MLX5_CMD_OP_RTS2RTS_QP			= 0x505,
	MLX5_CMD_OP_SQERR2RTS_QP		= 0x506,
	MLX5_CMD_OP_2ERR_QP			= 0x507,
	MLX5_CMD_OP_RTS2SQD_QP			= 0x508,
	MLX5_CMD_OP_SQD2RTS_QP			= 0x509,
	MLX5_CMD_OP_2RST_QP			= 0x50a,
	MLX5_CMD_OP_QUERY_QP			= 0x50b,
	MLX5_CMD_OP_CONF_SQP			= 0x50c,
	MLX5_CMD_OP_MAD_IFC			= 0x50d,
	MLX5_CMD_OP_INIT2INIT_QP		= 0x50e,
	MLX5_CMD_OP_SUSPEND_QP			= 0x50f,
	MLX5_CMD_OP_UNSUSPEND_QP		= 0x510,
	MLX5_CMD_OP_SQD2SQD_QP			= 0x511,
	MLX5_CMD_OP_ALLOC_QP_COUNTER_SET	= 0x512,
	MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET	= 0x513,
	MLX5_CMD_OP_QUERY_QP_COUNTER_SET	= 0x514,

	MLX5_CMD_OP_CREATE_PSV			= 0x600,
	MLX5_CMD_OP_DESTROY_PSV			= 0x601,
	MLX5_CMD_OP_QUERY_PSV			= 0x602,
	MLX5_CMD_OP_QUERY_SIG_RULE_TABLE	= 0x603,
	MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE	= 0x604,

	MLX5_CMD_OP_CREATE_SRQ			= 0x700,
	MLX5_CMD_OP_DESTROY_SRQ			= 0x701,
	MLX5_CMD_OP_QUERY_SRQ			= 0x702,
	MLX5_CMD_OP_ARM_RQ			= 0x703,
	MLX5_CMD_OP_RESIZE_SRQ			= 0x704,

	MLX5_CMD_OP_ALLOC_PD			= 0x800,
	MLX5_CMD_OP_DEALLOC_PD			= 0x801,
	MLX5_CMD_OP_ALLOC_UAR			= 0x802,
	MLX5_CMD_OP_DEALLOC_UAR			= 0x803,

	MLX5_CMD_OP_ATTACH_TO_MCG		= 0x806,
	MLX5_CMD_OP_DETACH_FROM_MCG		= 0x807,


	MLX5_CMD_OP_ALLOC_XRCD			= 0x80e,
	MLX5_CMD_OP_DEALLOC_XRCD		= 0x80f,

	/* NOTE(review): ACCESS_REG (0x805) is listed out of numeric order
	 * here; value is correct, only the placement is odd.
	 */
	MLX5_CMD_OP_ACCESS_REG			= 0x805,
	MLX5_CMD_OP_MAX				= 0x810,	/* sizes mlx5_cmd.stats[]; keep above all opcodes */
};
| 
 | ||||
/* Register ids passed to the ACCESS_REG command (mlx5_core_access_reg). */
enum {
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PMLP		 = 0, /* TBD */
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
};

/* Resource kinds exposed through the debugfs helpers below. */
enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};
| 
 | ||||
/* One debugfs file describing a single field of a resource context. */
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

/* Debugfs view of one resource (QP/EQ/CQ); trailing flexible array of
 * per-field descriptors.
 */
struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;		/* the resource being inspected */
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};

/* Events forwarded to the interface driver via mlx5_core_dev.event. */
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};

/* Bookkeeping for micro-UAR (uuar) allocation across the UAR pages. */
struct mlx5_uuar_info {
	struct mlx5_uar	       *uars;
	int			num_uars;
	int			num_low_latency_uuars;
	unsigned long	       *bitmap;		/* which uuars are taken */
	unsigned int	       *count;		/* per-uuar user counts */
	struct mlx5_bf	       *bfs;

	/*
	 * protect uuar allocation data structs
	 */
	struct mutex		lock;
};

/* One blue-flame register mapping within a UAR. */
struct mlx5_bf {
	void __iomem	       *reg;
	void __iomem	       *regreg;	/* NOTE(review): purpose unclear -- name looks like a duplicated "reg"; confirm before use */
	int			buf_size;
	struct mlx5_uar	       *uar;
	unsigned long		offset;
	int			need_lock;	/* nonzero when this bf is shared and "lock" must be taken */
	/* protect blue flame buffer selection when needed
	 */
	spinlock_t		lock;

	/* serialize 64 bit writes when done as two 32 bit accesses
	 */
	spinlock_t		lock32;
	int			uuarn;		/* index of the owning uuar */
};
| 
 | ||||
/* First 16 bytes of a command, kept inline in the command entry. */
struct mlx5_cmd_first {
	__be32		data[4];
};

/* A command message: inline head plus a chain of DMA mailboxes for
 * payloads larger than the inline area.
 */
struct mlx5_cmd_msg {
	struct list_head		list;
	struct cache_ent	       *cache;	/* cache this message returns to, or NULL */
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;	/* head of the mailbox chain */
};

/* Debugfs knobs for injecting and inspecting raw commands. */
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

struct cache_ent {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
};

/* Two size classes of pre-allocated command messages. */
struct cmd_msg_cache {
	struct cache_ent	large;
	struct cache_ent	med;

};

/* Per-opcode latency accounting, exported via debugfs. */
struct mlx5_cmd_stats {
	u64		sum;	/* accumulated execution time */
	u64		n;	/* number of samples */
	struct dentry  *root;
	struct dentry  *avg;
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};
| 
 | ||||
/* State of the firmware command interface: the DMA'd command queue,
 * token/slot allocation, the events-vs-polling completion mode, the
 * message caches and per-opcode statistics.
 */
struct mlx5_cmd {
	void	       *cmd_buf;	/* command queue buffer (DMA coherent) */
	dma_addr_t	dma;
	u16		cmdif_rev;	/* command interface revision reported by firmware */
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;	/* slots usable by regular commands (rest reserved, e.g. for pages) */
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;		/* rolling token stamped into each command */
	unsigned long	bitmask;	/* free/busy bitmap of command slots */
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;		/* throttles regular commands to max_reg_cmds */
	struct semaphore pages_sem;	/* dedicated slot for page commands */
	int	mode;			/* events or polling completion mode */
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

/* Per-port table sizes discovered from firmware. */
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
};
| 
 | ||||
/* Device capabilities parsed from QUERY_HCA_CAP. */
struct mlx5_caps {
	u8	log_max_eq;
	u8	log_max_cq;
	u8	log_max_qp;
	u8	log_max_mkey;
	u8	log_max_pd;
	u8	log_max_srq;
	u32	max_cqes;
	int	max_wqes;
	int	max_sq_desc_sz;
	int	max_rq_desc_sz;
	u64	flags;
	u16	stat_rate_support;
	int	log_max_msg;
	int	num_ports;
	int	max_ra_res_qp;		/* max outstanding RDMA-read/atomic as responder */
	int	max_ra_req_qp;		/* max outstanding RDMA-read/atomic as requester */
	int	max_srq_wqes;
	int	bf_reg_size;		/* blue flame register size */
	int	bf_regs_per_page;
	struct mlx5_port_caps	port[MLX5_MAX_PORTS];
	u8			ext_port_cap[MLX5_MAX_PORTS];
	int	max_vf;
	u32	reserved_lkey;
	u8	local_ca_ack_delay;
	u8	log_max_mcg;
	u16	max_qp_mcg;
	int	min_page_sz;
};

/* One link in a command mailbox chain (buf is DMA coherent). */
struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

/* A driver buffer: either one physically contiguous chunk ("direct")
 * or a list of pages; see mlx5_buf_offset() for addressing.
 */
struct mlx5_buf {
	struct mlx5_buf_list	direct;
	struct mlx5_buf_list   *page_list;
	int			nbufs;		/* number of chunks; 1 means contiguous */
	int			npages;
	int			page_shift;
	int			size;
};
| 
 | ||||
/* Driver-side state of one event queue. */
struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	u32			cons_index;	/* consumer index, mirrored to the doorbell */
	struct mlx5_buf		buf;
	int			size;
	u8			irqn;		/* MSI-X vector index used by this EQ */
	u8			eqn;		/* EQ number assigned by firmware */
	int			nent;
	u64			mask;		/* event mask this EQ was mapped with */
	char			name[MLX5_MAX_EQ_NAME];
	struct list_head	list;		/* link on the completion-EQ list */
	int			index;
	struct mlx5_rsc_debug	*dbg;
};


/* Driver-side view of a memory region / mkey. */
struct mlx5_core_mr {
	u64			iova;
	u64			size;
	u32			key;	/* the mkey value */
	u32			pd;
	u32			access;	/* access flags */
};

/* Driver-side state of a shared receive queue. */
struct mlx5_core_srq {
	u32		srqn;
	int		max;
	int		max_gs;
	int		max_avail_gather;
	int		wqe_shift;
	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);	/* async event callback */

	/* refcount + completion let destroy wait for in-flight event
	 * handlers to drain.
	 */
	atomic_t		refcount;
	struct completion	free;
};

/* All EQs of a device: the three dedicated EQs plus the per-vector
 * completion EQs on comp_eq_head.
 */
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head       *comp_eq_head;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
	struct msix_entry	*msix_arr;
	int			num_comp_vectors;
	/* protect EQs list
	 */
	spinlock_t		lock;
};

/* One mapped User Access Region page. */
struct mlx5_uar {
	u32			index;		/* UAR index as allocated by firmware */
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *wc_map;		/* write-combining mapping (blue flame) */
	void __iomem	       *map;		/* regular uncached mapping */
};
| 
 | ||||
| 
 | ||||
/* Periodic firmware health polling state; "timer" samples
 * health_counter and miss_counter counts stalled reads.
 */
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	struct list_head		list;
	u32				prev;		/* last counter value seen */
	int				miss_counter;	/* consecutive polls without progress */
};

/* cqn -> CQ lookup for completion/event dispatch. */
struct mlx5_cq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

/* qpn -> QP lookup. */
struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

/* srqn -> SRQ lookup. */
struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};
| 
 | ||||
/* Private per-device driver state hung off mlx5_core_dev. */
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct mlx5_uuar_info	uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	/* pages stuff */
	struct workqueue_struct *pg_wq;		/* services firmware page requests */
	struct rb_root		page_root;	/* pages currently given to firmware */
	int			fw_pages;
	int			reg_pages;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;
	/* end: cq stuff */

	/* start: alloc stuff */
	struct mutex            pgdir_mutex;
	struct list_head        pgdir_list;	/* doorbell-record page directories */
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;	/* rolling variant byte mixed into new mkeys */
};

/* Top-level handle for one mlx5 PCI function. */
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_caps	caps;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;	/* mapped initialization segment (BAR 0) */
	/* callback into the interface driver (e.g. mlx5_ib) for async events */
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  void *data);
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	atomic_t		num_qps;
};
| 
 | ||||
/* A doorbell record: a cacheline-aligned slot within a pgdir page
 * (kernel use) or a user-registered page (userspace use).
 */
struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;	/* slot index within the page */
};

enum {
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,	/* doorbell records per page */
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,	/* entries in each completion EQ */
};

/* One page of doorbell records plus its allocation bitmap. */
struct mlx5_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

/* Completion callback for commands issued asynchronously. */
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

/* Tracks one in-flight firmware command from submission to completion. */
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	mlx5_cmd_cbk_t		callback;	/* NULL for synchronous callers */
	void		       *context;
	int idx;				/* command queue slot */
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;		/* the queue entry itself */
	int			ret;
	int			page_queue;	/* set when using the reserved page-command slot */
	u8			status;		/* firmware status byte */
	u8			token;
	struct timespec		ts1;		/* submit / complete timestamps for stats */
	struct timespec		ts2;
};

struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};
| 
 | ||||
| static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) | ||||
| { | ||||
| 	if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) | ||||
| 		return buf->direct.buf + offset; | ||||
| 	else | ||||
| 		return buf->page_list[offset >> PAGE_SHIFT].buf + | ||||
| 			(offset & (PAGE_SIZE - 1)); | ||||
| } | ||||
| 
 | ||||
extern struct workqueue_struct *mlx5_core_wq;

/* Describe one field of an ib_unpacked_* struct for MAD packing.
 * Note the sizeof operand: postfix "->" binds tighter than sizeof, so
 * this measures the field, not the pointer.
 */
#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

/* Mapping between a struct field and its bit position in the wire format. */
struct ib_field {
	size_t struct_offset_bytes;
	size_t struct_size_bytes;
	int    offset_bits;
	int    size_bits;
};
| 
 | ||||
/* Recover the mlx5_core_dev stored in the PCI device's drvdata. */
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

/* Firmware revision major: low 16 bits of iseg->fw_rev. */
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

/* Firmware revision minor: high 16 bits of iseg->fw_rev. */
static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

/* Firmware revision sub-minor: low 16 bits of cmdif_rev_fw_sub. */
static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

/* Command interface revision: high 16 bits of cmdif_rev_fw_sub. */
static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
| 
 | ||||
/* Zeroed allocation that prefers kmalloc and falls back to vmalloc
 * for sizes the page allocator cannot satisfy.  __GFP_NOWARN keeps the
 * expected kzalloc failure quiet.  Free with mlx5_vfree() only, since
 * the caller cannot know which allocator succeeded.
 */
static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vzalloc(size);
	return rtn;
}

/* Counterpart of mlx5_vzalloc(): picks vfree or kfree based on where
 * the pointer lives.  NULL is a no-op (kfree(NULL) is safe).
 */
static inline void mlx5_vfree(const void *addr)
{
	if (addr && is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
| 
 | ||||
/* --- device and command-interface lifecycle --- */
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
/* Synchronous command execution; in/out are the raw mailbox layouts. */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

/* --- UAR / micro-UAR allocation --- */
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);

/* --- health monitoring --- */
void mlx5_health_cleanup(void);
void  __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);

/* --- buffers and command mailboxes --- */
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
		   struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);

/* --- SRQ commands --- */
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);

/* --- MKey commands --- */
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			  struct mlx5_create_mkey_mbox_in *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			 struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			     u32 *mkey);

/* --- PD and MAD --- */
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
		      u16 opmod, int port);

/* --- firmware page accounting --- */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s16 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);

/* --- debugfs, EQ and event dispatch --- */
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);

/* --- multicast group attach/detach --- */
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
/* --- register access and port configuration --- */
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);

/* --- debugfs helpers and doorbell records --- */
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

/* --- health report handler registration and command debug --- */
typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
int mlx5_register_health_report_handler(health_handler_t handler);
void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
| 
 | ||||
| static inline u32 mlx5_mkey_to_idx(u32 mkey) | ||||
| { | ||||
| 	return mkey >> 8; | ||||
| } | ||||
| 
 | ||||
| static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) | ||||
| { | ||||
| 	return mkey_idx << 8; | ||||
| } | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0, | ||||
| 	MLX5_PROF_MASK_CMDIF_CSUM	= (u64)1 << 1, | ||||
| 	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 2, | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MAX_MR_CACHE_ENTRIES    = 16, | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_profile { | ||||
| 	u64	mask; | ||||
| 	u32	log_max_qp; | ||||
| 	int	cmdif_csum; | ||||
| 	struct { | ||||
| 		int	size; | ||||
| 		int	limit; | ||||
| 	} mr_cache[MAX_MR_CACHE_ENTRIES]; | ||||
| }; | ||||
| 
 | ||||
| #endif /* MLX5_DRIVER_H */ | ||||
							
								
								
									
										467
									
								
								include/linux/mlx5/qp.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										467
									
								
								include/linux/mlx5/qp.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,467 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_QP_H | ||||
| #define MLX5_QP_H | ||||
| 
 | ||||
| #include <linux/mlx5/device.h> | ||||
| #include <linux/mlx5/driver.h> | ||||
| 
 | ||||
| #define MLX5_INVALID_LKEY	0x100 | ||||
| 
 | ||||
| enum mlx5_qp_optpar { | ||||
| 	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0, | ||||
| 	MLX5_QP_OPTPAR_RRE			= 1 << 1, | ||||
| 	MLX5_QP_OPTPAR_RAE			= 1 << 2, | ||||
| 	MLX5_QP_OPTPAR_RWE			= 1 << 3, | ||||
| 	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4, | ||||
| 	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5, | ||||
| 	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6, | ||||
| 	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7, | ||||
| 	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8, | ||||
| 	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9, | ||||
| 	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10, | ||||
| 	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12, | ||||
| 	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13, | ||||
| 	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14, | ||||
| 	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16, | ||||
| 	MLX5_QP_OPTPAR_SRQN			= 1 << 18, | ||||
| 	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19, | ||||
| 	MLX5_QP_OPTPAR_DC_HS			= 1 << 20, | ||||
| 	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21, | ||||
| }; | ||||
| 
 | ||||
| enum mlx5_qp_state { | ||||
| 	MLX5_QP_STATE_RST			= 0, | ||||
| 	MLX5_QP_STATE_INIT			= 1, | ||||
| 	MLX5_QP_STATE_RTR			= 2, | ||||
| 	MLX5_QP_STATE_RTS			= 3, | ||||
| 	MLX5_QP_STATE_SQER			= 4, | ||||
| 	MLX5_QP_STATE_SQD			= 5, | ||||
| 	MLX5_QP_STATE_ERR			= 6, | ||||
| 	MLX5_QP_STATE_SQ_DRAINING		= 7, | ||||
| 	MLX5_QP_STATE_SUSPENDED			= 9, | ||||
| 	MLX5_QP_NUM_STATE | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_QP_ST_RC				= 0x0, | ||||
| 	MLX5_QP_ST_UC				= 0x1, | ||||
| 	MLX5_QP_ST_UD				= 0x2, | ||||
| 	MLX5_QP_ST_XRC				= 0x3, | ||||
| 	MLX5_QP_ST_MLX				= 0x4, | ||||
| 	MLX5_QP_ST_DCI				= 0x5, | ||||
| 	MLX5_QP_ST_DCT				= 0x6, | ||||
| 	MLX5_QP_ST_QP0				= 0x7, | ||||
| 	MLX5_QP_ST_QP1				= 0x8, | ||||
| 	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9, | ||||
| 	MLX5_QP_ST_RAW_IPV6			= 0xa, | ||||
| 	MLX5_QP_ST_SNIFFER			= 0xb, | ||||
| 	MLX5_QP_ST_SYNC_UMR			= 0xe, | ||||
| 	MLX5_QP_ST_PTP_1588			= 0xd, | ||||
| 	MLX5_QP_ST_REG_UMR			= 0xc, | ||||
| 	MLX5_QP_ST_MAX | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_QP_PM_MIGRATED			= 0x3, | ||||
| 	MLX5_QP_PM_ARMED			= 0x0, | ||||
| 	MLX5_QP_PM_REARM			= 0x1 | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_NON_ZERO_RQ	= 0 << 24, | ||||
| 	MLX5_SRQ_RQ		= 1 << 24, | ||||
| 	MLX5_CRQ_RQ		= 2 << 24, | ||||
| 	MLX5_ZERO_LEN_RQ	= 3 << 24 | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	/* params1 */ | ||||
| 	MLX5_QP_BIT_SRE				= 1 << 15, | ||||
| 	MLX5_QP_BIT_SWE				= 1 << 14, | ||||
| 	MLX5_QP_BIT_SAE				= 1 << 13, | ||||
| 	/* params2 */ | ||||
| 	MLX5_QP_BIT_RRE				= 1 << 15, | ||||
| 	MLX5_QP_BIT_RWE				= 1 << 14, | ||||
| 	MLX5_QP_BIT_RAE				= 1 << 13, | ||||
| 	MLX5_QP_BIT_RIC				= 1 <<	4, | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2, | ||||
| 	MLX5_WQE_CTRL_SOLICITED		= 1 << 1, | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_SEND_WQE_BB	= 64, | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27, | ||||
| 	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28, | ||||
| 	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29, | ||||
| 	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30, | ||||
| 	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31 | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_FENCE_MODE_NONE			= 0 << 5, | ||||
| 	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5, | ||||
| 	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5, | ||||
| 	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5, | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_QP_LAT_SENSITIVE	= 1 << 28, | ||||
| 	MLX5_QP_ENABLE_SIG	= 1 << 31, | ||||
| }; | ||||
| 
 | ||||
| enum { | ||||
| 	MLX5_RCV_DBR	= 0, | ||||
| 	MLX5_SND_DBR	= 1, | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_fmr_seg { | ||||
| 	__be32			flags; | ||||
| 	__be32			mem_key; | ||||
| 	__be64			buf_list; | ||||
| 	__be64			start_addr; | ||||
| 	__be64			reg_len; | ||||
| 	__be32			offset; | ||||
| 	__be32			page_size; | ||||
| 	u32			reserved[2]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_ctrl_seg { | ||||
| 	__be32			opmod_idx_opcode; | ||||
| 	__be32			qpn_ds; | ||||
| 	u8			signature; | ||||
| 	u8			rsvd[2]; | ||||
| 	u8			fm_ce_se; | ||||
| 	__be32			imm; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_xrc_seg { | ||||
| 	__be32			xrc_srqn; | ||||
| 	u8			rsvd[12]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_masked_atomic_seg { | ||||
| 	__be64			swap_add; | ||||
| 	__be64			compare; | ||||
| 	__be64			swap_add_mask; | ||||
| 	__be64			compare_mask; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_av { | ||||
| 	union { | ||||
| 		struct { | ||||
| 			__be32	qkey; | ||||
| 			__be32	reserved; | ||||
| 		} qkey; | ||||
| 		__be64	dc_key; | ||||
| 	} key; | ||||
| 	__be32	dqp_dct; | ||||
| 	u8	stat_rate_sl; | ||||
| 	u8	fl_mlid; | ||||
| 	__be16	rlid; | ||||
| 	u8	reserved0[10]; | ||||
| 	u8	tclass; | ||||
| 	u8	hop_limit; | ||||
| 	__be32	grh_gid_fl; | ||||
| 	u8	rgid[16]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_datagram_seg { | ||||
| 	struct mlx5_av	av; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_raddr_seg { | ||||
| 	__be64			raddr; | ||||
| 	__be32			rkey; | ||||
| 	u32			reserved; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_atomic_seg { | ||||
| 	__be64			swap_add; | ||||
| 	__be64			compare; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_data_seg { | ||||
| 	__be32			byte_count; | ||||
| 	__be32			lkey; | ||||
| 	__be64			addr; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_umr_ctrl_seg { | ||||
| 	u8		flags; | ||||
| 	u8		rsvd0[3]; | ||||
| 	__be16		klm_octowords; | ||||
| 	__be16		bsf_octowords; | ||||
| 	__be64		mkey_mask; | ||||
| 	u8		rsvd1[32]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_seg_set_psv { | ||||
| 	__be32		psv_num; | ||||
| 	__be16		syndrome; | ||||
| 	__be16		status; | ||||
| 	__be32		transient_sig; | ||||
| 	__be32		ref_tag; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_seg_get_psv { | ||||
| 	u8		rsvd[19]; | ||||
| 	u8		num_psv; | ||||
| 	__be32		l_key; | ||||
| 	__be64		va; | ||||
| 	__be32		psv_index[4]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_seg_check_psv { | ||||
| 	u8		rsvd0[2]; | ||||
| 	__be16		err_coalescing_op; | ||||
| 	u8		rsvd1[2]; | ||||
| 	__be16		xport_err_op; | ||||
| 	u8		rsvd2[2]; | ||||
| 	__be16		xport_err_mask; | ||||
| 	u8		rsvd3[7]; | ||||
| 	u8		num_psv; | ||||
| 	__be32		l_key; | ||||
| 	__be64		va; | ||||
| 	__be32		psv_index[4]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_rwqe_sig { | ||||
| 	u8	rsvd0[4]; | ||||
| 	u8	signature; | ||||
| 	u8	rsvd1[11]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_signature_seg { | ||||
| 	u8	rsvd0[4]; | ||||
| 	u8	signature; | ||||
| 	u8	rsvd1[11]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_wqe_inline_seg { | ||||
| 	__be32	byte_count; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_core_qp { | ||||
| 	void (*event)		(struct mlx5_core_qp *, int); | ||||
| 	int			qpn; | ||||
| 	atomic_t		refcount; | ||||
| 	struct completion	free; | ||||
| 	struct mlx5_rsc_debug	*dbg; | ||||
| 	int			pid; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_qp_path { | ||||
| 	u8			fl; | ||||
| 	u8			rsvd3; | ||||
| 	u8			free_ar; | ||||
| 	u8			pkey_index; | ||||
| 	u8			rsvd0; | ||||
| 	u8			grh_mlid; | ||||
| 	__be16			rlid; | ||||
| 	u8			ackto_lt; | ||||
| 	u8			mgid_index; | ||||
| 	u8			static_rate; | ||||
| 	u8			hop_limit; | ||||
| 	__be32			tclass_flowlabel; | ||||
| 	u8			rgid[16]; | ||||
| 	u8			rsvd1[4]; | ||||
| 	u8			sl; | ||||
| 	u8			port; | ||||
| 	u8			rsvd2[6]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_qp_context { | ||||
| 	__be32			flags; | ||||
| 	__be32			flags_pd; | ||||
| 	u8			mtu_msgmax; | ||||
| 	u8			rq_size_stride; | ||||
| 	__be16			sq_crq_size; | ||||
| 	__be32			qp_counter_set_usr_page; | ||||
| 	__be32			wire_qpn; | ||||
| 	__be32			log_pg_sz_remote_qpn; | ||||
| 	struct			mlx5_qp_path pri_path; | ||||
| 	struct			mlx5_qp_path alt_path; | ||||
| 	__be32			params1; | ||||
| 	u8			reserved2[4]; | ||||
| 	__be32			next_send_psn; | ||||
| 	__be32			cqn_send; | ||||
| 	u8			reserved3[8]; | ||||
| 	__be32			last_acked_psn; | ||||
| 	__be32			ssn; | ||||
| 	__be32			params2; | ||||
| 	__be32			rnr_nextrecvpsn; | ||||
| 	__be32			xrcd; | ||||
| 	__be32			cqn_recv; | ||||
| 	__be64			db_rec_addr; | ||||
| 	__be32			qkey; | ||||
| 	__be32			rq_type_srqn; | ||||
| 	__be32			rmsn; | ||||
| 	__be16			hw_sq_wqe_counter; | ||||
| 	__be16			sw_sq_wqe_counter; | ||||
| 	__be16			hw_rcyclic_byte_counter; | ||||
| 	__be16			hw_rq_counter; | ||||
| 	__be16			sw_rcyclic_byte_counter; | ||||
| 	__be16			sw_rq_counter; | ||||
| 	u8			rsvd0[5]; | ||||
| 	u8			cgs; | ||||
| 	u8			cs_req; | ||||
| 	u8			cs_res; | ||||
| 	__be64			dc_access_key; | ||||
| 	u8			rsvd1[24]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_create_qp_mbox_in { | ||||
| 	struct mlx5_inbox_hdr	hdr; | ||||
| 	__be32			input_qpn; | ||||
| 	u8			rsvd0[4]; | ||||
| 	__be32			opt_param_mask; | ||||
| 	u8			rsvd1[4]; | ||||
| 	struct mlx5_qp_context	ctx; | ||||
| 	u8			rsvd3[16]; | ||||
| 	__be64			pas[0]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_create_qp_mbox_out { | ||||
| 	struct mlx5_outbox_hdr	hdr; | ||||
| 	__be32			qpn; | ||||
| 	u8			rsvd0[4]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_destroy_qp_mbox_in { | ||||
| 	struct mlx5_inbox_hdr	hdr; | ||||
| 	__be32			qpn; | ||||
| 	u8			rsvd0[4]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_destroy_qp_mbox_out { | ||||
| 	struct mlx5_outbox_hdr	hdr; | ||||
| 	u8			rsvd0[8]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_modify_qp_mbox_in { | ||||
| 	struct mlx5_inbox_hdr	hdr; | ||||
| 	__be32			qpn; | ||||
| 	u8			rsvd1[4]; | ||||
| 	__be32			optparam; | ||||
| 	u8			rsvd0[4]; | ||||
| 	struct mlx5_qp_context	ctx; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_modify_qp_mbox_out { | ||||
| 	struct mlx5_outbox_hdr	hdr; | ||||
| 	u8			rsvd0[8]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_query_qp_mbox_in { | ||||
| 	struct mlx5_inbox_hdr	hdr; | ||||
| 	__be32			qpn; | ||||
| 	u8			rsvd[4]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_query_qp_mbox_out { | ||||
| 	struct mlx5_outbox_hdr	hdr; | ||||
| 	u8			rsvd1[8]; | ||||
| 	__be32			optparam; | ||||
| 	u8			rsvd0[4]; | ||||
| 	struct mlx5_qp_context	ctx; | ||||
| 	u8			rsvd2[16]; | ||||
| 	__be64			pas[0]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_conf_sqp_mbox_in { | ||||
| 	struct mlx5_inbox_hdr	hdr; | ||||
| 	__be32			qpn; | ||||
| 	u8			rsvd[3]; | ||||
| 	u8			type; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_conf_sqp_mbox_out { | ||||
| 	struct mlx5_outbox_hdr	hdr; | ||||
| 	u8			rsvd[8]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_alloc_xrcd_mbox_in { | ||||
| 	struct mlx5_inbox_hdr	hdr; | ||||
| 	u8			rsvd[8]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_alloc_xrcd_mbox_out { | ||||
| 	struct mlx5_outbox_hdr	hdr; | ||||
| 	__be32			xrcdn; | ||||
| 	u8			rsvd[4]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_dealloc_xrcd_mbox_in { | ||||
| 	struct mlx5_inbox_hdr	hdr; | ||||
| 	__be32			xrcdn; | ||||
| 	u8			rsvd[4]; | ||||
| }; | ||||
| 
 | ||||
| struct mlx5_dealloc_xrcd_mbox_out { | ||||
| 	struct mlx5_outbox_hdr	hdr; | ||||
| 	u8			rsvd[8]; | ||||
| }; | ||||
| 
 | ||||
| static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) | ||||
| { | ||||
| 	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); | ||||
| } | ||||
| 
 | ||||
| int mlx5_core_create_qp(struct mlx5_core_dev *dev, | ||||
| 			struct mlx5_core_qp *qp, | ||||
| 			struct mlx5_create_qp_mbox_in *in, | ||||
| 			int inlen); | ||||
| int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, | ||||
| 			enum mlx5_qp_state new_state, | ||||
| 			struct mlx5_modify_qp_mbox_in *in, int sqd_event, | ||||
| 			struct mlx5_core_qp *qp); | ||||
| int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, | ||||
| 			 struct mlx5_core_qp *qp); | ||||
| int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, | ||||
| 		       struct mlx5_query_qp_mbox_out *out, int outlen); | ||||
| 
 | ||||
| int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); | ||||
| int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); | ||||
| void mlx5_init_qp_table(struct mlx5_core_dev *dev); | ||||
| void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); | ||||
| int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); | ||||
| void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); | ||||
| 
 | ||||
| #endif /* MLX5_QP_H */ | ||||
							
								
								
									
										41
									
								
								include/linux/mlx5/srq.h
									
										
									
									
									
										Normal file
									
								
							
							
						
						
									
										41
									
								
								include/linux/mlx5/srq.h
									
										
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,41 @@ | |||
| /*
 | ||||
|  * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved. | ||||
|  * | ||||
|  * This software is available to you under a choice of one of two | ||||
|  * licenses.  You may choose to be licensed under the terms of the GNU | ||||
|  * General Public License (GPL) Version 2, available from the file | ||||
|  * COPYING in the main directory of this source tree, or the | ||||
|  * OpenIB.org BSD license below: | ||||
|  * | ||||
|  *     Redistribution and use in source and binary forms, with or | ||||
|  *     without modification, are permitted provided that the following | ||||
|  *     conditions are met: | ||||
|  * | ||||
|  *      - Redistributions of source code must retain the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer. | ||||
|  * | ||||
|  *      - Redistributions in binary form must reproduce the above | ||||
|  *        copyright notice, this list of conditions and the following | ||||
|  *        disclaimer in the documentation and/or other materials | ||||
|  *        provided with the distribution. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||||
|  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
|  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
|  * SOFTWARE. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef MLX5_SRQ_H | ||||
| #define MLX5_SRQ_H | ||||
| 
 | ||||
| #include <linux/mlx5/driver.h> | ||||
| 
 | ||||
| void mlx5_init_srq_table(struct mlx5_core_dev *dev); | ||||
| void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); | ||||
| 
 | ||||
| #endif /* MLX5_SRQ_H */ | ||||
|  | @ -610,7 +610,21 @@ enum ib_qp_type { | |||
| 	IB_QPT_RAW_PACKET = 8, | ||||
| 	IB_QPT_XRC_INI = 9, | ||||
| 	IB_QPT_XRC_TGT, | ||||
| 	IB_QPT_MAX | ||||
| 	IB_QPT_MAX, | ||||
| 	/* Reserve a range for qp types internal to the low level driver.
 | ||||
| 	 * These qp types will not be visible at the IB core layer, so the | ||||
| 	 * IB_QPT_MAX usages should not be affected in the core layer | ||||
| 	 */ | ||||
| 	IB_QPT_RESERVED1 = 0x1000, | ||||
| 	IB_QPT_RESERVED2, | ||||
| 	IB_QPT_RESERVED3, | ||||
| 	IB_QPT_RESERVED4, | ||||
| 	IB_QPT_RESERVED5, | ||||
| 	IB_QPT_RESERVED6, | ||||
| 	IB_QPT_RESERVED7, | ||||
| 	IB_QPT_RESERVED8, | ||||
| 	IB_QPT_RESERVED9, | ||||
| 	IB_QPT_RESERVED10, | ||||
| }; | ||||
| 
 | ||||
| enum ib_qp_create_flags { | ||||
|  | @ -766,6 +780,19 @@ enum ib_wr_opcode { | |||
| 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP, | ||||
| 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, | ||||
| 	IB_WR_BIND_MW, | ||||
| 	/* reserve values for low level drivers' internal use.
 | ||||
| 	 * These values will not be used at all in the ib core layer. | ||||
| 	 */ | ||||
| 	IB_WR_RESERVED1 = 0xf0, | ||||
| 	IB_WR_RESERVED2, | ||||
| 	IB_WR_RESERVED3, | ||||
| 	IB_WR_RESERVED4, | ||||
| 	IB_WR_RESERVED5, | ||||
| 	IB_WR_RESERVED6, | ||||
| 	IB_WR_RESERVED7, | ||||
| 	IB_WR_RESERVED8, | ||||
| 	IB_WR_RESERVED9, | ||||
| 	IB_WR_RESERVED10, | ||||
| }; | ||||
| 
 | ||||
| enum ib_send_flags { | ||||
|  | @ -773,7 +800,11 @@ enum ib_send_flags { | |||
| 	IB_SEND_SIGNALED	= (1<<1), | ||||
| 	IB_SEND_SOLICITED	= (1<<2), | ||||
| 	IB_SEND_INLINE		= (1<<3), | ||||
| 	IB_SEND_IP_CSUM		= (1<<4) | ||||
| 	IB_SEND_IP_CSUM		= (1<<4), | ||||
| 
 | ||||
| 	/* reserve bits 26-31 for low level drivers' internal use */ | ||||
| 	IB_SEND_RESERVED_START	= (1 << 26), | ||||
| 	IB_SEND_RESERVED_END	= (1 << 31), | ||||
| }; | ||||
| 
 | ||||
| struct ib_sge { | ||||
|  |  | |||
		Loading…
	
	Add table
		
		Reference in a new issue
	
	 Roland Dreier
						Roland Dreier