/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_ICM_H
#define MLX4_ICM_H

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>

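/*
 * Size the per-chunk scatterlist array so that struct mlx4_icm_chunk
 * (list head, npages, nsg and the scatterlist array itself) fits in
 * roughly 256 bytes.
 */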
#define MLX4_ICM_CHUNK_LEN \
	((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
	 (sizeof (struct scatterlist)))

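/* ICM is handed to the firmware in fixed 4 KB pages, independent of the host PAGE_SIZE. */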
enum {
	MLX4_ICM_PAGE_SHIFT	= 12,
	MLX4_ICM_PAGE_SIZE	= 1 << MLX4_ICM_PAGE_SHIFT,
};

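/*
 * One allocation unit of ICM: @npages scatterlist entries hold the
 * allocated pages, of which @nsg are valid after DMA mapping.
 */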
struct mlx4_icm_chunk {
	struct list_head	list;
	int			npages;
	int			nsg;
	struct scatterlist	mem[MLX4_ICM_CHUNK_LEN];
};

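/* An ICM mapping: a reference-counted list of chunks. */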
struct mlx4_icm {
	struct list_head	chunk_list;
	int			refcount;
};

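/* Cursor for walking the DMA-mapped scatterlist entries of an ICM mapping. */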
struct mlx4_icm_iter {
	struct mlx4_icm		*icm;
	struct mlx4_icm_chunk	*chunk;
	int			page_idx;
};

struct mlx4_dev;
struct mlx4_icm_table;

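/*
 * Allocate and DMA-map backing memory for @npages ICM pages; @coherent
 * requests coherent (consistent) DMA memory.
 */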
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent);
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);

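/*
 * An ICM table backs a range of firmware objects (QP/CQ/EQ contexts,
 * MTTs and so on); the chunk holding a given object is mapped on demand
 * and reference-counted via the get/put helpers below.
 */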
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, int nobj, int reserved,
			int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 int start, int end);
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  int start, int end);

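/*
 * Iterator over the DMA-mapped scatterlist entries of an ICM mapping:
 * mlx4_icm_first()/mlx4_icm_next() advance the cursor, mlx4_icm_last()
 * tests for the end, and mlx4_icm_addr()/mlx4_icm_size() return the bus
 * address and length of the current entry.
 */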
static inline void mlx4_icm_first(struct mlx4_icm *icm,
				  struct mlx4_icm_iter *iter)
{
	iter->icm      = icm;
	iter->chunk    = list_empty(&icm->chunk_list) ?
		NULL : list_entry(icm->chunk_list.next,
				  struct mlx4_icm_chunk, list);
	iter->page_idx = 0;
}

static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
{
	return !iter->chunk;
}

static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
{
	if (++iter->page_idx >= iter->chunk->nsg) {
		if (iter->chunk->list.next == &iter->icm->chunk_list) {
			iter->chunk = NULL;
			return;
		}

		iter->chunk = list_entry(iter->chunk->list.next,
					 struct mlx4_icm_chunk, list);
		iter->page_idx = 0;
	}
}

static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
{
	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}

static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
{
	return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}

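/* Firmware commands that map/unmap the ICM auxiliary area. */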
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);

#endif /* MLX4_ICM_H */