@@ -56,7 +56,7 @@ static void
 create_mkey_callback(int status, struct mlx5_async_work *context);
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
                                      u64 iova, int access_flags,
-                                     unsigned int page_size, bool populate,
+                                     unsigned long page_size, bool populate,
                                      int access_mode);
 static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
@@ -1125,7 +1125,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	struct mlx5r_cache_rb_key rb_key = {};
 	struct mlx5_cache_ent *ent;
 	struct mlx5_ib_mr *mr;
-	unsigned int page_size;
+	unsigned long page_size;
 
 	if (umem->is_dmabuf)
 		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
@@ -1229,7 +1229,7 @@ reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_f
  */
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
                                      u64 iova, int access_flags,
-                                     unsigned int page_size, bool populate,
+                                     unsigned long page_size, bool populate,
                                      int access_mode)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -1435,7 +1435,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
 		mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
 					MLX5_MKC_ACCESS_MODE_MTT);
 	} else {
-		unsigned int page_size =
+		unsigned long page_size =
 			mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
 
 		mutex_lock(&dev->slow_path_mutex);
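
The widening from unsigned int to unsigned long matters because the mkey page size is a power of two derived from the umem layout and, for a large physically contiguous region, can presumably exceed what 32 bits can hold, so storing it in an unsigned int would silently truncate it. A minimal standalone C sketch, not part of the commit, illustrating the truncation on an LP64 build (where unsigned long is 64 bits); the 8 GiB value is a hypothetical result of a helper such as mlx5_umem_mkc_find_best_pgsz():

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical best page size of 8 GiB (1UL << 33). */
		unsigned long best_pgsz = 1UL << 33;

		unsigned int old_type = best_pgsz;  /* old type: high bits lost */
		unsigned long new_type = best_pgsz; /* new type: value preserved */

		printf("unsigned int:  %#x\n", old_type);   /* prints 0 */
		printf("unsigned long: %#lx\n", new_type);  /* prints 0x200000000 */
		return 0;
	}

With the old type, a truncated page_size of 0 would propagate into the mkey setup paths shown above, which is why every declaration and the reg_create() prototype are widened together.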