Lines Matching refs:eq
80 struct mlx5_ib_pf_eq *eq; member
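Line 80 appears to be the back-pointer a page-fault descriptor keeps to its owning EQ (set at line 1464 below). Every field of struct mlx5_ib_pf_eq is touched by the lines in this listing; reconstructed from those references alone (field order and the comments are assumptions, only the names and rough types are implied), the structure looks roughly like:

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;	/* eq->irq_nb, registered at line 1545 */
	struct mlx5_ib_dev *dev;	/* eq->dev, set at line 1528 */
	struct mlx5_eq *core;		/* eq->core, the generic mlx5 EQ */
	struct work_struct work;	/* eq->work, EQ drain worker */
	spinlock_t lock;		/* eq->lock, serializes EQ processing */
	struct workqueue_struct *wq;	/* eq->wq, per-fault work queue */
	mempool_t *pool;		/* eq->pool, page-fault descriptor pool */
};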
1381 struct mlx5_ib_pf_eq *eq = pfault->eq; in mlx5_ib_eqe_pf_action() local
1383 mlx5_ib_pfault(eq->dev, pfault); in mlx5_ib_eqe_pf_action()
1384 mempool_free(pfault, eq->pool); in mlx5_ib_eqe_pf_action()
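Lines 1381-1384 form the per-fault work handler: it resolves the fault via mlx5_ib_pfault() and then returns the descriptor to the mempool. A minimal sketch of how these lines fit together (the container_of() step is an assumption based on the queue_work(&pfault->work) call at line 1466):

static void mlx5_ib_eqe_pf_action(struct work_struct *work)
{
	/* Assumed: the work item is embedded in struct mlx5_pagefault. */
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_ib_pf_eq *eq = pfault->eq;

	mlx5_ib_pfault(eq->dev, pfault);	/* resolve the page fault */
	mempool_free(pfault, eq->pool);		/* recycle the descriptor */
}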
1387 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) in mlx5_ib_eq_pf_process() argument
1394 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) { in mlx5_ib_eq_pf_process()
1395 pfault = mempool_alloc(eq->pool, GFP_ATOMIC); in mlx5_ib_eq_pf_process()
1397 schedule_work(&eq->work); in mlx5_ib_eq_pf_process()
1405 mlx5_ib_dbg(eq->dev, in mlx5_ib_eq_pf_process()
1425 mlx5_ib_dbg(eq->dev, in mlx5_ib_eq_pf_process()
1429 mlx5_ib_dbg(eq->dev, in mlx5_ib_eq_pf_process()
1448 mlx5_ib_dbg(eq->dev, in mlx5_ib_eq_pf_process()
1456 mlx5_ib_warn(eq->dev, in mlx5_ib_eq_pf_process()
1464 pfault->eq = eq; in mlx5_ib_eq_pf_process()
1466 queue_work(eq->wq, &pfault->work); in mlx5_ib_eq_pf_process()
1468 cc = mlx5_eq_update_cc(eq->core, ++cc); in mlx5_ib_eq_pf_process()
1471 mlx5_eq_update_ci(eq->core, cc, 1); in mlx5_ib_eq_pf_process()
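Lines 1387-1471 are the EQ polling loop: each page-fault EQE gets a descriptor from the mempool and is handed to the workqueue, and if the GFP_ATOMIC allocation fails, processing is deferred to the EQ drain worker. A sketch of the loop with the EQE-to-descriptor decoding elided (the INIT_WORK() call and the break on allocation failure are assumptions; the mlx5_ib_dbg()/mlx5_ib_warn() lines above sit inside the elided decode):

static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
		if (!pfault) {
			/* Out of descriptors: let the drain worker retry. */
			schedule_work(&eq->work);
			break;
		}

		/* Decode the page-fault EQE subtype into pfault (elided). */

		pfault->eq = eq;
		INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);	/* assumed */
		queue_work(eq->wq, &pfault->work);

		cc = mlx5_eq_update_cc(eq->core, ++cc);
	}

	mlx5_eq_update_ci(eq->core, cc, 1);	/* update CI and re-arm the EQ */
}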
1477 struct mlx5_ib_pf_eq *eq = in mlx5_ib_eq_pf_int() local
1481 if (spin_trylock_irqsave(&eq->lock, flags)) { in mlx5_ib_eq_pf_int()
1482 mlx5_ib_eq_pf_process(eq); in mlx5_ib_eq_pf_int()
1483 spin_unlock_irqrestore(&eq->lock, flags); in mlx5_ib_eq_pf_int()
1485 schedule_work(&eq->work); in mlx5_ib_eq_pf_int()
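Lines 1477-1485 are the EQ interrupt notifier: it processes the EQ directly from the interrupt path when the lock is free and otherwise defers to the drain worker, so contention never drops a fault. A sketch under that reading (the notifier signature and the IRQ_HANDLED return value are assumptions):

static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
			     void *data)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->lock, flags)) {
		mlx5_ib_eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->lock, flags);
	} else {
		/* Someone else is processing; let the worker catch up. */
		schedule_work(&eq->work);
	}

	return IRQ_HANDLED;	/* assumed return value */
}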
1503 struct mlx5_ib_pf_eq *eq = in mlx5_ib_eq_pf_action() local
1506 mempool_refill(eq->pool); in mlx5_ib_eq_pf_action()
1508 spin_lock_irq(&eq->lock); in mlx5_ib_eq_pf_action()
1509 mlx5_ib_eq_pf_process(eq); in mlx5_ib_eq_pf_action()
1510 spin_unlock_irq(&eq->lock); in mlx5_ib_eq_pf_action()
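Lines 1503-1510 are the EQ drain worker scheduled from the two fallback paths above: it refills the descriptor pool in process context (mempool_refill() is the local helper referenced at line 1506), then drains the EQ under the lock. A sketch (the container_of() resolution is an assumption):

static void mlx5_ib_eq_pf_action(struct work_struct *work)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(work, struct mlx5_ib_pf_eq, work);

	mempool_refill(eq->pool);	/* top the pool back up, can sleep */

	spin_lock_irq(&eq->lock);
	mlx5_ib_eq_pf_process(eq);	/* drain any pending EQEs */
	spin_unlock_irq(&eq->lock);
}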
1518 int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) in mlx5r_odp_create_eq() argument
1524 if (eq->core) in mlx5r_odp_create_eq()
1526 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action); in mlx5r_odp_create_eq()
1527 spin_lock_init(&eq->lock); in mlx5r_odp_create_eq()
1528 eq->dev = dev; in mlx5r_odp_create_eq()
1530 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN, in mlx5r_odp_create_eq()
1532 if (!eq->pool) { in mlx5r_odp_create_eq()
1537 eq->wq = alloc_workqueue("mlx5_ib_page_fault", in mlx5r_odp_create_eq()
1540 if (!eq->wq) { in mlx5r_odp_create_eq()
1545 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; in mlx5r_odp_create_eq()
1550 eq->core = mlx5_eq_create_generic(dev->mdev, &param); in mlx5r_odp_create_eq()
1551 if (IS_ERR(eq->core)) { in mlx5r_odp_create_eq()
1552 err = PTR_ERR(eq->core); in mlx5r_odp_create_eq()
1555 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb); in mlx5r_odp_create_eq()
1564 mlx5_eq_destroy_generic(dev->mdev, eq->core); in mlx5r_odp_create_eq()
1566 eq->core = NULL; in mlx5r_odp_create_eq()
1567 destroy_workqueue(eq->wq); in mlx5r_odp_create_eq()
1569 mempool_destroy(eq->pool); in mlx5r_odp_create_eq()
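Lines 1518-1569 are the EQ constructor and its error unwinding. A condensed sketch of the setup order and the matching cleanup; the listing fixes only the call sequence, so the workqueue flags, the EQ parameter values, the mempool element size, and the error labels are assumptions:

int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_eq_param param = {};
	int err;

	if (eq->core)		/* already created (line 1524) */
		return 0;

	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
	spin_lock_init(&eq->lock);
	eq->dev = dev;

	/* Pre-allocated descriptors for the GFP_ATOMIC path; element size
	 * assumed to be the page-fault descriptor.
	 */
	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
					       sizeof(struct mlx5_pagefault));
	if (!eq->pool)
		return -ENOMEM;

	/* Flags and max_active are assumptions; only the name is listed. */
	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!eq->wq) {
		err = -ENOMEM;
		goto err_mempool;
	}

	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
	param.nent = MLX5_IB_NUM_PF_EQE;	/* assumed depth constant */
	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
	if (IS_ERR(eq->core)) {
		err = PTR_ERR(eq->core);
		goto err_wq;
	}

	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
	if (err)
		goto err_eq;

	return 0;

err_eq:
	mlx5_eq_destroy_generic(dev->mdev, eq->core);	/* line 1564 */
err_wq:
	eq->core = NULL;
	destroy_workqueue(eq->wq);
err_mempool:
	mempool_destroy(eq->pool);
	return err;
}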
1576 mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) in mlx5_ib_odp_destroy_eq() argument
1580 if (!eq->core) in mlx5_ib_odp_destroy_eq()
1582 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb); in mlx5_ib_odp_destroy_eq()
1583 err = mlx5_eq_destroy_generic(dev->mdev, eq->core); in mlx5_ib_odp_destroy_eq()
1584 cancel_work_sync(&eq->work); in mlx5_ib_odp_destroy_eq()
1585 destroy_workqueue(eq->wq); in mlx5_ib_odp_destroy_eq()
1586 mempool_destroy(eq->pool); in mlx5_ib_odp_destroy_eq()
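Lines 1576-1586 tear the EQ down in roughly the reverse order: disable the EQ so no new EQEs arrive, destroy it, then stop the workers before freeing the workqueue and the mempool. A sketch (the int return type is inferred from the err assignment at line 1583; the early-return value is an assumption):

int mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
	int err;

	if (!eq->core)		/* EQ was never created */
		return 0;

	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
	cancel_work_sync(&eq->work);	/* stop the EQ drain worker */
	destroy_workqueue(eq->wq);	/* flushes queued per-fault work */
	mempool_destroy(eq->pool);

	return err;
}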