On Tue, 2022-04-12 at 12:53 -0300, Jason Gunthorpe wrote:
The next patch wants the vfio_device instead. There is no reason to store a pointer here since we can container_of back to the vfio_device.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
 drivers/s390/cio/vfio_ccw_cp.c  | 44 +++++++++++++++++++----------
 drivers/s390/cio/vfio_ccw_cp.h  |  4 +--
 drivers/s390/cio/vfio_ccw_fsm.c |  3 +--
 3 files changed, 28 insertions(+), 23 deletions(-)
There's opportunity for simplification here, but I'll handle that when I get to some other work in this space. For this series, this is fine.
Reviewed-by: Eric Farman <farman@linux.ibm.com>
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 8d1b2771c1aa02..af5048a1ba8894 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -16,6 +16,7 @@
 #include <asm/idals.h>

 #include "vfio_ccw_cp.h"
+#include "vfio_ccw_private.h"

 struct pfn_array {
 	/* Starting guest physical I/O address. */
@@ -98,17 +99,17 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
 * If the pin request partially succeeds, or fails completely,
 * all pages are left unpinned and a negative error value is
 * returned.
 */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev)
 {
 	int ret = 0;

-	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+	ret = vfio_pin_pages(vdev->dev, pa->pa_iova_pfn, pa->pa_nr,
 			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

 	if (ret < 0) {
 		goto err_out;
 	} else if (ret > 0 && ret != pa->pa_nr) {
-		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
+		vfio_unpin_pages(vdev->dev, pa->pa_iova_pfn, ret);
 		ret = -EINVAL;
 		goto err_out;
 	}
@@ -122,11 +123,11 @@ static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
 }

 /* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev)
 {
 	/* Only unpin if any pages were pinned to begin with */
 	if (pa->pa_nr)
-		vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+		vfio_unpin_pages(vdev->dev, pa->pa_iova_pfn, pa->pa_nr);
 	pa->pa_nr = 0;
 	kfree(pa->pa_iova_pfn);
 }

@@ -190,7 +191,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host physical address (@to).
 */
-static long copy_from_iova(struct device *mdev,
+static long copy_from_iova(struct vfio_device *vdev,
 			   void *to, u64 iova,
 			   unsigned long n)
 {
@@ -203,9 +204,9 @@ static long copy_from_iova(struct device *mdev,
 	if (ret < 0)
 		return ret;

-	ret = pfn_array_pin(&pa, mdev);
+	ret = pfn_array_pin(&pa, vdev);
 	if (ret < 0) {
-		pfn_array_unpin_free(&pa, mdev);
+		pfn_array_unpin_free(&pa, vdev);
 		return ret;
 	}

@@ -226,7 +227,7 @@ static long copy_from_iova(struct device *mdev,
 		break;
 	}

-	pfn_array_unpin_free(&pa, mdev);
+	pfn_array_unpin_free(&pa, vdev);

 	return l;
 }

@@ -423,11 +424,13 @@ static int ccwchain_loop_tic(struct ccwchain *chain,

 static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
 {
+	struct vfio_device *vdev =
+		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
 	struct ccwchain *chain;
 	int len, ret;

 	/* Copy 2K (the most we support today) of possible CCWs */
-	len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+	len = copy_from_iova(vdev, cp->guest_cp, cda,
 			     CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
 	if (len)
 		return len;
@@ -508,6 +511,8 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 				 int idx,
 				 struct channel_program *cp)
 {
+	struct vfio_device *vdev =
+		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
 	struct ccw1 *ccw;
 	struct pfn_array *pa;
 	u64 iova;

@@ -526,7 +531,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 	if (ccw_is_idal(ccw)) {
 		/* Read first IDAW to see if it's 4K-aligned or not. */
 		/* All subsequent IDAws will be 4K-aligned. */
-		ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+		ret = copy_from_iova(vdev, &iova, ccw->cda, sizeof(iova));
 		if (ret)
 			return ret;
 	} else {

@@ -555,7 +560,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 	if (ccw_is_idal(ccw)) {
 		/* Copy guest IDAL into host IDAL */
-		ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+		ret = copy_from_iova(vdev, idaws, ccw->cda, idal_len);
 		if (ret)
 			goto out_unpin;
@@ -574,7 +579,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 	}

 	if (ccw_does_data_transfer(ccw)) {
-		ret = pfn_array_pin(pa, cp->mdev);
+		ret = pfn_array_pin(pa, vdev);
 		if (ret < 0)
 			goto out_unpin;
 	} else {

@@ -590,7 +595,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 	return 0;

 out_unpin:
-	pfn_array_unpin_free(pa, cp->mdev);
+	pfn_array_unpin_free(pa, vdev);
 out_free_idaws:
 	kfree(idaws);
 out_init:

@@ -632,8 +637,10 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
-int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
+int cp_init(struct channel_program *cp, union orb *orb)
 {
+	struct vfio_device *vdev =
+		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
 	/* custom ratelimit used to avoid flood during guest IPL */
 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
 	int ret;

@@ -650,11 +657,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 	 * the problem if something does break.
 	 */
 	if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
-		dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");
+		dev_warn(vdev->dev, "Prefetching channel program even though prefetch not specified in ORB");

 	INIT_LIST_HEAD(&cp->ccwchain_list);
 	memcpy(&cp->orb, orb, sizeof(*orb));
-	cp->mdev = mdev;

 	/* Build a ccwchain for the first CCW segment */
 	ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
@@ -682,6 +688,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 */
 void cp_free(struct channel_program *cp)
 {
+	struct vfio_device *vdev =
+		&container_of(cp, struct vfio_ccw_private, cp)->vdev;
 	struct ccwchain *chain, *temp;
 	int i;

@@ -691,7 +699,7 @@ void cp_free(struct channel_program *cp)
 	cp->initialized = false;
 	list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
 		for (i = 0; i < chain->ch_len; i++) {
-			pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+			pfn_array_unpin_free(chain->ch_pa + i, vdev);
 			ccwchain_cda_free(chain, i);
 		}
 		ccwchain_free(chain);
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index ba31240ce96594..e4c436199b4cda 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -37,13 +37,11 @@ struct channel_program {
 	struct list_head ccwchain_list;
 	union orb orb;
-	struct device *mdev;
 	bool initialized;
 	struct ccw1 *guest_cp;
 };

-extern int cp_init(struct channel_program *cp, struct device *mdev,
-		   union orb *orb);
+extern int cp_init(struct channel_program *cp, union orb *orb);
 extern void cp_free(struct channel_program *cp);
 extern int cp_prefetch(struct channel_program *cp);
 extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index e435a9cd92dacf..8483a266051c21 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -262,8 +262,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 			errstr = "transport mode";
 			goto err_out;
 		}
-		io_region->ret_code = cp_init(&private->cp,
-					      mdev_dev(mdev),
-					      orb);
+		io_region->ret_code = cp_init(&private->cp, orb);
 		if (io_region->ret_code) {
 			VFIO_CCW_MSG_EVENT(2,
 					   "%pUl (%x.%x.%04x): cp_init=%d\n",