From patchwork Tue Jul 10 10:57:46 2012
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Maarten Lankhorst
X-Patchwork-Id: 13300
From: Maarten Lankhorst
To: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org, linux-media@vger.kernel.org,
 linux-kernel@vger.kernel.org, Maarten Lankhorst
Subject: [RFC PATCH 3/8] nouveau: Extend prime code
Date: Tue, 10 Jul 2012 12:57:46 +0200
Message-Id: <1341917871-2512-4-git-send-email-m.b.lankhorst@gmail.com>
X-Mailer: git-send-email 1.7.9.5
In-Reply-To: <1341917871-2512-1-git-send-email-m.b.lankhorst@gmail.com>
References: <1341917871-2512-1-git-send-email-m.b.lankhorst@gmail.com>
X-Mailing-List: linux-media@vger.kernel.org

From: Maarten Lankhorst

The prime code no longer requires the bo to be backed by a gem object,
and CPU access calls have been implemented. This will be needed for
exporting fence bo's.
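As an illustration of how the gem-less export path is meant to be used (for
example by a later patch that exports fence bo's), a caller could do
something like the sketch below. Only nouveau_gem_prime_export_bo() and
struct nouveau_bo come from this patch; the surrounding helper is
hypothetical and not part of the change:

/*
 * Illustrative sketch only, not part of this patch.  Assumes the usual
 * nouveau driver context (nouveau_drv.h, linux/dma-buf.h, linux/err.h).
 * The BO has no GEM object, so the export size is taken from the BO itself.
 */
static struct dma_buf *example_export_fence_bo(struct nouveau_bo *nvbo, int flags)
{
	struct dma_buf *buf;
	int ret;

	ret = nouveau_gem_prime_export_bo(nvbo, flags, nvbo->bo.mem.size, &buf);
	if (ret)
		return ERR_PTR(ret);

	return buf;
}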
Signed-off-by: Maarten Lankhorst
---
 drivers/gpu/drm/nouveau/nouveau_drv.h   |    6 +-
 drivers/gpu/drm/nouveau/nouveau_prime.c |  106 +++++++++++++++++++++----------
 2 files changed, 79 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8613cb2..7c52eba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1374,11 +1374,15 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
 extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);
 
+extern int nouveau_gem_prime_export_bo(struct nouveau_bo *nvbo, int flags,
+				       u32 size, struct dma_buf **ret);
 extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *obj, int flags);
 extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
 				struct dma_buf *dma_buf);
-
+extern int nouveau_prime_import_bo(struct drm_device *dev,
+				   struct dma_buf *dma_buf,
+				   struct nouveau_bo **pnvbo, bool gem);
 /* nouveau_display.c */
 int nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index a25cf2c..537154d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -35,7 +35,8 @@ static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attac
 					  enum dma_data_direction dir)
 {
 	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
-	struct drm_device *dev = nvbo->gem->dev;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct drm_device *dev = dev_priv->dev;
 	int npages = nvbo->bo.num_pages;
 	struct sg_table *sg;
 	int nents;
@@ -59,29 +60,37 @@ static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
 {
 	struct nouveau_bo *nvbo = dma_buf->priv;
 
-	if (nvbo->gem->export_dma_buf == dma_buf) {
-		nvbo->gem->export_dma_buf = NULL;
+	nouveau_bo_unpin(nvbo);
+	if (!nvbo->gem)
+		nouveau_bo_ref(NULL, &nvbo);
+	else {
+		if (nvbo->gem->export_dma_buf == dma_buf)
+			nvbo->gem->export_dma_buf = NULL;
 		drm_gem_object_unreference_unlocked(nvbo->gem);
 	}
 }
 
 static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
 {
-	return NULL;
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	return kmap_atomic(nvbo->bo.ttm->pages[page_num]);
 }
 
 static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
 {
-
+	kunmap_atomic(addr);
 }
+
 static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
-	return NULL;
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	return kmap(nvbo->bo.ttm->pages[page_num]);
 }
 
 static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
 {
-
+	struct nouveau_bo *nvbo = dma_buf->priv;
+	return kunmap(nvbo->bo.ttm->pages[page_num]);
 }
 
 static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
@@ -92,7 +101,8 @@ static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct
 static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
 {
 	struct nouveau_bo *nvbo = dma_buf->priv;
-	struct drm_device *dev = nvbo->gem->dev;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct drm_device *dev = dev_priv->dev;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
@@ -116,7 +126,8 @@ out_unlock:
 static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 	struct nouveau_bo *nvbo = dma_buf->priv;
-	struct drm_device *dev = nvbo->gem->dev;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct drm_device *dev = dev_priv->dev;
 
 	mutex_lock(&dev->struct_mutex);
 	nvbo->vmapping_count--;
@@ -140,10 +151,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops = {
 };
 
 static int
-nouveau_prime_new(struct drm_device *dev,
-		  size_t size,
+nouveau_prime_new(struct drm_device *dev, size_t size,
 		  struct sg_table *sg,
-		  struct nouveau_bo **pnvbo)
+		  struct nouveau_bo **pnvbo, bool gem)
 {
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
@@ -156,12 +166,10 @@ nouveau_prime_new(struct drm_device *dev,
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
-
-	/* we restrict allowed domains on nv50+ to only the types
-	 * that were requested at creation time. not possibly on
-	 * earlier chips without busting the ABI.
-	 */
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+	if (!gem)
+		return 0;
+
 	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
 	if (!nvbo->gem) {
 		nouveau_bo_ref(NULL, pnvbo);
@@ -172,22 +180,37 @@ nouveau_prime_new(struct drm_device *dev,
 	return 0;
 }
 
-struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
-				struct drm_gem_object *obj, int flags)
+int nouveau_gem_prime_export_bo(struct nouveau_bo *nvbo, int flags,
+				u32 size, struct dma_buf **buf)
 {
-	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 	int ret = 0;
+	*buf = NULL;
 
 	/* pin buffer into GTT */
 	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
 	if (ret)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
+
+	*buf = dma_buf_export(nvbo, &nouveau_dmabuf_ops, size, flags);
+	if (!IS_ERR(*buf))
+		return 0;
 
-	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
+	nouveau_bo_unpin(nvbo);
+	return PTR_ERR(*buf);
+}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+				struct drm_gem_object *obj, int flags)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+	struct dma_buf *buf;
+	nouveau_gem_prime_export_bo(nvbo, flags, obj->size, &buf);
+	return buf;
 }
 
-struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf)
+int nouveau_prime_import_bo(struct drm_device *dev,
+			    struct dma_buf *dma_buf,
+			    struct nouveau_bo **pnvbo, bool gem)
 {
 	struct dma_buf_attachment *attach;
 	struct sg_table *sg;
@@ -196,17 +219,22 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
 	if (dma_buf->ops == &nouveau_dmabuf_ops) {
 		nvbo = dma_buf->priv;
-		if (nvbo->gem) {
+		if (!gem) {
+			nouveau_bo_ref(nvbo, pnvbo);
+			return 0;
+		}
+		else if (nvbo->gem) {
 			if (nvbo->gem->dev == dev) {
 				drm_gem_object_reference(nvbo->gem);
-				return nvbo->gem;
+				*pnvbo = nvbo;
+				return 0;
 			}
 		}
 	}
 
 	/* need to attach */
 	attach = dma_buf_attach(dma_buf, dev->dev);
 	if (IS_ERR(attach))
-		return ERR_PTR(PTR_ERR(attach));
+		return PTR_ERR(attach);
 
 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(sg)) {
@@ -214,18 +242,32 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
 		goto fail_detach;
 	}
 
-	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
+	ret = nouveau_prime_new(dev, dma_buf->size, sg, pnvbo, gem);
 	if (ret)
 		goto fail_unmap;
 
-	nvbo->gem->import_attach = attach;
-
-	return nvbo->gem;
+	if (gem)
+		(*pnvbo)->gem->import_attach = attach;
+	BUG_ON(attach->priv);
+	attach->priv = *pnvbo;
+	return 0;
 
 fail_unmap:
 	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
 fail_detach:
 	dma_buf_detach(dma_buf, attach);
 
-	return ERR_PTR(ret);
+	return ret;
+}
+
+struct drm_gem_object *
+nouveau_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+	struct nouveau_bo *nvbo = NULL;
+	int ret;
+
+	ret = nouveau_prime_import_bo(dev, dma_buf, &nvbo, true);
+	if (ret)
+		return ERR_PTR(ret);
+	return nvbo->gem;
 }
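
For completeness, a rough sketch of how the gem == false import path and the
CPU access hooks added above could be exercised. nouveau_prime_import_bo(),
dma_buf_kmap() and dma_buf_kunmap() are real interfaces; the helper itself is
hypothetical and only meant to show the intended calling convention:

/*
 * Illustrative sketch only, not part of this patch.  Import a dma-buf as a
 * bare nouveau_bo (no GEM wrapper) and clear its first page through the
 * exporter's kmap hook.  Assumes the exporter implements kmap/kunmap and
 * that the backing pages are populated.
 */
static int example_import_and_clear(struct drm_device *dev, struct dma_buf *buf)
{
	struct nouveau_bo *nvbo = NULL;
	void *vaddr;
	int ret;

	ret = nouveau_prime_import_bo(dev, buf, &nvbo, false);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(buf, 0);
	if (vaddr) {
		memset(vaddr, 0, PAGE_SIZE);
		dma_buf_kunmap(buf, 0, vaddr);
	}

	return 0;
}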