drm/msm: avoid unmapping the buffer twice during msm_release

The dma buffer associated with the gem object is already
unmapped in put_iova(). Avoid unmapping it again in
put_pages().

Change-Id: Iac57e164dde6f3e5913070acbe74b42691049913
Signed-off-by: Krishna Manikandan <mkrishn@codeaurora.org>
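
For context, a minimal sketch of what put_pages() is expected to look
like after this hunk, assuming the dropped lines are the dma_unmap_sg()
call, its surrounding cache comment, and the now-unused aspace_dev
local (the trailing page-release code falls outside the quoted portion
of the hunk and is only hinted at here):

/* Hedged sketch, not the verbatim post-patch code: with the DMA unmap
 * removed, put_pages() only tears down the scatter/gather table, since
 * the mapping was already released on the put_iova() path.
 */
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}
		/* page release (not shown in the hunk above) continues as before */
	}
}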
Author:    Krishna Manikandan <mkrishn@codeaurora.org>
Date:      2019-03-29 12:11:15 +05:30
Committer: Dhaval Patel
Parent:    32c581ff56
Commit:    da4c79c55b

@@ -137,23 +137,10 @@ static void put_pages_vram(struct drm_gem_object *obj)
static void put_pages(struct drm_gem_object *obj)
{
	struct device *aspace_dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
				aspace_dev =
					msm_gem_get_aspace_device(msm_obj->aspace);
				dma_unmap_sg(aspace_dev, msm_obj->sgt->sgl,
						msm_obj->sgt->nents,
						DMA_BIDIRECTIONAL);
			}
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}