Blob Blame History Raw
From: Ben Skeggs <bskeggs@redhat.com>
Date: Fri, 3 Nov 2017 08:36:25 +1000
Subject: drm/nouveau/mmu: swap out round for ALIGN
Git-commit: 6497c2baf2c66938bfff51af9806d495bbb506f9
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Rounding value is guaranteed to be power-of-two, so this is better
anyway.

Fixes build on 32-bit.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1349,12 +1349,12 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm
 
 		addr = this->addr;
 		if (vmm->func->page_block && prev && prev->page != p)
-			addr = roundup(addr, vmm->func->page_block);
+			addr = ALIGN(addr, vmm->func->page_block);
 		addr = ALIGN(addr, 1ULL << align);
 
 		tail = this->addr + this->size;
 		if (vmm->func->page_block && next && next->page != p)
-			tail = rounddown(tail, vmm->func->page_block);
+			tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
 		if (addr <= tail && tail - addr >= size) {
 			rb_erase(&this->tree, &vmm->free);