*/
 int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
 {
-       volatile u32 *dst_ptr;
        u32 dws;
        int r;
 
        /* allocate clear state block */
        adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
-       r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+       r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &adev->gfx.rlc.clear_state_obj,
                                      &adev->gfx.rlc.clear_state_gpu_addr,
                                      (void **)&adev->gfx.rlc.cs_ptr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
                amdgpu_gfx_rlc_fini(adev);
                return r;
        }
 
-       /* set up the cs buffer */
-       dst_ptr = adev->gfx.rlc.cs_ptr;
-       adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
-       amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-       amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
        return 0;
 }
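
For reference: once the clear-state BO is created with amdgpu_bo_create_kernel(), it stays pinned and kernel-mapped until amdgpu_bo_free_kernel() releases it (in this driver that happens from amdgpu_gfx_rlc_fini()), which is why the kmap/unpin/unreserve sequence above and the per-ASIC csb_vram_pin()/csb_vram_unpin() helpers removed below are no longer needed. A minimal sketch of that create/use/free lifetime, reusing the rlc field names above purely for illustration; it is not code from this patch:

/* Sketch only: create a persistently pinned, CPU-mapped VRAM BO and
 * release it again.  Assumes the standard amdgpu BO helpers from
 * amdgpu_object.h; error handling is kept to the bare minimum. */
static int csb_bo_lifetime_example(struct amdgpu_device *adev, u32 dws)
{
        int r;

        r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->gfx.rlc.clear_state_obj,
                                    &adev->gfx.rlc.clear_state_gpu_addr,
                                    (void **)&adev->gfx.rlc.cs_ptr);
        if (r)
                return r;

        /* cs_ptr stays valid for CPU writes and clear_state_gpu_addr for
         * the RLC, with no further reserve/pin/kmap dance in between */

        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                              &adev->gfx.rlc.clear_state_gpu_addr,
                              (void **)&adev->gfx.rlc.cs_ptr);
        return 0;
}

Because the helper both pins the BO and keeps the kernel mapping alive, filling the clear state can be deferred to the per-ASIC init_csb paths shown further down.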
 
 
        return 0;
 }
 
-static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
-{
-       int r;
-
-       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
-                       AMDGPU_GEM_DOMAIN_VRAM);
-       if (!r)
-               adev->gfx.rlc.clear_state_gpu_addr =
-                       amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
-       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
-       return r;
-}
-
-static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
-       int r;
-
-       if (!adev->gfx.rlc.clear_state_obj)
-               return;
-
-       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
-       if (likely(r == 0)) {
-               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-       }
-}
-
 static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
 {
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
 
 static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
 {
-       int r;
-
-       if (adev->in_gpu_reset) {
-               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-               if (r)
-                       return r;
-
-               r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
-                                  (void **)&adev->gfx.rlc.cs_ptr);
-               if (!r) {
-                       adev->gfx.rlc.funcs->get_csb_buffer(adev,
-                                       adev->gfx.rlc.cs_ptr);
-                       amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-               }
-
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-               if (r)
-                       return r;
-       }
+       adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
 
        /* csib */
        WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
                        adev->gfx.rlc.clear_state_gpu_addr >> 32);
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = gfx_v10_0_csb_vram_pin(adev);
-       if (r)
-               return r;
-
        if (!amdgpu_emu_mode)
                gfx_v10_0_init_golden_registers(adev);
 
        }
        gfx_v10_0_cp_enable(adev, false);
        gfx_v10_0_enable_gui_idle_interrupt(adev, false);
-       gfx_v10_0_csb_vram_unpin(adev);
 
        return 0;
 }
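
Taken together, the gfx v10 hunks above leave init_csb with nothing to do but regenerate the clear state through the persistent cs_ptr mapping and point the RLC at it, which also covers the GPU-reset path that previously had to re-reserve and kmap the BO. A rough sketch of the resulting shape; the ADDR_LO/LENGTH writes mirror the CSIB pattern visible in the v8/v9 hunks below and are an assumption here, since the excerpt truncates the v10 register writes:

/* Illustrative sketch, not the literal post-patch function: rebuild the
 * clear state block in place and program the CSIB registers. */
static void example_v10_reload_csb(struct amdgpu_device *adev)
{
        /* fill the pinned, kernel-mapped BO with fresh clear-state data */
        adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

        /* tell the RLC where the clear state block lives and how big it is */
        WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
                     adev->gfx.rlc.clear_state_gpu_addr >> 32);
        WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
                     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
        WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH,
                     adev->gfx.rlc.clear_state_size);
}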
 
 
        gfx_v7_0_constants_init(adev);
 
+       /* init CSB */
+       adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
        /* init rlc */
        r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
 
        return 0;
 }
 
-static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
-{
-       int r;
-
-       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
-                       AMDGPU_GEM_DOMAIN_VRAM);
-       if (!r)
-               adev->gfx.rlc.clear_state_gpu_addr =
-                       amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
-       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
-       return r;
-}
-
-static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
-       int r;
-
-       if (!adev->gfx.rlc.clear_state_obj)
-               return;
-
-       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
-       if (likely(r == 0)) {
-               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-       }
-}
-
 static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 {
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
 
 static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
 {
+       adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
        /* csib */
        WREG32(mmRLC_CSIB_ADDR_HI,
                        adev->gfx.rlc.clear_state_gpu_addr >> 32);
        gfx_v8_0_init_golden_registers(adev);
        gfx_v8_0_constants_init(adev);
 
-       r = gfx_v8_0_csb_vram_pin(adev);
-       if (r)
-               return r;
-
        r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
                pr_err("rlc is busy, skip halt rlc\n");
        amdgpu_gfx_rlc_exit_safe_mode(adev);
 
-       gfx_v8_0_csb_vram_unpin(adev);
-
        return 0;
 }
 
 
        return 0;
 }
 
-static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
-{
-       int r;
-
-       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-       if (unlikely(r != 0))
-               return r;
-
-       r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
-                       AMDGPU_GEM_DOMAIN_VRAM);
-       if (!r)
-               adev->gfx.rlc.clear_state_gpu_addr =
-                       amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
-
-       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
-       return r;
-}
-
-static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
-{
-       int r;
-
-       if (!adev->gfx.rlc.clear_state_obj)
-               return;
-
-       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
-       if (likely(r == 0)) {
-               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-       }
-}
-
 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
 {
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
 
 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
 {
+       adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
        /* csib */
        WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
                        adev->gfx.rlc.clear_state_gpu_addr >> 32);
 
        gfx_v9_0_constants_init(adev);
 
-       r = gfx_v9_0_csb_vram_pin(adev);
-       if (r)
-               return r;
-
        r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;
        gfx_v9_0_cp_enable(adev, false);
        adev->gfx.rlc.funcs->stop(adev);
 
-       gfx_v9_0_csb_vram_unpin(adev);
-
        return 0;
 }