1/* $NetBSD: nouveau_subdev_vm_nv04.c,v 1.3 2015/10/14 00:12:55 mrg Exp $ */
2
3/*
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Ben Skeggs
25 */
26
27#include <sys/cdefs.h>
28__KERNEL_RCSID(0, "$NetBSD: nouveau_subdev_vm_nv04.c,v 1.3 2015/10/14 00:12:55 mrg Exp $");
29
30#include <core/gpuobj.h>
31
32#include "nv04.h"
33
34#define NV04_PDMA_SIZE (128 * 1024 * 1024)
35#define NV04_PDMA_PAGE ( 4 * 1024)
36
37/*******************************************************************************
38 * VM map/unmap callbacks
39 ******************************************************************************/
40
41static void
42nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
43 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
44{
45 pte = 0x00008 + (pte * 4);
46 while (cnt) {
47 u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
48 u32 phys = (u32)*list++;
49 while (cnt && page--) {
50 nv_wo32(pgt, pte, phys | 3);
51 phys += NV04_PDMA_PAGE;
52 pte += 4;
53 cnt -= 1;
54 }
55 }
56}
57
58static void
59nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
60{
61 pte = 0x00008 + (pte * 4);
62 while (cnt--) {
63 nv_wo32(pgt, pte, 0x00000000);
64 pte += 4;
65 }
66}
67
/*
 * VM flush callback: intentionally a no-op on NV04.
 * NOTE(review): presumably this hardware needs no explicit TLB/PTE
 * cache invalidation after updates — confirm against other vmmgr
 * implementations if changing map/unmap behavior.
 */
static void
nv04_vm_flush(struct nouveau_vm *vm)
{
}
72
73/*******************************************************************************
74 * VM object
75 ******************************************************************************/
76
/*
 * Creating additional virtual address spaces is not supported on NV04;
 * this always fails with -EINVAL.  The single PCIGART VM is created
 * internally by nv04_vmmgr_ctor() instead.
 */
int
nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
	       struct nouveau_vm **pvm)
{
	return -EINVAL;
}
83
84/*******************************************************************************
85 * VMMGR subdev
86 ******************************************************************************/
87
/*
 * Construct the NV04 VM manager ("PCIGART"): set up the vmmgr base
 * object, create the single 128 MiB GART address space, and allocate
 * the page table as a GPU DMA object.
 *
 * Returns 0 on success or a negative errno.  On partial failure the
 * already-created pieces are released by nv04_vmmgr_dtor() via the
 * normal object-destruction path, which is why *pobject and the
 * pgt[0] fields are assigned before each error check below.
 */
static int
nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv04_vmmgr_priv *priv;
	struct nouveau_gpuobj *dma;
	int ret;

	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
				   "pcigart", &priv);
	/* Publish the object even on failure so the caller can tear it down. */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* Hook up the NV04 callbacks: 4 KiB pages only, 32-bit DMA. */
	priv->base.create = nv04_vm_create;
	priv->base.limit = NV04_PDMA_SIZE;
	priv->base.dma_bits = 32;
	priv->base.pgt_bits = 32 - 12;
	priv->base.spg_shift = 12;
	priv->base.lpg_shift = 12;	/* no large pages: same shift as small */
	priv->base.map_sg = nv04_vm_map_sg;
	priv->base.unmap = nv04_vm_unmap;
	priv->base.flush = nv04_vm_flush;

	/* The one-and-only GART address space: [0, NV04_PDMA_SIZE). */
	ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
				&priv->vm);
	if (ret)
		return ret;

	/*
	 * Page table as a zeroed GPU object: one 4-byte PTE per 4 KiB
	 * page, plus an 8-byte DMA-object header written below.
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
				 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
				 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
				 &priv->vm->pgt[0].obj[0]);
	dma = priv->vm->pgt[0].obj[0];
	/* Pin the page table for the VM's lifetime; dtor drops this ref. */
	priv->vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
	return 0;
}
131
/*
 * Tear down the NV04 VM manager: drop the page-table GPU object and
 * the VM reference, release the scratch "null" DMA page if one was
 * allocated (by a subclass ctor; nv04_vmmgr_ctor() itself does not
 * set priv->nullp), then destroy the vmmgr base.
 *
 * Safe to call on a partially-constructed object: every step is
 * guarded by a NULL/zero check.
 */
void
nv04_vmmgr_dtor(struct nouveau_object *object)
{
	struct nv04_vmmgr_priv *priv = (void *)object;
	if (priv->vm) {
		nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->vm, NULL);
	}
#ifdef __NetBSD__
	if (priv->nullp) {
		struct nouveau_device *device = nv_device(priv);
		/*
		 * Must pick the same DMA tag (64-bit vs 32-bit) that was
		 * used at allocation time for the unload/unmap/free calls
		 * to match.
		 */
		const bus_dma_tag_t dmat = pci_dma64_available(&device->pdev->pd_pa) ?
		    device->pdev->pd_pa.pa_dmat64 : device->pdev->pd_pa.pa_dmat;

		/* Reverse order of setup: unload map, unmap KVA, destroy
		 * the map, then free the backing segment. */
		bus_dmamap_unload(dmat, priv->nullmap);
		bus_dmamem_unmap(dmat, priv->nullp, PAGE_SIZE);
		bus_dmamap_destroy(dmat, priv->nullmap);
		bus_dmamem_free(dmat, &priv->nullseg, 1);
	}
#else
	if (priv->nullp) {
		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
				    priv->nullp, priv->null);
	}
#endif
	nouveau_vmmgr_destroy(&priv->base);
}
159
/*
 * Object class for the NV04 VM subdev.  init/fini use the generic
 * vmmgr implementations; only construction and destruction are
 * NV04-specific.
 */
struct nouveau_oclass
nv04_vmmgr_oclass = {
	.handle = NV_SUBDEV(VM, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_vmmgr_ctor,
		.dtor = nv04_vmmgr_dtor,
		.init = _nouveau_vmmgr_init,
		.fini = _nouveau_vmmgr_fini,
	},
};
170