/* $NetBSD: nouveau_subdev_instmem_nv40.c,v 1.3 2016/02/15 19:36:35 riastradh Exp $ */

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_subdev_instmem_nv40.c,v 1.3 2016/02/15 19:36:35 riastradh Exp $");

#include <engine/graph/nv40.h>

#include "nv04.h"

/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

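/* Read a 32-bit word from the mapped PRAMIN aperture at byte offset addr. */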
static u32
nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
{
	struct nv04_instmem_priv *priv = (void *)object;
#ifdef __NetBSD__
	return bus_space_read_4(priv->iomemt, priv->iomemh, addr);
#else
	return ioread32_native(priv->iomem + addr);
#endif
}

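/* Write a 32-bit word to the mapped PRAMIN aperture at byte offset addr. */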
static void
nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
	struct nv04_instmem_priv *priv = (void *)object;
#ifdef __NetBSD__
	bus_space_write_4(priv->iomemt, priv->iomemh, addr, data);
#else
	iowrite32_native(data, priv->iomem + addr);
#endif
}

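/* Construct the nv40 instmem subdev: map the PRAMIN BAR, size the reserved
 * region at the end of vram, and carve out the fixed layout (vbios image,
 * RAMHT, RAMRO, RAMFC).
 */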
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

#ifdef __NetBSD__
	priv->iomemt = nv_device_resource_tag(device, bar);
	priv->iomemsz = nv_device_resource_len(device, bar);
	ret = bus_space_map(priv->iomemt,
	    nv_device_resource_start(device, bar),
	    priv->iomemsz, 0, &priv->iomemh);
	if (ret) {
		priv->iomemsz = 0;
		nv_error(priv, "unable to map PRAMIN BAR: %d\n", ret);
		return -EFAULT;
	}
#else
	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}
#endif

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/graph/nv40.c
	 */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
	else if (nv44_graph_class(priv))  priv->base.reserved = 0x4980 * vs;
	else				  priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

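	/* Example, not tied to any particular board: with vs == 1 on a
	 * chipset >= 0x43 outside the nv44 class, the sum above works out
	 * to (0x4a40 + 16KiB) * 32 + 1MiB, roughly 2.1MiB, before being
	 * rounded up to a 4KiB boundary below.
	 */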
	priv->base.reserved = round_up(priv->base.reserved, 4096);

	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				 &priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
				&priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
				 &priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}

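/* nv40 instmem subdev class: chipset-specific constructor and PRAMIN
 * accessors, sharing the destructor and instance object class with nv04.
 */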
struct nouveau_oclass *
nv40_instmem_oclass = &(struct nouveau_instmem_impl) {
	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_instmem_ctor,
		.dtor = nv04_instmem_dtor,
		.init = _nouveau_instmem_init,
		.fini = _nouveau_instmem_fini,
		.rd32 = nv40_instmem_rd32,
		.wr32 = nv40_instmem_wr32,
	},
	.instobj = &nv04_instobj_oclass.base,
}.base;