Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 1 | /* |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| 20 | * DEALINGS IN THE SOFTWARE. |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 21 | */ |
| 22 | #include <core/tegra.h> |
| 23 | #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER |
| 24 | #include "priv.h" |
| 25 | |
/*
 * Power up the GPU: enable its supply regulator and clocks, then pulse
 * the reset line around removal of the 3D power-gate clamp.
 *
 * Returns 0 on success or a negative errno; on failure, everything
 * already enabled is rolled back in reverse order via the goto chain.
 */
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	ret = regulator_enable(tdev->vdd);
	if (ret)
		goto err_power;

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	/* The reference clock is optional; clk_ref may be NULL. */
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	/* Run the PWR clock at 204 MHz (rate-set failure is tolerated). */
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	/* Hold the GPU in reset while the clamp is removed; the udelay()s
	 * give the hardware time to settle between steps. */
	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	regulator_disable(tdev->vdd);
err_power:
	return ret;
}
| 74 | |
/*
 * Power down the GPU: assert reset, disable the clocks in reverse of
 * the power-up order, and finally disable the supply regulator.
 *
 * Returns the result of regulator_disable() (0 on success).
 */
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	/* clk_ref is optional and may be NULL (see power_up). */
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	return regulator_disable(tdev->vdd);
}
| 89 | |
| 90 | static void |
| 91 | nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) |
| 92 | { |
| 93 | #if IS_ENABLED(CONFIG_IOMMU_API) |
| 94 | struct device *dev = &tdev->pdev->dev; |
| 95 | unsigned long pgsize_bitmap; |
| 96 | int ret; |
| 97 | |
Alexandre Courbot | e396ecd | 2015-09-04 19:59:31 +0900 | [diff] [blame] | 98 | if (!tdev->func->iommu_bit) |
| 99 | return; |
| 100 | |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 101 | mutex_init(&tdev->iommu.mutex); |
| 102 | |
| 103 | if (iommu_present(&platform_bus_type)) { |
| 104 | tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); |
| 105 | if (IS_ERR(tdev->iommu.domain)) |
| 106 | goto error; |
| 107 | |
| 108 | /* |
| 109 | * A IOMMU is only usable if it supports page sizes smaller |
| 110 | * or equal to the system's PAGE_SIZE, with a preference if |
| 111 | * both are equal. |
| 112 | */ |
| 113 | pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap; |
| 114 | if (pgsize_bitmap & PAGE_SIZE) { |
| 115 | tdev->iommu.pgshift = PAGE_SHIFT; |
| 116 | } else { |
| 117 | tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK); |
| 118 | if (tdev->iommu.pgshift == 0) { |
| 119 | dev_warn(dev, "unsupported IOMMU page size\n"); |
| 120 | goto free_domain; |
| 121 | } |
| 122 | tdev->iommu.pgshift -= 1; |
| 123 | } |
| 124 | |
| 125 | ret = iommu_attach_device(tdev->iommu.domain, dev); |
| 126 | if (ret) |
| 127 | goto free_domain; |
| 128 | |
| 129 | ret = nvkm_mm_init(&tdev->iommu.mm, 0, |
Alexandre Courbot | e396ecd | 2015-09-04 19:59:31 +0900 | [diff] [blame] | 130 | (1ULL << tdev->func->iommu_bit) >> |
| 131 | tdev->iommu.pgshift, 1); |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 132 | if (ret) |
| 133 | goto detach_device; |
| 134 | } |
| 135 | |
| 136 | return; |
| 137 | |
| 138 | detach_device: |
| 139 | iommu_detach_device(tdev->iommu.domain, dev); |
| 140 | |
| 141 | free_domain: |
| 142 | iommu_domain_free(tdev->iommu.domain); |
| 143 | |
| 144 | error: |
| 145 | tdev->iommu.domain = NULL; |
| 146 | tdev->iommu.pgshift = 0; |
| 147 | dev_err(dev, "cannot initialize IOMMU MM\n"); |
| 148 | #endif |
| 149 | } |
| 150 | |
/*
 * Tear down the IOMMU state created by nvkm_device_tegra_probe_iommu().
 * Safe to call when no IOMMU was set up (domain is NULL in that case).
 */
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		/* Reverse order of setup: allocator, attachment, domain. */
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}
| 162 | |
/* Upcast from the embedded nvkm_device to its containing Tegra device. */
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}
| 168 | |
/* Look up memory resource 'bar' of the underlying platform device;
 * returns NULL if the resource does not exist. */
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}
| 175 | |
| 176 | static resource_size_t |
| 177 | nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar) |
| 178 | { |
| 179 | struct resource *res = nvkm_device_tegra_resource(device, bar); |
| 180 | return res ? res->start : 0; |
| 181 | } |
| 182 | |
| 183 | static resource_size_t |
| 184 | nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar) |
| 185 | { |
| 186 | struct resource *res = nvkm_device_tegra_resource(device, bar); |
| 187 | return res ? resource_size(res) : 0; |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 188 | } |
| 189 | |
/*
 * Handler for the GPU "stall" interrupt.  Disarms interrupt delivery
 * via the MC subdev, dispatches pending interrupts, then re-arms.
 * Reports IRQ_HANDLED only if some subdev claimed the interrupt (the
 * line is shared, see request_irq() with IRQF_SHARED in init()).
 */
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
| 201 | |
| 202 | static void |
| 203 | nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend) |
| 204 | { |
| 205 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); |
| 206 | if (tdev->irq) { |
| 207 | free_irq(tdev->irq, tdev); |
| 208 | tdev->irq = 0; |
| 209 | }; |
| 210 | } |
| 211 | |
| 212 | static int |
| 213 | nvkm_device_tegra_init(struct nvkm_device *device) |
| 214 | { |
| 215 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); |
| 216 | int irq, ret; |
| 217 | |
| 218 | irq = platform_get_irq_byname(tdev->pdev, "stall"); |
| 219 | if (irq < 0) |
| 220 | return irq; |
| 221 | |
| 222 | ret = request_irq(irq, nvkm_device_tegra_intr, |
| 223 | IRQF_SHARED, "nvkm", tdev); |
| 224 | if (ret) |
| 225 | return ret; |
| 226 | |
| 227 | tdev->irq = irq; |
| 228 | return 0; |
| 229 | } |
| 230 | |
/*
 * Destructor hook: power the GPU down before tearing down the IOMMU
 * state (reverse of the order used in nvkm_device_tegra_new()), then
 * return the containing allocation for the caller to free.
 */
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}
| 239 | |
/* Tegra platform backend hooks for the common nvkm_device core. */
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
};
| 249 | |
| 250 | int |
Alexandre Courbot | e396ecd | 2015-09-04 19:59:31 +0900 | [diff] [blame] | 251 | nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, |
| 252 | struct platform_device *pdev, |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 253 | const char *cfg, const char *dbg, |
| 254 | bool detect, bool mmio, u64 subdev_mask, |
| 255 | struct nvkm_device **pdevice) |
| 256 | { |
| 257 | struct nvkm_device_tegra *tdev; |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 258 | int ret; |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 259 | |
| 260 | if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) |
| 261 | return -ENOMEM; |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 262 | |
Alexandre Courbot | e396ecd | 2015-09-04 19:59:31 +0900 | [diff] [blame] | 263 | tdev->func = func; |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 264 | tdev->pdev = pdev; |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 265 | |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 266 | tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 267 | if (IS_ERR(tdev->vdd)) { |
| 268 | ret = PTR_ERR(tdev->vdd); |
| 269 | goto free; |
| 270 | } |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 271 | |
| 272 | tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 273 | if (IS_ERR(tdev->rst)) { |
| 274 | ret = PTR_ERR(tdev->rst); |
| 275 | goto free; |
| 276 | } |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 277 | |
| 278 | tdev->clk = devm_clk_get(&pdev->dev, "gpu"); |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 279 | if (IS_ERR(tdev->clk)) { |
| 280 | ret = PTR_ERR(tdev->clk); |
| 281 | goto free; |
| 282 | } |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 283 | |
Alexandre Courbot | 34440ed | 2016-04-01 11:37:44 +0900 | [diff] [blame] | 284 | if (func->require_ref_clk) |
| 285 | tdev->clk_ref = devm_clk_get(&pdev->dev, "ref"); |
| 286 | if (IS_ERR(tdev->clk_ref)) { |
| 287 | ret = PTR_ERR(tdev->clk_ref); |
| 288 | goto free; |
| 289 | } |
| 290 | |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 291 | tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 292 | if (IS_ERR(tdev->clk_pwr)) { |
| 293 | ret = PTR_ERR(tdev->clk_pwr); |
| 294 | goto free; |
| 295 | } |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 296 | |
Alexandre Courbot | 9d0394c | 2016-02-25 15:08:42 +0900 | [diff] [blame] | 297 | /** |
| 298 | * The IOMMU bit defines the upper limit of the GPU-addressable space. |
| 299 | * This will be refined in nouveau_ttm_init but we need to do it early |
| 300 | * for instmem to behave properly |
| 301 | */ |
| 302 | ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit)); |
| 303 | if (ret) |
| 304 | goto free; |
| 305 | |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 306 | nvkm_device_tegra_probe_iommu(tdev); |
| 307 | |
| 308 | ret = nvkm_device_tegra_power_up(tdev); |
| 309 | if (ret) |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 310 | goto remove; |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 311 | |
| 312 | tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; |
Alexandre Courbot | d268090 | 2016-06-01 17:39:15 +0900 | [diff] [blame] | 313 | tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id; |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 314 | ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, |
| 315 | NVKM_DEVICE_TEGRA, pdev->id, NULL, |
| 316 | cfg, dbg, detect, mmio, subdev_mask, |
| 317 | &tdev->device); |
| 318 | if (ret) |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 319 | goto powerdown; |
| 320 | |
| 321 | *pdevice = &tdev->device; |
Ben Skeggs | 43a7066 | 2015-08-20 14:54:23 +1000 | [diff] [blame] | 322 | |
| 323 | return 0; |
Thierry Reding | 870571a5 | 2016-02-24 18:34:43 +0100 | [diff] [blame] | 324 | |
| 325 | powerdown: |
| 326 | nvkm_device_tegra_power_down(tdev); |
| 327 | remove: |
| 328 | nvkm_device_tegra_remove_iommu(tdev); |
| 329 | free: |
| 330 | kfree(tdev); |
| 331 | return ret; |
Ben Skeggs | 7974dd1 | 2015-08-20 14:54:17 +1000 | [diff] [blame] | 332 | } |
| 333 | #else |
/*
 * Stub for kernels built without CONFIG_NOUVEAU_PLATFORM_DRIVER:
 * Tegra platform GPUs are not supported, so creation always fails.
 */
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
| 343 | #endif |