/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
28 #include <subdev/mc.h>
29 #include <subdev/timer.h>
30 #include <engine/falcon.h>
31 #include <engine/nvdec.h>
34 gp102_secboot_scrub_required(struct nvkm_secboot *sb)
36 struct nvkm_subdev *subdev = &sb->subdev;
37 struct nvkm_device *device = subdev->device;
40 nvkm_wr32(device, 0x100cd0, 0x2);
41 reg = nvkm_rd32(device, 0x100cd0);
43 return (reg & BIT(4));
47 gp102_run_secure_scrub(struct nvkm_secboot *sb)
49 struct nvkm_subdev *subdev = &sb->subdev;
50 struct nvkm_device *device = subdev->device;
51 struct nvkm_engine *engine;
52 struct nvkm_falcon *falcon;
54 struct fw_bin_header *hsbin_hdr;
55 struct hsf_fw_header *fw_hdr;
56 struct hsf_load_header *lhdr;
60 nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
62 engine = nvkm_engine_ref(&device->nvdec->engine);
64 return PTR_ERR(engine);
65 falcon = device->nvdec->falcon;
67 nvkm_falcon_get(falcon, &sb->subdev);
69 scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
70 if (IS_ERR(scrub_image))
71 return PTR_ERR(scrub_image);
73 nvkm_falcon_reset(falcon);
74 nvkm_falcon_bind_context(falcon, NULL);
76 hsbin_hdr = scrub_image;
77 fw_hdr = scrub_image + hsbin_hdr->header_offset;
78 lhdr = scrub_image + fw_hdr->hdr_offset;
79 scrub_data = scrub_image + hsbin_hdr->data_offset;
81 nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
82 lhdr->non_sec_code_size,
83 lhdr->non_sec_code_off >> 8, 0, false);
84 nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
85 ALIGN(lhdr->apps[0], 0x100),
87 lhdr->apps[0] >> 8, 0, true);
88 nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
93 nvkm_falcon_set_start_addr(falcon, 0x0);
94 nvkm_falcon_start(falcon);
96 ret = nvkm_falcon_wait_for_halt(falcon, 500);
98 nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
103 /* put nvdec in clean state - without reset it will remain in HS mode */
104 nvkm_falcon_reset(falcon);
106 if (gp102_secboot_scrub_required(sb)) {
107 nvkm_error(subdev, "VPR scrubber binary failed!\n");
112 nvkm_debug(subdev, "VPR scrub successfully completed\n");
115 nvkm_falcon_put(falcon, &sb->subdev);
116 nvkm_engine_unref(&engine);
121 gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
122 struct nvkm_falcon *falcon)
126 /* make sure the VPR region is unlocked */
127 if (gp102_secboot_scrub_required(sb)) {
128 ret = gp102_run_secure_scrub(sb);
133 return gm200_secboot_run_blob(sb, blob, falcon);
136 const struct nvkm_secboot_func
138 .dtor = gm200_secboot_dtor,
139 .oneinit = gm200_secboot_oneinit,
140 .fini = gm200_secboot_fini,
141 .run_blob = gp102_secboot_run_blob,
145 gp102_secboot_new(struct nvkm_device *device, int index,
146 struct nvkm_secboot **psb)
149 struct gm200_secboot *gsb;
150 struct nvkm_acr *acr;
152 acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
153 BIT(NVKM_SECBOOT_FALCON_FECS) |
154 BIT(NVKM_SECBOOT_FALCON_GPCCS) |
155 BIT(NVKM_SECBOOT_FALCON_SEC2));
159 gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
166 ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);