linux/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
Commit 07db5bd · 311 lines (248 loc) · 7.85 KB
// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */

#include <linux/component.h>

#include <drm/i915_pxp_tee_interface.h>
#include <drm/i915_component.h>

#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

#include "intel_pxp.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_cmd_interface_42.h"
#include "intel_pxp_huc.h"

static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);

	return &to_gt(i915)->pxp;
}

static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
				    void *msg_in, u32 msg_in_size,
				    void *msg_out, u32 msg_out_max_size,
				    u32 *msg_out_rcv_size)
{
	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
	struct i915_pxp_component *pxp_component = pxp->pxp_component;
	int ret = 0;

	mutex_lock(&pxp->tee_mutex);

	/*
	 * The binding of the component is asynchronous from i915 probe, so we
	 * can't be sure it has happened.
	 */
	if (!pxp_component) {
		ret = -ENODEV;
		goto unlock;
	}

	ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size);
	if (ret) {
		drm_err(&i915->drm, "Failed to send PXP TEE message\n");
		goto unlock;
	}

	ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size);
	if (ret < 0) {
		drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
		goto unlock;
	}

	if (ret > msg_out_max_size) {
		drm_err(&i915->drm,
			"Failed to receive PXP TEE message due to unexpected output size\n");
		ret = -ENOSPC;
		goto unlock;
	}

	if (msg_out_rcv_size)
		*msg_out_rcv_size = ret;

	ret = 0;
unlock:
	mutex_unlock(&pxp->tee_mutex);
	return ret;
}

int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
				 u8 client_id, u32 fence_id,
				 void *msg_in, size_t msg_in_len,
				 void *msg_out, size_t msg_out_len)
{
	/* TODO: for bigger objects we need to use a sg of 4k pages */
	const size_t max_msg_size = PAGE_SIZE;
	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
	struct i915_pxp_component *pxp_component = pxp->pxp_component;
	unsigned int offset = 0;
	struct scatterlist *sg;
	int ret;

	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	mutex_lock(&pxp->tee_mutex);

	if (unlikely(!pxp_component || !pxp_component->ops->gsc_command)) {
		ret = -ENODEV;
		goto unlock;
	}

	GEM_BUG_ON(!pxp->stream_cmd.obj);

	sg = i915_gem_object_get_sg_dma(pxp->stream_cmd.obj, 0, &offset);

	memcpy(pxp->stream_cmd.vaddr, msg_in, msg_in_len);

	ret = pxp_component->ops->gsc_command(pxp_component->tee_dev, client_id,
					      fence_id, sg, msg_in_len, sg);
	if (ret < 0)
		drm_err(&i915->drm, "Failed to send PXP TEE gsc command\n");
	else
		memcpy(msg_out, pxp->stream_cmd.vaddr, msg_out_len);

unlock:
	mutex_unlock(&pxp->tee_mutex);
	return ret;
}

/**
 * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
 * @i915_kdev: pointer to i915 kernel device
 * @tee_kdev: pointer to tee kernel device
 * @data: pointer to pxp_tee_master containing the function pointers
 *
 * This bind function is called during the system boot or resume from system sleep.
 *
 * Return: return 0 if successful.
 */
static int i915_pxp_tee_component_bind(struct device *i915_kdev,
				       struct device *tee_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
	struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
	struct intel_uc *uc = &pxp_to_gt(pxp)->uc;
	intel_wakeref_t wakeref;
	int ret = 0;

	mutex_lock(&pxp->tee_mutex);
	pxp->pxp_component = data;
	pxp->pxp_component->tee_dev = tee_kdev;
	mutex_unlock(&pxp->tee_mutex);

	if (intel_uc_uses_huc(uc) && intel_huc_is_loaded_by_gsc(&uc->huc)) {
		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			/* load huc via pxp */
			ret = intel_huc_fw_load_and_auth_via_gsc(&uc->huc);
			if (ret < 0)
				drm_err(&i915->drm, "failed to load huc via gsc %d\n", ret);
		}
	}

	/* if we are suspended, the HW will be re-initialized on resume */
	wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
	if (!wakeref)
		return 0;

	/* the component is required to fully start the PXP HW */
	if (intel_pxp_is_enabled(pxp))
		intel_pxp_init_hw(pxp);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return ret;
}

static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
					  struct device *tee_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
	struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
	intel_wakeref_t wakeref;

	if (intel_pxp_is_enabled(pxp))
		with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
			intel_pxp_fini_hw(pxp);

	mutex_lock(&pxp->tee_mutex);
	pxp->pxp_component = NULL;
	mutex_unlock(&pxp->tee_mutex);
}

static const struct component_ops i915_pxp_tee_component_ops = {
	.bind = i915_pxp_tee_component_bind,
	.unbind = i915_pxp_tee_component_unbind,
};

static int alloc_streaming_command(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
	struct drm_i915_gem_object *obj = NULL;
	void *cmd;
	int err;

	pxp->stream_cmd.obj = NULL;
	pxp->stream_cmd.vaddr = NULL;

	if (!IS_DGFX(i915))
		return 0;

	/* allocate lmem object of one page for PXP command memory and store it */
	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate pxp streaming command!\n");
		return PTR_ERR(obj);
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		drm_err(&i915->drm, "Failed to pin gsc message page!\n");
		goto out_put;
	}

	/* map the lmem into the virtual memory pointer */
	cmd = i915_gem_object_pin_map_unlocked(obj,
					       i915_coherent_map_type(i915, obj, true));
	if (IS_ERR(cmd)) {
		drm_err(&i915->drm, "Failed to map gsc message page!\n");
		err = PTR_ERR(cmd);
		goto out_unpin;
	}

	memset(cmd, 0, obj->base.size);

	pxp->stream_cmd.obj = obj;
	pxp->stream_cmd.vaddr = cmd;

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

static void free_streaming_command(struct intel_pxp *pxp)
{
	struct drm_i915_gem_object *obj = fetch_and_zero(&pxp->stream_cmd.obj);

	if (!obj)
		return;

	i915_gem_object_unpin_map(obj);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_put(obj);
}

int intel_pxp_tee_component_init(struct intel_pxp *pxp)
{
	int ret;
	struct intel_gt *gt = pxp_to_gt(pxp);
	struct drm_i915_private *i915 = gt->i915;

	mutex_init(&pxp->tee_mutex);

	ret = alloc_streaming_command(pxp);
	if (ret)
		return ret;

	ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
				  I915_COMPONENT_PXP);
	if (ret < 0) {
		drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
		goto out_free;
	}

	pxp->pxp_component_added = true;

	return 0;

out_free:
	free_streaming_command(pxp);
	return ret;
}

void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;

	if (!pxp->pxp_component_added)
		return;

	component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
	pxp->pxp_component_added = false;

	free_streaming_command(pxp);
}

int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
					 int arb_session_id)
{
	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
	struct pxp42_create_arb_in msg_in = {0};
	struct pxp42_create_arb_out msg_out = {0};
	int ret;

	msg_in.header.api_version = PXP_APIVER(4, 2);
	msg_in.header.command_id = PXP42_CMDID_INIT_SESSION;
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
	msg_in.protection_mode = PXP42_ARB_SESSION_MODE_HEAVY;
	msg_in.session_id = arb_session_id;

	ret = intel_pxp_tee_io_message(pxp,
				       &msg_in, sizeof(msg_in),
				       &msg_out, sizeof(msg_out),
				       NULL);

	if (ret)
		drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
	else if (msg_out.header.status != 0x0)
		drm_warn(&i915->drm,
			 "PXP firmware failed arb session init request ret=[0x%08x]\n",
			 msg_out.header.status);

	return ret;
}
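
Note on the command pattern: every PXP 4.2 command in this file follows the same shape as intel_pxp_tee_cmd_create_arb_session() above. The caller fills a fixed-size input struct whose header carries the API version, the command id and the payload length, sends it through intel_pxp_tee_io_message() (one locked send/recv round trip over the bound TEE component), and then checks the status field of the returned header. The sketch below only illustrates that pattern; struct pxp42_example_in/out, PXP42_CMDID_EXAMPLE and intel_pxp_tee_cmd_example() are hypothetical placeholders and are not part of intel_pxp_cmd_interface_42.h or of this file.

/*
 * Illustrative sketch only: a hypothetical PXP 4.2 command built on the same
 * request/response pattern as intel_pxp_tee_cmd_create_arb_session().
 * The pxp42_example_in/out structs and PXP42_CMDID_EXAMPLE id are invented
 * for illustration; real commands define their own in/out struct pair in
 * intel_pxp_cmd_interface_42.h.
 */
static int intel_pxp_tee_cmd_example(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
	struct pxp42_example_in msg_in = {0};   /* hypothetical request struct */
	struct pxp42_example_out msg_out = {0}; /* hypothetical reply struct */
	int ret;

	/* The header always carries API version, command id and payload size. */
	msg_in.header.api_version = PXP_APIVER(4, 2);
	msg_in.header.command_id = PXP42_CMDID_EXAMPLE; /* hypothetical id */
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);

	/* Single locked send/recv exchange with the bound TEE component. */
	ret = intel_pxp_tee_io_message(pxp,
				       &msg_in, sizeof(msg_in),
				       &msg_out, sizeof(msg_out),
				       NULL);
	if (ret)
		drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
	else if (msg_out.header.status != 0x0)
		drm_err(&i915->drm, "PXP firmware rejected command, status=[0x%08x]\n",
			msg_out.header.status);

	return ret;
}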