qat_hw17.c revision 1.1 1 1.1 hikaru /* $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
2 1.1 hikaru
3 1.1 hikaru /*
4 1.1 hikaru * Copyright (c) 2019 Internet Initiative Japan, Inc.
5 1.1 hikaru * All rights reserved.
6 1.1 hikaru *
7 1.1 hikaru * Redistribution and use in source and binary forms, with or without
8 1.1 hikaru * modification, are permitted provided that the following conditions
9 1.1 hikaru * are met:
10 1.1 hikaru * 1. Redistributions of source code must retain the above copyright
11 1.1 hikaru * notice, this list of conditions and the following disclaimer.
12 1.1 hikaru * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 hikaru * notice, this list of conditions and the following disclaimer in the
14 1.1 hikaru * documentation and/or other materials provided with the distribution.
15 1.1 hikaru *
16 1.1 hikaru * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 1.1 hikaru * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 1.1 hikaru * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 1.1 hikaru * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 1.1 hikaru * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 1.1 hikaru * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 1.1 hikaru * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 1.1 hikaru * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 1.1 hikaru * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 1.1 hikaru * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 1.1 hikaru * POSSIBILITY OF SUCH DAMAGE.
27 1.1 hikaru */
28 1.1 hikaru
29 1.1 hikaru /*
30 1.1 hikaru * Copyright(c) 2014 Intel Corporation.
31 1.1 hikaru * Redistribution and use in source and binary forms, with or without
32 1.1 hikaru * modification, are permitted provided that the following conditions
33 1.1 hikaru * are met:
34 1.1 hikaru *
35 1.1 hikaru * * Redistributions of source code must retain the above copyright
36 1.1 hikaru * notice, this list of conditions and the following disclaimer.
37 1.1 hikaru * * Redistributions in binary form must reproduce the above copyright
38 1.1 hikaru * notice, this list of conditions and the following disclaimer in
39 1.1 hikaru * the documentation and/or other materials provided with the
40 1.1 hikaru * distribution.
41 1.1 hikaru * * Neither the name of Intel Corporation nor the names of its
42 1.1 hikaru * contributors may be used to endorse or promote products derived
43 1.1 hikaru * from this software without specific prior written permission.
44 1.1 hikaru *
45 1.1 hikaru * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
46 1.1 hikaru * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
47 1.1 hikaru * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
48 1.1 hikaru * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 1.1 hikaru * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 1.1 hikaru * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
51 1.1 hikaru * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52 1.1 hikaru * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53 1.1 hikaru * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54 1.1 hikaru * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
55 1.1 hikaru * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 1.1 hikaru */
57 1.1 hikaru
58 1.1 hikaru #include <sys/cdefs.h>
59 1.1 hikaru __KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
60 1.1 hikaru
61 1.1 hikaru #include <sys/param.h>
62 1.1 hikaru #include <sys/systm.h>
63 1.1 hikaru #include <sys/proc.h>
64 1.1 hikaru
65 1.1 hikaru #include <opencrypto/xform.h>
66 1.1 hikaru
67 1.1 hikaru /* XXX same as sys/arch/x86/x86/via_padlock.c */
68 1.1 hikaru #include <opencrypto/cryptosoft_xform.c>
69 1.1 hikaru
70 1.1 hikaru #include <dev/pci/pcireg.h>
71 1.1 hikaru #include <dev/pci/pcivar.h>
72 1.1 hikaru
73 1.1 hikaru #include "qatreg.h"
74 1.1 hikaru #include "qat_hw17reg.h"
75 1.1 hikaru #include "qatvar.h"
76 1.1 hikaru #include "qat_hw17var.h"
77 1.1 hikaru
78 1.1 hikaru int qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t,
79 1.1 hikaru void *, void *);
80 1.1 hikaru int qat_adm_mailbox_send(struct qat_softc *,
81 1.1 hikaru struct fw_init_admin_req *, struct fw_init_admin_resp *);
82 1.1 hikaru int qat_adm_mailbox_send_init_me(struct qat_softc *);
83 1.1 hikaru int qat_adm_mailbox_send_hb_timer(struct qat_softc *);
84 1.1 hikaru int qat_adm_mailbox_send_fw_status(struct qat_softc *);
85 1.1 hikaru int qat_adm_mailbox_send_constants(struct qat_softc *);
86 1.1 hikaru
87 1.1 hikaru uint32_t qat_hw17_crypto_setup_cipher_desc(struct qat_session *,
88 1.1 hikaru struct qat_crypto_desc *, struct cryptoini *,
89 1.1 hikaru union hw_cipher_algo_blk *, uint32_t, struct fw_la_bulk_req *,
90 1.1 hikaru enum fw_slice);
91 1.1 hikaru uint32_t qat_hw17_crypto_setup_auth_desc(struct qat_session *,
92 1.1 hikaru struct qat_crypto_desc *, struct cryptoini *,
93 1.1 hikaru union hw_auth_algo_blk *, uint32_t, struct fw_la_bulk_req *,
94 1.1 hikaru enum fw_slice);
95 1.1 hikaru void qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *,
96 1.1 hikaru struct fw_la_bulk_req *);
97 1.1 hikaru
/*
 * qat_adm_mailbox_init:
 *
 *	Allocate the DMA areas used by the firmware admin mailbox
 *	interface (message buffer, constants table, heartbeat area)
 *	and program the message buffer's bus address into the device's
 *	ADMINMSG registers.
 *
 *	Returns 0 on success or an errno from qat_alloc_dmamem().
 *
 *	NOTE(review): on a failed allocation the areas already
 *	allocated are not released here -- presumably the caller tears
 *	down the softc on error; confirm.
 */
int
qat_adm_mailbox_init(struct qat_softc *sc)
{
	uint64_t addr;
	int error;
	struct qat_dmamem *qdm;

	/* Shared request/response buffer for admin messages. */
	error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma,
	    PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	/* Constants table, later pushed to the device by
	 * qat_adm_mailbox_send_constants(). */
	qdm = &sc->sc_admin_comms.qadc_const_tbl_dma;
	error = qat_alloc_dmamem(sc, qdm, PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	memcpy(qdm->qdm_dma_vaddr,
	    mailbox_const_tab, sizeof(mailbox_const_tab));

	/* Flush the table to memory before the device may read it. */
	bus_dmamap_sync(sc->sc_dmat, qdm->qdm_dma_map, 0,
	    qdm->qdm_dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Heartbeat area, later handed over via FW_HEARTBEAT_TIMER_SET. */
	error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma,
	    PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	/* Tell the device where the admin message buffer lives. */
	addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr;
	qat_misc_write_4(sc, ADMINMSGUR, addr >> 32);
	qat_misc_write_4(sc, ADMINMSGLR, addr);

	return 0;
}
132 1.1 hikaru
/*
 * qat_adm_mailbox_put_msg_sync:
 *
 *	Synchronously send one ADMINMSG_LEN-sized admin message to
 *	accelerator engine "ae" and copy the firmware's reply to "out".
 *	Each AE owns a 2 * ADMINMSG_LEN window in the shared DMA
 *	buffer: request first, response right after it.
 *
 *	Returns 0 on success, EAGAIN if a previous message is still
 *	pending in the mailbox, or EFAULT on response timeout.
 */
int
qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae,
    void *in, void *out)
{
	uint32_t mailbox;
	bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE);
	int offset = ae * ADMINMSG_LEN * 2;
	int times, received;
	uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset;

	/* Mailbox value 1 means the previous message is still in flight. */
	mailbox = qat_misc_read_4(sc, mb_offset);
	if (mailbox == 1)
		return EAGAIN;

	/* Place the request in the shared buffer, then ring the mailbox. */
	memcpy(buf, in, ADMINMSG_LEN);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_admin_comms.qadc_dma.qdm_dma_map, 0,
	    sc->sc_admin_comms.qadc_dma.qdm_dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	qat_misc_write_4(sc, mb_offset, 1);

	/* Poll for completion: the firmware clears the mailbox when
	 * done.  Up to 50 * 20ms = 1 second. */
	received = 0;
	for (times = 0; times < 50; times++) {
		delay(20000);
		if (qat_misc_read_4(sc, mb_offset) == 0) {
			received = 1;
			break;
		}
	}
	if (received) {
		/* Response follows the request in the per-AE window. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_admin_comms.qadc_dma.qdm_dma_map, 0,
		    sc->sc_admin_comms.qadc_dma.qdm_dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);
	} else {
		aprint_error_dev(sc->sc_dev,
		    "Failed to send admin msg to accelerator\n");
	}

	return received ? 0 : EFAULT;
}
173 1.1 hikaru
174 1.1 hikaru int
175 1.1 hikaru qat_adm_mailbox_send(struct qat_softc *sc,
176 1.1 hikaru struct fw_init_admin_req *req, struct fw_init_admin_resp *resp)
177 1.1 hikaru {
178 1.1 hikaru int error;
179 1.1 hikaru uint32_t mask;
180 1.1 hikaru uint8_t ae;
181 1.1 hikaru
182 1.1 hikaru for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
183 1.1 hikaru if (!(mask & 1))
184 1.1 hikaru continue;
185 1.1 hikaru
186 1.1 hikaru error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp);
187 1.1 hikaru if (error)
188 1.1 hikaru return error;
189 1.1 hikaru if (resp->init_resp_hdr.status) {
190 1.1 hikaru aprint_error_dev(sc->sc_dev,
191 1.1 hikaru "Failed to send admin msg: cmd %d\n",
192 1.1 hikaru req->init_admin_cmd_id);
193 1.1 hikaru return EFAULT;
194 1.1 hikaru }
195 1.1 hikaru }
196 1.1 hikaru
197 1.1 hikaru return 0;
198 1.1 hikaru }
199 1.1 hikaru
200 1.1 hikaru int
201 1.1 hikaru qat_adm_mailbox_send_init_me(struct qat_softc *sc)
202 1.1 hikaru {
203 1.1 hikaru struct fw_init_admin_req req;
204 1.1 hikaru struct fw_init_admin_resp resp;
205 1.1 hikaru
206 1.1 hikaru memset(&req, 0, sizeof(req));
207 1.1 hikaru req.init_admin_cmd_id = FW_INIT_ME;
208 1.1 hikaru
209 1.1 hikaru return qat_adm_mailbox_send(sc, &req, &resp);
210 1.1 hikaru }
211 1.1 hikaru
212 1.1 hikaru int
213 1.1 hikaru qat_adm_mailbox_send_hb_timer(struct qat_softc *sc)
214 1.1 hikaru {
215 1.1 hikaru struct fw_init_admin_req req;
216 1.1 hikaru struct fw_init_admin_resp resp;
217 1.1 hikaru
218 1.1 hikaru memset(&req, 0, sizeof(req));
219 1.1 hikaru req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET;
220 1.1 hikaru
221 1.1 hikaru req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr;
222 1.1 hikaru req.heartbeat_ticks =
223 1.1 hikaru sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL;
224 1.1 hikaru
225 1.1 hikaru return qat_adm_mailbox_send(sc, &req, &resp);
226 1.1 hikaru }
227 1.1 hikaru
228 1.1 hikaru int
229 1.1 hikaru qat_adm_mailbox_send_fw_status(struct qat_softc *sc)
230 1.1 hikaru {
231 1.1 hikaru int error;
232 1.1 hikaru struct fw_init_admin_req req;
233 1.1 hikaru struct fw_init_admin_resp resp;
234 1.1 hikaru
235 1.1 hikaru memset(&req, 0, sizeof(req));
236 1.1 hikaru req.init_admin_cmd_id = FW_STATUS_GET;
237 1.1 hikaru
238 1.1 hikaru error = qat_adm_mailbox_send(sc, &req, &resp);
239 1.1 hikaru if (error)
240 1.1 hikaru return error;
241 1.1 hikaru
242 1.1 hikaru aprint_normal_dev(sc->sc_dev,
243 1.1 hikaru "loaded firmware: version %d.%d.%d\n",
244 1.1 hikaru resp.u.s.version_major_num,
245 1.1 hikaru resp.u.s.version_minor_num,
246 1.1 hikaru resp.init_resp_pars.u.s1.version_patch_num);
247 1.1 hikaru
248 1.1 hikaru return 0;
249 1.1 hikaru }
250 1.1 hikaru
251 1.1 hikaru int
252 1.1 hikaru qat_adm_mailbox_send_constants(struct qat_softc *sc)
253 1.1 hikaru {
254 1.1 hikaru struct fw_init_admin_req req;
255 1.1 hikaru struct fw_init_admin_resp resp;
256 1.1 hikaru
257 1.1 hikaru memset(&req, 0, sizeof(req));
258 1.1 hikaru req.init_admin_cmd_id = FW_CONSTANTS_CFG;
259 1.1 hikaru
260 1.1 hikaru req.init_cfg_sz = 1024;
261 1.1 hikaru req.init_cfg_ptr =
262 1.1 hikaru sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr;
263 1.1 hikaru
264 1.1 hikaru return qat_adm_mailbox_send(sc, &req, &resp);
265 1.1 hikaru }
266 1.1 hikaru
267 1.1 hikaru int
268 1.1 hikaru qat_adm_mailbox_send_init(struct qat_softc *sc)
269 1.1 hikaru {
270 1.1 hikaru int error;
271 1.1 hikaru
272 1.1 hikaru error = qat_adm_mailbox_send_init_me(sc);
273 1.1 hikaru if (error)
274 1.1 hikaru return error;
275 1.1 hikaru
276 1.1 hikaru error = qat_adm_mailbox_send_hb_timer(sc);
277 1.1 hikaru if (error)
278 1.1 hikaru return error;
279 1.1 hikaru
280 1.1 hikaru error = qat_adm_mailbox_send_fw_status(sc);
281 1.1 hikaru if (error)
282 1.1 hikaru return error;
283 1.1 hikaru
284 1.1 hikaru return qat_adm_mailbox_send_constants(sc);
285 1.1 hikaru }
286 1.1 hikaru
287 1.1 hikaru int
288 1.1 hikaru qat_arb_init(struct qat_softc *sc)
289 1.1 hikaru {
290 1.1 hikaru uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
291 1.1 hikaru uint32_t arb, i;
292 1.1 hikaru const uint32_t *thd_2_arb_cfg;
293 1.1 hikaru
294 1.1 hikaru /* Service arb configured for 32 bytes responses and
295 1.1 hikaru * ring flow control check enabled. */
296 1.1 hikaru for (arb = 0; arb < MAX_ARB; arb++)
297 1.1 hikaru qat_arb_sarconfig_write_4(sc, arb, arb_cfg);
298 1.1 hikaru
299 1.1 hikaru /* Map worker threads to service arbiters */
300 1.1 hikaru sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg);
301 1.1 hikaru
302 1.1 hikaru if (!thd_2_arb_cfg)
303 1.1 hikaru return EINVAL;
304 1.1 hikaru
305 1.1 hikaru for (i = 0; i < sc->sc_hw.qhw_num_engines; i++)
306 1.1 hikaru qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i));
307 1.1 hikaru
308 1.1 hikaru return 0;
309 1.1 hikaru }
310 1.1 hikaru
311 1.1 hikaru int
312 1.1 hikaru qat_set_ssm_wdtimer(struct qat_softc *sc)
313 1.1 hikaru {
314 1.1 hikaru uint32_t timer;
315 1.1 hikaru u_int mask;
316 1.1 hikaru int i;
317 1.1 hikaru
318 1.1 hikaru timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT;
319 1.1 hikaru for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
320 1.1 hikaru if (!(mask & 1))
321 1.1 hikaru continue;
322 1.1 hikaru qat_misc_write_4(sc, SSMWDT(i), timer);
323 1.1 hikaru qat_misc_write_4(sc, SSMWDTPKE(i), timer);
324 1.1 hikaru }
325 1.1 hikaru
326 1.1 hikaru return 0;
327 1.1 hikaru }
328 1.1 hikaru
/*
 * qat_check_slice_hang:
 *
 *	Slice-hang detection is not implemented; always reports
 *	"not handled" (0).
 */
int
qat_check_slice_hang(struct qat_softc *sc)
{
	return 0;
}
336 1.1 hikaru
/*
 * qat_hw17_crypto_setup_cipher_desc:
 *
 *	Fill in the cipher sub-block of the content descriptor at
 *	cd_blk_offset and the cipher control header embedded in the
 *	bulk-request template, then chain this slice to next_slice.
 *
 *	Returns the number of bytes consumed in the content descriptor
 *	block, rounded up to an 8-byte boundary.
 */
uint32_t
qat_hw17_crypto_setup_cipher_desc(struct qat_session *qs,
    struct qat_crypto_desc *desc, struct cryptoini *crie,
    union hw_cipher_algo_blk *cipher, uint32_t cd_blk_offset,
    struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
	struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
	    (struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
	int keylen = crie->cri_klen / 8;	/* cri_klen is in bits */

	cipher->max.cipher_config.val =
	    qat_crypto_load_cipher_cryptoini(desc, crie);
	memcpy(cipher->max.key, crie->cri_key, keylen);

	/* Control-header sizes and offsets are in 8-byte (quadword)
	 * units, hence the >> 3. */
	cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3;
	cipher_cd_ctrl->cipher_key_sz = keylen >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3;
	FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
	FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice);

	return roundup(sizeof(struct hw_cipher_config) + keylen, 8);
}
359 1.1 hikaru
/*
 * qat_hw17_crypto_setup_auth_desc:
 *
 *	Fill in the auth sub-block of the content descriptor at
 *	cd_blk_offset, precompute the HMAC state into it, and set up
 *	the auth control header embedded in the bulk-request template,
 *	chaining this slice to next_slice.
 *
 *	Returns the number of bytes consumed in the content descriptor
 *	block, rounded up to an 8-byte boundary.
 */
uint32_t
qat_hw17_crypto_setup_auth_desc(struct qat_session *qs,
    struct qat_crypto_desc *desc, struct cryptoini *cria,
    union hw_auth_algo_blk *auth, uint32_t cd_blk_offset,
    struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
	struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl =
	    (struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
	struct qat_sym_hash_def const *hash_def;
	uint8_t *state1, *state2;

	auth->max.inner_setup.auth_config.config =
	    qat_crypto_load_auth_cryptoini(desc, cria, &hash_def);
	auth->max.inner_setup.auth_counter.counter =
	    htonl(hash_def->qshd_qat->qshqi_auth_counter);

	/* Control-header offsets are in 8-byte (quadword) units. */
	auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3;
	auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
	auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
	auth_cd_ctrl->final_sz = desc->qcd_auth_sz;

	/* Both hash states are padded to an 8-byte boundary. */
	auth_cd_ctrl->inner_state1_sz =
	    roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
	auth_cd_ctrl->inner_state2_sz =
	    roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
	/* state2 sits right after the setup block and state1. */
	auth_cd_ctrl->inner_state2_offset =
	    auth_cd_ctrl->hash_cfg_offset +
	    ((sizeof(struct hw_auth_setup) +
	    auth_cd_ctrl->inner_state1_sz) >> 3);

	/* Precompute the HMAC partial states into the descriptor. */
	state1 = auth->max.state1;
	state2 = auth->max.state1 + auth_cd_ctrl->inner_state1_sz;
	qat_crypto_hmac_precompute(desc, cria, hash_def, state1, state2);

	FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH);
	FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice);

	return roundup(auth_cd_ctrl->inner_state1_sz +
	    auth_cd_ctrl->inner_state2_sz +
	    sizeof(struct hw_auth_setup), 8);
}
401 1.1 hikaru
402 1.1 hikaru void
403 1.1 hikaru qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc,
404 1.1 hikaru struct fw_la_bulk_req *req)
405 1.1 hikaru {
406 1.1 hikaru union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
407 1.1 hikaru struct fw_comn_req_hdr *req_hdr = &req->comn_hdr;
408 1.1 hikaru
409 1.1 hikaru req_hdr->service_cmd_id = desc->qcd_cmd_id;
410 1.1 hikaru req_hdr->hdr_flags = FW_COMN_VALID;
411 1.1 hikaru req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA;
412 1.1 hikaru req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
413 1.1 hikaru COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
414 1.1 hikaru req_hdr->serv_specif_flags = 0;
415 1.1 hikaru cd_pars->s.content_desc_addr = desc->qcd_desc_paddr;
416 1.1 hikaru }
417 1.1 hikaru
/*
 * qat_hw17_crypto_setup_desc:
 *
 *	Build the per-session bulk-request template and content
 *	descriptor for the slice chain recorded in desc->qcd_slices[],
 *	then sync the session's DMA memory for device access.
 */
void
qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
    struct qat_crypto_desc *desc,
    struct cryptoini *crie, struct cryptoini *cria)
{
	union hw_cipher_algo_blk *cipher;
	union hw_auth_algo_blk *auth;
	struct fw_la_bulk_req *req_tmpl;
	struct fw_comn_req_hdr *req_hdr;
	union fw_comn_req_hdr_cd_pars *cd_pars;
	uint32_t cd_blk_offset = 0;
	int i;
	uint8_t *cd_blk_ptr;

	req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache;
	req_hdr = &req_tmpl->comn_hdr;
	cd_pars = &req_tmpl->cd_pars;
	cd_blk_ptr = desc->qcd_content_desc;

	memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req));
	qat_hw17_init_comn_req_hdr(desc, req_tmpl);

	/*
	 * Append one content-descriptor sub-block per slice; each
	 * setup helper returns the size it consumed so the next
	 * sub-block lands right after it.  qcd_slices[i + 1] supplies
	 * the "next slice" id for chaining -- this assumes the chain
	 * is terminated by FW_SLICE_DRAM_WR before the array runs
	 * out, otherwise the lookahead would read past the last
	 * entry.  TODO confirm against qcd_slices[] construction.
	 */
	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr +
			    cd_blk_offset);
			cd_blk_offset += qat_hw17_crypto_setup_cipher_desc(
			    qs, desc, crie, cipher, cd_blk_offset, req_tmpl,
			    desc->qcd_slices[i + 1]);
			break;
		case FW_SLICE_AUTH:
			auth = (union hw_auth_algo_blk *)(cd_blk_ptr +
			    cd_blk_offset);
			cd_blk_offset += qat_hw17_crypto_setup_auth_desc(
			    qs, desc, cria, auth, cd_blk_offset, req_tmpl,
			    desc->qcd_slices[i + 1]);
			req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES;
			/* no digest verify */
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			KASSERT(0);
			break;
		}
	}

	/* Descriptor size is reported in 8-byte units. */
	cd_pars->s.content_desc_params_sz =
	    roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3;

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "qcd_content_desc",
	    desc->qcd_content_desc, cd_pars->s.content_desc_params_sz << 3);
	qat_dump_raw(QAT_DUMP_DESC, "qcd_req_cache",
	    &desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
#endif

	/* Make the session-resident template visible to the device. */
	bus_dmamap_sync(qcy->qcy_sc->sc_dmat,
	    qcy->qcy_session_dmamems[qs->qs_lid].qdm_dma_map, 0,
	    sizeof(struct qat_session),
	    BUS_DMASYNC_PREWRITE);
}
482 1.1 hikaru
/*
 * qat_hw17_crypto_setup_req_params:
 *
 *	Build the per-request firmware message in the symmetric cookie
 *	from the session's cached bulk-request template: point it at
 *	the request's buffer list (in-place operation, so source ==
 *	destination) and append the cipher and/or auth request
 *	parameters selected by the command id.
 */
void
qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb, struct qat_session *qs,
    struct qat_crypto_desc const *desc, struct qat_sym_cookie *qsc,
    struct cryptodesc *crde, struct cryptodesc *crda, bus_addr_t icv_paddr)
{
	struct qat_sym_bulk_cookie *qsbc;
	struct fw_la_bulk_req *bulk_req;
	struct fw_la_cipher_req_params *cipher_param;
	struct fw_la_auth_req_params *auth_param;
	uint32_t req_params_offset = 0;
	uint8_t *req_params_ptr;
	enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;

	qsbc = &qsc->u.qsc_bulk_cookie;
	bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;

	/* Start from the template built by qat_hw17_crypto_setup_desc(). */
	memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
	/* Cookie pointer comes back in the response as opaque data. */
	bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
	bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
	bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;

	if (icv_paddr != 0)
		bulk_req->comn_hdr.serv_specif_flags |= FW_LA_DIGEST_IN_BUFFER;

	req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars;

	/* Cipher parameters, present unless this is an auth-only request. */
	if (cmd_id != FW_LA_CMD_AUTH) {
		cipher_param = (struct fw_la_cipher_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_cipher_req_params);

		cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr;
		cipher_param->cipher_offset = crde->crd_skip;
		cipher_param->cipher_length = crde->crd_len;
	}

	/* Auth parameters, present unless this is a cipher-only request. */
	if (cmd_id != FW_LA_CMD_CIPHER) {
		auth_param = (struct fw_la_auth_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_auth_req_params);

		auth_param->auth_off = crda->crd_skip;
		auth_param->auth_len = crda->crd_len;
		auth_param->auth_res_addr = icv_paddr;
		auth_param->auth_res_sz = 0; /* XXX no digest verify */
		auth_param->hash_state_sz = 0;
		auth_param->u1.auth_partial_st_prefix = 0;
		auth_param->u2.aad_sz = 0;
	}

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "req_params", req_params_ptr, req_params_offset);
#endif
}
537 1.1 hikaru
538