/*	$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
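
/*
 * This file implements the firmware interface of the hw15 generation
 * of Intel QuickAssist (QAT) devices: it builds the FW_INIT admin
 * messages sent to the firmware at attach time and the lookaside (LA)
 * bulk request descriptors used by the symmetric crypto service.
 */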

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <opencrypto/xform.h>

/* XXX same as sys/arch/x86/x86/via_padlock.c */
#include <opencrypto/cryptosoft_xform.c>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw15reg.h"
#include "qatvar.h"
#include "qat_hw15var.h"

int	qat_adm_ring_init_ring_table(struct qat_softc *);
void	qat_adm_ring_build_slice_mask(uint16_t *, uint32_t, uint32_t);
void	qat_adm_ring_build_shram_mask(uint64_t *, uint32_t, uint32_t);
int	qat_adm_ring_build_ring_table(struct qat_softc *, uint32_t);
int	qat_adm_ring_build_init_msg(struct qat_softc *,
	    struct fw_init_req *, enum fw_init_cmd_id, uint32_t,
	    struct qat_accel_init_cb *);
int	qat_adm_ring_send_init_msg_sync(struct qat_softc *,
	    enum fw_init_cmd_id, uint32_t);
int	qat_adm_ring_send_init_msg(struct qat_softc *,
	    enum fw_init_cmd_id);
int	qat_adm_ring_intr(struct qat_softc *, void *, void *);

uint32_t	qat_crypto_setup_cipher_desc(struct qat_session *,
		    struct qat_crypto_desc *desc, struct cryptoini *,
		    struct fw_cipher_hdr *, uint8_t *, uint32_t,
		    enum fw_slice);
uint32_t	qat_crypto_setup_auth_desc(struct qat_session *,
		    struct qat_crypto_desc *, struct cryptoini *,
		    struct fw_auth_hdr *, uint8_t *, uint32_t,
		    enum fw_slice);

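/*
 * The qat_msg_*_populate() helpers below each fill in one piece of a
 * firmware request message: the arch interface header, the common LA
 * header, the mid section (opaque cookie plus source/destination
 * buffer addresses), the request parameter block pointer, and the
 * footer.
 */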
void
qat_msg_req_type_populate(struct arch_if_req_hdr *msg, enum arch_if_req type,
    uint32_t rxring)
{

	memset(msg, 0, sizeof(struct arch_if_req_hdr));
	msg->flags = ARCH_IF_FLAGS_VALID_FLAG |
	    ARCH_IF_FLAGS_RESP_RING_TYPE_ET | ARCH_IF_FLAGS_RESP_TYPE_S;
	msg->req_type = type;
	msg->resp_pipe_id = rxring;
}

void
qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *msg, bus_addr_t desc_paddr,
    uint8_t hdrsz, uint8_t hwblksz, uint16_t comn_req_flags, uint32_t flow_id)
{
	struct fw_comn_req_hdr *hdr = &msg->comn_hdr;

	hdr->comn_req_flags = comn_req_flags;
	hdr->content_desc_params_sz = hwblksz;
	hdr->content_desc_hdr_sz = hdrsz;
	hdr->content_desc_addr = desc_paddr;
	msg->flow_id = flow_id;
}

void
qat_msg_service_cmd_populate(struct fw_la_bulk_req *msg,
    enum fw_la_cmd_id cmdid, uint16_t cmd_flags)
{
	msg->comn_la_req.la_cmd_id = cmdid;
	msg->comn_la_req.u.la_flags = cmd_flags;
}

void
qat_msg_cmn_mid_populate(struct fw_comn_req_mid *msg, void *cookie,
    uint64_t src, uint64_t dst)
{

	msg->opaque_data = (uint64_t)(uintptr_t)cookie;
	msg->src_data_addr = src;
	if (dst == 0)
		msg->dest_data_addr = src;
	else
		msg->dest_data_addr = dst;
}

void
qat_msg_req_params_populate(struct fw_la_bulk_req *msg,
    bus_addr_t req_params_paddr, uint8_t req_params_sz)
{
	msg->req_params_addr = req_params_paddr;
	msg->comn_la_req.u1.req_params_blk_sz = req_params_sz / 8;
}

void
qat_msg_cmn_footer_populate(union fw_comn_req_ftr *msg, uint64_t next_addr)
{
	msg->next_request_addr = next_addr;
}

void
qat_msg_params_populate(struct fw_la_bulk_req *msg,
    struct qat_crypto_desc *desc, uint8_t req_params_sz,
    uint16_t service_cmd_flags, uint16_t comn_req_flags)
{
	qat_msg_cmn_hdr_populate(msg, desc->qcd_desc_paddr,
	    desc->qcd_hdr_sz, desc->qcd_hw_blk_sz, comn_req_flags, 0);
	qat_msg_service_cmd_populate(msg, desc->qcd_cmd_id, service_cmd_flags);
	qat_msg_cmn_mid_populate(&msg->comn_mid, NULL, 0, 0);
	qat_msg_req_params_populate(msg, 0, req_params_sz);
	qat_msg_cmn_footer_populate(&msg->comn_ftr, 0);
}

int
qat_adm_ring_init_ring_table(struct qat_softc *sc)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;

	if (sc->sc_ae_num == 1) {
		qadr->qadr_cya_ring_tbl = &qadr->qadr_master_ring_tbl[0];
		qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
	} else if (sc->sc_ae_num == 2 || sc->sc_ae_num == 4) {
		qadr->qadr_cya_ring_tbl = &qadr->qadr_master_ring_tbl[0];
		qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
		qadr->qadr_cyb_ring_tbl = &qadr->qadr_master_ring_tbl[1];
		qadr->qadr_srv_mask[1] = QAT_SERVICE_CRYPTO_B;
	}

	return 0;
}

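/*
 * qat_adm_ring_init() allocates one page of DMA memory for the
 * Master Ring Table, fills every ring slot with the default weight
 * and priority, and creates the admin tx/rx ring pair used to hand
 * FW_INIT messages to the firmware.
 */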
int
qat_adm_ring_init(struct qat_softc *sc)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	int error, i, j;

	error = qat_alloc_dmamem(sc, &qadr->qadr_dma, PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	qadr->qadr_master_ring_tbl = qadr->qadr_dma.qdm_dma_vaddr;

	KASSERT(sc->sc_ae_num * sizeof(struct fw_init_ring_table) <=
	    PAGE_SIZE);

	/* Initialize the Master Ring Table */
	for (i = 0; i < sc->sc_ae_num; i++) {
		struct fw_init_ring_table *firt =
		    &qadr->qadr_master_ring_tbl[i];

		for (j = 0; j < INIT_RING_TABLE_SZ; j++) {
			struct fw_init_ring_params *firp =
			    &firt->firt_bulk_rings[j];

			firp->firp_reserved = 0;
			firp->firp_curr_weight = QAT_DEFAULT_RING_WEIGHT;
			firp->firp_init_weight = QAT_DEFAULT_RING_WEIGHT;
			firp->firp_ring_pvl = QAT_DEFAULT_PVL;
		}
		memset(firt->firt_ring_mask, 0, sizeof(firt->firt_ring_mask));
	}

	error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_TX,
	    ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_req_size,
	    NULL, NULL, "admin_tx", &qadr->qadr_admin_tx);
	if (error)
		return error;

	error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_RX,
	    ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_resp_size,
	    qat_adm_ring_intr, qadr, "admin_rx", &qadr->qadr_admin_rx);
	if (error)
		return error;

	/*
	 * Finally set up the service indices into the Master Ring Table
	 * and convenient ring table pointers for each service enabled.
	 * Only the Admin rings are initialized.
	 */
	error = qat_adm_ring_init_ring_table(sc);
	if (error)
		return error;

	/*
	 * Calculate the number of active AEs per QAT
	 * needed for Shram partitioning.
	 */
	for (i = 0; i < sc->sc_ae_num; i++) {
		if (qadr->qadr_srv_mask[i])
			qadr->qadr_active_aes_per_accel++;
	}

	return 0;
}

void
qat_adm_ring_build_slice_mask(uint16_t *slice_mask, uint32_t srv_mask,
    uint32_t init_shram)
{
	uint16_t shram = 0, comn_req = 0;

	if (init_shram)
		shram = COMN_REQ_SHRAM_INIT_REQUIRED;

	if (srv_mask & QAT_SERVICE_CRYPTO_A)
		comn_req |= COMN_REQ_CY0_ONLY(shram);
	if (srv_mask & QAT_SERVICE_CRYPTO_B)
		comn_req |= COMN_REQ_CY1_ONLY(shram);

	*slice_mask = comn_req;
}

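/*
 * Partition the 64-bit shared-RAM mask among the active AEs of an
 * accelerator: a single active AE owns the whole mask, two active
 * AEs split it 32/32, and three active AEs split it 23/23/18 bits.
 * More than three active AEs per accelerator is not supported.
 */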
void
qat_adm_ring_build_shram_mask(uint64_t *shram_mask, uint32_t active_aes,
    uint32_t ae)
{
	*shram_mask = 0;

	if (active_aes == 1) {
		*shram_mask = ~(*shram_mask);
	} else if (active_aes == 2) {
		if (ae == 1)
			*shram_mask = ((~(*shram_mask)) & 0xffffffff);
		else
			*shram_mask = ((~(*shram_mask)) & 0xffffffff00000000ull);
	} else if (active_aes == 3) {
		if (ae == 0)
			*shram_mask = ((~(*shram_mask)) & 0x7fffff);
		else if (ae == 1)
			*shram_mask = ((~(*shram_mask)) & 0x3fffff800000ull);
		else
			*shram_mask = ((~(*shram_mask)) & 0xffffc00000000000ull);
	} else {
		panic("Only up to three active AEs are supported in the "
		    "current version");
	}
}

int
qat_adm_ring_build_ring_table(struct qat_softc *sc, uint32_t ae)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	struct fw_init_ring_table *tbl;
	struct fw_init_ring_params *param;
	uint8_t srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae];

	if ((srv_mask & QAT_SERVICE_CRYPTO_A)) {
		tbl = qadr->qadr_cya_ring_tbl;
	} else if ((srv_mask & QAT_SERVICE_CRYPTO_B)) {
		tbl = qadr->qadr_cyb_ring_tbl;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "Invalid execution engine %d\n", ae);
		return EINVAL;
	}

	param = &tbl->firt_bulk_rings[sc->sc_hw.qhw_ring_sym_tx];
	param->firp_curr_weight = QAT_HI_PRIO_RING_WEIGHT;
	param->firp_init_weight = QAT_HI_PRIO_RING_WEIGHT;
	FW_INIT_RING_MASK_SET(tbl, sc->sc_hw.qhw_ring_sym_tx);

	return 0;
}

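/*
 * Build a single FW_INIT admin message.  SET_AE_INFO carries the
 * slice and shared-RAM masks for one AE; SET_RING_INFO points the
 * firmware at that AE's entry in the DMA-resident Master Ring Table.
 * The callback cookie travels in the opaque data field so the
 * response handler can locate the waiting thread.
 */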
int
qat_adm_ring_build_init_msg(struct qat_softc *sc,
    struct fw_init_req *initmsg, enum fw_init_cmd_id cmd, uint32_t ae,
    struct qat_accel_init_cb *cb)
{
	struct fw_init_set_ae_info_hdr *aehdr;
	struct fw_init_set_ae_info *aeinfo;
	struct fw_init_set_ring_info_hdr *ringhdr;
	struct fw_init_set_ring_info *ringinfo;
	int init_shram = 0, tgt_id, cluster_id;
	uint32_t srv_mask;

	srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae % sc->sc_ae_num];

	memset(initmsg, 0, sizeof(struct fw_init_req));

	qat_msg_req_type_populate(&initmsg->comn_hdr.arch_if,
	    ARCH_IF_REQ_QAT_FW_INIT,
	    sc->sc_admin_rings.qadr_admin_rx->qr_ring_id);

	qat_msg_cmn_mid_populate(&initmsg->comn_mid, cb, 0, 0);

	switch (cmd) {
	case FW_INIT_CMD_SET_AE_INFO:
		if (ae % sc->sc_ae_num == 0)
			init_shram = 1;
		if (ae >= sc->sc_ae_num) {
			tgt_id = 1;
			cluster_id = 1;
		} else {
			cluster_id = 0;
			if (sc->sc_ae_mask)
				tgt_id = 0;
			else
				tgt_id = 1;
		}
		aehdr = &initmsg->u.set_ae_info;
		aeinfo = &initmsg->u1.set_ae_info;

		aehdr->init_cmd_id = cmd;
		/* XXX does not support a sparse ae_mask */
		aehdr->init_trgt_id = ae;
		aehdr->init_ring_cluster_id = cluster_id;
		aehdr->init_qat_id = tgt_id;

		qat_adm_ring_build_slice_mask(&aehdr->init_slice_mask,
		    srv_mask, init_shram);

		qat_adm_ring_build_shram_mask(&aeinfo->init_shram_mask,
		    sc->sc_admin_rings.qadr_active_aes_per_accel,
		    ae % sc->sc_ae_num);

		break;
	case FW_INIT_CMD_SET_RING_INFO:
		ringhdr = &initmsg->u.set_ring_info;
		ringinfo = &initmsg->u1.set_ring_info;

		ringhdr->init_cmd_id = cmd;
		/* XXX does not support a sparse ae_mask */
		ringhdr->init_trgt_id = ae;

		/* XXX */
		qat_adm_ring_build_ring_table(sc, ae % sc->sc_ae_num);

		ringhdr->init_ring_tbl_sz = sizeof(struct fw_init_ring_table);

		ringinfo->init_ring_table_ptr =
		    sc->sc_admin_rings.qadr_dma.qdm_dma_seg.ds_addr +
		    ((ae % sc->sc_ae_num) * sizeof(struct fw_init_ring_table));

		break;
	default:
		return ENOTSUP;
	}

	return 0;
}

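/*
 * Send one FW_INIT message and wait for its response.  The cb
 * structure on our stack is passed as the opaque cookie;
 * qat_adm_ring_intr() fills it in and wakes us when the matching
 * response arrives.  The 1.5 second timeout guards against lost
 * responses.
 */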
int
qat_adm_ring_send_init_msg_sync(struct qat_softc *sc,
    enum fw_init_cmd_id cmd, uint32_t ae)
{
	struct fw_init_req initmsg;
	struct qat_accel_init_cb cb;
	int error;

	error = qat_adm_ring_build_init_msg(sc, &initmsg, cmd, ae, &cb);
	if (error)
		return error;

	error = qat_etr_put_msg(sc, sc->sc_admin_rings.qadr_admin_tx,
	    (uint32_t *)&initmsg);
	if (error)
		return error;

	error = tsleep(&cb, PZERO, "qat_init", hz * 3 / 2);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Timed out initializing firmware: %d\n", error);
		return error;
	}
	if (cb.qaic_status) {
		aprint_error_dev(sc->sc_dev, "Failed to initialize firmware\n");
		return EIO;
	}

	return error;
}

int
qat_adm_ring_send_init_msg(struct qat_softc *sc, enum fw_init_cmd_id cmd)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	uint32_t ae;
	int error;

	for (ae = 0; ae < sc->sc_ae_num; ae++) {
		uint8_t srv_mask = qadr->qadr_srv_mask[ae];
		switch (cmd) {
		case FW_INIT_CMD_SET_AE_INFO:
		case FW_INIT_CMD_SET_RING_INFO:
			if (!srv_mask)
				continue;
			break;
		case FW_INIT_CMD_TRNG_ENABLE:
		case FW_INIT_CMD_TRNG_DISABLE:
			if (!(srv_mask & QAT_SERVICE_CRYPTO_A))
				continue;
			break;
		default:
			return ENOTSUP;
		}

		error = qat_adm_ring_send_init_msg_sync(sc, cmd, ae);
		if (error)
			return error;
	}

	return 0;
}

int
qat_adm_ring_send_init(struct qat_softc *sc)
{
	int error;

	error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_AE_INFO);
	if (error)
		return error;

	error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_RING_INFO);
	if (error)
		return error;

	aprint_verbose_dev(sc->sc_dev, "Initialization completed\n");

	return 0;
}

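/*
 * Admin ring response handler: decode the status of a FW_INIT
 * response and wake up the thread sleeping in
 * qat_adm_ring_send_init_msg_sync().
 */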
int
qat_adm_ring_intr(struct qat_softc *sc, void *arg, void *msg)
{
	struct arch_if_resp_hdr *resp;
	struct fw_init_resp *init_resp;
	struct qat_accel_init_cb *init_cb;
	int handled = 0;

	resp = (struct arch_if_resp_hdr *)msg;

	switch (resp->resp_type) {
	case ARCH_IF_REQ_QAT_FW_INIT:
		init_resp = (struct fw_init_resp *)msg;
		init_cb = (struct qat_accel_init_cb *)
		    (uintptr_t)init_resp->comn_resp.opaque_data;
		init_cb->qaic_status =
		    __SHIFTOUT(init_resp->comn_resp.comn_status,
		    COMN_RESP_INIT_ADMIN_STATUS);
		wakeup(init_cb);
		break;
	default:
		aprint_error_dev(sc->sc_dev,
		    "unknown resp type %d\n", resp->resp_type);
		break;
	}

	return handled;
}

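/*
 * Select the cipher/auth slice pair a request will use.  Callers
 * pass the bank number modulo 2, which spreads requests across the
 * two slice pairs.
 */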
static inline uint16_t
qat_hw15_get_comn_req_flags(uint8_t ae)
{
	if (ae == 0) {
		return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
		    COMN_REQ_AUTH0_SLICE_REQUIRED |
		    COMN_REQ_CIPHER0_SLICE_REQUIRED;
	} else {
		return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
		    COMN_REQ_AUTH1_SLICE_REQUIRED |
		    COMN_REQ_CIPHER1_SLICE_REQUIRED;
	}
}

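/*
 * Write the cipher portion of a content descriptor: a
 * hw_cipher_config block immediately followed by the raw cipher key.
 * Returns the number of bytes added to the hardware configuration
 * block.
 */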
uint32_t
qat_crypto_setup_cipher_desc(struct qat_session *qs,
    struct qat_crypto_desc *desc, struct cryptoini *crie,
    struct fw_cipher_hdr *cipher_hdr, uint8_t *hw_blk_ptr,
    uint32_t hw_blk_offset, enum fw_slice next_slice)
{
	struct hw_cipher_config *cipher_config = (struct hw_cipher_config *)
	    (hw_blk_ptr + hw_blk_offset);
	uint32_t hw_blk_size;
	uint8_t *cipher_key = (uint8_t *)(cipher_config + 1);

	cipher_config->val = qat_crypto_load_cipher_cryptoini(desc, crie);
	cipher_config->reserved = 0;

	cipher_hdr->state_padding_sz = 0;
	cipher_hdr->key_sz = crie->cri_klen / 64; /* bits to quad words */

	cipher_hdr->state_sz = desc->qcd_cipher_blk_sz / 8;

	cipher_hdr->next_id = next_slice;
	cipher_hdr->curr_id = FW_SLICE_CIPHER;
	cipher_hdr->offset = hw_blk_offset / 8;
	cipher_hdr->resrvd = 0;

	hw_blk_size = sizeof(struct hw_cipher_config);

	memcpy(cipher_key, crie->cri_key, crie->cri_klen / 8);
	hw_blk_size += crie->cri_klen / 8;

	return hw_blk_size;
}

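/*
 * Write the auth portion of a content descriptor: a hw_auth_setup
 * block followed by the inner state1 and state2 buffers that
 * qat_crypto_hmac_precompute() fills in.  The slice offsets stored
 * in the fw_auth_hdr are expressed in 8-byte quadwords.  Returns the
 * number of bytes added to the hardware configuration block.
 */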
uint32_t
qat_crypto_setup_auth_desc(struct qat_session *qs, struct qat_crypto_desc *desc,
    struct cryptoini *cria, struct fw_auth_hdr *auth_hdr, uint8_t *hw_blk_ptr,
    uint32_t hw_blk_offset, enum fw_slice next_slice)
{
	struct qat_sym_hash_def const *hash_def;
	const struct swcr_auth_hash *sah;
	struct hw_auth_setup *auth_setup;
	uint32_t hw_blk_size;
	uint8_t *state1, *state2;
	uint32_t state_size;

	auth_setup = (struct hw_auth_setup *)(hw_blk_ptr + hw_blk_offset);

	auth_setup->auth_config.config =
	    qat_crypto_load_auth_cryptoini(desc, cria, &hash_def);
	sah = hash_def->qshd_alg->qshai_sah;
	auth_setup->auth_config.reserved = 0;

	/*
	 * For HMAC in mode 1, authCounter is the block size;
	 * otherwise authCounter is 0.  The firmware expects the
	 * counter in big-endian byte order.
	 */
	auth_setup->auth_counter.counter =
	    htonl(hash_def->qshd_qat->qshqi_auth_counter);
	auth_setup->auth_counter.reserved = 0;

	auth_hdr->next_id = next_slice;
	auth_hdr->curr_id = FW_SLICE_AUTH;
	auth_hdr->offset = hw_blk_offset / 8;
	auth_hdr->resrvd = 0;

	auth_hdr->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
	auth_hdr->u.inner_prefix_sz = 0;
	auth_hdr->outer_prefix_sz = 0;
	auth_hdr->final_sz = sah->auth_hash->authsize;
	auth_hdr->inner_state1_sz =
	    roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
	auth_hdr->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
	auth_hdr->inner_state2_sz =
	    roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
	auth_hdr->inner_state2_off = auth_hdr->offset +
	    ((sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz) / 8);

	hw_blk_size = sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
	    auth_hdr->inner_state2_sz;

	auth_hdr->outer_config_off = 0;
	auth_hdr->outer_state1_sz = 0;
	auth_hdr->outer_res_sz = 0;
	auth_hdr->outer_prefix_off = 0;

	state1 = (uint8_t *)(auth_setup + 1);
	state2 = state1 + auth_hdr->inner_state1_sz;

	state_size = hash_def->qshd_alg->qshai_state_size;
	if (hash_def->qshd_qat->qshqi_algo_enc == HW_AUTH_ALGO_SHA1) {
		uint32_t state1_pad_len = auth_hdr->inner_state1_sz -
		    state_size;
		uint32_t state2_pad_len = auth_hdr->inner_state2_sz -
		    state_size;
		if (state1_pad_len > 0)
			memset(state1 + state_size, 0, state1_pad_len);
		if (state2_pad_len > 0)
			memset(state2 + state_size, 0, state2_pad_len);
	}

	desc->qcd_state_storage_sz = (sizeof(struct hw_auth_counter) +
	    roundup(state_size, 8)) / 8;

	qat_crypto_hmac_precompute(desc, cria, hash_def, state1, state2);

	return hw_blk_size;
}

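/*
 * Build the per-session content descriptor and the cached bulk
 * request.  The content descriptor consists of a control block of
 * fw_cipher_hdr/fw_auth_hdr entries followed by the hardware
 * configuration blocks they refer to, laid out in the order given by
 * the qcd_slices chain.  The resulting fw_la_bulk_req is cached in
 * the session and copied into each request by
 * qat_hw15_crypto_setup_req_params().
 */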
void
qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
    struct qat_crypto_desc *desc,
    struct cryptoini *crie, struct cryptoini *cria)
{
	struct fw_cipher_hdr *cipher_hdr;
	struct fw_auth_hdr *auth_hdr;
	struct fw_la_bulk_req *req_cache;
	uint32_t ctrl_blk_size = 0, ctrl_blk_offset = 0, hw_blk_offset = 0;
	int i;
	uint16_t la_cmd_flags = 0;
	uint8_t req_params_sz = 0;
	uint8_t *ctrl_blk_ptr;
	uint8_t *hw_blk_ptr;

	if (crie != NULL)
		ctrl_blk_size += sizeof(struct fw_cipher_hdr);
	if (cria != NULL)
		ctrl_blk_size += sizeof(struct fw_auth_hdr);

	ctrl_blk_ptr = desc->qcd_content_desc;
	hw_blk_ptr = ctrl_blk_ptr + ctrl_blk_size;

	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			cipher_hdr = (struct fw_cipher_hdr *)(ctrl_blk_ptr +
			    ctrl_blk_offset);
			ctrl_blk_offset += sizeof(struct fw_cipher_hdr);
			hw_blk_offset += qat_crypto_setup_cipher_desc(qs, desc,
			    crie, cipher_hdr, hw_blk_ptr, hw_blk_offset,
			    desc->qcd_slices[i + 1]);
			req_params_sz += sizeof(struct fw_la_cipher_req_params);
			break;
		case FW_SLICE_AUTH:
			auth_hdr = (struct fw_auth_hdr *)(ctrl_blk_ptr +
			    ctrl_blk_offset);
			ctrl_blk_offset += sizeof(struct fw_auth_hdr);
			hw_blk_offset += qat_crypto_setup_auth_desc(qs, desc,
			    cria, auth_hdr, hw_blk_ptr, hw_blk_offset,
			    desc->qcd_slices[i + 1]);
			req_params_sz += sizeof(struct fw_la_auth_req_params);
			la_cmd_flags |= LA_FLAGS_RET_AUTH_RES;
			/* no digest verify */
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			KASSERT(0);
			break;
		}
	}

	desc->qcd_hdr_sz = ctrl_blk_offset / 8;
	desc->qcd_hw_blk_sz = hw_blk_offset / 8;

	req_cache = (struct fw_la_bulk_req *)desc->qcd_req_cache;
	qat_msg_req_type_populate(&req_cache->comn_hdr.arch_if,
	    ARCH_IF_REQ_QAT_FW_LA, 0);

	la_cmd_flags |= LA_FLAGS_PROTO_NO;

	qat_msg_params_populate(req_cache, desc, req_params_sz,
	    la_cmd_flags, 0);

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "qcd_content_desc",
	    desc->qcd_content_desc, sizeof(desc->qcd_content_desc));
	qat_dump_raw(QAT_DUMP_DESC, "qcd_req_cache",
	    &desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
#endif

	bus_dmamap_sync(qcy->qcy_sc->sc_dmat,
	    qcy->qcy_session_dmamems[qs->qs_lid].qdm_dma_map, 0,
	    sizeof(struct qat_session), BUS_DMASYNC_PREWRITE);
}

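/*
 * Fill in the per-request message: copy the session's cached bulk
 * request, point it at this request's buffer list and cookie, and
 * append cipher and/or auth request parameter blocks describing the
 * regions of the buffer to process.
 */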
void
qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
    struct qat_session *qs, struct qat_crypto_desc const *desc,
    struct qat_sym_cookie *qsc, struct cryptodesc *crde,
    struct cryptodesc *crda, bus_addr_t icv_paddr)
{
	struct qat_sym_bulk_cookie *qsbc;
	struct fw_la_bulk_req *bulk_req;
	struct fw_la_cipher_req_params *cipher_req;
	struct fw_la_auth_req_params *auth_req;
	uint32_t req_params_offset = 0;
	uint8_t *req_params_ptr;
	enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
	enum fw_slice next_slice;

	qsbc = &qsc->u.qsc_bulk_cookie;

	bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
	memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);
	bulk_req->comn_hdr.arch_if.resp_pipe_id = qcb->qcb_sym_rx->qr_ring_id;
	bulk_req->comn_hdr.comn_req_flags =
	    qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2);
	bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
	bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
	bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
	bulk_req->comn_ftr.next_request_addr = 0;
	bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;

	if (icv_paddr != 0)
		bulk_req->comn_la_req.u.la_flags |= LA_FLAGS_DIGEST_IN_BUFFER;

	req_params_ptr = qsbc->qsbc_req_params_buf;

	if (cmd_id != FW_LA_CMD_AUTH) {
		cipher_req = (struct fw_la_cipher_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_cipher_req_params);

		if (cmd_id == FW_LA_CMD_CIPHER ||
		    cmd_id == FW_LA_CMD_HASH_CIPHER)
			next_slice = FW_SLICE_DRAM_WR;
		else
			next_slice = FW_SLICE_AUTH;

		cipher_req->resrvd = 0;

		cipher_req->cipher_state_sz = desc->qcd_cipher_blk_sz / 8;

		cipher_req->curr_id = FW_SLICE_CIPHER;
		cipher_req->next_id = next_slice;

		cipher_req->resrvd1 = 0;

		cipher_req->cipher_off = crde->crd_skip;
		cipher_req->cipher_len = crde->crd_len;
		cipher_req->state_address = qsc->qsc_iv_buf_paddr;
	}
	if (cmd_id != FW_LA_CMD_CIPHER) {
		auth_req = (struct fw_la_auth_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_auth_req_params);

		if (cmd_id == FW_LA_CMD_HASH_CIPHER)
			next_slice = FW_SLICE_CIPHER;
		else
			next_slice = FW_SLICE_DRAM_WR;

		auth_req->next_id = next_slice;
		auth_req->curr_id = FW_SLICE_AUTH;

		auth_req->auth_res_address = icv_paddr;
		auth_req->auth_res_sz = 0; /* no digest verify */

		auth_req->auth_len = crda->crd_len;
		auth_req->auth_off = crda->crd_skip;

		auth_req->hash_state_sz = 0;
		auth_req->u1.prefix_addr = desc->qcd_hash_state_paddr +
		    desc->qcd_state_storage_sz;

		auth_req->u.resrvd = 0;
	}

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "req_params", req_params_ptr,
	    req_params_offset);
#endif
}