/*	$NetBSD: drm_dp_mst_topology.c,v 1.2 2018/08/27 04:58:19 riastradh Exp $	*/

/*
 * Copyright 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.2 2018/08/27 04:58:19 riastradh Exp $");

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
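/*
 * Rough driver-side flow (a sketch, not lifted from any one driver; the
 * entry points named here are the ones exported by this helper):
 *
 *	drm_dp_mst_topology_mgr_init(mgr, dev, aux, max_dpcd, max_payloads, id);
 *	...
 *	drm_dp_mst_topology_mgr_set_mst(mgr, true);   - when an MST hub is detected
 *	...
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);       - from the short-HPD IRQ path
 */
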
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
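
/*
 * Example (illustrative): the header CRC above is plain bitwise long
 * division over 4-bit symbols with generator polynomial x^4 + x + 1 (the
 * 0x13 constant, 0b10011).  For a 3-byte header the caller passes
 * num_nibbles = (3 * 2) - 1 = 5, i.e. every header nibble except the low
 * nibble of the last byte, which is where the CRC itself is stored.
 */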

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
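
/*
 * Example (illustrative): same long-division scheme as above, but over whole
 * bytes with the 9-bit generator polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1
 * (remainder ^ 0xd5 whenever bit 8 is set).  This is the CRC appended to
 * each sideband message body chunk by drm_dp_crc_sideband_chunk_req() below.
 */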
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
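
/*
 * On-the-wire header layout produced above (derived from the encoder; a
 * header with LCT <= 1 carries no RAD bytes and is 3 bytes long):
 *
 *	byte 0:           LCT[7:4] | LCR[3:0]
 *	bytes 1..lct/2:   RAD, one nibble per hop
 *	next byte:        broadcast[7] | path_msg[6] | msg_len[5:0]
 *	last byte:        SOMT[7] | EOMT[6] | seqno[4] | CRC4[3:0]
 */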

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}
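
/*
 * Note: the helper above appends the data CRC one byte past 'len', so the
 * caller must leave room for len + 1 bytes in 'msg' (the 48-byte sideband
 * transfer budget in process_single_tx_qlock() below accounts for this).
 */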

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
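
/*
 * Accounting note (derived from the code above): hdr.msg_len counts the body
 * bytes of a chunk *including* its trailing data CRC byte, which is why the
 * copy into the assembled message uses curchunk_len - 1.  The CRC computed
 * here is not actually compared against the received CRC byte.
 */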

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	bool ret;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	return ret;
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}

static void drm_dp_free_mst_port(struct kref *kref);

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	if (mstb->port_parent) {
		if (list_empty(&mstb->port_parent->next))
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
	}
	kfree(mstb);
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * init kref again to be used by ports to remove mst branch when it is
	 * not needed anymore
	 */
	kref_init(kref);

	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
		kref_get(&mstb->port_parent->kref);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up(&mstb->mgr->tx_waitq);

	kref_put(kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}


static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_put_mst_branch_device(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		port->vcpi.num_slots = 0;

		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			kref_get(&port->parent->kref);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up the vcpi here: if we have no
		 * connector we never set one up */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	kfree(port);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

/*
 * Calculate a new RAD for this MST branch device.
 * If the parent has an LCT of 2, it has 1 nibble of RAD;
 * if the parent has an LCT of 3, it has 2 nibbles of RAD, and so on.
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
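
/*
 * Worked example (illustrative): a branch hanging off port 2 of the primary
 * (LCT 1) gets LCT 2 and rad[0] = 0x20.  A further branch on its port 5
 * gets LCT 3 and rad[0] = 0x25 - each extra hop appends one nibble, high
 * nibble first.
 */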

/*
 * Returns true if a link address should be sent to the new mstb.
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
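
/*
 * Example (illustrative): with conn_base_id 5, a port 2 on a branch reached
 * via port 1 of the primary yields the connector path property "mst:5-1-2".
 */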

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							       u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	kref_get(&mstb->kref);
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);

	if (mstb)
		kref_get(&mstb->kref);

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		kref_get(&mstb->kref);
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
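
/*
 * Sequence-number note (from the logic above): each branch device has two
 * outstanding-transaction slots, so seqno is a single bit; a new message
 * grabs a free slot, while replies keep the seqno of the request they
 * answer.
 */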
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}
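
/*
 * Chunk-size example (illustrative): for a directly attached branch (LCT 1)
 * the header is 3 bytes, so one 48-byte transfer carries up to
 * 48 - 3 - 1 = 44 body bytes; longer messages are split across chunks with
 * SOMT set on the first and EOMT on the last.
 */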

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent; it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up(&mgr->tx_waitq);
	}
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									 struct drm_dp_mst_branch *mstb,
									 int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);

		if (found_port) {
			rmstb = found_port->parent;
			kref_get(&rmstb->kref);
			*port_num = found_port->port_num;
		}
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;

	port_num = port->port_num;
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

		if (!mstb) {
			drm_dp_put_port(port);
			return -EINVAL;
		}
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	drm_dp_put_port(port);
	return ret;
}

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this, the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	int i, j;
	int cur_slots = 1;
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			port = drm_dp_get_validated_port_ref(mgr, port);
			if (!port) {
				mutex_unlock(&mgr->payload_lock);
				return -EINVAL;
			}
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;
		}
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
				mgr->payloads[i].vcpi = req_payload.vcpi;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;
			}
			mgr->payloads[i].payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (port)
			drm_dp_put_port(port);
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);
				} else {
					clear_bit(j + 1, &mgr->payload_mask);
				}
			}
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);

		}
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
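
/*
 * Typical payload update sequence (a sketch; drm_dp_check_act_status() is
 * another helper exported by this file):
 *
 *	drm_dp_update_payload_part1(mgr);
 *	  ... program the source's VC payload table / trigger ACT ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */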
1864
1865 /**
1866 * drm_dp_update_payload_part2() - Execute payload update part 2
1867 * @mgr: manager to use.
1868 *
1869 * This iterates over all proposed virtual channels, and tries to
1870 * allocate space in the link for them. For 0->slots transitions,
1871 * this step writes the remote VC payload commands. For slots->0
1872 * this just resets some internal state.
1873 */
1874 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1875 {
1876 struct drm_dp_mst_port *port;
1877 int i;
1878 int ret = 0;
1879 mutex_lock(&mgr->payload_lock);
1880 for (i = 0; i < mgr->max_payloads; i++) {
1881
1882 if (!mgr->proposed_vcpis[i])
1883 continue;
1884
1885 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1886
1887 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1888 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
1889 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1890 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1891 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1892 }
1893 if (ret) {
1894 mutex_unlock(&mgr->payload_lock);
1895 return ret;
1896 }
1897 }
1898 mutex_unlock(&mgr->payload_lock);
1899 return 0;
1900 }
1901 EXPORT_SYMBOL(drm_dp_update_payload_part2);
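
/*
 * Example: the usual driver-side ordering of the two payload-update
 * phases around the ACT sequence. This is only a sketch; my_dp and
 * my_trigger_act() are hypothetical stand-ins for the driver's own
 * state and its hardware-specific way of sending the ACT.
 */
#if 0
static int my_commit_mst_payloads(struct my_dp *dp)
{
	struct drm_dp_mst_topology_mgr *mgr = &dp->mst_mgr;
	int ret;

	/* phase 1: write the new allocations to the payload table */
	ret = drm_dp_update_payload_part1(mgr);
	if (ret)
		return ret;

	my_trigger_act(dp);			/* hw-specific ACT trigger */

	ret = drm_dp_check_act_status(mgr);	/* poll for ACT handled */
	if (ret)
		return ret;

	/* phase 2: send the remote VC payload commands */
	return drm_dp_update_payload_part2(mgr);
}
#endif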
1902
1903 #if 0 /* not used yet */
1904 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1905 struct drm_dp_mst_port *port,
1906 int offset, int size)
1907 {
1908 int len;
1909 struct drm_dp_sideband_msg_tx *txmsg;
1910
1911 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1912 if (!txmsg)
1913 return -ENOMEM;
1914
1915 len = build_dpcd_read(txmsg, port->port_num, offset, size);
1916 txmsg->dst = port->parent;
1917
1918 drm_dp_queue_down_tx(mgr, txmsg);
1919
1920 return 0;
1921 }
1922 #endif
1923
1924 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1925 struct drm_dp_mst_port *port,
1926 int offset, int size, u8 *bytes)
1927 {
1928 int len;
1929 int ret;
1930 struct drm_dp_sideband_msg_tx *txmsg;
1931 struct drm_dp_mst_branch *mstb;
1932
1933 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1934 if (!mstb)
1935 return -EINVAL;
1936
1937 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1938 if (!txmsg) {
1939 ret = -ENOMEM;
1940 goto fail_put;
1941 }
1942
1943 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1944 txmsg->dst = mstb;
1945
1946 drm_dp_queue_down_tx(mgr, txmsg);
1947
1948 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1949 if (ret > 0) {
1950 if (txmsg->reply.reply_type == 1) /* NAK */
1951 ret = -EINVAL;
1952 else
1953 ret = 0;
1954 }
1955 kfree(txmsg);
1956 fail_put:
1957 drm_dp_put_mst_branch_device(mstb);
1958 return ret;
1959 }
1960
1961 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
1962 {
1963 struct drm_dp_sideband_msg_reply_body reply;
1964
1965 reply.reply_type = 0; /* 0 == ACK */
1966 reply.req_type = req_type;
1967 drm_dp_encode_sideband_reply(&reply, msg);
1968 return 0;
1969 }
1970
1971 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1972 struct drm_dp_mst_branch *mstb,
1973 int req_type, int seqno, bool broadcast)
1974 {
1975 struct drm_dp_sideband_msg_tx *txmsg;
1976
1977 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1978 if (!txmsg)
1979 return -ENOMEM;
1980
1981 txmsg->dst = mstb;
1982 txmsg->seqno = seqno;
1983 drm_dp_encode_up_ack_reply(txmsg, req_type);
1984
1985 mutex_lock(&mgr->qlock);
1986
1987 process_single_up_tx_qlock(mgr, txmsg);
1988
1989 mutex_unlock(&mgr->qlock);
1990
1991 kfree(txmsg);
1992 return 0;
1993 }
1994
1995 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
1996 int dp_link_count,
1997 int *out)
1998 {
1999 switch (dp_link_bw) {
2000 default:
2001 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2002 dp_link_bw, dp_link_count);
2003 return false;
2004
2005 case DP_LINK_BW_1_62:
2006 *out = 3 * dp_link_count;
2007 break;
2008 case DP_LINK_BW_2_7:
2009 *out = 5 * dp_link_count;
2010 break;
2011 case DP_LINK_BW_5_4:
2012 *out = 10 * dp_link_count;
2013 break;
2014 }
2015 return true;
2016 }
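
/*
 * The per-link-rate constants above are PBN per timeslot per lane:
 * one PBN is 54/64 MBytes/sec, a 1.62 Gbps lane carries 162
 * MBytes/sec after 8b/10b coding, and the link is divided into 64
 * timeslots, so one slot on one lane carries (162 / 64) / (54 / 64)
 * = 3 PBN. Scaling linearly gives 5 at 2.7 Gbps and 10 at 5.4 Gbps.
 */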
2017
2018 /**
2019 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2020 * @mgr: manager to set state for
2021 * @mst_state: true to enable MST on this connector - false to disable.
2022 *
2023 * This is called by the driver when it detects an MST capable device plugged
2024 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2025 */
2026 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2027 {
2028 int ret = 0;
2029 struct drm_dp_mst_branch *mstb = NULL;
2030
2031 mutex_lock(&mgr->lock);
2032 if (mst_state == mgr->mst_state)
2033 goto out_unlock;
2034
2035 mgr->mst_state = mst_state;
2036 /* set the device into MST mode */
2037 if (mst_state) {
2038 WARN_ON(mgr->mst_primary);
2039
2040 /* get dpcd info */
2041 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2042 if (ret != DP_RECEIVER_CAP_SIZE) {
2043 DRM_DEBUG_KMS("failed to read DPCD\n");
ret = -EINVAL; /* don't let a short positive read look like success */
2044 goto out_unlock;
2045 }
2046
2047 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2048 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2049 &mgr->pbn_div)) {
2050 ret = -EINVAL;
2051 goto out_unlock;
2052 }
2053
2054 mgr->total_pbn = 2560;
2055 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
2056 mgr->avail_slots = mgr->total_slots;
2057
2058 /* add initial branch device at LCT 1 */
2059 mstb = drm_dp_add_mst_branch_device(1, NULL);
2060 if (mstb == NULL) {
2061 ret = -ENOMEM;
2062 goto out_unlock;
2063 }
2064 mstb->mgr = mgr;
2065
2066 /* give this the main reference */
2067 mgr->mst_primary = mstb;
2068 kref_get(&mgr->mst_primary->kref);
2069
2070 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2071 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2072 if (ret < 0) {
2073 goto out_unlock;
2074 }
2075
2076 {
2077 struct drm_dp_payload reset_pay;
2078 reset_pay.start_slot = 0;
2079 reset_pay.num_slots = 0x3f;
2080 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2081 }
2082
2083 queue_work(system_long_wq, &mgr->work);
2084
2085 ret = 0;
2086 } else {
2087 /* disable MST on the device */
2088 mstb = mgr->mst_primary;
2089 mgr->mst_primary = NULL;
2090 /* this can fail if the device is gone */
2091 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2092 ret = 0;
2093 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
2094 mgr->payload_mask = 0;
2095 set_bit(0, &mgr->payload_mask);
2096 mgr->vcpi_mask = 0;
2097 }
2098
2099 out_unlock:
2100 mutex_unlock(&mgr->lock);
2101 if (mstb)
2102 drm_dp_put_mst_branch_device(mstb);
2103 return ret;
2105 }
2106 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
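
/*
 * Example: a long-pulse hotplug handler deciding whether to light up
 * MST. A sketch only - my_dp is hypothetical, and my_can_do_mst()
 * stands for the driver's check of the DP_MST_CAP bit in the sink's
 * DP_MSTM_CAP register.
 */
#if 0
static void my_hpd_long_pulse(struct my_dp *dp, bool plugged)
{
	if (plugged && my_can_do_mst(dp))
		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, true);
	else if (dp->mst_mgr.mst_state)
		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
}
#endif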
2107
2108 /**
2109 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2110 * @mgr: manager to suspend
2111 *
2112 * This function tells the MST device that we can't handle UP messages
2113 * anymore. This should stop it from sending any since we are suspended.
2114 */
2115 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2116 {
2117 mutex_lock(&mgr->lock);
2118 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2119 DP_MST_EN | DP_UPSTREAM_IS_SRC);
2120 mutex_unlock(&mgr->lock);
2121 flush_work(&mgr->work);
2122 flush_work(&mgr->destroy_connector_work);
2123 }
2124 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2125
2126 /**
2127 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2128 * @mgr: manager to resume
2129 *
2130 * This will fetch the DPCD and check that the device is still there;
2131 * if it is, it will rewrite the MSTM control bits and return.
2132 *
2133 * If the device fails this returns -1, and the driver should do
2134 * a full MST reprobe, in case we were undocked.
2135 */
2136 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2137 {
2138 int ret = 0;
2139
2140 mutex_lock(&mgr->lock);
2141
2142 if (mgr->mst_primary) {
2143 int sret;
2144 u8 guid[16];
2145
2146 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2147 if (sret != DP_RECEIVER_CAP_SIZE) {
2148 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2149 ret = -1;
2150 goto out_unlock;
2151 }
2152
2153 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2154 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2155 if (ret < 0) {
2156 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2157 ret = -1;
2158 goto out_unlock;
2159 }
2160
2161 /* Some hubs forget their guids after they resume */
2162 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2163 if (sret != 16) {
2164 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2165 ret = -1;
2166 goto out_unlock;
2167 }
2168 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2169
2170 ret = 0;
2171 } else
2172 ret = -1;
2173
2174 out_unlock:
2175 mutex_unlock(&mgr->lock);
2176 return ret;
2177 }
2178 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
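
/*
 * Example: wiring the two helpers above into a driver's PM hooks.
 * A sketch with a hypothetical my_dp; note the documented -1 return
 * from resume, which asks for a full reprobe.
 */
#if 0
static void my_dp_suspend(struct my_dp *dp)
{
	if (dp->is_mst)
		drm_dp_mst_topology_mgr_suspend(&dp->mst_mgr);
}

static void my_dp_resume(struct my_dp *dp)
{
	if (dp->is_mst &&
	    drm_dp_mst_topology_mgr_resume(&dp->mst_mgr) < 0) {
		/* sink vanished or changed while asleep - tear down
		   and let the next detect cycle reprobe from scratch */
		dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
	}
}
#endif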
2179
2180 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2181 {
2182 int len;
2183 u8 replyblock[32];
2184 int replylen, origlen, curreply;
2185 int ret;
2186 struct drm_dp_sideband_msg_rx *msg;
2187 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2188 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2189
2190 len = min(mgr->max_dpcd_transaction_bytes, 16);
2191 ret = drm_dp_dpcd_read(mgr->aux, basereg,
2192 replyblock, len);
2193 if (ret != len) {
2194 DRM_DEBUG_KMS("failed to read DPCD sideband msg: len %d ret %d\n", len, ret);
2195 return false;
2196 }
2197 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2198 if (!ret) {
2199 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2200 return false;
2201 }
2202 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2203
2204 origlen = replylen;
2205 replylen -= len;
2206 curreply = len;
2207 while (replylen > 0) {
2208 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2209 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2210 replyblock, len);
2211 if (ret != len) {
2212 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2213 len, ret);
2214 return false;
2215 }
2216
2217 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2218 if (!ret) {
2219 DRM_DEBUG_KMS("failed to build sideband msg\n");
2220 return false;
2221 }
2222
2223 curreply += len;
2224 replylen -= len;
2225 }
2226 return true;
2227 }
2228
2229 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2230 {
2231 int ret = 0;
2232
2233 if (!drm_dp_get_one_sb_msg(mgr, false)) {
2234 memset(&mgr->down_rep_recv, 0,
2235 sizeof(struct drm_dp_sideband_msg_rx));
2236 return 0;
2237 }
2238
2239 if (mgr->down_rep_recv.have_eomt) {
2240 struct drm_dp_sideband_msg_tx *txmsg;
2241 struct drm_dp_mst_branch *mstb;
2242 int slot = -1;
2243 mstb = drm_dp_get_mst_branch_device(mgr,
2244 mgr->down_rep_recv.initial_hdr.lct,
2245 mgr->down_rep_recv.initial_hdr.rad);
2246
2247 if (!mstb) {
2248 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2249 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2250 return 0;
2251 }
2252
2253 /* find the message */
2254 slot = mgr->down_rep_recv.initial_hdr.seqno;
2255 mutex_lock(&mgr->qlock);
2256 txmsg = mstb->tx_slots[slot];
2257 /* remove from slots */
2258 mutex_unlock(&mgr->qlock);
2259
2260 if (!txmsg) {
2261 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2262 mstb,
2263 mgr->down_rep_recv.initial_hdr.seqno,
2264 mgr->down_rep_recv.initial_hdr.lct,
2265 mgr->down_rep_recv.initial_hdr.rad[0],
2266 mgr->down_rep_recv.msg[0]);
2267 drm_dp_put_mst_branch_device(mstb);
2268 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2269 return 0;
2270 }
2271
2272 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2273 if (txmsg->reply.reply_type == 1) {
2274 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2275 }
2276
2277 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2278 
2279 mutex_lock(&mgr->qlock);
2280 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2281 mstb->tx_slots[slot] = NULL;
2282 mutex_unlock(&mgr->qlock);
2283 /* drop the mstb ref only after its tx slot entry has been cleared */
drm_dp_put_mst_branch_device(mstb);
2284
2285 wake_up(&mgr->tx_waitq);
2286 }
2287 return ret;
2288 }
2289
2290 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2291 {
2292 int ret = 0;
2293
2294 if (!drm_dp_get_one_sb_msg(mgr, true)) {
2295 memset(&mgr->up_req_recv, 0,
2296 sizeof(struct drm_dp_sideband_msg_rx));
2297 return 0;
2298 }
2299
2300 if (mgr->up_req_recv.have_eomt) {
2301 struct drm_dp_sideband_msg_req_body msg;
2302 struct drm_dp_mst_branch *mstb = NULL;
2303 bool seqno;
2304
2305 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2306 mstb = drm_dp_get_mst_branch_device(mgr,
2307 mgr->up_req_recv.initial_hdr.lct,
2308 mgr->up_req_recv.initial_hdr.rad);
2309 if (!mstb) {
2310 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2311 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2312 return 0;
2313 }
2314 }
2315
2316 seqno = mgr->up_req_recv.initial_hdr.seqno;
2317 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2318
2319 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2320 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2321
2322 if (!mstb)
2323 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2324
2325 if (!mstb) {
2326 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2327 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2328 return 0;
2329 }
2330
2331 drm_dp_update_port(mstb, &msg.u.conn_stat);
2332
2333 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2334 (*mgr->cbs->hotplug)(mgr);
2335
2336 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2337 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2338 if (!mstb)
2339 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
2340
2341 if (!mstb) {
2342 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2343 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2344 return 0;
2345 }
2346
2347 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2348 }
2349
2350 if (mstb)
2351 drm_dp_put_mst_branch_device(mstb);
2352
2353 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2354 }
2355 return ret;
2356 }
2357
2358 /**
2359 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2360 * @mgr: manager to notify irq for.
2361 * @esi: 4 bytes from SINK_COUNT_ESI
2362 * @handled: whether the hpd interrupt was consumed or not
2363 *
2364 * This should be called from the driver when it detects a short IRQ,
2365 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2366 * topology manager will process the sideband messages received as a result
2367 * of this.
2368 */
2369 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2370 {
2371 int ret = 0;
2372 int sc;
2373 *handled = false;
2374 sc = esi[0] & 0x3f;
2375
2376 if (sc != mgr->sink_count) {
2377 mgr->sink_count = sc;
2378 *handled = true;
2379 }
2380
2381 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2382 ret = drm_dp_mst_handle_down_rep(mgr);
2383 *handled = true;
2384 }
2385
2386 if (esi[1] & DP_UP_REQ_MSG_RDY) {
2387 ret |= drm_dp_mst_handle_up_req(mgr);
2388 *handled = true;
2389 }
2390
2391 drm_dp_mst_kick_tx(mgr);
2392 return ret;
2393 }
2394 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
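
/*
 * Example: servicing a short HPD pulse. A sketch of the usual loop:
 * read DP_SINK_COUNT_ESI and the following ESI bytes, hand them to
 * drm_dp_mst_hpd_irq(), then ack the handled bits by writing them
 * back. my_dp is hypothetical.
 */
#if 0
static void my_dp_short_pulse(struct my_dp *dp)
{
	u8 esi[16];
	bool handled;

	for (;;) {
		if (drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT_ESI,
				     esi, 14) != 14)
			break;
		drm_dp_mst_hpd_irq(&dp->mst_mgr, esi, &handled);
		if (!handled)
			break;
		/* write the serviced bits back to ack the IRQ */
		drm_dp_dpcd_write(&dp->aux, DP_SINK_COUNT_ESI + 1,
				  &esi[1], 3);
	}
}
#endif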
2395
2396 /**
2397 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: connector to report the status on
2398 * @mgr: manager for this port
2399 * @port: unverified pointer to a port
2400 *
2401 * This returns the current connection state for a port. It validates the
2402 * port pointer still exists so the caller doesn't require a reference.
2403 */
2404 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2405 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2406 {
2407 enum drm_connector_status status = connector_status_disconnected;
2408
2409 /* we need to search for the port in the mgr in case it's gone */
2410 port = drm_dp_get_validated_port_ref(mgr, port);
2411 if (!port)
2412 return connector_status_disconnected;
2413
2414 if (!port->ddps)
2415 goto out;
2416
2417 switch (port->pdt) {
2418 case DP_PEER_DEVICE_NONE:
2419 case DP_PEER_DEVICE_MST_BRANCHING:
2420 break;
2421
2422 case DP_PEER_DEVICE_SST_SINK:
2423 status = connector_status_connected;
2424 /* for logical ports - cache the EDID */
2425 if (port->port_num >= 8 && !port->cached_edid) {
2426 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2427 }
2428 break;
2429 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2430 if (port->ldps)
2431 status = connector_status_connected;
2432 break;
2433 }
2434 out:
2435 drm_dp_put_port(port);
2436 return status;
2437 }
2438 EXPORT_SYMBOL(drm_dp_mst_detect_port);
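
/*
 * Example: the ->detect() hook of an MST connector is usually just a
 * thin wrapper around the helper above. A sketch; the accessors for
 * the port and manager behind the connector are hypothetical.
 */
#if 0
static enum drm_connector_status
my_mst_detect(struct drm_connector *connector, bool force)
{
	struct drm_dp_mst_port *port = my_connector_to_port(connector);
	struct drm_dp_mst_topology_mgr *mgr = my_connector_to_mgr(connector);

	return drm_dp_mst_detect_port(connector, mgr, port);
}
#endif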
2439
2440 /**
2441 * drm_dp_mst_get_edid() - get EDID for an MST port
2442 * @connector: toplevel connector to get EDID for
2443 * @mgr: manager for this port
2444 * @port: unverified pointer to a port.
2445 *
2446 * This returns an EDID for the port connected to a connector.
2447 * It validates the pointer still exists so the caller doesn't require a
2448 * reference.
2449 */
2450 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2451 {
2452 struct edid *edid = NULL;
2453
2454 /* we need to search for the port in the mgr in case it's gone */
2455 port = drm_dp_get_validated_port_ref(mgr, port);
2456 if (!port)
2457 return NULL;
2458
2459 if (port->cached_edid)
2460 edid = drm_edid_duplicate(port->cached_edid);
2461 else {
2462 edid = drm_get_edid(connector, &port->aux.ddc);
2463 drm_mode_connector_set_tile_property(connector);
2464 }
2465 drm_dp_put_port(port);
2466 return edid;
2467 }
2468 EXPORT_SYMBOL(drm_dp_mst_get_edid);
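
/*
 * Example: ->get_modes() for an MST connector, built on the helper
 * above. A sketch; the accessors are hypothetical as before, and the
 * duplicated EDID must be freed by the caller.
 */
#if 0
static int my_mst_get_modes(struct drm_connector *connector)
{
	struct edid *edid;
	int ret;

	edid = drm_dp_mst_get_edid(connector,
				   my_connector_to_mgr(connector),
				   my_connector_to_port(connector));
	if (!edid)
		return 0;

	drm_mode_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return ret;
}
#endif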
2469
2470 /**
2471 * drm_dp_find_vcpi_slots() - find slots for this PBN value
2472 * @mgr: manager to use
2473 * @pbn: payload bandwidth to convert into slots.
2474 */
2475 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2476 int pbn)
2477 {
2478 int num_slots;
2479
2480 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2481
2482 if (num_slots > mgr->avail_slots)
2483 return -ENOSPC;
2484 return num_slots;
2485 }
2486 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
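
/*
 * Worked example: on a 4-lane HBR2 link, drm_dp_get_vc_payload_bw()
 * above yields pbn_div = 10 * 4 = 40, so a 689 PBN stream needs
 * DIV_ROUND_UP(689, 40) = 18 of the 64 timeslots.
 */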
2487
2488 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2489 struct drm_dp_vcpi *vcpi, int pbn)
2490 {
2491 int num_slots;
2492 int ret;
2493
2494 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2495
2496 if (num_slots > mgr->avail_slots)
2497 return -ENOSPC;
2498
2499 vcpi->pbn = pbn;
2500 vcpi->aligned_pbn = num_slots * mgr->pbn_div;
2501 vcpi->num_slots = num_slots;
2502
2503 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2504 if (ret < 0)
2505 return ret;
2506 return 0;
2507 }
2508
2509 /**
2510 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2511 * @mgr: manager for this port
2512 * @port: port to allocate a virtual channel for.
2513 * @pbn: payload bandwidth number to request
2514 * @slots: returned number of slots for this PBN.
2515 */
2516 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
2517 {
2518 int ret;
2519
2520 port = drm_dp_get_validated_port_ref(mgr, port);
2521 if (!port)
2522 return false;
2523
2524 if (port->vcpi.vcpi > 0) {
2525 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2526 if (pbn == port->vcpi.pbn) {
2527 *slots = port->vcpi.num_slots;
2528 drm_dp_put_port(port);
2529 return true;
2530 }
2531 }
2532
2533 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
2534 if (ret) {
2535 DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
2536 goto out;
2537 }
2538 DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
2539 *slots = port->vcpi.num_slots;
2540
2541 drm_dp_put_port(port);
2542 return true;
2543 out:
2544 drm_dp_put_port(port); /* drop the validated ref on the error path too */
 return false;
2545 }
2546 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
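
/*
 * Example: allocating the virtual channel while enabling a stream.
 * A sketch; my_dp, bpp and adjusted_mode come from the driver's
 * mode-set state, and programming the slots into the hardware is
 * elided (see my_commit_mst_payloads() above for the table commit).
 */
#if 0
static bool my_mst_enable_stream(struct my_dp *dp,
				 struct drm_dp_mst_port *port,
				 struct drm_display_mode *adjusted_mode,
				 int bpp)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
	if (!drm_dp_mst_allocate_vcpi(&dp->mst_mgr, port, pbn, &slots))
		return false;

	/* program <slots> timeslots into the source hw, then commit */
	return true;
}
#endif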
2547
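/**
 * drm_dp_mst_get_vcpi_slots() - get the slot count for a port's VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * Returns the current number of slots, or 0 if the port is no
 * longer valid.
 */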
2548 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2549 {
2550 int slots = 0;
2551 port = drm_dp_get_validated_port_ref(mgr, port);
2552 if (!port)
2553 return slots;
2554
2555 slots = port->vcpi.num_slots;
2556 drm_dp_put_port(port);
2557 return slots;
2558 }
2559 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
2560
2561 /**
2562 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2563 * @mgr: manager for this port
2564 * @port: unverified pointer to a port.
2565 *
2566 * This just resets the number of slots for the port's VCPI for later programming.
2567 */
2568 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2569 {
2570 port = drm_dp_get_validated_port_ref(mgr, port);
2571 if (!port)
2572 return;
2573 port->vcpi.num_slots = 0;
2574 drm_dp_put_port(port);
2575 }
2576 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2577
2578 /**
2579 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2580 * @mgr: manager for this port
2581 * @port: unverified port to deallocate vcpi for
2582 */
2583 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2584 {
2585 port = drm_dp_get_validated_port_ref(mgr, port);
2586 if (!port)
2587 return;
2588
2589 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2590 port->vcpi.num_slots = 0;
2591 port->vcpi.pbn = 0;
2592 port->vcpi.aligned_pbn = 0;
2593 port->vcpi.vcpi = 0;
2594 drm_dp_put_port(port);
2595 }
2596 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
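
/*
 * Example: tearing a stream down. The helpers are split so a driver
 * can zero the slot count and shrink the payload table while the
 * link is still up, and only release the VCPI at the end. A sketch
 * with a hypothetical my_dp; hardware disable steps are elided.
 */
#if 0
static void my_mst_disable_stream(struct my_dp *dp,
				  struct drm_dp_mst_port *port)
{
	drm_dp_mst_reset_vcpi_slots(&dp->mst_mgr, port);
	drm_dp_update_payload_part1(&dp->mst_mgr);

	/* ... disable the stream in hw and send the ACT here ... */

	drm_dp_check_act_status(&dp->mst_mgr);
	drm_dp_update_payload_part2(&dp->mst_mgr);
	drm_dp_mst_deallocate_vcpi(&dp->mst_mgr, port);
}
#endif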
2597
2598 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2599 int id, struct drm_dp_payload *payload)
2600 {
2601 u8 payload_alloc[3], status;
2602 int ret;
2603 int retries = 0;
2604
2605 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2606 DP_PAYLOAD_TABLE_UPDATED);
2607
2608 payload_alloc[0] = id;
2609 payload_alloc[1] = payload->start_slot;
2610 payload_alloc[2] = payload->num_slots;
2611
2612 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2613 if (ret != 3) {
2614 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2615 goto fail;
2616 }
2617
2618 retry:
2619 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2620 if (ret < 0) {
2621 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2622 goto fail;
2623 }
2624
2625 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2626 retries++;
2627 if (retries < 20) {
2628 usleep_range(10000, 20000);
2629 goto retry;
2630 }
2631 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
2632 ret = -EINVAL;
2633 goto fail;
2634 }
2635 ret = 0;
2636 fail:
2637 return ret;
2638 }
2639
2640
2641 /**
2642 * drm_dp_check_act_status() - Check ACT handled status.
2643 * @mgr: manager to use
2644 *
2645 * Check the payload status bits in the DPCD for ACT handled completion.
2646 */
2647 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2648 {
2649 u8 status;
2650 int ret;
2651 int count = 0;
2652
2653 do {
2654 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2655
2656 if (ret < 0) {
2657 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2658 goto fail;
2659 }
2660
2661 if (status & DP_PAYLOAD_ACT_HANDLED)
2662 break;
2663 count++;
2664 udelay(100);
2665
2666 } while (count < 30);
2667
2668 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2669 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2670 ret = -EINVAL;
2671 goto fail;
2672 }
2673 return 0;
2674 fail:
2675 return ret;
2676 }
2677 EXPORT_SYMBOL(drm_dp_check_act_status);
2678
2679 /**
2680 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2681 * @clock: dot clock for the mode
2682 * @bpp: bpp for the mode.
2683 *
2684 * This uses the formula in the spec to calculate the PBN value for a mode.
2685 */
2686 int drm_dp_calc_pbn_mode(int clock, int bpp)
2687 {
2688 u64 kbps;
2689 s64 peak_kbps;
2690 u32 numerator;
2691 u32 denominator;
2692
2693 kbps = clock * bpp;
2694
2695 /*
2696 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
2697 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
2698 * common multiplier to render an integer PBN for all link rate/lane
2699 * counts combinations
2700 * calculate
2701 * peak_kbps *= (1006/1000)
2702 * peak_kbps *= (64/54)
2703 * peak_kbps *= 8 convert to bytes
2704 */
2705
2706 numerator = 64 * 1006;
2707 denominator = 54 * 8 * 1000 * 1000;
2708
2709 kbps *= numerator;
2710 peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2711
2712 return drm_fixp2int_ceil(peak_kbps);
2713 }
2714 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
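
/*
 * Worked example: a 154 MHz dot clock at 30 bpp gives
 * kbps = 154000 * 30 = 4620000, so
 * PBN = ceil(4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000))
 *     = ceil(297454080000 / 432000000) = ceil(688.55...) = 689,
 * which is the value the first self-test below expects.
 */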
2715
2716 static int test_calc_pbn_mode(void)
2717 {
2718 int ret;
2719 ret = drm_dp_calc_pbn_mode(154000, 30);
2720 if (ret != 689) {
2721 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2722 154000, 30, 689, ret);
2723 return -EINVAL;
2724 }
2725 ret = drm_dp_calc_pbn_mode(234000, 30);
2726 if (ret != 1047) {
2727 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2728 234000, 30, 1047, ret);
2729 return -EINVAL;
2730 }
2731 ret = drm_dp_calc_pbn_mode(297000, 24);
2732 if (ret != 1063) {
2733 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2734 297000, 24, 1063, ret);
2735 return -EINVAL;
2736 }
2737 return 0;
2738 }
2739
2740 /* we want to kick the TX after we've acked the up/down IRQs. */
2741 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2742 {
2743 queue_work(system_long_wq, &mgr->tx_work);
2744 }
2745
2746 static void drm_dp_mst_dump_mstb(struct seq_file *m,
2747 struct drm_dp_mst_branch *mstb)
2748 {
2749 struct drm_dp_mst_port *port;
2750 int tabs = mstb->lct;
2751 char prefix[10];
2752 int i;
2753 
if (tabs > 9)
	tabs = 9; /* keep the '\t' prefix within prefix[10] */
2754 for (i = 0; i < tabs; i++)
2755 prefix[i] = '\t';
2756 prefix[i] = '\0';
2757
2758 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2759 list_for_each_entry(port, &mstb->ports, next) {
2760 seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
2761 if (port->mstb)
2762 drm_dp_mst_dump_mstb(m, port->mstb);
2763 }
2764 }
2765
2766 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2767 char *buf)
2768 {
2769 int ret;
2770 int i;
2771 for (i = 0; i < 4; i++) {
2772 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2773 if (ret != 16)
2774 break;
2775 }
2776 if (i == 4)
2777 return true;
2778 return false;
2779 }
2780
2781 /**
2782 * drm_dp_mst_dump_topology() - dump topology to seq file.
2783 * @m: seq_file to dump output to
2784 * @mgr: manager to dump current topology for.
2785 *
2786 * helper to dump MST topology to a seq file for debugfs.
2787 */
2788 void drm_dp_mst_dump_topology(struct seq_file *m,
2789 struct drm_dp_mst_topology_mgr *mgr)
2790 {
2791 int i;
2792 struct drm_dp_mst_port *port;
2793 mutex_lock(&mgr->lock);
2794 if (mgr->mst_primary)
2795 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2796
2797 /* dump VCPIs */
2798 mutex_unlock(&mgr->lock);
2799
2800 mutex_lock(&mgr->payload_lock);
2801 seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
2802
2803 for (i = 0; i < mgr->max_payloads; i++) {
2804 if (mgr->proposed_vcpis[i]) {
2805 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2806 seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
2807 } else
2808 seq_printf(m, "vcpi %d: unused\n", i);
2809 }
2810 for (i = 0; i < mgr->max_payloads; i++) {
2811 seq_printf(m, "payload %d: %d, %d, %d\n",
2812 i,
2813 mgr->payloads[i].payload_state,
2814 mgr->payloads[i].start_slot,
2815 mgr->payloads[i].num_slots);
2816 
2818 }
2819 mutex_unlock(&mgr->payload_lock);
2820
2821 mutex_lock(&mgr->lock);
2822 if (mgr->mst_primary) {
2823 u8 buf[64];
2824 bool bret;
2825 int ret;
2826 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
2827 seq_printf(m, "dpcd: ");
2828 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
2829 seq_printf(m, "%02x ", buf[i]);
2830 seq_printf(m, "\n");
2831 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
2832 seq_printf(m, "faux/mst: ");
2833 for (i = 0; i < 2; i++)
2834 seq_printf(m, "%02x ", buf[i]);
2835 seq_printf(m, "\n");
2836 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
2837 seq_printf(m, "mst ctrl: ");
2838 for (i = 0; i < 1; i++)
2839 seq_printf(m, "%02x ", buf[i]);
2840 seq_printf(m, "\n");
2841
2842 /* dump the standard OUI branch header */
2843 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
2844 seq_printf(m, "branch oui: ");
2845 for (i = 0; i < 0x3; i++)
2846 seq_printf(m, "%02x", buf[i]);
2847 seq_printf(m, " devid: ");
2848 for (i = 0x3; i < 0x8; i++)
2849 seq_printf(m, "%c", buf[i]);
2850 seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
2851 seq_printf(m, "\n");
2852 bret = dump_dp_payload_table(mgr, buf);
2853 if (bret) {
2854 seq_printf(m, "payload table: ");
2855 for (i = 0; i < 63; i++)
2856 seq_printf(m, "%02x ", buf[i]);
2857 seq_printf(m, "\n");
2858 }
2859
2860 }
2861
2862 mutex_unlock(&mgr->lock);
2864 }
2865 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
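
/*
 * Example: drivers usually expose this through a debugfs seq_file
 * show routine along these lines. A sketch; my_dp is hypothetical.
 */
#if 0
static int my_mst_topology_show(struct seq_file *m, void *unused)
{
	struct my_dp *dp = m->private;

	drm_dp_mst_dump_topology(m, &dp->mst_mgr);
	return 0;
}
#endif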
2866
2867 static void drm_dp_tx_work(struct work_struct *work)
2868 {
2869 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2870
2871 mutex_lock(&mgr->qlock);
2872 if (mgr->tx_down_in_progress)
2873 process_single_down_tx_qlock(mgr);
2874 mutex_unlock(&mgr->qlock);
2875 }
2876
2877 static void drm_dp_free_mst_port(struct kref *kref)
2878 {
2879 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
2880 kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
2881 kfree(port);
2882 }
2883
2884 static void drm_dp_destroy_connector_work(struct work_struct *work)
2885 {
2886 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2887 struct drm_dp_mst_port *port;
2888 bool send_hotplug = false;
2889 /*
2890 * Not a regular list traverse as we have to drop the destroy
2891 * connector lock before destroying the connector, to avoid AB->BA
2892 * ordering between this lock and the config mutex.
2893 */
2894 for (;;) {
2895 mutex_lock(&mgr->destroy_connector_lock);
2896 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
2897 if (!port) {
2898 mutex_unlock(&mgr->destroy_connector_lock);
2899 break;
2900 }
2901 list_del(&port->next);
2902 mutex_unlock(&mgr->destroy_connector_lock);
2903
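		/*
		 * The port's kref already reached zero to get it onto the
		 * destroy list; re-arm it so the kref_put() below is the
		 * one that finally frees it.
		 */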
2904 kref_init(&port->kref);
2905 INIT_LIST_HEAD(&port->next);
2906
2907 mgr->cbs->destroy_connector(mgr, port->connector);
2908
2909 drm_dp_port_teardown_pdt(port, port->pdt);
2910 port->pdt = DP_PEER_DEVICE_NONE;
2911
2912 if (!port->input && port->vcpi.vcpi > 0) {
2913 drm_dp_mst_reset_vcpi_slots(mgr, port);
2914 drm_dp_update_payload_part1(mgr);
2915 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2916 }
2917
2918 kref_put(&port->kref, drm_dp_free_mst_port);
2919 send_hotplug = true;
2920 }
2921 if (send_hotplug)
2922 (*mgr->cbs->hotplug)(mgr);
2923 }
2924
2925 /**
2926 * drm_dp_mst_topology_mgr_init() - initialise a topology manager
2927 * @mgr: manager struct to initialise
2928 * @dev: device providing this structure - for i2c addition.
2929 * @aux: DP helper aux channel to talk to this device
2930 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2931 * @max_payloads: maximum number of payloads this GPU can source
2932 * @conn_base_id: the connector object ID the MST device is connected to.
2933 *
2934 * Return 0 for success, or negative error code on failure
2935 */
2936 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2937 struct device *dev, struct drm_dp_aux *aux,
2938 int max_dpcd_transaction_bytes,
2939 int max_payloads, int conn_base_id)
2940 {
2941 mutex_init(&mgr->lock);
2942 mutex_init(&mgr->qlock);
2943 mutex_init(&mgr->payload_lock);
2944 mutex_init(&mgr->destroy_connector_lock);
2945 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2946 INIT_LIST_HEAD(&mgr->destroy_connector_list);
2947 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2948 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2949 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
2950 init_waitqueue_head(&mgr->tx_waitq);
2951 mgr->dev = dev;
2952 mgr->aux = aux;
2953 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2954 mgr->max_payloads = max_payloads;
2955 mgr->conn_base_id = conn_base_id;
2956 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2957 if (!mgr->payloads)
2958 return -ENOMEM;
2959 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
2960 if (!mgr->proposed_vcpis)
2961 return -ENOMEM;
2962 set_bit(0, &mgr->payload_mask);
2963 test_calc_pbn_mode();
2964 return 0;
2965 }
2966 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
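
/*
 * Example: one-time setup from a driver's output init path. A sketch;
 * my_mst_cbs would provide the hotplug and connector callbacks, and
 * the 16-byte DPCD transaction limit and payload count are merely
 * typical values, not requirements.
 */
#if 0
static int my_dp_mst_init(struct my_dp *dp, int conn_base_id)
{
	dp->mst_mgr.cbs = &my_mst_cbs;

	return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dp->dev,
					    &dp->aux, 16, 4, conn_base_id);
}
#endif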
2967
2968 /**
2969 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
2970 * @mgr: manager to destroy
2971 */
2972 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2973 {
2974 flush_work(&mgr->work);
2975 flush_work(&mgr->destroy_connector_work);
2976 mutex_lock(&mgr->payload_lock);
2977 kfree(mgr->payloads);
2978 mgr->payloads = NULL;
2979 kfree(mgr->proposed_vcpis);
2980 mgr->proposed_vcpis = NULL;
2981 mutex_unlock(&mgr->payload_lock);
2982 mgr->dev = NULL;
2983 mgr->aux = NULL;
2984 }
2985 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
2986
2987 /* I2C device */
2988 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
2989 int num)
2990 {
2991 struct drm_dp_aux *aux = adapter->algo_data;
2992 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
2993 struct drm_dp_mst_branch *mstb;
2994 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2995 unsigned int i;
2996 bool reading = false;
2997 struct drm_dp_sideband_msg_req_body msg;
2998 struct drm_dp_sideband_msg_tx *txmsg = NULL;
2999 int ret;
3000
3001 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3002 if (!mstb)
3003 return -EREMOTEIO;
3004
3005 /* construct i2c msg */
3006 /* see if last msg is a read */
3007 if (msgs[num - 1].flags & I2C_M_RD)
3008 reading = true;
3009
3010 if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
3011 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
3012 ret = -EIO;
3013 goto out;
3014 }
3015
3016 memset(&msg, 0, sizeof(msg));
3017 msg.req_type = DP_REMOTE_I2C_READ;
3018 msg.u.i2c_read.num_transactions = num - 1;
3019 msg.u.i2c_read.port_number = port->port_num;
3020 for (i = 0; i < num - 1; i++) {
3021 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
3022 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
3023 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
3024 }
3025 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
3026 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
3027
3028 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3029 if (!txmsg) {
3030 ret = -ENOMEM;
3031 goto out;
3032 }
3033
3034 txmsg->dst = mstb;
3035 drm_dp_encode_sideband_req(&msg, txmsg);
3036
3037 drm_dp_queue_down_tx(mgr, txmsg);
3038
3039 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3040 if (ret > 0) {
3041
3042 if (txmsg->reply.reply_type == 1) { /* got a NAK back */
3043 ret = -EREMOTEIO;
3044 goto out;
3045 }
3046 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
3047 ret = -EIO;
3048 goto out;
3049 }
3050 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
3051 ret = num;
3052 }
3053 out:
3054 kfree(txmsg);
3055 drm_dp_put_mst_branch_device(mstb);
3056 return ret;
3057 }
3058
3059 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
3060 {
3061 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
3062 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
3063 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
3064 I2C_FUNC_10BIT_ADDR;
3065 }
3066
3067 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
3068 .functionality = drm_dp_mst_i2c_functionality,
3069 .master_xfer = drm_dp_mst_i2c_xfer,
3070 };
3071
3072 /**
3073 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
3074 * @aux: DisplayPort AUX channel
3075 *
3076 * Returns 0 on success or a negative error code on failure.
3077 */
3078 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
3079 {
3080 aux->ddc.algo = &drm_dp_mst_i2c_algo;
3081 aux->ddc.algo_data = aux;
3082 aux->ddc.retries = 3;
3083
3084 aux->ddc.class = I2C_CLASS_DDC;
3085 aux->ddc.owner = THIS_MODULE;
3086 aux->ddc.dev.parent = aux->dev;
3087 aux->ddc.dev.of_node = aux->dev->of_node;
3088
3089 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
3090 sizeof(aux->ddc.name));
3091
3092 return i2c_add_adapter(&aux->ddc);
3093 }
3094
3095 /**
3096 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
3097 * @aux: DisplayPort AUX channel
3098 */
3099 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
3100 {
3101 i2c_del_adapter(&aux->ddc);
3102 }
3103