Re: [PATCH 1/2] usb: typec: Add USB Power Delivery sink port support

From: Oliver Neukum
Date: Fri Jul 15 2016 - 02:35:33 EST


On Thu, 2016-07-14 at 19:14 -0700, Bin Gao wrote:
> This patch implements a simple USB Power Delivery sink port state machine.
> It assumes the hardware only handles PD packet transmitting and receiving
> over the CC line of the USB Type-C connector. The state transition is
> completely controlled by software. This patch only implement the sink port
> function and it doesn't support source port and port swap yet.
>

> +/*
> + * For any message we send, we must get a GOODCRC message from the Source.
> + * The USB PD spec says the time should be measured between the last bit
> + * of the sending message's EOP has been transmitted and the last bit of
> + * the receiving GOODCRC message's EOP has been received. The allowed time
> + * is minimal 0.9 ms and maximal 1.1 ms. However, this measurement is
> + * performed in physical layer. When it reaches to the OS and this driver,
> + * the actual time is difficult to predict because of the scheduling,
> + * context switch, interrupt preemption and nesting, etc. So we only define
> + * a safe timeout value (PD_TIMEOUT_GOODCRC) which is large enough to take
> + * account of all software related latency.
> + */
> +static int send_message(struct pd_sink_port *port, void *buf, int len,
> + u8 msg, bool ctrl_msg, enum sop_type sop_type)
> +{
> + if (ctrl_msg && msg == PD_CMSG_GOODCRC) {
> + port->tx(port->port, port->tx_priv, buf, len, sop_type);
> + print_message(port->port, true, msg, false);
> + return 0;
> + }
> +
> + port->tx(port->port, port->tx_priv, buf, len, sop_type);
> + print_message(port->port, ctrl_msg, msg, false);
> +
> + if (!port->hw_goodcrc_rx) {
> + port->waiting_goodcrc = true;
> + start_timer(port, PD_TIMEOUT_GOODCRC, goodcrc_timeout);
> + }
> +
> + port->last_sent_msg = msg;
> + port->last_msg_ctrl = ctrl_msg;
> +
> + return 0;
> +}
> +
> +static void ack_message(struct pd_sink_port *port, int msg_id)
> +{
> + struct pd_msg_header *header = kzalloc(PD_MSG_HEADER_LEN, GFP_KERNEL);

This must be GFP_NOIO. We are in a cycle that can lead to deadlock.

Assume we are waiting for a request for more power to process IO
which we need to ack.

1. memory allocation leads to laundering, blocks on freeing memory
2. launderer decides to perform IO which needs more power
3. more power has already been requested, wait for it to be granted

4. BANG - DEADLOCK


> +
> + if (!header) {
> + MAKE_HEADER(port, header, PD_CMSG_GOODCRC, 0);
> + send_message(port, header, PD_MSG_HEADER_LEN,
> + PD_CMSG_GOODCRC, true, SOP);
> +} }
> +
> +static int handle_goodcrc(struct pd_sink_port *port, u8 msg_id)
> +{
> + if (is_waiting_goodcrc(port, msg_id)) {
> + hrtimer_cancel(&port->tx_timer);
> + clear_waiting_goodcrc(port);
> + switch (msg_id) {
> + case PD_DMSG_REQUEST:
> + port->state = PD_SINK_STATE_REQUEST_SENT;
> + break;
> + case PD_DMSG_SINK_CAP:
> + pr_info("Got GOODCRC for SINK_CAPABILITY message\n");
> + default:
> + break;
> + }
> + } else
> + pr_warn("Unexpected GOODCRC message received.\n");
> +
> + return 0;
> +}
> +
> +static void handle_accept(struct pd_sink_port *port)
> +{
> + enum pd_sink_state state = port->state;
> +
> + if (state != PD_SINK_STATE_REQUEST_SENT) {
> + pr_err("ACCEPT received but not expected, sink state: %s\n",
> + state_to_string(state));
> + return;
> + }
> +
> + port->state = PD_SINK_STATE_ACCEPT_RECEIVED;
> +}
> +
> +/*
> + * The Source may send REJECT message as a response to REQUEST, PR_SWAP,
> + * DR_SWAP or VCONN_SWAP message. Since we only send REQUEST to the Source
> + * so we're sure the REJECT is for our REQUEST message.
> + */
> +static void handle_reject(struct pd_sink_port *port)
> +{
> + enum pd_sink_state state = port->state;
> +
> + if (state == PD_SINK_STATE_REQUEST_SENT)
> + /* Broadcast to upper layer */
> + blocking_notifier_call_chain(&pd_sink_notifier_list,
> + PD_SINK_EVENT_REJECT | port->port << 8, NULL);
> + else
> + pr_err("REJECT received but not expected, sink state: %s\n",
> + state_to_string(state));
> +}
> +
> +static void handle_not_supported(struct pd_sink_port *port)
> +{
> + pr_err("sink port %d: %s message %s is not supported by Source\n",
> + port->port, port->last_msg_ctrl ? "Control" : "Data",
> + msg_to_string(port->last_msg_ctrl,
> + port->last_sent_msg & PD_MSG_TYPE_MASK));
> +}
> +
> +/*
> + * The Wait Message is a valid response to a Request, a PR_Swap, DR_Swap or
> + * VCONN_Swap Message. We don't support PR_Swap, DR_Swap and VCONN_Swap
> + * messages so we only handle Wait message from Source responding to our
> + * Request message.
> + */
> +static void handle_wait(struct pd_sink_port *port)
> +{
> + pr_info("WAIT message received\n");
> +}
> +
> +/*
> + * We respond to the GET_SINK_CAPABILITY message by sending the SINK_CAPABILITY
> + * message. The PDOs in the SINK_CAPABILITY message shall be in the following
> + * order:
> + * 1. The vSafe5V Fixed Supply Object shall always be the first object.
> + * 2. The remaining Fixed Supply Objects, if present, shall be sent in voltage
> + * order; lowest to highest.
> + * 3. The Battery Supply Objects, if present shall be sent in Minimum Voltage
> + * order; lowest to highest.
> + * 4. The Variable Supply (non-Battery) Objects, if present, shall be sent in
> + * Minimum Voltage order; lowest to highest.
> + *
> + * The upper layer calling pd_sink_register_port() must ensure the profiles
> + * provided come with the above order as we don't re-order the profiles here
> + * when we compose the SINK_CAPABILITY message.
> + */
> +static void handle_get_sink_cap(struct pd_sink_port *port)
> +{
> + u8 *pdo;
> + int i, nr_objs = 0;
> + struct sink_ps *ps;
> + struct pd_pdo_sink_fixed *fixed;
> + struct pd_pdo_variable *variable;
> + struct pd_pdo_battery *battery;
> + struct pd_msg_header *header = kzalloc(PD_MSG_HEADER_LEN +
> + port->nr_ps * PD_OBJ_SIZE, GFP_KERNEL);

Must be GFP_NOIO, for the same reason as above. We may receive this
request precisely in order to resolve a capability mismatch caused by
needing more power for IO.

> +
> + if (!header) {
> + pr_crit("%s(): kzalloc() failed\n", __func__);
> + return;
> + }
> +
> + MAKE_HEADER(port, header, PD_DMSG_SINK_CAP, port->nr_ps);
> + pdo = (u8 *)header + PD_MSG_HEADER_LEN;
> +
> + for (i = 0; i < port->nr_ps; i++) {
> + ps = port->ps_reqs + i;
> + switch (ps->ps_type) {
> + case PS_TYPE_FIXED:
> + fixed = (struct pd_pdo_sink_fixed *)pdo;
> + fixed->current_max = ps->ps_fixed.current_max;
> + fixed->voltage = ps->ps_fixed.voltage_fixed;
> + fixed->fast_role_swap = FAST_ROLE_SWAP_DISABLE;
> + fixed->usb_capable = 1;
> + fixed->higher_capability =
> + (ps->ps_fixed.voltage_fixed >
> + PD_VSAFE5V) ? 1 : 0;
> + fixed->type = PS_TYPE_FIXED;
> + break;
> + case PS_TYPE_BATTERY:
> + battery = (struct pd_pdo_battery *)pdo;
> + battery->power = ps->ps_battery.power;
> + battery->voltage_max = ps->ps_battery.voltage_max;
> + battery->voltage_min = ps->ps_battery.voltage_min;
> + battery->type = PS_TYPE_BATTERY;
> + break;
> + case PS_TYPE_VARIABLE:
> + variable = (struct pd_pdo_variable *)pdo;
> + variable->current_mo =
> + ps->ps_variable.current_default;
> + variable->voltage_max = ps->ps_variable.voltage_max;
> + variable->voltage_min = ps->ps_variable.voltage_min;
> + variable->type = PS_TYPE_VARIABLE;
> + break;
> + default:
> + continue;
> + }
> +
> + pdo += PD_OBJ_SIZE;
> + nr_objs++;
> + }
> +
> + if (nr_objs == 0) {
> + kfree(header);
> + pr_err("no valid sink PDOs to send in SINK_CAPABILITY\n");
> + return;
> + }
> +
> + header->nr_objs = nr_objs;
> + send_message(port, header, PD_MSG_HEADER_LEN + nr_objs * PD_OBJ_SIZE,
> + PD_DMSG_SINK_CAP, false, SOP);
> +}
> +
> +static void handle_ps_ready(struct pd_sink_port *port)
> +{
> + if (port->state != PD_SINK_STATE_ACCEPT_RECEIVED) {
> + pr_err("PS_READY received but sink state is not in %s\n",
> + state_to_string(PD_SINK_STATE_ACCEPT_RECEIVED));
> + return;
> + }
> +
> + /* Broadcast to upper layer */
> + blocking_notifier_call_chain(&pd_sink_notifier_list,
> + PD_SINK_EVENT_PS_READY | port->port << 8, NULL);
> +}
> +
> +static void handle_gotomin(struct pd_sink_port *port)
> +{
> + /* Broadcast to upper layer */
> + blocking_notifier_call_chain(&pd_sink_notifier_list,
> + PD_SINK_EVENT_GOTOMIN | port->port << 8, NULL);
> +}
> +
> +static void handle_soft_reset(struct pd_sink_port *port)
> +{
> + struct pd_msg_header *header = kzalloc(PD_MSG_HEADER_LEN, GFP_KERNEL);
> +
> + if (!header)
> + return;
> +
> + flush_workqueue(port->rx_wq);

That is problematic. We may be here precisely because something has gone
wrong and is blocking progress. In particular, what happens if another
soft reset is already queued on that workqueue?

> + stop_timer(port);
> + port->msg_id = PD_MSG_ID_MIN;
> + port->state = PD_SINK_STATE_WAIT_FOR_SRC_CAP;
> + MAKE_HEADER(port, header, PD_CMSG_ACCEPT, 0);
> + send_message(port, header, PD_MSG_HEADER_LEN,
> + PD_CMSG_ACCEPT, true, SOP);
> +
> + /* Broadcast to upper layer */
> + blocking_notifier_call_chain(&pd_sink_notifier_list,
> + PD_SINK_EVENT_SOFT_RESET | port->port << 8, NULL);
> +}
> +
> +static void ctl_msg_handler(struct pd_sink_port *port, u8 msg_type)
> +{
> + print_message(port->port, true, msg_type, true);
> +
> + switch (msg_type) {
> + case PD_CMSG_ACCEPT:
> + handle_accept(port);
> + break;
> + case PD_CMSG_GOTOMIN:
> + handle_gotomin(port);
> + break;
> + case PD_CMSG_REJECT:
> + handle_reject(port);
> + break;
> + case PD_CMSG_GET_SINK_CAP:
> + handle_get_sink_cap(port);
> + break;
> + case PD_CMSG_PS_RDY:
> + handle_ps_ready(port);
> + break;
> + case PD_CMSG_SOFT_RESET:
> + handle_soft_reset(port);
> + break;
> + case PD_CMSG_NOT_SUPPORTED:
> + handle_not_supported(port);
> + break;
> + case PD_CMSG_WAIT:
> + /*
> + * Sent by Source to Sink to indicate it can't meet the
> + * requirement for the Request. The Sink is allowed to
> + * repeat the Request Message using the SinkRequestTimer
> + * to ensure that there is tSinkRequest between requests.
> + */
> + handle_wait(port);
> + break;
> + /* Below messages are simply ignored */
> + case PD_CMSG_GET_SOURCE_CAP_EXTENDED:
> + case PD_CMSG_GET_STATUS:
> + case PD_CMSG_PING:
> + case PD_CMSG_GET_SRC_CAP:
> + case PD_CMSG_DR_SWAP:
> + case PD_CMSG_PR_SWAP:
> + case PD_CMSG_VCONN_SWAP:
> + case PD_CMSG_FR_SWAP:
> + break;
> +
> + }
> +}
> +
> +/**
> + * send_request() - send REQUEST message to Source.
> + * @port: the sink port number
> + * Return: none
> + */
> +static int send_request(struct pd_sink_port *port)
> +{
> + int i;
> + struct pd_request *request;
> + struct pd_pdo_src_fixed *src;
> + struct sink_ps *ps =
> + &port->ps_reqs[port->active_ps];
> + struct pd_source_cap *cap;
> + struct pd_msg_header *header = kzalloc(PD_MSG_HEADER_LEN +
> + PD_OBJ_SIZE, GFP_KERNEL);

GFP_NOIO, same reasons

> + bool voltage_matched = false, current_matched = false;
> +
> + /* Only support fixed PDO for now */
> + if (ps->ps_type != PS_TYPE_FIXED) {
> + pr_err("%s(): We only support fixed PDO now\n", __func__);
> + return -EINVAL;
> + }
> +
> + for (i = 0; i < port->nr_source_caps; i++) {
> + cap = &port->source_caps[i];
> + if (cap->ps_type != PS_TYPE_FIXED)
> + continue;
> + src = &cap->fixed;
> + if (src->voltage != ps->ps_fixed.voltage_fixed)
> + continue;
> + voltage_matched = true;
> + /* We matched the voltage, now match the current */
> + if (src->current_max >= ps->ps_fixed.current_max) {
> + current_matched = true;
> + break;
> + }
> + }
> +
> + if (!voltage_matched) {
> + pr_err("Can't match any PDO from source caps\n");
> + return -EINVAL;
> + }
> +
> + MAKE_HEADER(port, header, PD_DMSG_REQUEST, 1);
> + request = (struct pd_request *)((u8 *)header + PD_MSG_HEADER_LEN);
> + request->pos = i + 1; /* PD protocol RDO index starts from 1 */
> + request->give_back = port->support_gotomin ? 1 : 0;
> + request->usb_comm_capable = port->usb_comm_capable ? 1 : 0;
> + if (port->pd_rev == PD_REVISION_3)
> + request->unchunked_ext_msg_supported = 1;
> +
> + /* If current_matched is false, we have to set the mismatch flag */
> + if (!current_matched)
> + request->cap_mismatch = 1;
> +
> + request->operating_default = ps->ps_fixed.current_default;
> + request->operating_min_max = ps->ps_fixed.current_max;
> +
> + return send_message(port, header, PD_MSG_HEADER_LEN + PD_OBJ_SIZE,
> + PD_DMSG_REQUEST, false, SOP);
> +}
> +

HTH
Oliver