mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-11-01 09:13:37 +00:00)

	rxrpc: Fix missing notification
Under some circumstances, rxrpc will fail to transmit a packet through the
underlying UDP socket (ie. UDP sendmsg returns an error).  This may result
in a call getting stuck.
In the instance being seen, where AFS tries to send a probe to the Volume
Location server, tracepoints show the UDP Tx failure (in this case returning
error 99 EADDRNOTAVAIL) and then nothing more:
 afs_make_vl_call: c=0000015d VL.GetCapabilities
 rxrpc_call: c=0000015d NWc u=1 sp=rxrpc_kernel_begin_call+0x106/0x170 [rxrpc] a=00000000dd89ee8a
 rxrpc_call: c=0000015d Gus u=2 sp=rxrpc_new_client_call+0x14f/0x580 [rxrpc] a=00000000e20e4b08
 rxrpc_call: c=0000015d SEE u=2 sp=rxrpc_activate_one_channel+0x7b/0x1c0 [rxrpc] a=00000000e20e4b08
 rxrpc_call: c=0000015d CON u=2 sp=rxrpc_kernel_begin_call+0x106/0x170 [rxrpc] a=00000000e20e4b08
 rxrpc_tx_fail: c=0000015d r=1 ret=-99 CallDataNofrag
The problem is that if the initial packet fails and the retransmission
timer hasn't been started, the call is set to completed and an error is
returned from rxrpc_send_data_packet() to rxrpc_queue_packet().  Though
rxrpc_instant_resend() is called, this does nothing because the call is
marked completed.
So rxrpc_notify_socket() isn't called and the error is passed back up to
rxrpc_send_data(), rxrpc_kernel_send_data() and thence to afs_make_call()
and afs_vl_get_capabilities() where it is simply ignored because it is
assumed that the result of a probe will be collected asynchronously.
Fileserver probing is similarly affected via afs_fs_get_capabilities().
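For context, the AFS probe issuers treat these calls as fire-and-forget:
the synchronous return value of the send path is discarded and completion
is expected to arrive through the socket notification.  A minimal sketch of
that pattern (hypothetical names, not the actual AFS code):

	struct example_server;			/* stand-in for the AFS server record */
	int example_begin_probe_call(struct example_server *);	/* ends up in rxrpc_kernel_send_data() */

	static void example_send_probe(struct example_server *server)
	{
		/* Fire off the RPC; the reply is meant to be collected
		 * asynchronously from the notification callback, so the
		 * synchronous error is deliberately dropped here.
		 */
		example_begin_probe_call(server);

		/* If the send failed and the call was completed without
		 * rxrpc_notify_socket() being called, no notification ever
		 * arrives and the dead call is never reaped - it looks stuck.
		 */
	}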
Fix this by always issuing a notification in __rxrpc_set_call_completion()
if it shifts a call to the completed state, even if an error is also
returned to the caller through the function return value.
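Assembled from the recvmsg.c hunks below, the completion helper after this
patch looks roughly as follows (the field assignments at the top are
reconstructed from the surrounding function for context, so treat this as a
sketch rather than verbatim source):

	bool __rxrpc_set_call_completion(struct rxrpc_call *call,
					 enum rxrpc_call_completion compl,
					 u32 abort_code,
					 int error)
	{
		if (call->state < RXRPC_CALL_COMPLETE) {
			call->abort_code = abort_code;
			call->error	 = error;
			call->completion = compl;
			call->state	 = RXRPC_CALL_COMPLETE;
			trace_rxrpc_call_complete(call);
			wake_up(&call->waitq);
			/* The fix: notify on every transition to the
			 * completed state, even when an error is also
			 * handed back to the caller.
			 */
			rxrpc_notify_socket(call);
			return true;
		}
		return false;
	}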
Also add a small optimisation to avoid taking the call state_lock and
disabling softirqs if the call is already in the completed state, and
remove some now-redundant rxrpc_notify_socket() calls.
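The unlocked check is safe because RXRPC_CALL_COMPLETE is the one terminal
state (see the commit named in Fixes below): once a call is observed as
complete it can never become incomplete again, so a stale read can only
fall through to the locked path, which re-checks under the lock.  From the
recvmsg.c hunks below, the wrapper becomes (explanatory comment added):

	bool rxrpc_set_call_completion(struct rxrpc_call *call,
				       enum rxrpc_call_completion compl,
				       u32 abort_code,
				       int error)
	{
		bool ret = false;

		/* Fast path: skip the state_lock and the softirq-disabling
		 * _bh section entirely if the call has already completed.
		 */
		if (call->state < RXRPC_CALL_COMPLETE) {
			write_lock_bh(&call->state_lock);
			ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
			write_unlock_bh(&call->state_lock);
		}
		return ret;
	}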
Fixes: f5c17aaeb2 ("rxrpc: Calls should only have one terminal state")
Reported-by: Gerry Seidman <gerry@auristor.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
			
			
parent 3067bf8c59
commit 5ac0d62226

6 changed files with 21 additions and 25 deletions
net/rxrpc/call_event.c
@@ -320,7 +320,6 @@ recheck_state:
 
 	if (call->state == RXRPC_CALL_COMPLETE) {
 		del_timer_sync(&call->timer);
-		rxrpc_notify_socket(call);
 		goto out_put;
 	}
 
net/rxrpc/conn_event.c
@@ -173,10 +173,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
 			else
 				trace_rxrpc_rx_abort(call, serial,
 						     conn->abort_code);
-			if (rxrpc_set_call_completion(call, compl,
-						      conn->abort_code,
-						      conn->error))
-				rxrpc_notify_socket(call);
+			rxrpc_set_call_completion(call, compl,
+						  conn->abort_code,
+						  conn->error);
 		}
 	}
 
net/rxrpc/input.c
@@ -275,7 +275,6 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 
 	case RXRPC_CALL_SERVER_AWAIT_ACK:
 		__rxrpc_call_completed(call);
-		rxrpc_notify_socket(call);
 		state = call->state;
 		break;
 
@@ -1013,9 +1012,8 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
 
 	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
 
-	if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
-				      abort_code, -ECONNABORTED))
-		rxrpc_notify_socket(call);
+	rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+				  abort_code, -ECONNABORTED);
 }
 
 /*
@@ -1102,7 +1100,6 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
 	spin_lock(&rx->incoming_lock);
 	__rxrpc_disconnect_call(conn, call);
 	spin_unlock(&rx->incoming_lock);
-	rxrpc_notify_socket(call);
 }
 
 /*
net/rxrpc/peer_event.c
@@ -289,9 +289,7 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
 
 	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
 		rxrpc_see_call(call);
-		if (call->state < RXRPC_CALL_COMPLETE &&
-		    rxrpc_set_call_completion(call, compl, 0, -error))
-			rxrpc_notify_socket(call);
+		rxrpc_set_call_completion(call, compl, 0, -error);
 	}
 }
 
net/rxrpc/recvmsg.c
@@ -73,6 +73,7 @@ bool __rxrpc_set_call_completion(struct rxrpc_call *call,
 		call->state = RXRPC_CALL_COMPLETE;
 		trace_rxrpc_call_complete(call);
 		wake_up(&call->waitq);
+		rxrpc_notify_socket(call);
 		return true;
 	}
 	return false;
@@ -83,11 +84,13 @@ bool rxrpc_set_call_completion(struct rxrpc_call *call,
 			       u32 abort_code,
 			       int error)
 {
-	bool ret;
+	bool ret = false;
 
-	write_lock_bh(&call->state_lock);
-	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
-	write_unlock_bh(&call->state_lock);
+	if (call->state < RXRPC_CALL_COMPLETE) {
+		write_lock_bh(&call->state_lock);
+		ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
+		write_unlock_bh(&call->state_lock);
+	}
 	return ret;
 }
 
@@ -101,11 +104,13 @@ bool __rxrpc_call_completed(struct rxrpc_call *call)
 
 bool rxrpc_call_completed(struct rxrpc_call *call)
 {
-	bool ret;
+	bool ret = false;
 
-	write_lock_bh(&call->state_lock);
-	ret = __rxrpc_call_completed(call);
-	write_unlock_bh(&call->state_lock);
+	if (call->state < RXRPC_CALL_COMPLETE) {
+		write_lock_bh(&call->state_lock);
+		ret = __rxrpc_call_completed(call);
+		write_unlock_bh(&call->state_lock);
+	}
 	return ret;
 }
 
net/rxrpc/sendmsg.c
@@ -261,10 +261,8 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 		case -ENETUNREACH:
 		case -EHOSTUNREACH:
 		case -ECONNREFUSED:
-			if (rxrpc_set_call_completion(call,
-						      RXRPC_CALL_LOCAL_ERROR,
-						      0, ret))
-				rxrpc_notify_socket(call);
+			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+						  0, ret);
 			goto out;
 		}
 		_debug("need instant resend %d", ret);