diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7de4ea3..c8fd034 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -272,6 +272,11 @@ DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
 #define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
 #define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)
 
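+/* Debug helper (net/ipv4/tcp_ipv4.c): recounts TCPCB_LOST packets
+ * on the write queue and compares the result with tp->lost_out.
+ */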
+extern void			tcp_verify_lost(struct sock *sk);
+
 extern void			tcp_v4_err(struct sk_buff *skb, u32);
 
 extern void			tcp_shutdown (struct sock *sk, int how);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 19c449f..681ab1c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1604,6 +1604,7 @@ advance_sp:
 	tcp_mark_lost_retrans(sk);
 
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 
 	if ((reord < tp->fackets_out) &&
 	    ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
@@ -1647,6 +1648,7 @@ static void tcp_add_reno_sack(struct sock *sk)
 	tp->sacked_out++;
 	tcp_check_reno_reordering(sk, 0);
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 }
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
@@ -1664,6 +1666,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 	}
 	tcp_check_reno_reordering(sk, acked);
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 }
 
 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
@@ -1765,6 +1768,7 @@ void tcp_enter_frto(struct sock *sk)
 		tp->retrans_out -= tcp_skb_pcount(skb);
 	}
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 
 	/* Too bad if TCP was application limited */
 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
@@ -1827,6 +1831,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 		}
 	}
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 
 	tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
 	tp->snd_cwnd_cnt = 0;
@@ -1912,6 +1917,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 		}
 	}
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
@@ -2167,6 +2173,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
 		}
 	}
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 }
 
 /* Account newly detected lost packet(s) */
@@ -2216,6 +2223,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 		tp->scoreboard_skb_hint = skb;
 
 		tcp_verify_left_out(tp);
+		tcp_verify_lost(sk);
 	}
 }
 
@@ -2431,6 +2439,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 
 	if (tp->retrans_out == 0)
 		tp->retrans_stamp = 0;
@@ -2526,6 +2535,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 
 	/* D. Check consistency of the current state. */
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 
 	/* E. Check state exit conditions. State can be terminated
 	 *    when high_seq is ACKed. */
@@ -2645,6 +2655,16 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
 		tcp_update_scoreboard(sk, fast_rexmit);
 	tcp_cwnd_down(sk, flag);
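+
+	/* Debug guard: tcp_xmit_retransmit_queue() assumes a non-empty
+	 * write queue and packets in flight; warn and bail out instead
+	 * of crashing on inconsistent state.
+	 */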
+	if (WARN_ON(!tcp_write_queue_head(sk)))
+		return;
+	if (WARN_ON(!tp->packets_out))
+		return;
+
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -2847,6 +2863,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 		sk_wmem_free_skb(sk, skb);
 		tcp_clear_all_retrans_hints(tp);
 	}
+
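+	/* Everything fully ACKed was just unlinked from the queue;
+	 * lost_out must still match what remains.
+	 */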
+	tcp_verify_lost(sk);
 
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 		flag |= FLAG_SACK_RENEGING;
@@ -2890,6 +2908,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 #if FASTRETRANS_DEBUG > 0
 	BUG_TRAP((int)tp->sacked_out >= 0);
 	BUG_TRAP((int)tp->lost_out >= 0);
+	if (tp->lost_out > tp->packets_out)
+		printk(KERN_ERR "TCP: lost_out underflow: lost_out %u > packets_out %u\n",
+		       tp->lost_out, tp->packets_out);
 	BUG_TRAP((int)tp->retrans_out >= 0);
 	if (!tp->packets_out && tcp_is_sack(tp)) {
 		icsk = inet_csk(sk);
@@ -3061,6 +3081,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tcp_verify_left_out(tp);
+	tcp_verify_lost(sk);
 
 	/* Duplicate the behavior from Loss state (fastretrans_alert) */
 	if (flag & FLAG_DATA_ACKED)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 00156bf..410aada 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -108,6 +108,32 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 	.lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };
 
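+/* Debug-only sanity check: walk the write queue, recount the skbs
+ * marked TCPCB_LOST, and complain if the total disagrees with
+ * tp->lost_out.
+ */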
+void tcp_verify_lost(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 lost = 0;
+	struct sk_buff *skb;
+
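+	/* Only segments before the send head have been transmitted,
+	 * so only they can legitimately carry the TCPCB_LOST mark.
+	 */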
+	tcp_for_write_queue(skb, sk) {
+		if (skb == tcp_send_head(sk))
+			break;
+		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
+			lost += tcp_skb_pcount(skb);
+	}
+
+	if (WARN_ON(lost != tp->lost_out)) {
+		printk(KERN_ERR "TCP: lost_out mismatch: %u counted vs %u, packets_out %u, sack %d\n",
+		       lost, tp->lost_out, tp->packets_out, tcp_is_sack(tp));
+	}
+}
+
 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ed750f9..586660a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -790,6 +790,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	skb_header_release(buff);
 	tcp_insert_write_queue_after(skb, buff, sk);
 
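+	/* The split redistributes pcounts between skb and buff; verify
+	 * that the TCPCB_LOST accounting survived it.
+	 */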
+	tcp_verify_lost(sk);
+
 	return 0;
 }
 
@@ -1459,10 +1461,15 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
 	/* Do MTU probing. */
 	if ((result = tcp_mtu_probe(sk)) == 0) {
+		tcp_verify_lost(sk);
 		return 0;
 	} else if (result > 0) {
 		sent_pkts = 1;
 	}
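+	/* tcp_mtu_probe() may coalesce skbs at the head of the write
+	 * queue; recheck the LOST accounting after it has run.
+	 */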
+	tcp_verify_lost(sk);
 
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
@@ -1764,6 +1768,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
 	tcp_clear_retrans_hints_partial(tp);
 
 	sk_wmem_free_skb(sk, next_skb);
+	tcp_verify_lost(sk);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -1794,6 +1799,8 @@ void tcp_simple_retransmit(struct sock *sk)
 			}
 		}
 	}
+
+	tcp_verify_lost(sk);
 
 	tcp_clear_all_retrans_hints(tp);
 
@@ -1815,6 +1822,8 @@ void tcp_simple_retransmit(struct sock *sk)
 		tcp_set_ca_state(sk, TCP_CA_Loss);
 	}
 	tcp_xmit_retransmit_queue(sk);
+
+	tcp_verify_lost(sk);
 }
 
 /* This retransmits one SKB.  Policy decisions and retransmit queue
@@ -1996,6 +2005,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 			}
 		}
 	}
+
+	tcp_verify_lost(sk);
 
 	/* OK, demanded retransmission is finished. */
 
@@ -2054,6 +2065,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 
 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
 	}
+
+	tcp_verify_lost(sk);
 }
 
 /* Send a fin.  The caller locks the socket for us.  This cannot be