Issue #6042:
lnotab-based tracing is very complicated and poorly documented.  There were at
least three comment blocks purporting to document co_lnotab, and none did a
very good job. This patch unifies them into Objects/lnotab_notes.txt, which
tries to capture the current state of affairs completely.
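
For reference, the basic co_lnotab decoding that the old comment blocks
described (and that lnotab_notes.txt now covers in full) is roughly the loop
sketched below.  The helper name and the hand-built table are mine, purely for
illustration; the table encodes the example rows from the old compile.c
comment, including the required split of the large (300, 300) step.

    # Sketch of the co_lnotab decoding loop; not code from this patch.
    # co_lnotab is a sequence of (bytecode offset increment, line number
    # increment) pairs of unsigned bytes, as in the pre-existing format.
    def addr_to_lineno(lnotab, first_lineno, target_addr):
        """Return the source line in effect at bytecode offset target_addr."""
        lineno = first_lineno
        addr = 0
        it = iter(lnotab)
        for addr_incr, line_incr in zip(it, it):
            addr += addr_incr
            if addr > target_addr:
                return lineno
            lineno += line_incr
        return lineno

    # The example table from the old comment: offsets 0, 6, 50, 350, 361
    # map to lines 1, 2, 7, 307, 308, with the (300, 300) jump expanded
    # into (255, 0), (45, 255), (0, 45) so every byte stays in 0..255.
    lnotab = bytes([0, 1, 6, 1, 44, 5, 255, 0, 45, 255, 0, 45, 11, 1])
    assert addr_to_lineno(lnotab, 0, 0) == 1
    assert addr_to_lineno(lnotab, 0, 50) == 7
    assert addr_to_lineno(lnotab, 0, 350) == 307
    assert addr_to_lineno(lnotab, 0, 361) == 308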

I also discovered that we've attached two layers of patches to the basic
tracing scheme. The first layer avoids emitting a line event when jumping to
an instruction that doesn't start a line, to avoid problems in 'if' statements
and 'while' loops.  The second layer discovered that backward jumps do need to
be traced at instructions that don't start a line, so it added extra lnotab
entries for 'while' and 'for' loops and a special case for backward jumps
within the same line. I replaced both patches by simply treating forward and
backward jumps differently.
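
The resulting rule is simple enough to restate: a line event fires when the
last instruction executed falls at the start of a line, or when it was reached
by a jump to an earlier offset.  The snippet below is a paraphrase of that
decision, for illustration only; the names mirror the ceval.c variables in the
hunk that follows, and it is not the actual implementation.

    # Paraphrase of the new trace decision in maybe_call_line_trace();
    # illustration only, not the C code from the patch.
    def should_emit_line_event(lasti, instr_lb, instr_prev):
        starts_a_line = lasti == instr_lb       # first instruction of a line
        jumped_backward = lasti < instr_prev    # e.g. looping back to a test
        return starts_a_line or jumped_backward

This is what lets the compile.c special cases for 'for' and 'while' loops go
away, as the second hunk below shows.
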
diff --git a/Python/ceval.c b/Python/ceval.c
index 474a885..4f0877b 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -3591,33 +3591,30 @@
 	return result;
 }
 
+/* See Objects/lnotab_notes.txt for a description of how tracing works. */
 static int
 maybe_call_line_trace(Py_tracefunc func, PyObject *obj,
 		      PyFrameObject *frame, int *instr_lb, int *instr_ub,
 		      int *instr_prev)
 {
 	int result = 0;
+	int line = frame->f_lineno;
 
         /* If the last instruction executed isn't in the current
-           instruction window, reset the window.  If the last
-           instruction happens to fall at the start of a line or if it
-           represents a jump backwards, call the trace function.
+           instruction window, reset the window.
         */
-	if ((frame->f_lasti < *instr_lb || frame->f_lasti >= *instr_ub)) {
-		int line;
+	if (frame->f_lasti < *instr_lb || frame->f_lasti >= *instr_ub) {
 		PyAddrPair bounds;
-
-		line = PyCode_CheckLineNumber(frame->f_code, frame->f_lasti,
-					      &bounds);
-		if (line >= 0) {
-			frame->f_lineno = line;
-			result = call_trace(func, obj, frame,
-					    PyTrace_LINE, Py_None);
-		}
+		line = _PyCode_CheckLineNumber(frame->f_code, frame->f_lasti,
+					       &bounds);
 		*instr_lb = bounds.ap_lower;
 		*instr_ub = bounds.ap_upper;
 	}
-	else if (frame->f_lasti <= *instr_prev) {
+	/* If the last instruction falls at the start of a line or if
+           it represents a jump backwards, update the frame's line
+           number and call the trace function. */
+	if (frame->f_lasti == *instr_lb || frame->f_lasti < *instr_prev) {
+		frame->f_lineno = line;
 		result = call_trace(func, obj, frame, PyTrace_LINE, Py_None);
 	}
 	*instr_prev = frame->f_lasti;
diff --git a/Python/compile.c b/Python/compile.c
index 69321ae..8c85306 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -1646,9 +1646,6 @@
 	VISIT(c, expr, s->v.For.iter);
 	ADDOP(c, GET_ITER);
 	compiler_use_next_block(c, start);
-	/* for expressions must be traced on each iteration,
-	   so we need to set an extra line number. */
-	c->u->u_lineno_set = false;
 	ADDOP_JREL(c, FOR_ITER, cleanup);
 	VISIT(c, expr, s->v.For.target);
 	VISIT_SEQ(c, stmt, s->v.For.body);
@@ -1694,9 +1691,6 @@
 	if (!compiler_push_fblock(c, LOOP, loop))
 		return 0;
 	if (constant == -1) {
-		/* while expressions must be traced on each iteration,
-		   so we need to set an extra line number. */
-		c->u->u_lineno_set = false;
 		VISIT(c, expr, s->v.While.test);
 		ADDOP_JABS(c, POP_JUMP_IF_FALSE, anchor);
 	}
@@ -3493,51 +3487,9 @@
 	return size;
 }
 
-/* All about a_lnotab.
-
-c_lnotab is an array of unsigned bytes disguised as a Python string.
-It is used to map bytecode offsets to source code line #s (when needed
-for tracebacks).
-
-The array is conceptually a list of
-    (bytecode offset increment, line number increment)
-pairs.	The details are important and delicate, best illustrated by example:
-
-    byte code offset	source code line number
-	0		    1
-	6		    2
-       50		    7
-      350		  307
-      361		  308
-
-The first trick is that these numbers aren't stored, only the increments
-from one row to the next (this doesn't really work, but it's a start):
-
-    0, 1,  6, 1,  44, 5,  300, 300,  11, 1
-
-The second trick is that an unsigned byte can't hold negative values, or
-values larger than 255, so (a) there's a deep assumption that byte code
-offsets and their corresponding line #s both increase monotonically, and (b)
-if at least one column jumps by more than 255 from one row to the next, more
-than one pair is written to the table. In case #b, there's no way to know
-from looking at the table later how many were written.	That's the delicate
-part.  A user of c_lnotab desiring to find the source line number
-corresponding to a bytecode address A should do something like this
-
-    lineno = addr = 0
-    for addr_incr, line_incr in c_lnotab:
-	addr += addr_incr
-	if addr > A:
-	    return lineno
-	lineno += line_incr
-
-In order for this to work, when the addr field increments by more than 255,
-the line # increment in each pair generated must be 0 until the remaining addr
-increment is < 256.  So, in the example above, assemble_lnotab (it used
-to be called com_set_lineno) should not (as was actually done until 2.2)
-expand 300, 300 to 255, 255, 45, 45, 
-	    but to 255,	  0, 45, 255, 0, 45.
-*/
+/* Appends a pair to the end of the line number table, a_lnotab, representing
+   the instruction's bytecode offset and line number.  See
+   Objects/lnotab_notes.txt for the description of the line number table. */
 
 static int
 assemble_lnotab(struct assembler *a, struct instr *i)