#! /usr/bin/env python

"""Consolidate a bunch of CVS or RCS logs read from stdin.

Input should be the output of a CVS or RCS logging command, e.g.

    cvs log -rrelease14

which dumps all log messages from release 1.4 upwards (assuming that
release 1.4 was tagged with tag 'release14').

This collects all the revision records and outputs them sorted by date
rather than by file, collapsing duplicate revision records, i.e.,
records with the same message for different files.

The -t option causes it to truncate (discard) the last revision log
entry; this is useful when using something like the above cvs log
command, which shows the revisions including the given tag, while you
probably want everything *since* that tag.

XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7.

"""

import os, sys, getopt, string, re

sep1 = '='*77 + '\n'    # file separator
sep2 = '-'*28 + '\n'    # revision separator
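
# For orientation, the input this script expects looks roughly like the
# sketch below.  It is reconstructed from the parsing code in this file;
# the exact header lines, spacing and extra fields vary between CVS/RCS
# versions, and the file/author names are made up:
#
#   RCS file: ...
#   Working file: Lib/spam.py
#   ... more per-file header lines ...
#   ----------------------------                  (sep2: 28 dashes)
#   revision 1.2
#   date: 1997/08/14 22:04:00;  author: guido;  state: Exp;  lines: +2 -1
#   Log message text, possibly spanning several lines.
#   ----------------------------
#   revision 1.1
#   ...
#   ========================================...=  (sep1: 77 '=' signs)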

def main():
    """Main program"""
    truncate_last = 0
    opts, args = getopt.getopt(sys.argv[1:], "t")
    for o, a in opts:
        if o == '-t':
            truncate_last = 1
    database = []
    while 1:
        chunk = read_chunk(sys.stdin)
        if not chunk:
            break
        records = digest_chunk(chunk)
        if truncate_last:
            del records[-1]
        database[len(database):] = records   # append this file's records
    database.sort()
    database.reverse()
    format_output(database)

def read_chunk(fp):
    """Read a chunk -- data for one file, ending with sep1.

    Split the chunk in parts separated by sep2.

    """
    chunk = []
    lines = []
    while 1:
        line = fp.readline()
        if not line:
            break
        if line == sep1:
            if lines:
                chunk.append(lines)
            break
        if line == sep2:
            if lines:
                chunk.append(lines)
                lines = []
        else:
            lines.append(line)
    return chunk

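# A chunk, as returned by read_chunk(), is a list of line lists.  An
# illustrative example (the concrete strings are made up):
#
#   chunk[0]   the per-file header, e.g.
#              ['RCS file: ...\n', 'Working file: Lib/spam.py\n', ...]
#   chunk[1:]  one list of lines per revision entry, e.g.
#              ['revision 1.2\n', 'date: 1997/08/14 22:04:00;  ...\n',
#               'log message line(s)\n', ...]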
def digest_chunk(chunk):
    """Digest a chunk -- extract working file name and revisions"""
    lines = chunk[0]
    key = 'Working file:'
    keylen = len(key)
    for line in lines:
        if line[:keylen] == key:
            working_file = string.strip(line[keylen:])
            break
    else:
        working_file = None
    records = []
    for lines in chunk[1:]:
        revline = lines[0]
        dateline = lines[1]
        text = lines[2:]
        words = string.split(dateline)
        if len(words) >= 3 and words[0] == 'date:':
            dateword = words[1]
            timeword = words[2]
            if timeword[-1:] == ';':
                timeword = timeword[:-1]    # strip the trailing ';'
            date = dateword + ' ' + timeword
        else:
            date = None
        words = string.split(revline)
        if len(words) >= 2 and words[0] == 'revision':
            rev = words[1]
        else:
            rev = None
        text.insert(0, revline)
        records.append((date, working_file, rev, text))
    return records

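# Each record is a (date, working_file, rev, text) tuple, so sorting the
# database in main() orders primarily on the date string (e.g.
# '1997/08/14 22:04:00', which compares chronologically as plain text) and
# reverse() then puts the newest entries first.  format_output() walks that
# list and groups consecutive entries that share the same log text.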
def format_output(database):
    prevtext = None
    prev = []
    database.append((None, None, None, None))   # sentinel to flush the last group
    for (date, working_file, rev, text) in database:
        if text != prevtext:
            if prev:
                print sep2,
                for (p_date, p_working_file, p_rev) in prev:
                    print p_date, p_working_file
                sys.stdout.writelines(prevtext)
            prev = []
        prev.append((date, working_file, rev))
        prevtext = text

main()