##// END OF EJS Templates
graphlog: remove unused ASCIIDATA constant...
Patrick Mezard -
r17164:8299a9ad default
parent child Browse files
Show More
@@ -1,584 +1,582
1 # ASCII graph log extension for Mercurial
1 # ASCII graph log extension for Mercurial
2 #
2 #
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to view revision graphs from a shell
8 '''command to view revision graphs from a shell
9
9
10 This extension adds a --graph option to the incoming, outgoing and log
10 This extension adds a --graph option to the incoming, outgoing and log
11 commands. When this options is given, an ASCII representation of the
11 commands. When this options is given, an ASCII representation of the
12 revision graph is also shown.
12 revision graph is also shown.
13 '''
13 '''
14
14
15 from mercurial.cmdutil import show_changeset
15 from mercurial.cmdutil import show_changeset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial import cmdutil, commands, extensions, scmutil
17 from mercurial import cmdutil, commands, extensions, scmutil
18 from mercurial import hg, util, graphmod, templatekw, revset
18 from mercurial import hg, util, graphmod, templatekw, revset
19
19
# Extension command registration table, populated by the @command decorator
# below. The unused ASCIIDATA constant was removed (see r17164).
cmdtable = {}
command = cmdutil.command(cmdtable)
# Marks this extension as distributed with Mercurial itself for the
# purposes of bug reports and compatibility checks.
testedwith = 'internal'
def asciiedges(type, char, lines, seen, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()

    Yields (type, char, lines, (nodeidx, edges, ncols, nmorecols))
    tuples consumable by ascii(). ``seen`` is the mutable list of active
    columns and is updated in place to reflect the next row.
    """
    if rev not in seen:
        seen.append(rev)
    nodeidx = seen.index(rev)

    knownparents = []
    newparents = []
    for parent in parents:
        if parent in seen:
            knownparents.append(parent)
        else:
            newparents.append(parent)

    ncols = len(seen)
    nextseen = seen[:]
    # The current node's column is replaced by its not-yet-seen parents.
    nextseen[nodeidx:nodeidx + 1] = newparents
    edges = [(nodeidx, nextseen.index(p)) for p in knownparents]

    while len(newparents) > 2:
        # ascii() only knows how to add or remove a single column between two
        # calls. Nodes with more than two parents break this constraint so we
        # introduce intermediate expansion lines to grow the active node list
        # slowly.
        edges.append((nodeidx, nodeidx))
        edges.append((nodeidx, nodeidx + 1))
        nmorecols = 1
        yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
        char = '\\'
        lines = []
        nodeidx += 1
        ncols += 1
        edges = []
        del newparents[0]

    if len(newparents) > 0:
        edges.append((nodeidx, nodeidx))
    if len(newparents) > 1:
        edges.append((nodeidx, nodeidx + 1))
    nmorecols = len(nextseen) - ncols
    seen[:] = nextseen
    yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
68
66
69 def _fixlongrightedges(edges):
67 def _fixlongrightedges(edges):
70 for (i, (start, end)) in enumerate(edges):
68 for (i, (start, end)) in enumerate(edges):
71 if end > start:
69 if end > start:
72 edges[i] = (start, end + 1)
70 edges[i] = (start, end + 1)
73
71
74 def _getnodelineedgestail(
72 def _getnodelineedgestail(
75 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
73 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
76 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
74 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
77 # Still going in the same non-vertical direction.
75 # Still going in the same non-vertical direction.
78 if n_columns_diff == -1:
76 if n_columns_diff == -1:
79 start = max(node_index + 1, p_node_index)
77 start = max(node_index + 1, p_node_index)
80 tail = ["|", " "] * (start - node_index - 1)
78 tail = ["|", " "] * (start - node_index - 1)
81 tail.extend(["/", " "] * (n_columns - start))
79 tail.extend(["/", " "] * (n_columns - start))
82 return tail
80 return tail
83 else:
81 else:
84 return ["\\", " "] * (n_columns - node_index - 1)
82 return ["\\", " "] * (n_columns - node_index - 1)
85 else:
83 else:
86 return ["|", " "] * (n_columns - node_index - 1)
84 return ["|", " "] * (n_columns - node_index - 1)
87
85
88 def _drawedges(edges, nodeline, interline):
86 def _drawedges(edges, nodeline, interline):
89 for (start, end) in edges:
87 for (start, end) in edges:
90 if start == end + 1:
88 if start == end + 1:
91 interline[2 * end + 1] = "/"
89 interline[2 * end + 1] = "/"
92 elif start == end - 1:
90 elif start == end - 1:
93 interline[2 * start + 1] = "\\"
91 interline[2 * start + 1] = "\\"
94 elif start == end:
92 elif start == end:
95 interline[2 * start] = "|"
93 interline[2 * start] = "|"
96 else:
94 else:
97 if 2 * end >= len(nodeline):
95 if 2 * end >= len(nodeline):
98 continue
96 continue
99 nodeline[2 * end] = "+"
97 nodeline[2 * end] = "+"
100 if start > end:
98 if start > end:
101 (start, end) = (end, start)
99 (start, end) = (end, start)
102 for i in range(2 * start + 1, 2 * end):
100 for i in range(2 * start + 1, 2 * end):
103 if nodeline[i] != "+":
101 if nodeline[i] != "+":
104 nodeline[i] = "-"
102 nodeline[i] = "-"
105
103
106 def _getpaddingline(ni, n_columns, edges):
104 def _getpaddingline(ni, n_columns, edges):
107 line = []
105 line = []
108 line.extend(["|", " "] * ni)
106 line.extend(["|", " "] * ni)
109 if (ni, ni - 1) in edges or (ni, ni) in edges:
107 if (ni, ni - 1) in edges or (ni, ni) in edges:
110 # (ni, ni - 1) (ni, ni)
108 # (ni, ni - 1) (ni, ni)
111 # | | | | | | | |
109 # | | | | | | | |
112 # +---o | | o---+
110 # +---o | | o---+
113 # | | c | | c | |
111 # | | c | | c | |
114 # | |/ / | |/ /
112 # | |/ / | |/ /
115 # | | | | | |
113 # | | | | | |
116 c = "|"
114 c = "|"
117 else:
115 else:
118 c = " "
116 c = " "
119 line.extend([c, " "])
117 line.extend([c, " "])
120 line.extend(["|", " "] * (n_columns - ni - 1))
118 line.extend(["|", " "] * (n_columns - ni - 1))
121 return line
119 return line
122
120
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    # [previous coldiff, previous node column index]
    return [0, 0]
126
124
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """

    idx, edges, ncols, coldiff = coldata
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        _fixlongrightedges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o)
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])

    nodeline.extend(
        _getnodelineedgestail(idx, state[1], ncols, coldiff,
                              state[0], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    if coldiff == -1:
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        n_spaces = 2
        edge_ch = "|"
    else:
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))

    # draw edges from the current node to its parents
    _drawedges(edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(_getpaddingline(idx, ncols, edges))
    lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)

    # print lines
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over
    state[0] = coldiff
    state[1] = idx
228
226
229 def _checkunsupportedflags(pats, opts):
227 def _checkunsupportedflags(pats, opts):
230 for op in ["newest_first"]:
228 for op in ["newest_first"]:
231 if op in opts and opts[op]:
229 if op in opts and opts[op]:
232 raise util.Abort(_("-G/--graph option is incompatible with --%s")
230 raise util.Abort(_("-G/--graph option is incompatible with --%s")
233 % op.replace("_", "-"))
231 % op.replace("_", "-"))
234
232
def _makefilematcher(repo, pats, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # One-element list so the closure below can mutate the flag.
    fcacheready = [False]
    pctx = repo['.']
    wctx = repo[None]

    def populate():
        # Map each linkrev to the set of file names it touches along the
        # --follow traversal, seeding with the file contexts themselves.
        for fn in pats:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.match(wctx, fcache.get(rev, []), default='path')

    return filematcher
262
260
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Maps a log option to (revset template, join operator for list values).
    opt2revset = {
        'no_merges':        ('not merge()', None),
        'only_merges':      ('merge()', None),
        '_ancestors':       ('ancestors(%(val)s)', None),
        '_fancestors':      ('_firstancestors(%(val)s)', None),
        '_descendants':     ('descendants(%(val)s)', None),
        '_fdescendants':    ('_firstdescendants(%(val)s)', None),
        '_matchfiles':      ('_matchfiles(%(val)s)', None),
        'date':             ('date(%(val)r)', None),
        'branch':           ('branch(%(val)r)', ' or '),
        '_patslog':         ('filelog(%(val)r)', ' or '),
        '_patsfollow':      ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword':          ('keyword(%(val)r)', ' or '),
        'prune':            ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user':             ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    followfirst = opts.get('follow_first') and 1 or 0
    # --follow with FILE behaviour depends on revs...
    startrev = revs[0]
    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfile() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    pctx = repo[None]
    match, pats = scmutil.matchandpats(pctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in pctx:
                raise util.Abort(_('cannot follow file not in parent '
                                   'revision: "%s"') % f)
            filelog = repo.file(f)
            if not len(filelog):
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True
    if slowpath:
        # See cmdutil.walkchangerevs() slow path.
        #
        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
    else:
        if follow:
            fpats = ('_patsfollow', '_patsfollowfirst')
            fnopats = (('_ancestors', '_fancestors'),
                       ('_descendants', '_fdescendants'))
            if pats:
                # follow() revset inteprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                opts[fnopats[followdescendants][followfirst]] = str(startrev)
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        if follow:
            filematcher = _makefilematcher(repo, pats, followfirst)
        else:
            filematcher = lambda rev: match

    # Assemble the final revset expression by 'and'-ing every active option.
    expr = []
    for op, val in opts.iteritems():
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
383
381
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    def increasingrevs(repo, revs, matcher):
        # The sorted input rev sequence is chopped in sub-sequences
        # which are sorted in ascending order and passed to the
        # matcher. The filtered revs are sorted again as they were in
        # the original sub-sequence. This achieve several things:
        #
        # - getlogrevs() now returns a generator which behaviour is
        #   adapted to log need. First results come fast, last ones
        #   are batched for performances.
        #
        # - revset matchers often operate faster on revision in
        #   changelog order, because most filters deal with the
        #   changelog.
        #
        # - revset matchers can reorder revisions. "A or B" typically
        #   returns returns the revision matching A then the revision
        #   matching B. We want to hide this internal implementation
        #   detail from the caller, and sorting the filtered revision
        #   again achieves this.
        for i, window in cmdutil.increasingwindows(0, len(revs), windowsize=1):
            orevs = revs[i:i + window]
            nrevs = set(matcher(repo, sorted(orevs)))
            for rev in orevs:
                if rev in nrevs:
                    yield rev

    if not len(repo):
        return iter([]), None, None
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    else:
        if follow and len(repo) > 0:
            revs = scmutil.revrange(repo, ['.:0'])
        else:
            revs = range(len(repo) - 1, -1, -1)
    if not revs:
        return iter([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr)
        revs = increasingrevs(repo, revs, matcher)
    if not opts.get('hidden'):
        # --hidden is still experimental and not worth a dedicated revset
        # yet. Fortunately, filtering revision number is fast.
        revs = (r for r in revs if r not in repo.changelog.hiddenrevs)
    else:
        revs = iter(revs)
    return revs, expr, filematcher
443
441
def generate(ui, dag, displayer, showparents, edgefn, getrenamed=None,
             filematcher=None):
    """Render ``dag`` as an ASCII graph through ``displayer``.

    For each DAG node the changeset text is produced by ``displayer``,
    then ``edgefn`` (e.g. asciiedges) converts the node/parents into
    column data which ascii() prints alongside the text.
    """
    seen, state = [], asciistate()
    for rev, type, ctx, parents in dag:
        # '@' marks working directory parents, 'x' obsolete changesets.
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # Capture the buffered output so it can be interleaved with the
        # graph columns, dropping a trailing empty line.
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            ascii(ui, state, type, char, lines, coldata)
    displayer.close()
472
470
@command('glog',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets (DEPRECATED)')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')),
    ] + commands.logopts + commands.walkopts,
    _('[OPTION]... [FILE]'))
def graphlog(ui, repo, *pats, **opts):
    """show revision history alongside an ASCII revision graph

    Print a revision history alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """
    revs, expr, filematcher = getlogrevs(repo, pats, opts)
    # Graph output runs newest-first; use a real bool instead of the
    # truthy int 1 for the reverse flag.
    revs = sorted(revs, reverse=True)
    # Apply --limit after sorting so the newest revisions are kept.
    limit = cmdutil.loglimit(opts)
    if limit is not None:
        revs = revs[:limit]
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # Only compute renames up to the highest requested revision.
        endrev = None
        if opts.get('rev'):
            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    generate(ui, revdag, displayer, showparents, asciiedges, getrenamed,
             filematcher)
523
521
def graphrevs(repo, nodes, opts):
    """Return a graphmod DAG over *nodes*, newest first, honoring --limit.

    Reverses *nodes* in place (the caller's list is mutated), then
    truncates to the log limit before building the DAG.
    """
    nodes.reverse()
    maxrevs = cmdutil.loglimit(opts)
    if maxrevs is not None:
        nodes = nodes[:maxrevs]
    return graphmod.nodes(repo, nodes)
530
528
def goutgoing(ui, repo, dest=None, **opts):
    """show the outgoing changesets alongside an ASCII revision graph

    Print the outgoing changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """
    _checkunsupportedflags([], opts)
    outgoing = hg._outgoing(ui, repo, dest, opts)
    if outgoing is None:
        # Nothing to display; hg._outgoing has already told the user.
        return

    revdag = graphrevs(repo, outgoing, opts)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    parentnodes = [ctx.node() for ctx in repo[None].parents()]
    generate(ui, revdag, displayer, parentnodes, asciiedges)
550
548
def gincoming(ui, repo, source="default", **opts):
    """show the incoming changesets alongside an ASCII revision graph

    Print the incoming changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """
    _checkunsupportedflags([], opts)

    def subreporecurse():
        # Non-zero asks hg._incoming to recurse into subrepositories.
        return 1

    def display(other, chlist, displayer):
        # Build the DAG against the remote repo, since the changesets in
        # chlist may not exist locally yet.
        revdag = graphrevs(other, chlist, opts)
        parentnodes = [ctx.node() for ctx in repo[None].parents()]
        generate(ui, revdag, displayer, parentnodes, asciiedges)

    hg._incoming(display, subreporecurse, ui, repo, source, opts, buffered=True)
570
568
def uisetup(ui):
    '''Initialize the extension.'''
    # Teach log/incoming/outgoing a -G/--graph option that diverts to the
    # graph-drawing implementations defined above.
    for name, wrapper in [('log', graphlog),
                          ('incoming', gincoming),
                          ('outgoing', goutgoing)]:
        _wrapcmd(name, commands.table, wrapper)
576
574
def _wrapcmd(cmd, table, wrapfn):
    '''wrap the command'''
    def graph(orig, *args, **kwargs):
        # Fall through to the original command unless -G/--graph was given.
        if not kwargs['graph']:
            return orig(*args, **kwargs)
        return wrapfn(*args, **kwargs)
    wrapped = extensions.wrapcommand(table, cmd, graph)
    # Register the new flag on the wrapped command's option list.
    wrapped[1].append(('G', 'graph', None, _("show the revision DAG")))
General Comments 0
You need to be logged in to leave comments. Login now