##// END OF EJS Templates
graphlog: reduce duplication in --follow code
Patrick Mezard -
r16433:e38b2993 default
parent child Browse files
Show More
@@ -1,569 +1,560 b''
1 # ASCII graph log extension for Mercurial
1 # ASCII graph log extension for Mercurial
2 #
2 #
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to view revision graphs from a shell
8 '''command to view revision graphs from a shell
9
9
10 This extension adds a --graph option to the incoming, outgoing and log
10 This extension adds a --graph option to the incoming, outgoing and log
11 commands. When this options is given, an ASCII representation of the
11 commands. When this options is given, an ASCII representation of the
12 revision graph is also shown.
12 revision graph is also shown.
13 '''
13 '''
14
14
15 from mercurial.cmdutil import show_changeset
15 from mercurial.cmdutil import show_changeset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import nullrev
17 from mercurial.node import nullrev
18 from mercurial import cmdutil, commands, extensions, scmutil
18 from mercurial import cmdutil, commands, extensions, scmutil
19 from mercurial import hg, util, graphmod, templatekw, revset
19 from mercurial import hg, util, graphmod, templatekw, revset
20
20
# Command table populated by the @command decorator used below.
cmdtable = {}
command = cmdutil.command(cmdtable)

# Type tag for node payloads fed to ascii() by the DAG walkers.
ASCIIDATA = 'ASC'
def asciiedges(type, char, lines, seen, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()

    'seen' is the mutable list of active columns; it is updated in
    place so consecutive calls share column assignments.
    """
    if rev not in seen:
        seen.append(rev)
    nodeidx = seen.index(rev)

    # Split parents into those already assigned a column and new ones.
    knownparents = [p for p in parents if p in seen]
    newparents = [p for p in parents if p not in seen]

    ncols = len(seen)
    nextseen = seen[:]
    nextseen[nodeidx:nodeidx + 1] = newparents
    edges = [(nodeidx, nextseen.index(p)) for p in knownparents]

    while len(newparents) > 2:
        # ascii() only knows how to add or remove a single column
        # between two calls. Nodes with more than two parents break this
        # constraint, so emit intermediate expansion rows that grow the
        # active column list one parent at a time.
        edges.append((nodeidx, nodeidx))
        edges.append((nodeidx, nodeidx + 1))
        yield (type, char, lines, (nodeidx, edges, ncols, 1))
        char = '\\'
        lines = []
        nodeidx += 1
        ncols += 1
        edges = []
        del newparents[0]

    if newparents:
        edges.append((nodeidx, nodeidx))
    if len(newparents) > 1:
        edges.append((nodeidx, nodeidx + 1))
    nmorecols = len(nextseen) - ncols
    seen[:] = nextseen
    yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
68
68
def fix_long_right_edges(edges):
    """Stretch every rightward edge one extra column, in place."""
    for i in range(len(edges)):
        start, end = edges[i]
        if start < end:
            edges[i] = (start, end + 1)
73
73
def get_nodeline_edges_tail(
    node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
    """Return the edge characters drawn to the right of the node symbol."""
    remaining = n_columns - node_index - 1
    if not (fix_tail and n_columns_diff == p_diff and n_columns_diff != 0):
        # Vertical tail (or a direction change): plain pipes.
        return ["|", " "] * remaining
    # Still going in the same non-vertical direction.
    if n_columns_diff == -1:
        start = max(node_index + 1, p_node_index)
        tail = ["|", " "] * (start - node_index - 1)
        tail.extend(["/", " "] * (n_columns - start))
        return tail
    return ["\\", " "] * remaining
87
87
def draw_edges(edges, nodeline, interline):
    """Render parent edges into nodeline and interline (both mutated)."""
    for start, end in edges:
        if start == end + 1:
            # One column to the left: slanted edge.
            interline[2 * end + 1] = "/"
        elif start == end - 1:
            # One column to the right: slanted edge.
            interline[2 * start + 1] = "\\"
        elif start == end:
            # Same column: vertical edge.
            interline[2 * start] = "|"
        else:
            # Distant column: draw a "+" junction joined by dashes.
            if 2 * end >= len(nodeline):
                continue
            nodeline[2 * end] = "+"
            lo, hi = min(start, end), max(start, end)
            for i in range(2 * lo + 1, 2 * hi):
                if nodeline[i] != "+":
                    nodeline[i] = "-"
105
105
def get_padding_line(ni, n_columns, edges):
    """Build the optional padding row inserted below a wide merge node."""
    # Columns left of the node keep their vertical edges.
    line = ["|", " "] * ni
    # The node's own column keeps an edge only when an incoming edge
    # terminates here:
    # (ni, ni - 1)      (ni, ni)
    # | | | |           | | | |
    # +---o |           | o---+
    # | | c |           | c | |
    # | |/ /            | |/ /
    # | | |             | | |
    if (ni, ni - 1) in edges or (ni, ni) in edges:
        line.extend(["|", " "])
    else:
        line.extend([" ", " "])
    # Columns right of the node keep their vertical edges too.
    line.extend(["|", " "] * (n_columns - ni - 1))
    return line
122
122
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    # [previous coldiff, previous node column]; ascii() overwrites both
    # entries at the end of every call.
    return [0, 0]
126
126
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data == ASCIIDATA.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """

    idx, edges, ncols, coldiff = coldata
    # ascii() can only handle one column appearing or disappearing per row.
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        fix_long_right_edges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o)
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])

    nodeline.extend(
        get_nodeline_edges_tail(idx, state[1], ncols, coldiff,
                                state[0], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    if coldiff == -1:
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        n_spaces = 2
        edge_ch = "|"
    else:
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))

    # draw edges from the current node to its parents
    draw_edges(edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(get_padding_line(idx, ncols, edges))
    lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)

    # print lines
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over
    state[0] = coldiff
    state[1] = idx
228
228
def get_revs(repo, rev_opt):
    """Return the (max, min) revision numbers selected by rev_opt.

    Falls back to the full repository range (tip down to 0) when no
    revisions are requested, and to (nullrev, nullrev) when the
    requested range resolves to nothing.
    """
    if not rev_opt:
        return (len(repo) - 1, 0)
    revs = scmutil.revrange(repo, rev_opt)
    if not revs:
        return (nullrev, nullrev)
    return (max(revs), min(revs))
237
237
def check_unsupported_flags(pats, opts):
    """Abort when an option that -G/--graph cannot honour is set."""
    for op in ["newest_first"]:
        if opts.get(op):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % op.replace("_", "-"))
243
243
def _makefilematcher(repo, pats, followfirst):
    """Return a callable rev -> match object selecting, for each
    revision, the files whose diff/stat should be shown when following
    renames/copies."""
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # One-element list so the nested filematcher() can flip the flag
    # (no "nonlocal" in Python 2).
    fcacheready = [False]
    pctx = repo['.']
    wctx = repo[None]

    def populate():
        # Map each ancestor's linkrev to the set of file names it had
        # at that point, starting from the working parent's version.
        for fn in pats:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.match(wctx, fcache.get(rev, []), default='path')

    return filematcher
271
271
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Maps a log option name to (revset template, list join operator).
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    # Work on a copy: synthetic "_"-prefixed pseudo-options are added below.
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    # Normalized to 0/1 so it can index the fpats/fnopats tuples below.
    followfirst = opts.get('follow_first') and 1 or 0
    # --follow with FILE behaviour depends on revs...
    startrev = revs[0]
    # Also 0/1: follow descendants when the range is ascending.
    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfile() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    pctx = repo[None]
    match, pats = scmutil.matchandpats(pctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in pctx:
                raise util.Abort(_('cannot follow file not in parent '
                                   'revision: "%s"') % f)
            filelog = repo.file(f)
            if not len(filelog):
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True
    if slowpath:
        # See cmdutil.walkchangerevs() slow path.
        #
        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
    else:
        if follow:
            # Pick the follow/follow-first revset by indexing with
            # followfirst (0 or 1); without FILE arguments, pick
            # ancestors or descendants of startrev depending on the
            # direction of the requested range.
            fpats = ('_patsfollow', '_patsfollowfirst')
            fnopats = (('_ancestors', '_fancestors'),
                       ('_descendants', '_fdescendants'))
            if pats:
                opts[fpats[followfirst]] = list(pats)
            else:
                opts[fnopats[followdescendants][followfirst]] = str(startrev)
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        if follow:
            filematcher = _makefilematcher(repo, pats, followfirst)
        else:
            filematcher = lambda rev: match

    # Translate every set option into its revset fragment and AND them.
    expr = []
    for op, val in opts.iteritems():
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
399
390
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is a list of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    if not len(repo):
        # Empty repository: nothing to show.
        return [], None, None
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    else:
        if follow and len(repo) > 0:
            # Default to the working parent's ancestry when following.
            revs = scmutil.revrange(repo, ['.:0'])
        else:
            # Otherwise all revisions, newest first.
            revs = range(len(repo) - 1, -1, -1)
    if not revs:
        return [], None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Evaluate revisions in changelog order for performance
        # reasons but preserve the original sequence order in the
        # filtered result.
        matched = set(revset.match(repo.ui, expr)(repo, sorted(revs)))
        revs = [r for r in revs if r in matched]
    if not opts.get('hidden'):
        # --hidden is still experimental and not worth a dedicated revset
        # yet. Fortunately, filtering revision number is fast.
        revs = [r for r in revs if r not in repo.changelog.hiddenrevs]
    return revs, expr, filematcher
434
425
def generate(ui, dag, displayer, showparents, edgefn, getrenamed=None,
             filematcher=None):
    """Render a changeset DAG as an ASCII graph on ui.

    For each (rev, type, ctx, parents) item in 'dag', format the
    changeset with 'displayer', turn it into graph rows via 'edgefn'
    and print them with ascii(). 'showparents' contains the nodes
    drawn with '@' (working directory parents); 'getrenamed' and
    'filematcher' are optional hooks for copy tracking and per-rev
    file filtering.
    """
    seen, state = [], asciistate()
    for rev, type, ctx, parents in dag:
        # Working directory parents get '@', everything else 'o'.
        char = ctx.node() in showparents and '@' or 'o'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # The displayer buffers output per rev in 'hunk'; the trailing
        # element of split('\n') is the empty string after the final
        # newline, hence [:-1].
        lines = displayer.hunk.pop(rev).split('\n')[:-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            ascii(ui, state, type, char, lines, coldata)
    displayer.close()
457
448
@command('glog',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets (DEPRECATED)')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')),
    ] + commands.logopts + commands.walkopts,
    _('[OPTION]... [FILE]'))
def graphlog(ui, repo, *pats, **opts):
    """show revision history alongside an ASCII revision graph

    Print a revision history alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """

    revs, expr, filematcher = getlogrevs(repo, pats, opts)
    # Display newest revisions first; use a real boolean for the flag.
    revs = sorted(revs, reverse=True)
    limit = cmdutil.loglimit(opts)
    if limit is not None:
        revs = revs[:limit]
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            # Do not chase renames past the newest requested revision.
            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    # Working directory parents are marked with '@' in the graph.
    showparents = [ctx.node() for ctx in repo[None].parents()]
    generate(ui, revdag, displayer, showparents, asciiedges, getrenamed,
             filematcher)
508
499
def graphrevs(repo, nodes, opts):
    """Build a graph DAG iterator over *nodes*, newest first.

    Reverses *nodes* in place, honors the --limit option, and returns
    the graphmod walker for the (possibly truncated) node list.
    """
    # in-place reversal: callers observe the reordered list
    nodes.reverse()
    limit = cmdutil.loglimit(opts)
    if limit is None:
        return graphmod.nodes(repo, nodes)
    return graphmod.nodes(repo, nodes[:limit])
515
506
def goutgoing(ui, repo, dest=None, **opts):
    """show the outgoing changesets alongside an ASCII revision graph

    Print the outgoing changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """

    check_unsupported_flags([], opts)
    outgoing = hg._outgoing(ui, repo, dest, opts)
    if outgoing is None:
        # nothing to push; _outgoing already reported it
        return

    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    generate(ui, graphrevs(repo, outgoing, opts), displayer, showparents,
             asciiedges)
535
526
def gincoming(ui, repo, source="default", **opts):
    """show the incoming changesets alongside an ASCII revision graph

    Print the incoming changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """
    check_unsupported_flags([], opts)

    def subreporecurse():
        # always descend into subrepositories
        return 1

    def display(other, chlist, displayer):
        # render the fetched changesets as an ASCII graph
        parents = [ctx.node() for ctx in repo[None].parents()]
        generate(ui, graphrevs(other, chlist, opts), displayer, parents,
                 asciiedges)

    hg._incoming(display, subreporecurse, ui, repo, source, opts, buffered=True)
555
546
def uisetup(ui):
    '''Initialize the extension.'''
    # add -G/--graph support to the built-in log/incoming/outgoing commands
    for name, wrapper in (('log', graphlog),
                          ('incoming', gincoming),
                          ('outgoing', goutgoing)):
        _wrapcmd(name, commands.table, wrapper)
561
552
def _wrapcmd(cmd, table, wrapfn):
    '''wrap the command'''
    def graphdispatch(orig, *args, **kwargs):
        # fall through to the original command unless -G/--graph was given
        if not kwargs['graph']:
            return orig(*args, **kwargs)
        return wrapfn(*args, **kwargs)
    entry = extensions.wrapcommand(table, cmd, graphdispatch)
    entry[1].append(('G', 'graph', None, _("show the revision DAG")))
General Comments 0
You need to be logged in to leave comments. Login now