##// END OF EJS Templates
graphlog: remove unused get_revs() function
Patrick Mezard -
r17162:868c256c default
parent child Browse files
Show More
@@ -1,594 +1,584 b''
1 # ASCII graph log extension for Mercurial
1 # ASCII graph log extension for Mercurial
2 #
2 #
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to view revision graphs from a shell
8 '''command to view revision graphs from a shell
9
9
10 This extension adds a --graph option to the incoming, outgoing and log
10 This extension adds a --graph option to the incoming, outgoing and log
11 commands. When this options is given, an ASCII representation of the
11 commands. When this options is given, an ASCII representation of the
12 revision graph is also shown.
12 revision graph is also shown.
13 '''
13 '''
14
14
15 from mercurial.cmdutil import show_changeset
15 from mercurial.cmdutil import show_changeset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import nullrev
18 from mercurial import cmdutil, commands, extensions, scmutil
17 from mercurial import cmdutil, commands, extensions, scmutil
19 from mercurial import hg, util, graphmod, templatekw, revset
18 from mercurial import hg, util, graphmod, templatekw, revset
20
19
# Table of commands added by this extension; populated below by the
# @command decorator built from it.
cmdtable = {}
command = cmdutil.command(cmdtable)
# Mercurial convention: extensions bundled with core declare 'internal'.
testedwith = 'internal'

# Type indicator for node payloads handed to ascii() (see its docstring).
ASCIIDATA = 'ASC'
26
25
def asciiedges(type, char, lines, seen, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()"""
    # Register this revision's column, creating one if no edge has
    # reached it yet.
    if rev not in seen:
        seen.append(rev)
    col = seen.index(rev)

    # Split parents into those already owning a column ("known") and
    # brand new ones that will be spliced in at this node's position.
    known = [p for p in parents if p in seen]
    fresh = [p for p in parents if p not in seen]

    width = len(seen)
    upcoming = seen[:]
    upcoming[col:col + 1] = fresh
    edges = [(col, upcoming.index(p)) for p in known]

    while len(fresh) > 2:
        # ascii() only knows how to add or remove a single column between two
        # calls. Nodes with more than two parents break this constraint so we
        # introduce intermediate expansion lines to grow the active node list
        # slowly.
        edges.append((col, col))
        edges.append((col, col + 1))
        yield (type, char, lines, (col, edges, width, 1))
        char = '\\'
        lines = []
        col += 1
        width += 1
        edges = []
        del fresh[0]

    if fresh:
        edges.append((col, col))
    if len(fresh) > 1:
        edges.append((col, col + 1))
    growth = len(upcoming) - width
    seen[:] = upcoming
    yield (type, char, lines, (col, edges, width, growth))
69
68
def fix_long_right_edges(edges):
    """Stretch right-leaning edges one extra column, in place."""
    for idx, (a, b) in enumerate(edges):
        if b > a:
            edges[idx] = (a, b + 1)
74
73
def get_nodeline_edges_tail(
    node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
    """Return the edge characters trailing a node line."""
    # Only slant the tail when the graph keeps moving in the same
    # non-vertical direction as on the previous line.
    slanted = fix_tail and n_columns_diff == p_diff and n_columns_diff != 0
    if not slanted:
        return ["|", " "] * (n_columns - node_index - 1)
    if n_columns_diff != -1:
        return ["\\", " "] * (n_columns - node_index - 1)
    start = max(node_index + 1, p_node_index)
    tail = ["|", " "] * (start - node_index - 1)
    tail.extend(["/", " "] * (n_columns - start))
    return tail
88
87
def draw_edges(edges, nodeline, interline):
    """Render parent edges into nodeline/interline, mutating both in place."""
    for (col, pcol) in edges:
        if col == pcol + 1:
            # parent sits one column to the left
            interline[2 * pcol + 1] = "/"
        elif col == pcol - 1:
            # parent sits one column to the right
            interline[2 * col + 1] = "\\"
        elif col == pcol:
            # straight vertical edge
            interline[2 * col] = "|"
        else:
            # distant parent: draw a horizontal run terminated by '+'
            if 2 * pcol >= len(nodeline):
                continue
            nodeline[2 * pcol] = "+"
            lo, hi = min(col, pcol), max(col, pcol)
            for i in range(2 * lo + 1, 2 * hi):
                if nodeline[i] != "+":
                    nodeline[i] = "-"
106
105
def get_padding_line(ni, n_columns, edges):
    """Return a filler graph line inserted below a node row.

    The column under the node stays occupied ('|') only when an edge
    still runs through it:
        (ni, ni - 1)      (ni, ni)
        | | | |           | | | |
        +---o |           | o---+
        | | c |           | c | |
        | |/ /            | |/ /
        | | |             | | |
    """
    line = ["|", " "] * ni
    if (ni, ni - 1) in edges or (ni, ni) in edges:
        c = "|"
    else:
        c = " "
    line.extend([c, " "])
    line.extend(["|", " "] * (n_columns - ni - 1))
    return line
123
122
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    # A fresh two-slot list: [previous column diff, previous node index].
    # A new list per call so callers never share mutable state.
    return [0, 0]
127
126
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data == ASCIIDATA.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """

    # state carries rendering context between calls:
    # state[0] = coldiff of the previous node, state[1] = its column.
    idx, edges, ncols, coldiff = coldata
    assert -2 < coldiff < 2
    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        fix_long_right_edges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o)
    nodeline = ["|", " "] * idx
    nodeline.extend([char, " "])

    nodeline.extend(
        get_nodeline_edges_tail(idx, state[1], ncols, coldiff,
                                state[0], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = ["|", " "] * idx
    if coldiff == -1:
        # columns are contracting: edges lean left
        n_spaces = 1
        edge_ch = "/"
    elif coldiff == 0:
        # steady state: edges stay vertical
        n_spaces = 2
        edge_ch = "|"
    else:
        # columns are expanding: edges lean right
        n_spaces = 3
        edge_ch = "\\"
    shift_interline.extend(n_spaces * [" "])
    shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))

    # draw edges from the current node to its parents
    draw_edges(edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(get_padding_line(idx, ncols, edges))
    lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings
    while len(text) < len(lines):
        text.append("")
    if len(lines) < len(text):
        extra_interline = ["|", " "] * (ncols + coldiff)
        while len(lines) < len(text):
            lines.append(extra_interline)

    # print lines
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        # pad the graph part to a fixed width, then strip trailing blanks
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over
    state[0] = coldiff
    state[1] = idx
229
228
def get_revs(repo, rev_opt):
    """Resolve --rev options into a (start, stop) revision pair.

    NOTE(review): this helper does not appear to be called anywhere in
    the visible part of this module -- candidate for removal; confirm
    against the rest of the file.
    """
    if not rev_opt:
        # default: the whole repository, newest first
        return (len(repo) - 1, 0)
    revs = scmutil.revrange(repo, rev_opt)
    if not revs:
        return (nullrev, nullrev)
    return (max(revs), min(revs))
238
def check_unsupported_flags(pats, opts):
    """Abort when an option incompatible with --graph is in effect."""
    for op in ["newest_first"]:
        if opts.get(op):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % op.replace("_", "-"))
244
234
def _makefilematcher(repo, pats, followfirst):
    """Build a rev -> match-object callable for --patch/--stat with --follow.

    The cache mapping linkrevs to relevant file names is built lazily,
    on the first call of the returned callable.
    """
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    fcacheready = [False]  # one-element list so the closure can mutate it
    pctx = repo['.']
    wctx = repo[None]

    def populate():
        # For each pattern, walk the file context and all its ancestors,
        # recording which path was present at which linkrev.
        for fn in pats:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.match(wctx, fcache.get(rev, []), default='path')

    return filematcher
272
262
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Maps a log option to (revset template, join operator). The join
    # operator combines the per-value revsets when the option value is
    # a list; None means the value is scalar.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    # py2-era bool-to-int idiom: 1 when --follow-first, else 0
    followfirst = opts.get('follow_first') and 1 or 0
    # --follow with FILE behaviour depends on revs...
    startrev = revs[0]
    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfile() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    pctx = repo[None]
    match, pats = scmutil.matchandpats(pctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in pctx:
                raise util.Abort(_('cannot follow file not in parent '
                                   'revision: "%s"') % f)
            filelog = repo.file(f)
            if not len(filelog):
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True
    if slowpath:
        # See cmdutil.walkchangerevs() slow path.
        #
        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
    else:
        if follow:
            fpats = ('_patsfollow', '_patsfollowfirst')
            fnopats = (('_ancestors', '_fancestors'),
                       ('_descendants', '_fdescendants'))
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                opts[fnopats[followdescendants][followfirst]] = str(startrev)
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        if follow:
            filematcher = _makefilematcher(repo, pats, followfirst)
        else:
            filematcher = lambda rev: match

    # Translate every remaining populated option into a revset fragment
    # and AND them together.
    expr = []
    for op, val in opts.iteritems():
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
393
383
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    def increasingrevs(repo, revs, matcher):
        # The sorted input rev sequence is chopped in sub-sequences
        # which are sorted in ascending order and passed to the
        # matcher. The filtered revs are sorted again as they were in
        # the original sub-sequence. This achieves several things:
        #
        # - getlogrevs() now returns a generator which behaviour is
        #   adapted to log need. First results come fast, last ones
        #   are batched for performances.
        #
        # - revset matchers often operate faster on revision in
        #   changelog order, because most filters deal with the
        #   changelog.
        #
        # - revset matchers can reorder revisions. "A or B" typically
        #   returns the revision matching A then the revision
        #   matching B. We want to hide this internal implementation
        #   detail from the caller, and sorting the filtered revision
        #   again achieves this.
        for i, window in cmdutil.increasingwindows(0, len(revs), windowsize=1):
            orevs = revs[i:i + window]
            nrevs = set(matcher(repo, sorted(orevs)))
            for rev in orevs:
                if rev in nrevs:
                    yield rev

    # empty repository: nothing to iterate
    if not len(repo):
        return iter([]), None, None
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    else:
        if follow and len(repo) > 0:
            # follow from the working directory parent back to root
            revs = scmutil.revrange(repo, ['.:0'])
        else:
            # whole repository, newest first
            revs = range(len(repo) - 1, -1, -1)
    if not revs:
        return iter([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr)
        revs = increasingrevs(repo, revs, matcher)
    if not opts.get('hidden'):
        # --hidden is still experimental and not worth a dedicated revset
        # yet. Fortunately, filtering revision number is fast.
        revs = (r for r in revs if r not in repo.changelog.hiddenrevs)
    else:
        revs = iter(revs)
    return revs, expr, filematcher
453
443
def generate(ui, dag, displayer, showparents, edgefn, getrenamed=None,
             filematcher=None):
    """Render a DAG walk as an ASCII graph on *ui*.

    - dag yields (rev, type, ctx, parents) tuples.
    - showparents: changeset nodes to draw with '@' instead of 'o'.
    - edgefn: callable (e.g. asciiedges) expanding each node into one
      or more drawable rows.
    - getrenamed: optional callable (fn, rev) -> rename info, used to
      compute copies for the displayer.
    - filematcher: optional callable rev -> match object restricting
      which files are detailed.
    """
    seen, state = [], asciistate()
    for rev, type, ctx, parents in dag:
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        copies = None
        # NOTE(review): copies are only computed when ctx.rev() is
        # truthy, which also skips revision 0 -- confirm intended.
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # Render the changeset text through the displayer, then pull the
        # buffered hunk back out so it can be merged with the graph.
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            ascii(ui, state, type, char, lines, coldata)
    displayer.close()
482
472
@command('glog',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets (DEPRECATED)')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ('', 'hidden', False, _('show hidden changesets (DEPRECATED)')),
    ] + commands.logopts + commands.walkopts,
    _('[OPTION]... [FILE]'))
def graphlog(ui, repo, *pats, **opts):
    """show revision history alongside an ASCII revision graph

    Print a revision history alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """

    revs, expr, filematcher = getlogrevs(repo, pats, opts)
    # Graphs are drawn newest-first, so display in decreasing revision
    # order and apply --limit before walking the DAG.
    revs = sorted(revs, reverse=1)
    limit = cmdutil.loglimit(opts)
    if limit is not None:
        revs = revs[:limit]
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            # Only scan for renames up to the highest requested revision.
            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    generate(ui, revdag, displayer, showparents, asciiedges, getrenamed,
             filematcher)

def graphrevs(repo, nodes, opts):
    """Return a DAG walk over *nodes* suitable for graph display.

    Reverses *nodes* in place (newest first) and truncates to --limit
    before handing off to graphmod.nodes().
    """
    limit = cmdutil.loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)

def goutgoing(ui, repo, dest=None, **opts):
    """show the outgoing changesets alongside an ASCII revision graph

    Print the outgoing changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """

    check_unsupported_flags([], opts)
    o = hg._outgoing(ui, repo, dest, opts)
    if o is None:
        # Nothing outgoing (hg._outgoing already reported it).
        return

    revdag = graphrevs(repo, o, opts)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    generate(ui, revdag, displayer, showparents, asciiedges)

def gincoming(ui, repo, source="default", **opts):
    """show the incoming changesets alongside an ASCII revision graph

    Print the incoming changesets alongside a revision graph drawn with
    ASCII characters.

    Nodes printed as an @ character are parents of the working
    directory.
    """
    def subreporecurse():
        # Non-zero return: also recurse into subrepositories.
        return 1

    check_unsupported_flags([], opts)
    def display(other, chlist, displayer):
        # Callback invoked by hg._incoming with the remote repo and the
        # list of incoming changesets to render.
        revdag = graphrevs(other, chlist, opts)
        showparents = [ctx.node() for ctx in repo[None].parents()]
        generate(ui, revdag, displayer, showparents, asciiedges)

    hg._incoming(display, subreporecurse, ui, repo, source, opts, buffered=True)

def uisetup(ui):
    '''Initialize the extension.

    Wraps log, incoming and outgoing so that their -G/--graph option
    dispatches to the graph-drawing implementations above.
    '''
    _wrapcmd('log', commands.table, graphlog)
    _wrapcmd('incoming', commands.table, gincoming)
    _wrapcmd('outgoing', commands.table, goutgoing)

def _wrapcmd(cmd, table, wrapfn):
    '''Wrap command *cmd* in *table* so --graph redirects to *wrapfn*.

    Adds a -G/--graph flag to the command; when set, the call is routed
    to the graph implementation, otherwise the original command runs.
    '''
    def graph(orig, *args, **kwargs):
        if kwargs['graph']:
            return wrapfn(*args, **kwargs)
        return orig(*args, **kwargs)
    entry = extensions.wrapcommand(table, cmd, graph)
    entry[1].append(('G', 'graph', None, _("show the revision DAG")))
General Comments 0
You need to be logged in to leave comments. Login now